author | Stephen Rothwell <sfr@canb.auug.org.au> | 2005-11-01 15:53:19 +1100
---|---|---
committer | Stephen Rothwell <sfr@canb.auug.org.au> | 2005-11-01 15:53:19 +1100
commit | 48fe4871569f019c653efb95b26dda976f84c468 (patch) |
tree | 3958d2a6d88fde19e73e9a59b6b85b1e551f793e /include |
parent | 3c4cf5ee5a9224a800a74b5dfcb435550ed30737 (diff) |
download | kernel_samsung_aries-48fe4871569f019c653efb95b26dda976f84c468.zip, kernel_samsung_aries-48fe4871569f019c653efb95b26dda976f84c468.tar.gz, kernel_samsung_aries-48fe4871569f019c653efb95b26dda976f84c468.tar.bz2 |
powerpc: clean up uaccess.h
Use the best from each architecture.
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Diffstat (limited to 'include')
-rw-r--r-- | include/asm-powerpc/uaccess.h | 29 |
1 file changed, 8 insertions, 21 deletions
diff --git a/include/asm-powerpc/uaccess.h b/include/asm-powerpc/uaccess.h
index 035338b..33af730 100644
--- a/include/asm-powerpc/uaccess.h
+++ b/include/asm-powerpc/uaccess.h
@@ -115,10 +115,8 @@ struct exception_table_entry {
 #define __put_user64(x, ptr) __put_user(x, ptr)
 #endif
 
-#ifdef __powerpc64__
 #define __get_user_unaligned __get_user
 #define __put_user_unaligned __put_user
-#endif
 
 extern long __put_user_bad(void);
 
@@ -333,9 +331,6 @@ extern inline unsigned long copy_to_user(void __user *to,
 	return n;
 }
 
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
 #else /* __powerpc64__ */
 
 #define __copy_in_user(to, from, size) \
@@ -348,6 +343,8 @@ extern unsigned long copy_to_user(void __user *to, const void *from,
 extern unsigned long copy_in_user(void __user *to, const void __user *from,
 		unsigned long n);
 
+#endif /* __powerpc64__ */
+
 static inline unsigned long __copy_from_user_inatomic(void *to,
 		const void __user *from, unsigned long n)
 {
@@ -368,9 +365,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
 			__get_user_size(*(u64 *)to, from, 8, ret);
 			break;
 		}
-		return (ret == -EFAULT) ? n : 0;
+		if (ret == 0)
+			return 0;
 	}
-	return __copy_tofrom_user((__force void __user *) to, from, n);
+	return __copy_tofrom_user((__force void __user *)to, from, n);
 }
 
 static inline unsigned long __copy_to_user_inatomic(void __user *to,
@@ -393,33 +391,24 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
 			__put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
 			break;
 		}
-		return (ret == -EFAULT) ? n : 0;
+		if (ret == 0)
+			return 0;
 	}
-	return __copy_tofrom_user(to, (__force const void __user *) from, n);
+	return __copy_tofrom_user(to, (__force const void __user *)from, n);
 }
 
-#endif /* __powerpc64__ */
-
 static inline unsigned long __copy_from_user(void *to,
 		const void __user *from, unsigned long size)
 {
 	might_sleep();
-#ifndef __powerpc64__
-	return __copy_tofrom_user((__force void __user *)to, from, size);
-#else /* __powerpc64__ */
 	return __copy_from_user_inatomic(to, from, size);
-#endif /* __powerpc64__ */
 }
 
 static inline unsigned long __copy_to_user(void __user *to,
 		const void *from, unsigned long size)
 {
 	might_sleep();
-#ifndef __powerpc64__
-	return __copy_tofrom_user(to, (__force void __user *)from, size);
-#else /* __powerpc64__ */
 	return __copy_to_user_inatomic(to, from, size);
-#endif /* __powerpc64__ */
 }
 
 extern unsigned long __clear_user(void __user *addr, unsigned long size);
 
@@ -429,12 +418,10 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
 	might_sleep();
 	if (likely(access_ok(VERIFY_WRITE, addr, size)))
 		return __clear_user(addr, size);
-#ifndef __powerpc64__
 	if ((unsigned long)addr < TASK_SIZE) {
 		unsigned long over = (unsigned long)addr + size - TASK_SIZE;
 		return __clear_user(addr, size - over) + over;
 	}
-#endif /* __powerpc64__ */
 	return size;
 }
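The restructured __copy_from_user_inatomic()/__copy_to_user_inatomic() helpers keep a fast path for compile-time-constant sizes of 1, 2, 4 or 8 bytes (a single __get_user_size()/__put_user_size() access) and fall back to __copy_tofrom_user() for everything else, including when the fast path faults. The following is a minimal userspace sketch of that dispatch pattern, not the kernel code itself: copy_inatomic_sketch() and generic_copy() are hypothetical names, generic_copy() merely stands in for __copy_tofrom_user(), and no fault handling is modeled.

```c
#include <stdint.h>
#include <string.h>

/* Stand-in for __copy_tofrom_user(): returns bytes NOT copied (0 on success). */
static unsigned long generic_copy(void *to, const void *from, unsigned long n)
{
	memcpy(to, from, n);
	return 0;
}

/*
 * Sketch of the constant-size dispatch used by the patched helpers:
 * a known small size is inlined as one fixed-width load/store, anything
 * else goes to the generic copy routine. __builtin_constant_p() is a
 * GCC/Clang builtin, as used by the real uaccess macros.
 */
static inline unsigned long copy_inatomic_sketch(void *to, const void *from,
						 unsigned long n)
{
	if (__builtin_constant_p(n) && (n == 1 || n == 2 || n == 4 || n == 8)) {
		switch (n) {
		case 1: *(uint8_t *)to  = *(const uint8_t *)from;  return 0;
		case 2: *(uint16_t *)to = *(const uint16_t *)from; return 0;
		case 4: *(uint32_t *)to = *(const uint32_t *)from; return 0;
		case 8: *(uint64_t *)to = *(const uint64_t *)from; return 0;
		}
	}
	/* Slow path: variable or large size. */
	return generic_copy(to, from, n);
}

int main(void)
{
	uint32_t src = 0x12345678, dst = 0;
	/* sizeof(dst) is a compile-time constant, so this takes the fast path. */
	unsigned long left = copy_inatomic_sketch(&dst, &src, sizeof(dst));
	return (left == 0 && dst == src) ? 0 : 1;
}
```

In the real header the fast path additionally checks that the kernel pointer is naturally aligned, and a faulting access makes the helper fall through to __copy_tofrom_user() so the caller still gets back the count of uncopied bytes.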