author    Stephen Rothwell <sfr@canb.auug.org.au>  2005-10-31 23:53:19 -0500
committer Stephen Rothwell <sfr@canb.auug.org.au>  2005-10-31 23:53:19 -0500
commit    48fe4871569f019c653efb95b26dda976f84c468
tree      3958d2a6d88fde19e73e9a59b6b85b1e551f793e  /include/asm-powerpc/uaccess.h
parent    3c4cf5ee5a9224a800a74b5dfcb435550ed30737
powerpc: clean up uaccess.h
Use the best from each architecture.
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Diffstat (limited to 'include/asm-powerpc/uaccess.h')
 include/asm-powerpc/uaccess.h | 29 ++++++++---------------------
 1 file changed, 8 insertions(+), 21 deletions(-)
diff --git a/include/asm-powerpc/uaccess.h b/include/asm-powerpc/uaccess.h
index 035338b0c5ee..33af730f0d19 100644
--- a/include/asm-powerpc/uaccess.h
+++ b/include/asm-powerpc/uaccess.h
@@ -115,10 +115,8 @@ struct exception_table_entry {
 #define __put_user64(x, ptr) __put_user(x, ptr)
 #endif
 
-#ifdef __powerpc64__
 #define __get_user_unaligned __get_user
 #define __put_user_unaligned __put_user
-#endif
 
 extern long __put_user_bad(void);
 
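The hunk above lifts __get_user_unaligned/__put_user_unaligned out of the 64-bit-only block, so generic code can use the same spelling on 32-bit powerpc as well. Since the plain accessors already cope with misaligned user pointers on this architecture, the aliases cost nothing. A hypothetical caller, for illustration only (the function name is invented, not part of the patch):

/* Hypothetical caller: fetch a u32 from a user pointer that may not be
 * 4-byte aligned.  After this patch the same spelling compiles on both
 * 32-bit and 64-bit powerpc, because __get_user_unaligned is now
 * unconditionally an alias for __get_user. */
static inline int fetch_unaligned_u32(u32 *dst, const u32 __user *src)
{
	return __get_user_unaligned(*dst, src);	/* 0 on success, -EFAULT on fault */
}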
@@ -333,9 +331,6 @@ extern inline unsigned long copy_to_user(void __user *to,
 	return n;
 }
 
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
 #else /* __powerpc64__ */
 
 #define __copy_in_user(to, from, size) \
@@ -348,6 +343,8 @@ extern unsigned long copy_to_user(void __user *to, const void *from,
 extern unsigned long copy_in_user(void __user *to, const void __user *from,
 		unsigned long n);
 
+#endif /* __powerpc64__ */
+
 static inline unsigned long __copy_from_user_inatomic(void *to,
 		const void __user *from, unsigned long n)
 {
@@ -368,9 +365,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
 			__get_user_size(*(u64 *)to, from, 8, ret);
 			break;
 		}
-		return (ret == -EFAULT) ? n : 0;
+		if (ret == 0)
+			return 0;
 	}
-	return __copy_tofrom_user((__force void __user *) to, from, n);
+	return __copy_tofrom_user((__force void __user *)to, from, n);
 }
 
 static inline unsigned long __copy_to_user_inatomic(void __user *to,
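Taken together with the two preceding hunks, this makes __copy_from_user_inatomic() common to both word sizes and changes its failure mode: where the old 64-bit code reported all n bytes as uncopied on any fault, the new code falls through to __copy_tofrom_user(), which returns the exact number of bytes left uncopied. For reference, a sketch of the whole helper as it should read after the patch; the constant-size dispatch above the visible context is assumed to follow the usual __builtin_constant_p() pattern:

static inline unsigned long __copy_from_user_inatomic(void *to,
		const void __user *from, unsigned long n)
{
	/* Fast path: a compile-time-constant size of at most 8 bytes
	 * becomes a single inline __get_user_size() access (assumed
	 * from the surrounding file; only the tail appears in the hunk). */
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
		/* On a fault, fall through and let the general routine
		 * compute the exact residue instead of reporting n. */
	}
	return __copy_tofrom_user((__force void __user *)to, from, n);
}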
@@ -393,33 +391,24 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
 			__put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
 			break;
 		}
-		return (ret == -EFAULT) ? n : 0;
+		if (ret == 0)
+			return 0;
 	}
-	return __copy_tofrom_user(to, (__force const void __user *) from, n);
+	return __copy_tofrom_user(to, (__force const void __user *)from, n);
 }
 
-#endif /* __powerpc64__ */
-
 static inline unsigned long __copy_from_user(void *to,
 		const void __user *from, unsigned long size)
 {
 	might_sleep();
-#ifndef __powerpc64__
-	return __copy_tofrom_user((__force void __user *)to, from, size);
-#else /* __powerpc64__ */
 	return __copy_from_user_inatomic(to, from, size);
-#endif /* __powerpc64__ */
 }
 
 static inline unsigned long __copy_to_user(void __user *to,
 		const void *from, unsigned long size)
 {
 	might_sleep();
-#ifndef __powerpc64__
-	return __copy_tofrom_user(to, (__force void __user *)from, size);
-#else /* __powerpc64__ */
 	return __copy_to_user_inatomic(to, from, size);
-#endif /* __powerpc64__ */
 }
 
 extern unsigned long __clear_user(void __user *addr, unsigned long size);
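With the #ifndef/#else ladders gone, the sleeping variants are identical on both word sizes: might_sleep() plus the shared inatomic helper. Callers see no difference; a hypothetical driver path (struct foo_config and foo_apply() are invented names, for illustration only) still reads:

/* Hypothetical caller, for illustration only. */
static long foo_set_config(void __user *argp)
{
	struct foo_config cfg;

	/* copy_from_user() returns the number of bytes NOT copied,
	 * so any nonzero residue means the user buffer was bad. */
	if (copy_from_user(&cfg, argp, sizeof(cfg)))
		return -EFAULT;

	return foo_apply(&cfg);
}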
@@ -429,12 +418,10 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
 	might_sleep();
 	if (likely(access_ok(VERIFY_WRITE, addr, size)))
 		return __clear_user(addr, size);
-#ifndef __powerpc64__
 	if ((unsigned long)addr < TASK_SIZE) {
 		unsigned long over = (unsigned long)addr + size - TASK_SIZE;
 		return __clear_user(addr, size - over) + over;
 	}
-#endif /* __powerpc64__ */
 	return size;
 }
 
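Finally, the TASK_SIZE overrun recovery in clear_user(), previously compiled only on 32-bit, now applies to both builds: if access_ok() fails but the range starts below TASK_SIZE, the kernel clears the in-range part and reports the overhang as unfinished. A minimal user-space sketch of just that arithmetic; the TASK_SIZE value and the explicit crossing test are assumptions for the demo (in the kernel, the crossing is implied by the failed access_ok()):

#include <stdio.h>

#define TASK_SIZE 0xc0000000UL	/* made-up boundary for the demo */

/* Mirrors the recovery arithmetic in clear_user(): clear what lies
 * below TASK_SIZE, report the overhanging bytes as not cleared. */
static unsigned long clear_user_residue(unsigned long addr, unsigned long size)
{
	if (addr < TASK_SIZE && addr + size > TASK_SIZE) {
		unsigned long over = addr + size - TASK_SIZE;
		/* the kernel would call __clear_user(addr, size - over)
		 * here and add its residue (0 on success) to 'over' */
		return over;
	}
	return size;	/* range entirely out of bounds: nothing cleared */
}

int main(void)
{
	/* A 256-byte clear starting 64 bytes below the boundary:
	 * 64 bytes get cleared, 192 are reported back. */
	printf("residue = %lu\n", clear_user_residue(TASK_SIZE - 64, 256));
	return 0;
}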