Diffstat (limited to 'include/asm-i386/uaccess.h')

 include/asm-i386/uaccess.h | 46 ++++++++++++++++++++++++++++++++++------------
 1 file changed, 34 insertions(+), 12 deletions(-)
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index d0d253277be5..54d905ebc63d 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -390,8 +390,12 @@ unsigned long __must_check __copy_to_user_ll(void __user *to,
 				const void *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll(void *to,
 				const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nozero(void *to,
+				const void __user *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll_nocache(void *to,
 				const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nocache_nozero(void *to,
+				const void __user *from, unsigned long n);
 
 /*
  * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
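The new _nozero declarations differ from the existing low-level copies only in
their fault behaviour: __copy_from_user_ll() zero-fills whatever part of the
destination it could not copy, while the _nozero variants leave the tail
untouched and just report the remainder. A minimal sketch of the two contracts
(copy_zeroing, copy_nozero and raw_copy are hypothetical names for
illustration; the real routines live in the arch's usercopy code):

	/* Sketch only: both return the number of bytes NOT copied
	 * (0 on success); raw_copy() stands in for the real copy loop. */
	unsigned long copy_zeroing(void *to, const void __user *from,
				   unsigned long n)
	{
		unsigned long left = raw_copy(to, from, n);
		if (left)
			memset(to + n - left, 0, left);	/* pad tail with zeros */
		return left;
	}

	unsigned long copy_nozero(void *to, const void __user *from,
				  unsigned long n)
	{
		return raw_copy(to, from, n);	/* tail left as it was */
	}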
@@ -463,11 +467,36 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
  * atomic context and will fail rather than sleep.  In this case the
  * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
  * for explanation of why this is needed.
- * FIXME this isn't implimented yet EMXIF
  */
 static __always_inline unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
+	/* Avoid zeroing the tail if the copy fails..
+	 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
+	 * but as the zeroing behaviour is only significant when n is not
+	 * constant, that shouldn't be a problem.
+	 */
+	if (__builtin_constant_p(n)) {
+		unsigned long ret;
+
+		switch (n) {
+		case 1:
+			__get_user_size(*(u8 *)to, from, 1, ret, 1);
+			return ret;
+		case 2:
+			__get_user_size(*(u16 *)to, from, 2, ret, 2);
+			return ret;
+		case 4:
+			__get_user_size(*(u32 *)to, from, 4, ret, 4);
+			return ret;
+		}
+	}
+	return __copy_from_user_ll_nozero(to, from, n);
+}
+static __always_inline unsigned long
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	might_sleep();
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
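The no-zeroing guarantee matters because of how the atomic variant is used: a
caller holding an atomic kmap first attempts the copy with page faults
disabled, and on a partial copy retries the whole thing with the sleeping
variant. Zero-filling the tail in between would clobber destination bytes the
retry still intends to write. A condensed version of that caller pattern,
simplified from the fs/filemap.h style of this era (write_from_user is an
illustrative name, and error handling is trimmed):

	static unsigned long write_from_user(struct page *page, unsigned offset,
					     const char __user *buf, unsigned bytes)
	{
		char *kaddr;
		unsigned long left;

		kaddr = kmap_atomic(page, KM_USER0);
		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
		kunmap_atomic(kaddr, KM_USER0);

		if (left) {
			/* Faulted: retry with the sleeping copy.  Nothing in
			 * the destination was zeroed behind our back. */
			kaddr = kmap(page);
			left = __copy_from_user(kaddr + offset, buf, bytes);
			kunmap(page);
		}
		return left;
	}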
@@ -488,9 +517,10 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 
 #define ARCH_HAS_NOCACHE_UACCESS
 
-static __always_inline unsigned long __copy_from_user_inatomic_nocache(void *to,
+static __always_inline unsigned long __copy_from_user_nocache(void *to,
 				const void __user *from, unsigned long n)
 {
+	might_sleep();
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
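Because n is tested with __builtin_constant_p(), any caller passing a
compile-time-constant 1, 2 or 4 gets the whole copy reduced to a single
inlined __get_user_size() access (one user-space load plus its exception-table
fixup) instead of a call into the _ll routines. A hypothetical caller showing
that fast path (read_u32 is an illustrative name, not kernel API):

	/* sizeof(*dst) is a compile-time constant 4, so this compiles to
	 * one inlined user-space load with its exception fixup, not a
	 * function call. */
	static int read_u32(u32 *dst, const void __user *src)
	{
		return __copy_from_user(dst, src, sizeof(*dst)) ? -EFAULT : 0;
	}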
@@ -510,17 +540,9 @@ static __always_inline unsigned long __copy_from_user_inatomic_nocache(void *to,
 }
 
 static __always_inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long n)
+__copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n)
 {
-	might_sleep();
-	return __copy_from_user_inatomic(to, from, n);
-}
-
-static __always_inline unsigned long
-__copy_from_user_nocache(void *to, const void __user *from, unsigned long n)
-{
-	might_sleep();
-	return __copy_from_user_inatomic_nocache(to, from, n);
+	return __copy_from_user_ll_nocache_nozero(to, from, n);
 }
 
 unsigned long __must_check copy_to_user(void __user *to,
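Taken together, the patch leaves the four inline entry points paired up by
sleeping and tail-zeroing behaviour:

	entry point                         may sleep   zeroes tail on fault
	__copy_from_user                    yes         yes
	__copy_from_user_nocache            yes         yes
	__copy_from_user_inatomic           no          no *
	__copy_from_user_inatomic_nocache   no          no

	* except for the constant 1/2/4-byte cases, which still zero on
	  failure, as the added comment notes.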