Diffstat (limited to 'include/asm-i386/uaccess.h')
 include/asm-i386/uaccess.h | 69 +++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 65 insertions(+), 4 deletions(-)
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 1ec65523ea5..54d905ebc63 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -58,7 +58,7 @@ extern struct movsl_mask {
 	__chk_user_ptr(addr); \
 	asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
 		:"=&r" (flag), "=r" (sum) \
-		:"1" (addr),"g" ((int)(size)),"g" (current_thread_info()->addr_limit.seg)); \
+		:"1" (addr),"g" ((int)(size)),"rm" (current_thread_info()->addr_limit.seg)); \
 	flag; })
 
 /**
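
The only functional change in this hunk is the constraint on the last input operand, the addr_limit segment: "g" also permits an immediate, which is not a valid second operand for the cmpl that consumes it, so the constraint is tightened to "rm" (register or memory only). What the four-instruction sequence computes is easier to see in plain C. A user-space sketch, not the kernel's code; range_ok() is a hypothetical name:

	/* flag == 0 iff addr + size neither wraps around nor exceeds limit. */
	static int range_ok(unsigned long addr, unsigned long size, unsigned long limit)
	{
		unsigned long sum = addr + size;	/* addl %3,%1 */
		int flag = 0;

		if (sum < addr)		/* carry out of the add ...       */
			flag = -1;	/* ... captured by sbbl %0,%0     */
		if (limit < sum)	/* borrow from cmpl %1,%4 ...     */
			flag -= 1;	/* ... subtracted by sbbl $0,%0   */
		return flag;
	}

The sbbl carry chain lets the whole check run branch-free: flag ends up zero exactly when the add did not wrap and the sum stays within the limit.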
@@ -390,6 +390,12 @@ unsigned long __must_check __copy_to_user_ll(void __user *to,
 				const void *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll(void *to,
 				const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nozero(void *to,
+				const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nocache(void *to,
+				const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nocache_nozero(void *to,
+				const void __user *from, unsigned long n);
 
 /*
  * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
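
Read together with the inline wrappers added in the hunks below, these three declarations complete a small matrix of copy variants: whether the uncopied tail is zero-padded after a fault, and whether the copy bypasses the cache to avoid pollution. A summary read off the rest of the diff; the zero-padding behaviour of the plain nocache variant is inferred from the naming convention:

	/*
	 * inline wrapper                        dispatches to                          zero-pads tail?
	 * __copy_from_user()                    __copy_from_user_ll()                  yes
	 * __copy_from_user_inatomic()           __copy_from_user_ll_nozero()           no
	 * __copy_from_user_nocache()            __copy_from_user_ll_nocache()          yes
	 * __copy_from_user_inatomic_nocache()   __copy_from_user_ll_nocache_nozero()   no
	 */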
@@ -456,10 +462,41 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
  *
  * If some data could not be copied, this function will pad the copied
  * data to the requested size using zero bytes.
+ *
+ * An alternate version - __copy_from_user_inatomic() - may be called from
+ * atomic context and will fail rather than sleep. In this case the
+ * uncopied bytes will *NOT* be padded with zeros. See fs/filemap.h
+ * for explanation of why this is needed.
  */
 static __always_inline unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
+	/* Avoid zeroing the tail if the copy fails..
+	 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
+	 * but as the zeroing behaviour is only significant when n is not
+	 * constant, that shouldn't be a problem.
+	 */
+	if (__builtin_constant_p(n)) {
+		unsigned long ret;
+
+		switch (n) {
+		case 1:
+			__get_user_size(*(u8 *)to, from, 1, ret, 1);
+			return ret;
+		case 2:
+			__get_user_size(*(u16 *)to, from, 2, ret, 2);
+			return ret;
+		case 4:
+			__get_user_size(*(u32 *)to, from, 4, ret, 4);
+			return ret;
+		}
+	}
+	return __copy_from_user_ll_nozero(to, from, n);
+}
+static __always_inline unsigned long
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	might_sleep();
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
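
Both functions in this hunk repeat the same __builtin_constant_p() dispatch, which is worth a standalone illustration: when the compiler can prove n is 1, 2 or 4 at compile time, the call into the generic copy loop collapses to a single sized move. A minimal user-space sketch; the names are made up and memcpy() stands in for the real low-level user copy:

	#include <stdint.h>
	#include <string.h>

	/* Stand-in for the generic low-level copy (__copy_from_user_ll() in
	 * the patch); returns the number of bytes left uncopied, always 0 here. */
	static unsigned long copy_generic(void *to, const void *from, unsigned long n)
	{
		memcpy(to, from, n);
		return 0;
	}

	/* Hypothetical name; mirrors the shape of __copy_from_user_inatomic(). */
	static inline __attribute__((always_inline)) unsigned long
	copy_small(void *to, const void *from, unsigned long n)
	{
		if (__builtin_constant_p(n)) {
			switch (n) {
			case 1: *(uint8_t *)to  = *(const uint8_t *)from;  return 0;
			case 2: *(uint16_t *)to = *(const uint16_t *)from; return 0;
			case 4: *(uint32_t *)to = *(const uint32_t *)from; return 0;
			}
		}
		return copy_generic(to, from, n);
	}

	int main(void)
	{
		uint32_t src = 0x12345678, dst = 0;

		copy_small(&dst, &src, sizeof(dst));	/* collapses to one 32-bit move */
		return dst == src ? 0 : 1;
	}

Because the wrapper is always-inlined, __builtin_constant_p(n) is evaluated after inlining into each call site, so a literal size argument reliably takes the fast path.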
@@ -478,12 +515,36 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 	return __copy_from_user_ll(to, from, n);
 }
 
+#define ARCH_HAS_NOCACHE_UACCESS
+
+static __always_inline unsigned long __copy_from_user_nocache(void *to,
+				const void __user *from, unsigned long n)
+{
+	might_sleep();
+	if (__builtin_constant_p(n)) {
+		unsigned long ret;
+
+		switch (n) {
+		case 1:
+			__get_user_size(*(u8 *)to, from, 1, ret, 1);
+			return ret;
+		case 2:
+			__get_user_size(*(u16 *)to, from, 2, ret, 2);
+			return ret;
+		case 4:
+			__get_user_size(*(u32 *)to, from, 4, ret, 4);
+			return ret;
+		}
+	}
+	return __copy_from_user_ll_nocache(to, from, n);
+}
+
 static __always_inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long n)
+__copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n)
 {
-	might_sleep();
-	return __copy_from_user_inatomic(to, from, n);
+	return __copy_from_user_ll_nocache_nozero(to, from, n);
 }
+
 unsigned long __must_check copy_to_user(void __user *to,
 				const void *from, unsigned long n);
 unsigned long __must_check copy_from_user(void *to,
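
Finally, the caller pattern that the fs/filemap.h comment alludes to: attempt the atomic, non-zeroing copy with the page mapped via kmap_atomic(), and only if some bytes fault fall back to the sleeping __copy_from_user(), which may zero the uncopied tail. A sketch modeled on the filemap_copy_from_user() of 2.6-era kernels (the two-argument kmap_atomic() and the KM_USER0 slot are as they existed then), not the exact source:

	static inline size_t
	filemap_copy_from_user(struct page *page, unsigned long offset,
				const char __user *buf, unsigned bytes)
	{
		char *kaddr;
		int left;

		kaddr = kmap_atomic(page, KM_USER0);
		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
		kunmap_atomic(kaddr, KM_USER0);

		if (left != 0) {
			/* The in-atomic copy faulted: retry with the
			 * sleeping variant, which may zero the tail. */
			kaddr = kmap(page);
			left = __copy_from_user(kaddr + offset, buf, bytes);
			kunmap(page);
		}
		return bytes - left;
	}

The atomic attempt cannot take the mmap_sem or wait on page faults, which is why it must fail fast and leave the tail unwritten; the slow path then repeats the copy in a context where sleeping is allowed.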