Diffstat (limited to 'include/asm-i386/uaccess.h')
-rw-r--r--  include/asm-i386/uaccess.h  70
1 file changed, 65 insertions(+), 5 deletions(-)
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 371457b1ceb6..54d905ebc63d 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -4,7 +4,6 @@
 /*
  * User space memory access functions
  */
-#include <linux/config.h>
 #include <linux/errno.h>
 #include <linux/thread_info.h>
 #include <linux/prefetch.h>
@@ -59,7 +58,7 @@ extern struct movsl_mask {
 	__chk_user_ptr(addr); \
 	asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
 		:"=&r" (flag), "=r" (sum) \
-		:"1" (addr),"g" ((int)(size)),"g" (current_thread_info()->addr_limit.seg)); \
+		:"1" (addr),"g" ((int)(size)),"rm" (current_thread_info()->addr_limit.seg)); \
 	flag; })
 
 /**
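A note on the hunk above: the four-instruction asm sequence behind this macro checks that addr + size neither wraps around nor runs past the current thread's addr_limit, and yields 0 when the range is valid. A minimal C sketch of the computation (range_ok_sketch is a hypothetical name, not part of uaccess.h; the real check stays in asm so it compiles to exactly those four instructions):

	/* Illustrative C reading of "addl ; sbbl ; cmpl ; sbbl" above.
	 * range_ok_sketch() does not exist in the kernel; it only mirrors
	 * the flag value the asm leaves behind. */
	static inline int range_ok_sketch(unsigned long addr, unsigned long size,
					  unsigned long limit)
	{
		unsigned long sum = addr + size;

		if (sum < addr)		/* addl/sbbl pair: addr + size wrapped */
			return -1;
		if (limit < sum)	/* cmpl/sbbl pair: end past addr_limit */
			return -1;
		return 0;		/* 0 == range is accessible */
	}

The operand change itself is narrow: "g" lets the compiler pick a register, memory slot, or immediate for the addr_limit.seg input, while "rm" restricts it to a register or memory slot.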
@@ -391,6 +390,12 @@ unsigned long __must_check __copy_to_user_ll(void __user *to,
 			const void *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll(void *to,
 			const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nozero(void *to,
+			const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nocache(void *to,
+			const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nocache_nozero(void *to,
+			const void __user *from, unsigned long n);
 
 /*
  * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
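The three declarations added above follow a suffix convention: _nozero variants leave the destination's tail untouched when the copy faults partway through (instead of zero-padding it, as the plain variants do), and _nocache variants perform the copy without pulling the destination into the CPU cache. A small illustration of the zeroing contract, with hypothetical buf and uptr, assuming the user mapping faults after 10 of 16 bytes:

	char buf[16];			/* holds stale kernel data */
	unsigned long left;		/* bytes NOT copied */

	left = __copy_from_user_ll(buf, uptr, 16);
	/* left == 6; buf[10..15] were zero-padded */

	left = __copy_from_user_ll_nozero(buf, uptr, 16);
	/* left == 6; buf[10..15] still hold their previous contents */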
@@ -457,10 +462,41 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
  *
  * If some data could not be copied, this function will pad the copied
  * data to the requested size using zero bytes.
+ *
+ * An alternate version - __copy_from_user_inatomic() - may be called from
+ * atomic context and will fail rather than sleep.  In this case the
+ * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
+ * for explanation of why this is needed.
  */
 static __always_inline unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
+	/* Avoid zeroing the tail if the copy fails..
+	 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
+	 * but as the zeroing behaviour is only significant when n is not
+	 * constant, that shouldn't be a problem.
+	 */
+	if (__builtin_constant_p(n)) {
+		unsigned long ret;
+
+		switch (n) {
+		case 1:
+			__get_user_size(*(u8 *)to, from, 1, ret, 1);
+			return ret;
+		case 2:
+			__get_user_size(*(u16 *)to, from, 2, ret, 2);
+			return ret;
+		case 4:
+			__get_user_size(*(u32 *)to, from, 4, ret, 4);
+			return ret;
+		}
+	}
+	return __copy_from_user_ll_nozero(to, from, n);
+}
+static __always_inline unsigned long
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	might_sleep();
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
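The comment block added in this hunk points at fs/filemap.h for the motivating caller. Sketched from that pattern (page, offset, buf and bytes are stand-in names, not code from this diff): the write path takes an atomic kmap, attempts the non-sleeping copy, and only falls back to the sleeping variant, which may take a page fault, once the atomic section is over:

	kaddr = kmap_atomic(page, KM_USER0);
	left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
	kunmap_atomic(kaddr, KM_USER0);

	if (left) {
		/* Faulted while atomic: retry with the sleeping copy,
		 * which is allowed to fault the user page in.  Since the
		 * inatomic variant no longer zero-pads, the page keeps
		 * whatever data the partial copy left behind. */
		kaddr = kmap(page);
		left = __copy_from_user(kaddr + offset, buf, bytes);
		kunmap(page);
	}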
@@ -479,12 +515,36 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 	return __copy_from_user_ll(to, from, n);
 }
 
+#define ARCH_HAS_NOCACHE_UACCESS
+
+static __always_inline unsigned long __copy_from_user_nocache(void *to,
+				const void __user *from, unsigned long n)
+{
+	might_sleep();
+	if (__builtin_constant_p(n)) {
+		unsigned long ret;
+
+		switch (n) {
+		case 1:
+			__get_user_size(*(u8 *)to, from, 1, ret, 1);
+			return ret;
+		case 2:
+			__get_user_size(*(u16 *)to, from, 2, ret, 2);
+			return ret;
+		case 4:
+			__get_user_size(*(u32 *)to, from, 4, ret, 4);
+			return ret;
+		}
+	}
+	return __copy_from_user_ll_nocache(to, from, n);
+}
+
 static __always_inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long n)
+__copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n)
 {
-	might_sleep();
-	return __copy_from_user_inatomic(to, from, n);
+	return __copy_from_user_ll_nocache_nozero(to, from, n);
 }
+
 unsigned long __must_check copy_to_user(void __user *to,
 					const void *from, unsigned long n);
 unsigned long __must_check copy_from_user(void *to,
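ARCH_HAS_NOCACHE_UACCESS, defined in the last hunk, advertises that this architecture provides the _nocache copy family; the backing __copy_from_user_ll_nocache* routines live outside this header, in the i386 usercopy implementation, and are expected to use non-temporal stores where available so that a large copy does not evict the caller's working set from the cache. A hedged usage sketch for a process-context caller filling a large, write-once buffer (dst, src and len are stand-ins):

	/* may sleep; returns the number of bytes left uncopied */
	if (__copy_from_user_nocache(dst, src, len))
		return -EFAULT;

In atomic context __copy_from_user_inatomic_nocache() applies instead; like __copy_from_user_inatomic() it neither sleeps nor zero-pads the uncopied tail.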