Diffstat (limited to 'include/asm-i386/uaccess.h')
-rw-r--r--  include/asm-i386/uaccess.h | 35
1 file changed, 34 insertions(+), 1 deletion(-)
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 1ec65523ea5e..8462f8e0e658 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -58,7 +58,7 @@ extern struct movsl_mask {
 	__chk_user_ptr(addr); \
 	asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
 		:"=&r" (flag), "=r" (sum) \
-		:"1" (addr),"g" ((int)(size)),"g" (current_thread_info()->addr_limit.seg)); \
+		:"1" (addr),"g" ((int)(size)),"rm" (current_thread_info()->addr_limit.seg)); \
 	flag; })
 
 /**
@@ -390,6 +390,8 @@ unsigned long __must_check __copy_to_user_ll(void __user *to,
 				const void *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll(void *to,
 				const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nocache(void *to,
+				const void __user *from, unsigned long n);
 
 /*
  * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
@@ -478,12 +480,43 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 	return __copy_from_user_ll(to, from, n);
 }
 
+#define ARCH_HAS_NOCACHE_UACCESS
+
+static __always_inline unsigned long __copy_from_user_inatomic_nocache(void *to,
+				const void __user *from, unsigned long n)
+{
+	if (__builtin_constant_p(n)) {
+		unsigned long ret;
+
+		switch (n) {
+		case 1:
+			__get_user_size(*(u8 *)to, from, 1, ret, 1);
+			return ret;
+		case 2:
+			__get_user_size(*(u16 *)to, from, 2, ret, 2);
+			return ret;
+		case 4:
+			__get_user_size(*(u32 *)to, from, 4, ret, 4);
+			return ret;
+		}
+	}
+	return __copy_from_user_ll_nocache(to, from, n);
+}
+
 static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	might_sleep();
 	return __copy_from_user_inatomic(to, from, n);
 }
+
+static __always_inline unsigned long
+__copy_from_user_nocache(void *to, const void __user *from, unsigned long n)
+{
+	might_sleep();
+	return __copy_from_user_inatomic_nocache(to, from, n);
+}
+
 unsigned long __must_check copy_to_user(void __user *to,
 					const void *from, unsigned long n);
 unsigned long __must_check copy_from_user(void *to,
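For context beyond the patch itself, here is a minimal usage sketch of the new __copy_from_user_nocache() helper. It is not part of the diff; the driver, the function name my_dev_write(), and the buffer handling are illustrative assumptions. The idea is that the nocache variant (via __copy_from_user_ll_nocache()) can use non-temporal stores, which suits write paths that stage user data the kernel itself will not read again soon.

/* Hypothetical write() handler (illustrative only, not from the patch). */
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>

static ssize_t my_dev_write(struct file *file, const char __user *ubuf,
			    size_t count, loff_t *ppos)
{
	void *buf;

	/* The double-underscore helpers assume the range was already checked. */
	if (!access_ok(VERIFY_READ, ubuf, count))
		return -EFAULT;

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* May sleep; uses non-temporal stores where the CPU supports them,
	 * so the copied payload does not displace useful cache lines. */
	if (__copy_from_user_nocache(buf, ubuf, count)) {
		kfree(buf);
		return -EFAULT;
	}

	/* ... hand buf off to the device here ... */

	kfree(buf);
	return count;
}

Like __copy_from_user(), the nocache variant returns the number of bytes left uncopied, so a nonzero return indicates a fault.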
