author    NeilBrown <neilb@suse.de>	2006-06-25 08:48:02 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>	2006-06-25 13:01:09 -0400
commit    7c12d81134b130ccd4c286b434ca48c4cda71a2f (patch)
tree      32f1451ef52d568d57c75da28313069dc675a206 /include/asm-i386
parent    01408c4939479ec46c15aa7ef6e2406be50eeeca (diff)
[PATCH] Make copy_from_user_inatomic NOT zero the tail on i386
As described in a previous patch and documented in mm/filemap.h,
copy_from_user_inatomic* shouldn't zero out the tail of the buffer after an
incomplete copy.
This patch implements that change for i386.
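For context, the caller-side pattern that mm/filemap.h describes looks roughly like the sketch below. This is not part of the patch; copy_into_page and its arguments are illustrative, and the kmap_atomic()/KM_USER0 calls are the 2.6-era highmem API. The point is that the destination page may already contain valid data, so the atomic copy must not zero the bytes it failed to copy.

#include <linux/highmem.h>
#include <asm/uaccess.h>

/* Hypothetical caller: try the atomic copy first, fall back to the
 * sleeping copy if it comes up short. */
static size_t copy_into_page(struct page *page, unsigned long offset,
			     const char __user *buf, unsigned bytes)
{
	char *kaddr;
	unsigned long left;

	/* Atomic mapping: a fault here fails instead of sleeping. */
	kaddr = kmap_atomic(page, KM_USER0);
	left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
	kunmap_atomic(kaddr, KM_USER0);

	if (unlikely(left)) {
		/* Short copy: retry with the version that may sleep.
		 * Because the atomic variant no longer zeroes the tail,
		 * the data already present in the page is not clobbered. */
		kaddr = kmap(page);
		left = __copy_from_user(kaddr + offset, buf, bytes);
		kunmap(page);
	}
	return bytes - left;	/* bytes actually copied */
}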
For the _nocache version, a new __copy_user_intel_nocache is defined similar
to copy_user_zeroing_intel_nocache, and this is ultimately used for the copy.
For the regular version, __copy_from_user_ll_nozero is defined, which uses
__copy_user and __copy_user_intel - the latter needs casts to reposition the
__user annotations.
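A sketch of how such a helper could look in arch/i386/lib/usercopy.c (the exact body in the commit may differ; the movsl_is_ok() check and BUG_ON() are assumed from the surrounding file):

unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
					 unsigned long n)
{
	BUG_ON((long)n < 0);
	if (movsl_is_ok(to, from, n))
		/* rep;movsl-based copy, does not zero the tail on a fault */
		__copy_user(to, from, n);
	else
		/* __copy_user_intel() is written for the copy-to-user
		 * direction, so its __user annotation sits on the wrong
		 * argument; cast to reposition it. */
		n = __copy_user_intel((void __user *)to,
				      (const void *)from, n);
	return n;
}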
If copy_from_user_inatomic is given a constant length of 1, 2, or 4, then we do
still zero the destination on failure. This didn't seem worth the effort of
fixing, as the places where it is used really don't care.
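A hypothetical caller, only to illustrate the distinction (ubuf and len are assumed to be a user pointer and a runtime-computed length; not part of the patch):

static void example_copy(const void __user *ubuf, unsigned long len)
{
	u32 word;
	char buf[64];
	unsigned long left;

	/* Constant size: expands to __get_user_size(), so 'word' is still
	 * zeroed if the user access faults. */
	left = __copy_from_user_inatomic(&word, ubuf, 4);

	/* Runtime size: goes through __copy_from_user_ll_nozero(), so the
	 * uncopied tail of 'buf' is left untouched on a fault. */
	left = __copy_from_user_inatomic(buf, ubuf,
					 len > sizeof(buf) ? sizeof(buf) : len);
}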
Signed-off-by: Neil Brown <neilb@suse.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-i386')
-rw-r--r--	include/asm-i386/uaccess.h	46
1 file changed, 34 insertions, 12 deletions
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index d0d253277be5..54d905ebc63d 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -390,8 +390,12 @@ unsigned long __must_check __copy_to_user_ll(void __user *to,
 				const void *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll(void *to,
 				const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nozero(void *to,
+				const void __user *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll_nocache(void *to,
 				const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nocache_nozero(void *to,
+				const void __user *from, unsigned long n);
 
 /*
  * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
@@ -463,11 +467,36 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
  * atomic context and will fail rather than sleep.  In this case the
  * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
  * for explanation of why this is needed.
- * FIXME this isn't implimented yet EMXIF
  */
 static __always_inline unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
+	/* Avoid zeroing the tail if the copy fails..
+	 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
+	 * but as the zeroing behaviour is only significant when n is not
+	 * constant, that shouldn't be a problem.
+	 */
+	if (__builtin_constant_p(n)) {
+		unsigned long ret;
+
+		switch (n) {
+		case 1:
+			__get_user_size(*(u8 *)to, from, 1, ret, 1);
+			return ret;
+		case 2:
+			__get_user_size(*(u16 *)to, from, 2, ret, 2);
+			return ret;
+		case 4:
+			__get_user_size(*(u32 *)to, from, 4, ret, 4);
+			return ret;
+		}
+	}
+	return __copy_from_user_ll_nozero(to, from, n);
+}
+static __always_inline unsigned long
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	might_sleep();
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
@@ -488,9 +517,10 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 
 #define ARCH_HAS_NOCACHE_UACCESS
 
-static __always_inline unsigned long __copy_from_user_inatomic_nocache(void *to,
+static __always_inline unsigned long __copy_from_user_nocache(void *to,
 				const void __user *from, unsigned long n)
 {
+	might_sleep();
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
@@ -510,17 +540,9 @@ static __always_inline unsigned long __copy_from_user_inatomic_nocache(void *to,
 }
 
 static __always_inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long n)
+__copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n)
 {
-	might_sleep();
-	return __copy_from_user_inatomic(to, from, n);
-}
-
-static __always_inline unsigned long
-__copy_from_user_nocache(void *to, const void __user *from, unsigned long n)
-{
-	might_sleep();
-	return __copy_from_user_inatomic_nocache(to, from, n);
+	return __copy_from_user_ll_nocache_nozero(to, from, n);
 }
 
 unsigned long __must_check copy_to_user(void __user *to,