author    NeilBrown <neilb@suse.de>  2006-06-25 08:48:02 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-06-25 13:01:09 -0400
commit    7c12d81134b130ccd4c286b434ca48c4cda71a2f (patch)
tree      32f1451ef52d568d57c75da28313069dc675a206
parent    01408c4939479ec46c15aa7ef6e2406be50eeeca (diff)
[PATCH] Make copy_from_user_inatomic NOT zero the tail on i386
As described in a previous patch and documented in mm/filemap.h, copy_from_user_inatomic* shouldn't zero out the tail of the buffer after an incomplete copy.

This patch implements that change for i386.

For the _nocache version, a new __copy_user_intel_nocache is defined, similar to __copy_user_zeroing_intel_nocache, and this is ultimately used for the copy.

For the regular version, __copy_from_user_ll_nozero is defined, which uses __copy_user and __copy_user_intel - the latter needs casts to reposition the __user annotations.

If __copy_from_user_inatomic is given a constant length of 1, 2, or 4, then we do still zero the destination on failure.  This didn't seem worth the effort of fixing, as the places where it is used really don't care.

Signed-off-by: Neil Brown <neilb@suse.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  arch/i386/lib/usercopy.c    119
-rw-r--r--  include/asm-i386/uaccess.h   46
2 files changed, 153 insertions, 12 deletions
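
For context, a minimal caller-side sketch (not part of this patch; the function name example_read_from_user is hypothetical) of what the non-zeroing semantics imply: a caller copying from atomic context must check how many bytes were left uncopied and fall back to a sleeping copy, rather than relying on the destination tail having been zero-filled.

#include <linux/errno.h>
#include <asm/uaccess.h>

/* Hypothetical example, not from this patch: retry a short atomic copy. */
static int example_read_from_user(void *dst, const void __user *src,
				  unsigned long len)
{
	unsigned long left;

	/* Atomic-context copy: never sleeps, may return uncopied bytes. */
	left = __copy_from_user_inatomic(dst, src, len);
	if (left) {
		/*
		 * The tail of 'dst' is no longer zero-filled for us, so do
		 * not use it; redo the copy where sleeping is allowed.
		 */
		left = __copy_from_user(dst, src, len);
		if (left)
			return -EFAULT;
	}
	return 0;
}
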
diff --git a/arch/i386/lib/usercopy.c b/arch/i386/lib/usercopy.c
index 6979297ce278..c5aa65f7c02a 100644
--- a/arch/i386/lib/usercopy.c
+++ b/arch/i386/lib/usercopy.c
@@ -528,6 +528,97 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
 	return size;
 }
 
+static unsigned long __copy_user_intel_nocache(void *to,
+				const void __user *from, unsigned long size)
+{
+	int d0, d1;
+
+	__asm__ __volatile__(
+		" .align 2,0x90\n"
+		"0: movl 32(%4), %%eax\n"
+		" cmpl $67, %0\n"
+		" jbe 2f\n"
+		"1: movl 64(%4), %%eax\n"
+		" .align 2,0x90\n"
+		"2: movl 0(%4), %%eax\n"
+		"21: movl 4(%4), %%edx\n"
+		" movnti %%eax, 0(%3)\n"
+		" movnti %%edx, 4(%3)\n"
+		"3: movl 8(%4), %%eax\n"
+		"31: movl 12(%4),%%edx\n"
+		" movnti %%eax, 8(%3)\n"
+		" movnti %%edx, 12(%3)\n"
+		"4: movl 16(%4), %%eax\n"
+		"41: movl 20(%4), %%edx\n"
+		" movnti %%eax, 16(%3)\n"
+		" movnti %%edx, 20(%3)\n"
+		"10: movl 24(%4), %%eax\n"
+		"51: movl 28(%4), %%edx\n"
+		" movnti %%eax, 24(%3)\n"
+		" movnti %%edx, 28(%3)\n"
+		"11: movl 32(%4), %%eax\n"
+		"61: movl 36(%4), %%edx\n"
+		" movnti %%eax, 32(%3)\n"
+		" movnti %%edx, 36(%3)\n"
+		"12: movl 40(%4), %%eax\n"
+		"71: movl 44(%4), %%edx\n"
+		" movnti %%eax, 40(%3)\n"
+		" movnti %%edx, 44(%3)\n"
+		"13: movl 48(%4), %%eax\n"
+		"81: movl 52(%4), %%edx\n"
+		" movnti %%eax, 48(%3)\n"
+		" movnti %%edx, 52(%3)\n"
+		"14: movl 56(%4), %%eax\n"
+		"91: movl 60(%4), %%edx\n"
+		" movnti %%eax, 56(%3)\n"
+		" movnti %%edx, 60(%3)\n"
+		" addl $-64, %0\n"
+		" addl $64, %4\n"
+		" addl $64, %3\n"
+		" cmpl $63, %0\n"
+		" ja 0b\n"
+		" sfence \n"
+		"5: movl %0, %%eax\n"
+		" shrl $2, %0\n"
+		" andl $3, %%eax\n"
+		" cld\n"
+		"6: rep; movsl\n"
+		" movl %%eax,%0\n"
+		"7: rep; movsb\n"
+		"8:\n"
+		".section .fixup,\"ax\"\n"
+		"9: lea 0(%%eax,%0,4),%0\n"
+		"16: jmp 8b\n"
+		".previous\n"
+		".section __ex_table,\"a\"\n"
+		" .align 4\n"
+		" .long 0b,16b\n"
+		" .long 1b,16b\n"
+		" .long 2b,16b\n"
+		" .long 21b,16b\n"
+		" .long 3b,16b\n"
+		" .long 31b,16b\n"
+		" .long 4b,16b\n"
+		" .long 41b,16b\n"
+		" .long 10b,16b\n"
+		" .long 51b,16b\n"
+		" .long 11b,16b\n"
+		" .long 61b,16b\n"
+		" .long 12b,16b\n"
+		" .long 71b,16b\n"
+		" .long 13b,16b\n"
+		" .long 81b,16b\n"
+		" .long 14b,16b\n"
+		" .long 91b,16b\n"
+		" .long 6b,9b\n"
+		" .long 7b,16b\n"
+		".previous"
+		: "=&c"(size), "=&D" (d0), "=&S" (d1)
+		: "1"(to), "2"(from), "0"(size)
+		: "eax", "edx", "memory");
+	return size;
+}
+
 #else
 
 /*
@@ -694,6 +785,19 @@ unsigned long __copy_from_user_ll(void *to, const void __user *from,
 }
 EXPORT_SYMBOL(__copy_from_user_ll);
 
+unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
+					 unsigned long n)
+{
+	BUG_ON((long)n < 0);
+	if (movsl_is_ok(to, from, n))
+		__copy_user(to, from, n);
+	else
+		n = __copy_user_intel((void __user *)to,
+				      (const void *)from, n);
+	return n;
+}
+EXPORT_SYMBOL(__copy_from_user_ll_nozero);
+
 unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
 					unsigned long n)
 {
@@ -709,6 +813,21 @@ unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
 	return n;
 }
 
+unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
+					unsigned long n)
+{
+	BUG_ON((long)n < 0);
+#ifdef CONFIG_X86_INTEL_USERCOPY
+	if ( n > 64 && cpu_has_xmm2)
+		n = __copy_user_intel_nocache(to, from, n);
+	else
+		__copy_user(to, from, n);
+#else
+	__copy_user(to, from, n);
+#endif
+	return n;
+}
+
 /**
  * copy_to_user: - Copy a block of data into user space.
  * @to:   Destination address, in user space.
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index d0d253277be5..54d905ebc63d 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -390,8 +390,12 @@ unsigned long __must_check __copy_to_user_ll(void __user *to,
 					const void *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll(void *to,
 					const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nozero(void *to,
+				const void __user *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll_nocache(void *to,
 				const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nocache_nozero(void *to,
+				const void __user *from, unsigned long n);
 
 /*
  * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
@@ -463,11 +467,36 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
  * atomic context and will fail rather than sleep.  In this case the
  * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
  * for explanation of why this is needed.
- * FIXME this isn't implimented yet EMXIF
  */
 static __always_inline unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
+	/* Avoid zeroing the tail if the copy fails..
+	 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
+	 * but as the zeroing behaviour is only significant when n is not
+	 * constant, that shouldn't be a problem.
+	 */
+	if (__builtin_constant_p(n)) {
+		unsigned long ret;
+
+		switch (n) {
+		case 1:
+			__get_user_size(*(u8 *)to, from, 1, ret, 1);
+			return ret;
+		case 2:
+			__get_user_size(*(u16 *)to, from, 2, ret, 2);
+			return ret;
+		case 4:
+			__get_user_size(*(u32 *)to, from, 4, ret, 4);
+			return ret;
+		}
+	}
+	return __copy_from_user_ll_nozero(to, from, n);
+}
+static __always_inline unsigned long
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	might_sleep();
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
@@ -488,9 +517,10 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 
 #define ARCH_HAS_NOCACHE_UACCESS
 
-static __always_inline unsigned long __copy_from_user_inatomic_nocache(void *to,
+static __always_inline unsigned long __copy_from_user_nocache(void *to,
 				const void __user *from, unsigned long n)
 {
+	might_sleep();
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
@@ -510,17 +540,9 @@ static __always_inline unsigned long __copy_from_user_inatomic_nocache(void *to,
 }
 
 static __always_inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long n)
+__copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n)
 {
-	might_sleep();
-	return __copy_from_user_inatomic(to, from, n);
-}
-
-static __always_inline unsigned long
-__copy_from_user_nocache(void *to, const void __user *from, unsigned long n)
-{
-	might_sleep();
-	return __copy_from_user_inatomic_nocache(to, from, n);
+	return __copy_from_user_ll_nocache_nozero(to, from, n);
 }
 
 unsigned long __must_check copy_to_user(void __user *to,