Diffstat (limited to 'arch/i386/lib/usercopy.c')
-rw-r--r--  arch/i386/lib/usercopy.c  137
1 files changed, 129 insertions, 8 deletions
diff --git a/arch/i386/lib/usercopy.c b/arch/i386/lib/usercopy.c
index 4cf981d70f45..6979297ce278 100644
--- a/arch/i386/lib/usercopy.c
+++ b/arch/i386/lib/usercopy.c
@@ -425,15 +425,121 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
 	       : "eax", "edx", "memory");
 	return size;
 }
+
+/*
+ * Non Temporal Hint version of __copy_user_zeroing_intel. It is cache aware.
+ * hyoshiok@miraclelinux.com
+ */
+
+static unsigned long __copy_user_zeroing_intel_nocache(void *to,
+				const void __user *from, unsigned long size)
+{
+	int d0, d1;
+
+	__asm__ __volatile__(
+	       " .align 2,0x90\n"
+	       "0: movl 32(%4), %%eax\n"
+	       " cmpl $67, %0\n"
+	       " jbe 2f\n"
+	       "1: movl 64(%4), %%eax\n"
+	       " .align 2,0x90\n"
+	       "2: movl 0(%4), %%eax\n"
+	       "21: movl 4(%4), %%edx\n"
+	       " movnti %%eax, 0(%3)\n"
+	       " movnti %%edx, 4(%3)\n"
+	       "3: movl 8(%4), %%eax\n"
+	       "31: movl 12(%4),%%edx\n"
+	       " movnti %%eax, 8(%3)\n"
+	       " movnti %%edx, 12(%3)\n"
+	       "4: movl 16(%4), %%eax\n"
+	       "41: movl 20(%4), %%edx\n"
+	       " movnti %%eax, 16(%3)\n"
+	       " movnti %%edx, 20(%3)\n"
+	       "10: movl 24(%4), %%eax\n"
+	       "51: movl 28(%4), %%edx\n"
+	       " movnti %%eax, 24(%3)\n"
+	       " movnti %%edx, 28(%3)\n"
+	       "11: movl 32(%4), %%eax\n"
+	       "61: movl 36(%4), %%edx\n"
+	       " movnti %%eax, 32(%3)\n"
+	       " movnti %%edx, 36(%3)\n"
+	       "12: movl 40(%4), %%eax\n"
+	       "71: movl 44(%4), %%edx\n"
+	       " movnti %%eax, 40(%3)\n"
+	       " movnti %%edx, 44(%3)\n"
+	       "13: movl 48(%4), %%eax\n"
+	       "81: movl 52(%4), %%edx\n"
+	       " movnti %%eax, 48(%3)\n"
+	       " movnti %%edx, 52(%3)\n"
+	       "14: movl 56(%4), %%eax\n"
+	       "91: movl 60(%4), %%edx\n"
+	       " movnti %%eax, 56(%3)\n"
+	       " movnti %%edx, 60(%3)\n"
+	       " addl $-64, %0\n"
+	       " addl $64, %4\n"
+	       " addl $64, %3\n"
+	       " cmpl $63, %0\n"
+	       " ja 0b\n"
+	       " sfence \n"
+	       "5: movl %0, %%eax\n"
+	       " shrl $2, %0\n"
+	       " andl $3, %%eax\n"
+	       " cld\n"
+	       "6: rep; movsl\n"
+	       " movl %%eax,%0\n"
+	       "7: rep; movsb\n"
+	       "8:\n"
+	       ".section .fixup,\"ax\"\n"
+	       "9: lea 0(%%eax,%0,4),%0\n"
+	       "16: pushl %0\n"
+	       " pushl %%eax\n"
+	       " xorl %%eax,%%eax\n"
+	       " rep; stosb\n"
+	       " popl %%eax\n"
+	       " popl %0\n"
+	       " jmp 8b\n"
+	       ".previous\n"
+	       ".section __ex_table,\"a\"\n"
+	       " .align 4\n"
+	       " .long 0b,16b\n"
+	       " .long 1b,16b\n"
+	       " .long 2b,16b\n"
+	       " .long 21b,16b\n"
+	       " .long 3b,16b\n"
+	       " .long 31b,16b\n"
+	       " .long 4b,16b\n"
+	       " .long 41b,16b\n"
+	       " .long 10b,16b\n"
+	       " .long 51b,16b\n"
+	       " .long 11b,16b\n"
+	       " .long 61b,16b\n"
+	       " .long 12b,16b\n"
+	       " .long 71b,16b\n"
+	       " .long 13b,16b\n"
+	       " .long 81b,16b\n"
+	       " .long 14b,16b\n"
+	       " .long 91b,16b\n"
+	       " .long 6b,9b\n"
+	       " .long 7b,16b\n"
+	       ".previous"
+	       : "=&c"(size), "=&D" (d0), "=&S" (d1)
+	       : "1"(to), "2"(from), "0"(size)
+	       : "eax", "edx", "memory");
+	return size;
+}
+
 #else
+
 /*
  * Leave these declared but undefined. They should not be any references to
  * them
  */
-unsigned long
-__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size);
-unsigned long
-__copy_user_intel(void __user *to, const void *from, unsigned long size);
+unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
+				unsigned long size);
+unsigned long __copy_user_intel(void __user *to, const void *from,
+				unsigned long size);
+unsigned long __copy_user_zeroing_intel_nocache(void *to,
+				const void __user *from, unsigned long size);
 #endif /* CONFIG_X86_INTEL_USERCOPY */
 
 /* Generic arbitrary sized copy. */
@@ -515,8 +621,8 @@ do { \
 	: "memory"); \
 } while (0)
 
-
-unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned long n)
+unsigned long __copy_to_user_ll(void __user *to, const void *from,
+				unsigned long n)
 {
 	BUG_ON((long) n < 0);
 #ifndef CONFIG_X86_WP_WORKS_OK
@@ -576,8 +682,8 @@ survive:
 }
 EXPORT_SYMBOL(__copy_to_user_ll);
 
-unsigned long
-__copy_from_user_ll(void *to, const void __user *from, unsigned long n)
+unsigned long __copy_from_user_ll(void *to, const void __user *from,
+				unsigned long n)
 {
 	BUG_ON((long)n < 0);
 	if (movsl_is_ok(to, from, n))
@@ -588,6 +694,21 @@ __copy_from_user_ll(void *to, const void __user *from, unsigned long n)
 }
 EXPORT_SYMBOL(__copy_from_user_ll);
 
+unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
+				unsigned long n)
+{
+	BUG_ON((long)n < 0);
+#ifdef CONFIG_X86_INTEL_USERCOPY
+	if ( n > 64 && cpu_has_xmm2)
+		n = __copy_user_zeroing_intel_nocache(to, from, n);
+	else
+		__copy_user_zeroing(to, from, n);
+#else
+	__copy_user_zeroing(to, from, n);
+#endif
+	return n;
+}
+
 /**
  * copy_to_user: - Copy a block of data into user space.
  * @to: Destination address, in user space.
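
The new __copy_from_user_ll_nocache() only takes the non-temporal path for copies larger than 64 bytes on SSE2-capable CPUs: movnti stores write around the cache and the trailing sfence fences them before the copy is considered done, which keeps large one-shot copies from evicting useful cache lines. As a rough user-space sketch of the same technique (not kernel code: nt_copy(), the buffer sizes, and the reuse of the 64-byte cutoff are illustrative assumptions), the equivalent SSE2 intrinsics look like this:

/*
 * Illustrative sketch only: movnti + sfence via SSE2 intrinsics.
 * Build with -msse2.  nt_copy() is a made-up helper name.
 */
#include <emmintrin.h>	/* _mm_stream_si32 (movnti); pulls in _mm_sfence */
#include <stdio.h>
#include <string.h>

static void nt_copy(void *dst, const void *src, size_t n)
{
	int *d = dst;
	const int *s = src;

	if (n > 64) {			/* same cutoff as the patch */
		while (n >= sizeof(int)) {
			_mm_stream_si32(d++, *s++);	/* store, bypassing the cache */
			n -= sizeof(int);
		}
		_mm_sfence();		/* fence the weakly-ordered non-temporal stores */
	}
	memcpy(d, s, n);		/* short copies and the tail go the normal way */
}

int main(void)
{
	int src[64], dst[64];

	for (int i = 0; i < 64; i++)
		src[i] = i;
	nt_copy(dst, src, sizeof(src));
	printf("match: %d\n", memcmp(src, dst, sizeof(src)) == 0);
	return 0;
}

In the kernel routine the same pattern is open-coded in inline assembly so that each load from the user pointer can have an exception-table fixup entry, which a compiler-generated loop could not provide.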