author		Paolo Ciarrocchi <paolo.ciarrocchi@gmail.com>	2008-02-29 06:50:56 -0500
committer	Ingo Molnar <mingo@elte.hu>			2008-04-17 11:40:51 -0400
commit		3f50dbc1aec96c4d66ffa1c564014f9f43fb9e11
tree		07b5bfa4d9a6a0b5f6b3220a9f6eaf3914005199 /arch/x86/lib
parent		bdd3cee2e4b7279457139058615ced6c2b41e7de
x86: coding style fixes to arch/x86/lib/usercopy_32.c
Before:
total: 63 errors, 2 warnings, 878 lines checked
After:
total: 0 errors, 2 warnings, 878 lines checked
Compile tested, no change in the binary output:
text data bss dec hex filename
3231 0 0 3231 c9f usercopy_32.o.after
3231 0 0 3231 c9f usercopy_32.o.before
md5sum:
9f9a3eb43970359ae7cecfd1c9e7cf42 usercopy_32.o.after
9f9a3eb43970359ae7cecfd1c9e7cf42 usercopy_32.o.before
Signed-off-by: Paolo Ciarrocchi <paolo.ciarrocchi@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
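The errors checkpatch flags here are chiefly spacing issues: missing spaces after commas in macro parameter and argument lists, stray spaces inside parentheses, and trailing whitespace. A minimal sketch of the before/after shape of such a fix, using a made-up macro rather than code from the file:

	/* Hypothetical example, not from usercopy_32.c. */
	#if 0	/* before: checkpatch says "ERROR: space required after that ','" */
	#define copy_bytes(dst,src,n)	__copy_bytes((dst),(src),(n))
	#else	/* after: spaces added; the expanded token sequence is unchanged */
	#define copy_bytes(dst, src, n)	__copy_bytes((dst), (src), (n))
	#endif

Because edits like this only touch whitespace between tokens, the compiler sees the same token stream either way, which is why the size and md5sum outputs above match exactly.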
Diffstat (limited to 'arch/x86/lib')
-rw-r--r--	arch/x86/lib/usercopy_32.c	122
1 files changed, 61 insertions, 61 deletions
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index e849b9998b0e..24e60944971a 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -1,4 +1,4 @@
-/* 
+/*
  * User address space access functions.
  * The non inlined parts of asm-i386/uaccess.h are here.
  *
@@ -22,14 +22,14 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon
 #endif
 	return 1;
 }
-#define movsl_is_ok(a1,a2,n) \
-	__movsl_is_ok((unsigned long)(a1),(unsigned long)(a2),(n))
+#define movsl_is_ok(a1, a2, n) \
+	__movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))
 
 /*
  * Copy a null terminated string from userspace.
  */
 
-#define __do_strncpy_from_user(dst,src,count,res)			\
+#define __do_strncpy_from_user(dst, src, count, res)			\
 do {									\
 	int __d0, __d1, __d2;						\
 	might_sleep();							\
@@ -61,7 +61,7 @@ do {									\
  * least @count bytes long.
  * @src: Source address, in user space.
  * @count: Maximum number of bytes to copy, including the trailing NUL.
- * 
+ *
  * Copies a NUL-terminated string from userspace to kernel space.
  * Caller must check the specified block with access_ok() before calling
  * this function.
@@ -90,7 +90,7 @@ EXPORT_SYMBOL(__strncpy_from_user);
  * least @count bytes long.
  * @src: Source address, in user space.
  * @count: Maximum number of bytes to copy, including the trailing NUL.
- * 
+ *
  * Copies a NUL-terminated string from userspace to kernel space.
  *
  * On success, returns the length of the string (not including the trailing
@@ -120,7 +120,7 @@ EXPORT_SYMBOL(strncpy_from_user);
 do {									\
 	int __d0;							\
 	might_sleep();							\
 	__asm__ __volatile__(						\
 		"0:	rep; stosl\n"					\
 		"	movl %2,%0\n"					\
 		"1:	rep; stosb\n"					\
@@ -333,17 +333,17 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
 	__asm__ __volatile__(
 		"	.align 2,0x90\n"
 		"0:	movl 32(%4), %%eax\n"
 		"	cmpl $67, %0\n"
 		"	jbe 2f\n"
 		"1:	movl 64(%4), %%eax\n"
 		"	.align 2,0x90\n"
 		"2:	movl 0(%4), %%eax\n"
 		"21:	movl 4(%4), %%edx\n"
 		"	movl %%eax, 0(%3)\n"
 		"	movl %%edx, 4(%3)\n"
 		"3:	movl 8(%4), %%eax\n"
 		"31:	movl 12(%4),%%edx\n"
 		"	movl %%eax, 8(%3)\n"
 		"	movl %%edx, 12(%3)\n"
 		"4:	movl 16(%4), %%eax\n"
 		"41:	movl 20(%4), %%edx\n"
@@ -369,38 +369,38 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
 		"91:	movl 60(%4), %%edx\n"
 		"	movl %%eax, 56(%3)\n"
 		"	movl %%edx, 60(%3)\n"
 		"	addl $-64, %0\n"
 		"	addl $64, %4\n"
 		"	addl $64, %3\n"
 		"	cmpl $63, %0\n"
 		"	ja 0b\n"
 		"5:	movl %0, %%eax\n"
 		"	shrl $2, %0\n"
 		"	andl $3, %%eax\n"
 		"	cld\n"
 		"6:	rep; movsl\n"
 		"	movl %%eax,%0\n"
 		"7:	rep; movsb\n"
 		"8:\n"
 		".section .fixup,\"ax\"\n"
 		"9:	lea 0(%%eax,%0,4),%0\n"
 		"16:	pushl %0\n"
 		"	pushl %%eax\n"
 		"	xorl %%eax,%%eax\n"
 		"	rep; stosb\n"
 		"	popl %%eax\n"
 		"	popl %0\n"
 		"	jmp 8b\n"
 		".previous\n"
 		".section __ex_table,\"a\"\n"
 		"	.align 4\n"
 		"	.long 0b,16b\n"
 		"	.long 1b,16b\n"
 		"	.long 2b,16b\n"
 		"	.long 21b,16b\n"
 		"	.long 3b,16b\n"
 		"	.long 31b,16b\n"
 		"	.long 4b,16b\n"
 		"	.long 41b,16b\n"
 		"	.long 10b,16b\n"
 		"	.long 51b,16b\n"
@@ -412,9 +412,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
 		"	.long 81b,16b\n"
 		"	.long 14b,16b\n"
 		"	.long 91b,16b\n"
 		"	.long 6b,9b\n"
 		"	.long 7b,16b\n"
 		".previous"
 		: "=&c"(size), "=&D" (d0), "=&S" (d1)
 		: "1"(to), "2"(from), "0"(size)
 		: "eax", "edx", "memory");
@@ -429,7 +429,7 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
 static unsigned long __copy_user_zeroing_intel_nocache(void *to,
 				const void __user *from, unsigned long size)
 {
 	int d0, d1;
 
 	__asm__ __volatile__(
 		"	.align 2,0x90\n"
@@ -526,7 +526,7 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
 static unsigned long __copy_user_intel_nocache(void *to,
 				const void __user *from, unsigned long size)
 {
 	int d0, d1;
 
 	__asm__ __volatile__(
 		"	.align 2,0x90\n"
@@ -629,7 +629,7 @@ unsigned long __copy_user_zeroing_intel_nocache(void *to,
 #endif /* CONFIG_X86_INTEL_USERCOPY */
 
 /* Generic arbitrary sized copy. */
-#define __copy_user(to,from,size)					\
+#define __copy_user(to, from, size)					\
 do {									\
 	int __d0, __d1, __d2;						\
 	__asm__ __volatile__(						\
@@ -665,7 +665,7 @@ do {									\
 	: "memory");							\
 } while (0)
 
-#define __copy_user_zeroing(to,from,size)				\
+#define __copy_user_zeroing(to, from, size)				\
 do {									\
 	int __d0, __d1, __d2;						\
 	__asm__ __volatile__(						\
@@ -712,7 +712,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
 {
 #ifndef CONFIG_X86_WP_WORKS_OK
 	if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
-			((unsigned long )to) < TASK_SIZE) {
+			((unsigned long)to) < TASK_SIZE) {
 		/*
 		 * When we are in an atomic section (see
 		 * mm/filemap.c:file_read_actor), return the full
@@ -721,26 +721,26 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
 		if (in_atomic())
 			return n;
 
 		/*
 		 * CPU does not honor the WP bit when writing
 		 * from supervisory mode, and due to preemption or SMP,
 		 * the page tables can change at any time.
 		 * Do it manually. Manfred <manfred@colorfullife.com>
 		 */
 		while (n) {
 			unsigned long offset = ((unsigned long)to)%PAGE_SIZE;
 			unsigned long len = PAGE_SIZE - offset;
 			int retval;
 			struct page *pg;
 			void *maddr;
 
 			if (len > n)
 				len = n;
 
 survive:
 			down_read(&current->mm->mmap_sem);
 			retval = get_user_pages(current, current->mm,
-					(unsigned long )to, 1, 1, 0, &pg, NULL);
+					(unsigned long)to, 1, 1, 0, &pg, NULL);
 
 			if (retval == -ENOMEM && is_global_init(current)) {
 				up_read(&current->mm->mmap_sem);
@@ -750,8 +750,8 @@ survive:
 
 			if (retval != 1) {
 				up_read(&current->mm->mmap_sem);
 				break;
 			}
 
 			maddr = kmap_atomic(pg, KM_USER0);
 			memcpy(maddr + offset, from, len);
@@ -802,12 +802,12 @@ unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
 					unsigned long n)
 {
 #ifdef CONFIG_X86_INTEL_USERCOPY
-	if ( n > 64 && cpu_has_xmm2)
+	if (n > 64 && cpu_has_xmm2)
 		n = __copy_user_zeroing_intel_nocache(to, from, n);
 	else
 		__copy_user_zeroing(to, from, n);
 #else
 	__copy_user_zeroing(to, from, n);
 #endif
 	return n;
 }
@@ -817,12 +817,12 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
 					unsigned long n)
 {
 #ifdef CONFIG_X86_INTEL_USERCOPY
-	if ( n > 64 && cpu_has_xmm2)
+	if (n > 64 && cpu_has_xmm2)
 		n = __copy_user_intel_nocache(to, from, n);
 	else
 		__copy_user(to, from, n);
 #else
 	__copy_user(to, from, n);
 #endif
 	return n;
 }
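For background on the .section .fixup / __ex_table blocks that make up most of this file: each instruction that may fault on a user pointer gets a companion entry in the exception table, pairing the address of the potentially-faulting instruction with a fixup address; on a fault from kernel mode, the page-fault handler searches this table and resumes execution at the fixup instead of oopsing. A minimal sketch of the pattern, modeled on the i386 code above; get_user_byte is an illustrative name, not a kernel API:

	#include <linux/errno.h>	/* -EFAULT */
	#include <linux/compiler.h>	/* __user */

	/*
	 * Illustrative only: read one byte from user space, returning
	 * -EFAULT instead of faulting, using the same .fixup/__ex_table
	 * pattern as usercopy_32.c (i386, pre-_ASM_EXTABLE era).
	 */
	static inline int get_user_byte(unsigned char *dst,
					const unsigned char __user *src)
	{
		int err = 0;
		unsigned char val;

		__asm__ __volatile__(
			"1:	movb (%2), %1\n"	/* may fault on a bad pointer */
			"2:\n"
			".section .fixup,\"ax\"\n"	/* out-of-line recovery code */
			"3:	movl %3, %0\n"		/* err = -EFAULT */
			"	xorb %1, %1\n"		/* zero the result on failure */
			"	jmp 2b\n"		/* resume after the faulting insn */
			".previous\n"
			".section __ex_table,\"a\"\n"	/* (faulting insn, fixup) pairs */
			"	.align 4\n"
			"	.long 1b, 3b\n"		/* a fault at 1: continues at 3: */
			".previous"
			: "+r" (err), "=q" (val)
			: "r" (src), "i" (-EFAULT));

		*dst = val;
		return err;
	}

The macros in the patch (__do_strncpy_from_user, __copy_user, __copy_user_zeroing) are elaborations of exactly this scheme: many numbered labels, one shared fixup block, and one .long pair per faulting instruction. Later kernels wrap the table entry in _ASM_EXTABLE(), but the mechanism is the same.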