Diffstat (limited to 'arch/i386/lib')
 arch/i386/lib/bitops.c   |  4
 arch/i386/lib/checksum.S | 69
 arch/i386/lib/getuser.S  | 26
 arch/i386/lib/putuser.S  | 39
 arch/i386/lib/usercopy.c |  7
 5 files changed, 104 insertions(+), 41 deletions(-)
diff --git a/arch/i386/lib/bitops.c b/arch/i386/lib/bitops.c
index 97db3853dc82..afd0045595d4 100644
--- a/arch/i386/lib/bitops.c
+++ b/arch/i386/lib/bitops.c
@@ -43,7 +43,7 @@ EXPORT_SYMBOL(find_next_bit);
  */
 int find_next_zero_bit(const unsigned long *addr, int size, int offset)
 {
-	unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
+	const unsigned long *p = addr + (offset >> 5);
 	int set = 0, bit = offset & 31, res;
 
 	if (bit) {
@@ -64,7 +64,7 @@ int find_next_zero_bit(const unsigned long *addr, int size, int offset)
 	/*
 	 * No zero yet, search remaining full bytes for a zero
 	 */
-	res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr));
+	res = find_first_zero_bit(p, size - 32 * (p - addr));
 	return (offset + set + res);
 }
 EXPORT_SYMBOL(find_next_zero_bit);
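The constification above changes no behavior; both hunks only drop casts that
the const-qualified pointer makes unnecessary. For context, a minimal C sketch
of what find_next_zero_bit() computes, assuming 32-bit longs (illustrative
only; the in-tree version hands the aligned remainder to an optimized
find_first_zero_bit()):

/* Sketch of find_next_zero_bit(): return the index of the first zero
 * bit at or after 'offset' in the bitmap at 'addr', or 'size' if none.
 */
static int find_next_zero_bit_sketch(const unsigned long *addr,
				     int size, int offset)
{
	const unsigned long *p = addr + (offset >> 5);	/* word holding 'offset' */
	int bit = offset & 31;				/* bit index within it */

	for (; offset < size; offset++) {
		if (!(*p & (1UL << bit)))		/* found a zero bit */
			return offset;
		if (++bit == 32) {			/* move to the next word */
			bit = 0;
			p++;
		}
	}
	return size;
}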
diff --git a/arch/i386/lib/checksum.S b/arch/i386/lib/checksum.S
index 75ffd02654fc..adbccd0bbb78 100644
--- a/arch/i386/lib/checksum.S
+++ b/arch/i386/lib/checksum.S
@@ -25,6 +25,8 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#include <linux/linkage.h>
+#include <asm/dwarf2.h>
 #include <asm/errno.h>
 
 /*
@@ -36,8 +38,6 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
  */
 
 .text
-.align 4
-.globl csum_partial
 
 #ifndef CONFIG_X86_USE_PPRO_CHECKSUM
 
@@ -48,9 +48,14 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
  * Fortunately, it is easy to convert 2-byte alignment to 4-byte
  * alignment for the unrolled loop.
  */
-csum_partial:
+ENTRY(csum_partial)
+	CFI_STARTPROC
 	pushl %esi
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET esi, 0
 	pushl %ebx
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET ebx, 0
 	movl 20(%esp),%eax	# Function arg: unsigned int sum
 	movl 16(%esp),%ecx	# Function arg: int len
 	movl 12(%esp),%esi	# Function arg: unsigned char *buff
@@ -128,16 +133,27 @@ csum_partial:
 	roll $8, %eax
 8:
 	popl %ebx
+	CFI_ADJUST_CFA_OFFSET -4
+	CFI_RESTORE ebx
 	popl %esi
+	CFI_ADJUST_CFA_OFFSET -4
+	CFI_RESTORE esi
 	ret
+	CFI_ENDPROC
+ENDPROC(csum_partial)
 
 #else
 
 /* Version for PentiumII/PPro */
 
-csum_partial:
+ENTRY(csum_partial)
+	CFI_STARTPROC
 	pushl %esi
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET esi, 0
 	pushl %ebx
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET ebx, 0
 	movl 20(%esp),%eax	# Function arg: unsigned int sum
 	movl 16(%esp),%ecx	# Function arg: int len
 	movl 12(%esp),%esi	# Function arg: const unsigned char *buf
@@ -245,8 +261,14 @@ csum_partial:
 	roll $8, %eax
 90:
 	popl %ebx
+	CFI_ADJUST_CFA_OFFSET -4
+	CFI_RESTORE ebx
 	popl %esi
+	CFI_ADJUST_CFA_OFFSET -4
+	CFI_RESTORE esi
 	ret
+	CFI_ENDPROC
+ENDPROC(csum_partial)
 
 #endif
 
@@ -278,19 +300,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
 	.long 9999b, 6002f	;	\
 	.previous
 
-.align 4
-.globl csum_partial_copy_generic
-
 #ifndef CONFIG_X86_USE_PPRO_CHECKSUM
 
 #define ARGBASE 16
 #define FP	12
 
-csum_partial_copy_generic:
+ENTRY(csum_partial_copy_generic)
+	CFI_STARTPROC
 	subl  $4,%esp
+	CFI_ADJUST_CFA_OFFSET 4
 	pushl %edi
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET edi, 0
 	pushl %esi
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET esi, 0
 	pushl %ebx
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET ebx, 0
 	movl ARGBASE+16(%esp),%eax	# sum
 	movl ARGBASE+12(%esp),%ecx	# len
 	movl ARGBASE+4(%esp),%esi	# src
@@ -400,10 +427,19 @@ DST(	movb %cl, (%edi)	)
 .previous
 
 	popl %ebx
+	CFI_ADJUST_CFA_OFFSET -4
+	CFI_RESTORE ebx
 	popl %esi
+	CFI_ADJUST_CFA_OFFSET -4
+	CFI_RESTORE esi
 	popl %edi
+	CFI_ADJUST_CFA_OFFSET -4
+	CFI_RESTORE edi
 	popl %ecx			# equivalent to addl $4,%esp
+	CFI_ADJUST_CFA_OFFSET -4
 	ret
+	CFI_ENDPROC
+ENDPROC(csum_partial_copy_generic)
 
 #else
 
@@ -421,10 +457,17 @@ DST(	movb %cl, (%edi)	)
 
 #define ARGBASE 12
 
-csum_partial_copy_generic:
+ENTRY(csum_partial_copy_generic)
+	CFI_STARTPROC
 	pushl %ebx
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET ebx, 0
 	pushl %edi
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET edi, 0
 	pushl %esi
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET esi, 0
 	movl ARGBASE+4(%esp),%esi	#src
 	movl ARGBASE+8(%esp),%edi	#dst
 	movl ARGBASE+12(%esp),%ecx	#len
@@ -485,9 +528,17 @@ DST(	movb %dl, (%edi)	)
 .previous
 
 	popl %esi
+	CFI_ADJUST_CFA_OFFSET -4
+	CFI_RESTORE esi
 	popl %edi
+	CFI_ADJUST_CFA_OFFSET -4
+	CFI_RESTORE edi
 	popl %ebx
+	CFI_ADJUST_CFA_OFFSET -4
+	CFI_RESTORE ebx
 	ret
+	CFI_ENDPROC
+ENDPROC(csum_partial_copy_generic)
 
 #undef ROUND
 #undef ROUND1
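All of the churn in checksum.S is unwind metadata, not logic: CFI_STARTPROC
and CFI_ENDPROC bracket each routine, CFI_ADJUST_CFA_OFFSET records how far
each pushl/popl moves the canonical frame address tracked via %esp, and
CFI_REL_OFFSET/CFI_RESTORE record where callee-saved registers were spilled,
so a DWARF2 unwinder can step through these functions. The C-visible contract
of csum_partial() is unchanged. As a hedged illustration of that contract,
a sketch of folding its 32-bit running sum down to a final 16-bit checksum
(checksum16() is a made-up helper; the fold mirrors what the kernel's
csum_fold() does):

/* csum_partial() is the assembly routine annotated above. */
unsigned int csum_partial(const unsigned char *buff, int len,
			  unsigned int sum);

static unsigned short checksum16(const unsigned char *buf, int len)
{
	unsigned int sum = csum_partial(buf, len, 0);

	sum = (sum & 0xffff) + (sum >> 16);	/* fold the carries once */
	sum = (sum & 0xffff) + (sum >> 16);	/* and any carry that made */
	return (unsigned short)~sum;		/* ones-complement result */
}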
diff --git a/arch/i386/lib/getuser.S b/arch/i386/lib/getuser.S
index 62d7f178a326..6d84b53f12a2 100644
--- a/arch/i386/lib/getuser.S
+++ b/arch/i386/lib/getuser.S
@@ -8,6 +8,8 @@
  * return an error value in addition to the "real"
  * return value.
  */
+#include <linux/linkage.h>
+#include <asm/dwarf2.h>
 #include <asm/thread_info.h>
 
 
@@ -24,19 +26,19 @@
  */
 
 .text
-.align 4
-.globl __get_user_1
-__get_user_1:
+ENTRY(__get_user_1)
+	CFI_STARTPROC
 	GET_THREAD_INFO(%edx)
 	cmpl TI_addr_limit(%edx),%eax
 	jae bad_get_user
 1:	movzbl (%eax),%edx
 	xorl %eax,%eax
 	ret
+	CFI_ENDPROC
+ENDPROC(__get_user_1)
 
-.align 4
-.globl __get_user_2
-__get_user_2:
+ENTRY(__get_user_2)
+	CFI_STARTPROC
 	addl $1,%eax
 	jc bad_get_user
 	GET_THREAD_INFO(%edx)
@@ -45,10 +47,11 @@ __get_user_2:
 2:	movzwl -1(%eax),%edx
 	xorl %eax,%eax
 	ret
+	CFI_ENDPROC
+ENDPROC(__get_user_2)
 
-.align 4
-.globl __get_user_4
-__get_user_4:
+ENTRY(__get_user_4)
+	CFI_STARTPROC
 	addl $3,%eax
 	jc bad_get_user
 	GET_THREAD_INFO(%edx)
@@ -57,11 +60,16 @@ __get_user_4:
 3:	movl -3(%eax),%edx
 	xorl %eax,%eax
 	ret
+	CFI_ENDPROC
+ENDPROC(__get_user_4)
 
 bad_get_user:
+	CFI_STARTPROC
 	xorl %edx,%edx
 	movl $-14,%eax
 	ret
+	CFI_ENDPROC
+END(bad_get_user)
 
 .section __ex_table,"a"
 	.long 1b,bad_get_user
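The numeric labels are what let these stubs fault safely on a bad user
pointer: each __ex_table entry pairs a potentially faulting load (1b, 2b, 3b)
with the fixup bad_get_user, which returns -14 (-EFAULT) with the output
register zeroed. A rough C sketch of the mechanism, simplified from the i386
layout of this era (the real lookup searches a sorted table built from all
__ex_table sections):

/* One entry per ".long 1b,bad_get_user" style pair. */
struct exception_table_entry {
	unsigned long insn;	/* address of the faulting instruction */
	unsigned long fixup;	/* where the fault handler resumes */
};

/* Conceptually, inside the page-fault handler: */
static unsigned long search_fixup_sketch(const struct exception_table_entry *e,
					 int nentries, unsigned long fault_ip)
{
	int i;

	for (i = 0; i < nentries; i++)
		if (e[i].insn == fault_ip)
			return e[i].fixup;	/* resume here, no oops */
	return 0;				/* genuine kernel fault */
}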
diff --git a/arch/i386/lib/putuser.S b/arch/i386/lib/putuser.S
index a32d9f570f48..f58fba109d18 100644
--- a/arch/i386/lib/putuser.S
+++ b/arch/i386/lib/putuser.S
@@ -8,6 +8,8 @@
  * return an error value in addition to the "real"
  * return value.
  */
+#include <linux/linkage.h>
+#include <asm/dwarf2.h>
 #include <asm/thread_info.h>
 
 
@@ -23,23 +25,28 @@
  * as they get called from within inline assembly.
  */
 
-#define ENTER	pushl %ebx ; GET_THREAD_INFO(%ebx)
-#define EXIT	popl %ebx ; ret
+#define ENTER	CFI_STARTPROC ; \
+		pushl %ebx ; \
+		CFI_ADJUST_CFA_OFFSET 4 ; \
+		CFI_REL_OFFSET ebx, 0 ; \
+		GET_THREAD_INFO(%ebx)
+#define EXIT	popl %ebx ; \
+		CFI_ADJUST_CFA_OFFSET -4 ; \
+		CFI_RESTORE ebx ; \
+		ret ; \
+		CFI_ENDPROC
 
 .text
-.align 4
-.globl __put_user_1
-__put_user_1:
+ENTRY(__put_user_1)
 	ENTER
 	cmpl TI_addr_limit(%ebx),%ecx
 	jae bad_put_user
 1:	movb %al,(%ecx)
 	xorl %eax,%eax
 	EXIT
+ENDPROC(__put_user_1)
 
-.align 4
-.globl __put_user_2
-__put_user_2:
+ENTRY(__put_user_2)
 	ENTER
 	movl TI_addr_limit(%ebx),%ebx
 	subl $1,%ebx
@@ -48,10 +55,9 @@ __put_user_2:
 2:	movw %ax,(%ecx)
 	xorl %eax,%eax
 	EXIT
+ENDPROC(__put_user_2)
 
-.align 4
-.globl __put_user_4
-__put_user_4:
+ENTRY(__put_user_4)
 	ENTER
 	movl TI_addr_limit(%ebx),%ebx
 	subl $3,%ebx
@@ -60,10 +66,9 @@ __put_user_4:
 3:	movl %eax,(%ecx)
 	xorl %eax,%eax
 	EXIT
+ENDPROC(__put_user_4)
 
-.align 4
-.globl __put_user_8
-__put_user_8:
+ENTRY(__put_user_8)
 	ENTER
 	movl TI_addr_limit(%ebx),%ebx
 	subl $7,%ebx
@@ -73,10 +78,16 @@ __put_user_8:
 5:	movl %edx,4(%ecx)
 	xorl %eax,%eax
 	EXIT
+ENDPROC(__put_user_8)
 
 bad_put_user:
+	CFI_STARTPROC simple
+	CFI_DEF_CFA esp, 2*4
+	CFI_OFFSET eip, -1*4
+	CFI_OFFSET ebx, -2*4
 	movl $-14,%eax
 	EXIT
+END(bad_put_user)
 
 .section __ex_table,"a"
 	.long 1b,bad_put_user
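bad_put_user gets CFI_STARTPROC simple because it is reached only by a jump
from the middle of the __put_user_* bodies, after ENTER has already pushed
%ebx: the default fresh-frame assumption would be wrong, so the annotations
state the frame by hand (CFA at %esp + 8, return address at CFA - 4, saved
%ebx at CFA - 8). From C, these stubs are reached only through the uaccess
macros; a hedged sketch of a typical call site (example_ioctl_sketch is
hypothetical, the macros are the standard <asm/uaccess.h> ones):

#include <asm/uaccess.h>

/* Double a user-supplied int in place. Each macro expands to a call
 * into the __get_user_N / __put_user_N stubs annotated above and
 * yields 0 on success or -EFAULT if the access faulted.
 */
static int example_ioctl_sketch(int __user *arg)
{
	int value;

	if (get_user(value, arg))
		return -EFAULT;
	value *= 2;
	if (put_user(value, arg))
		return -EFAULT;
	return 0;
}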
diff --git a/arch/i386/lib/usercopy.c b/arch/i386/lib/usercopy.c
index 086b3726862a..9f38b12b4af1 100644
--- a/arch/i386/lib/usercopy.c
+++ b/arch/i386/lib/usercopy.c
@@ -716,7 +716,6 @@ do {						\
 unsigned long __copy_to_user_ll(void __user *to, const void *from,
 				unsigned long n)
 {
-	BUG_ON((long) n < 0);
 #ifndef CONFIG_X86_WP_WORKS_OK
 	if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
 			((unsigned long )to) < TASK_SIZE) {
@@ -785,7 +784,6 @@ EXPORT_SYMBOL(__copy_to_user_ll);
 unsigned long __copy_from_user_ll(void *to, const void __user *from,
 					unsigned long n)
 {
-	BUG_ON((long)n < 0);
 	if (movsl_is_ok(to, from, n))
 		__copy_user_zeroing(to, from, n);
 	else
@@ -797,7 +795,6 @@ EXPORT_SYMBOL(__copy_from_user_ll);
 unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
 					 unsigned long n)
 {
-	BUG_ON((long)n < 0);
 	if (movsl_is_ok(to, from, n))
 		__copy_user(to, from, n);
 	else
@@ -810,7 +807,6 @@ EXPORT_SYMBOL(__copy_from_user_ll_nozero);
 unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
 					unsigned long n)
 {
-	BUG_ON((long)n < 0);
 #ifdef CONFIG_X86_INTEL_USERCOPY
 	if ( n > 64 && cpu_has_xmm2)
 		n = __copy_user_zeroing_intel_nocache(to, from, n);
@@ -825,7 +821,6 @@ unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
 unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
 					unsigned long n)
 {
-	BUG_ON((long)n < 0);
 #ifdef CONFIG_X86_INTEL_USERCOPY
 	if ( n > 64 && cpu_has_xmm2)
 		n = __copy_user_intel_nocache(to, from, n);
@@ -853,7 +848,6 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
 unsigned long
 copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	BUG_ON((long) n < 0);
 	if (access_ok(VERIFY_WRITE, to, n))
 		n = __copy_to_user(to, from, n);
 	return n;
@@ -879,7 +873,6 @@ EXPORT_SYMBOL(copy_to_user);
 unsigned long
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	BUG_ON((long) n < 0);
 	if (access_ok(VERIFY_READ, from, n))
 		n = __copy_from_user(to, from, n);
 	else
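The dropped BUG_ON((long) n < 0) checks could only fire for sizes with the
top bit set; since n is unsigned long, such a "negative" n is really just a
very large request, and the access_ok() check at the copy_to_user() and
copy_from_user() entry points already rejects ranges that exceed the user
address limit. The established contract is that callers inspect the return
value, the number of bytes NOT copied, rather than rely on the kernel to
BUG(). A sketch of the intended call-site pattern (read_user_buffer_sketch
is hypothetical):

#include <linux/errno.h>
#include <asm/uaccess.h>

/* copy_from_user() returns how many bytes it could not copy, so zero
 * means complete success; anything else is reported as -EFAULT here.
 */
static int read_user_buffer_sketch(void *kbuf, const void __user *ubuf,
				   unsigned long len)
{
	unsigned long uncopied = copy_from_user(kbuf, ubuf, len);

	if (uncopied)
		return -EFAULT;
	return 0;
}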