Diffstat (limited to 'arch/x86/lib')

 -rw-r--r--  arch/x86/lib/copy_user_64.S         |  7 +++++++
 -rw-r--r--  arch/x86/lib/copy_user_nocache_64.S |  3 +++
 -rw-r--r--  arch/x86/lib/getuser.S              | 10 ++++++++++
 -rw-r--r--  arch/x86/lib/insn.c                 |  4 ++++
 -rw-r--r--  arch/x86/lib/putuser.S              |  8 +++++++-
 -rw-r--r--  arch/x86/lib/usercopy_32.c          | 13 ++++++++++++-
 -rw-r--r--  arch/x86/lib/usercopy_64.c          |  3 +++
 7 files changed, 46 insertions(+), 2 deletions(-)
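Every hunk below follows the same SMAP (Supervisor Mode Access Prevention) discipline: execute STAC (set EFLAGS.AC) immediately before a deliberate access to user memory, and CLAC (clear EFLAGS.AC) on every path back out, so the CPU can fault any kernel access to user pages that is not bracketed this way. As a rough sketch of the primitives the new <asm/smap.h> include is expected to supply — an assumption about the header's shape, not its verbatim contents; the real header also has to spell the instructions as byte sequences for assemblers that predate the mnemonics, and provides the ASM_STAC/ASM_CLAC forms used from .S files:

	#include <asm/alternative.h>
	#include <asm/cpufeature.h>

	/* Patched to real STAC/CLAC instructions only on CPUs with
	 * X86_FEATURE_SMAP; elsewhere the alternatives machinery
	 * leaves NOPs in place. */
	static __always_inline void stac(void)
	{
		alternative("", "stac", X86_FEATURE_SMAP);
	}

	static __always_inline void clac(void)
	{
		alternative("", "clac", X86_FEATURE_SMAP);
	}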
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 5b2995f4557a..a30ca15be21c 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -17,6 +17,7 @@
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
+#include <asm/smap.h>
 
 /*
  * By placing feature2 after feature1 in altinstructions section, we logically
@@ -130,6 +131,7 @@ ENDPROC(bad_from_user)
  */
 ENTRY(copy_user_generic_unrolled)
 	CFI_STARTPROC
+	ASM_STAC
 	cmpl $8,%edx
 	jb 20f	/* less then 8 bytes, go to byte copy loop */
 	ALIGN_DESTINATION
@@ -177,6 +179,7 @@ ENTRY(copy_user_generic_unrolled)
 	decl %ecx
 	jnz 21b
 23:	xor %eax,%eax
+	ASM_CLAC
 	ret
 
 	.section .fixup,"ax"
@@ -232,6 +235,7 @@ ENDPROC(copy_user_generic_unrolled)
  */
 ENTRY(copy_user_generic_string)
 	CFI_STARTPROC
+	ASM_STAC
 	andl %edx,%edx
 	jz 4f
 	cmpl $8,%edx
@@ -246,6 +250,7 @@ ENTRY(copy_user_generic_string)
 3:	rep
 	movsb
 4:	xorl %eax,%eax
+	ASM_CLAC
 	ret
 
 	.section .fixup,"ax"
@@ -273,12 +278,14 @@ ENDPROC(copy_user_generic_string)
  */
 ENTRY(copy_user_enhanced_fast_string)
 	CFI_STARTPROC
+	ASM_STAC
 	andl %edx,%edx
 	jz 2f
 	movl %edx,%ecx
1:	rep
 	movsb
2:	xorl %eax,%eax
+	ASM_CLAC
 	ret
 
 	.section .fixup,"ax"
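Note the placement pattern in all three copy variants: ASM_STAC goes right after CFI_STARTPROC, ASM_CLAC sits just before the successful ret, and the .fixup fault paths deliberately run without a CLAC — they jump to copy_user_handle_tail(), which gains the matching clac() in the usercopy_64.c hunk at the end of this patch. __copy_user_nocache below follows the identical pattern. A self-contained C sketch of the discipline; stac()/clac() are stubbed and copy_user_sketch() is illustrative, not kernel API:

	#include <string.h>

	static void stac(void) { /* would execute STAC: EFLAGS.AC = 1 */ }
	static void clac(void) { /* would execute CLAC: EFLAGS.AC = 0 */ }

	static unsigned long copy_user_sketch(void *to, const void *from,
					      unsigned long n)
	{
		stac();			/* open the window once per call */
		memcpy(to, from, n);	/* stands in for the unrolled copy;
					 * a fault here would reach the
					 * fixup code with AC still set */
		clac();			/* close it on the success path */
		return 0;		/* bytes left uncopied */
	}

Doing one STAC/CLAC pair per call, rather than per access, presumably keeps the cost of the flag writes off the hot inner loop.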
diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
index cacddc7163eb..6a4f43c2d9e6 100644
--- a/arch/x86/lib/copy_user_nocache_64.S
+++ b/arch/x86/lib/copy_user_nocache_64.S
@@ -15,6 +15,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 #include <asm/asm.h>
+#include <asm/smap.h>
 
 .macro ALIGN_DESTINATION
 #ifdef FIX_ALIGNMENT
@@ -48,6 +49,7 @@
  */
 ENTRY(__copy_user_nocache)
 	CFI_STARTPROC
+	ASM_STAC
 	cmpl $8,%edx
 	jb 20f	/* less then 8 bytes, go to byte copy loop */
 	ALIGN_DESTINATION
@@ -95,6 +97,7 @@ ENTRY(__copy_user_nocache)
 	decl %ecx
 	jnz 21b
23:	xorl %eax,%eax
+	ASM_CLAC
 	sfence
 	ret
 
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index b33b1fb1e6d4..156b9c804670 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -33,6 +33,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 #include <asm/asm.h>
+#include <asm/smap.h>
 
 	.text
 ENTRY(__get_user_1)
@@ -40,8 +41,10 @@ ENTRY(__get_user_1)
 	GET_THREAD_INFO(%_ASM_DX)
 	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
 	jae bad_get_user
+	ASM_STAC
1:	movzb (%_ASM_AX),%edx
 	xor %eax,%eax
+	ASM_CLAC
 	ret
 	CFI_ENDPROC
 ENDPROC(__get_user_1)
@@ -53,8 +56,10 @@ ENTRY(__get_user_2)
 	GET_THREAD_INFO(%_ASM_DX)
 	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
 	jae bad_get_user
+	ASM_STAC
2:	movzwl -1(%_ASM_AX),%edx
 	xor %eax,%eax
+	ASM_CLAC
 	ret
 	CFI_ENDPROC
 ENDPROC(__get_user_2)
@@ -66,8 +71,10 @@ ENTRY(__get_user_4)
 	GET_THREAD_INFO(%_ASM_DX)
 	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
 	jae bad_get_user
+	ASM_STAC
3:	mov -3(%_ASM_AX),%edx
 	xor %eax,%eax
+	ASM_CLAC
 	ret
 	CFI_ENDPROC
 ENDPROC(__get_user_4)
@@ -80,8 +87,10 @@ ENTRY(__get_user_8)
 	GET_THREAD_INFO(%_ASM_DX)
 	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
 	jae bad_get_user
+	ASM_STAC
4:	movq -7(%_ASM_AX),%_ASM_DX
 	xor %eax,%eax
+	ASM_CLAC
 	ret
 	CFI_ENDPROC
 ENDPROC(__get_user_8)
@@ -91,6 +100,7 @@ bad_get_user:
 	CFI_STARTPROC
 	xor %edx,%edx
 	mov $(-EFAULT),%_ASM_AX
+	ASM_CLAC
 	ret
 	CFI_ENDPROC
 END(bad_get_user)
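In getuser.S the ASM_STAC lands after the addr_limit check — there is no reason to open the window for a pointer that is about to be rejected — and bad_get_user gets an ASM_CLAC even though the jae path never ran STAC: the exception-table fixups for the 1:–4: loads land there with AC still set, and an extra CLAC when AC is already clear is harmless. A hypothetical C rendering of __get_user_1's shape (the real routine is asm and handles the fault via an exception-table entry, not a return value):

	#include <errno.h>

	static void stac(void) { /* STAC */ }
	static void clac(void) { /* CLAC */ }

	static long get_user_1_sketch(const unsigned char *uaddr,
				      unsigned long addr_limit,
				      unsigned char *dst)
	{
		if ((unsigned long)uaddr >= addr_limit)
			goto bad;	/* jae bad_get_user, before STAC */
		stac();
		*dst = *uaddr;		/* 1: movzb (%_ASM_AX),%edx */
		clac();
		return 0;
	bad:
		clac();			/* no-op if AC is already clear;
					 * also covers faults taken after
					 * STAC */
		return -EFAULT;
	}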
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index b1e6c4b2e8eb..54fcffed28ed 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -18,7 +18,11 @@
  * Copyright (C) IBM Corporation, 2002, 2004, 2009
  */
 
+#ifdef __KERNEL__
 #include <linux/string.h>
+#else
+#include <string.h>
+#endif
 #include <asm/inat.h>
 #include <asm/insn.h>
 
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index 7f951c8f76c4..fc6ba17a7eec 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -15,6 +15,7 @@
 #include <asm/thread_info.h>
 #include <asm/errno.h>
 #include <asm/asm.h>
+#include <asm/smap.h>
 
 
 /*
@@ -31,7 +32,8 @@
 
 #define ENTER	CFI_STARTPROC ; \
 		GET_THREAD_INFO(%_ASM_BX)
-#define EXIT	ret ; \
+#define EXIT	ASM_CLAC ; \
+		ret ; \
 		CFI_ENDPROC
 
 	.text
@@ -39,6 +41,7 @@ ENTRY(__put_user_1)
 	ENTER
 	cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
 	jae bad_put_user
+	ASM_STAC
1:	movb %al,(%_ASM_CX)
 	xor %eax,%eax
 	EXIT
@@ -50,6 +53,7 @@ ENTRY(__put_user_2)
 	sub $1,%_ASM_BX
 	cmp %_ASM_BX,%_ASM_CX
 	jae bad_put_user
+	ASM_STAC
2:	movw %ax,(%_ASM_CX)
 	xor %eax,%eax
 	EXIT
@@ -61,6 +65,7 @@ ENTRY(__put_user_4)
 	sub $3,%_ASM_BX
 	cmp %_ASM_BX,%_ASM_CX
 	jae bad_put_user
+	ASM_STAC
3:	movl %eax,(%_ASM_CX)
 	xor %eax,%eax
 	EXIT
@@ -72,6 +77,7 @@ ENTRY(__put_user_8)
 	sub $7,%_ASM_BX
 	cmp %_ASM_BX,%_ASM_CX
 	jae bad_put_user
+	ASM_STAC
4:	mov %_ASM_AX,(%_ASM_CX)
 #ifdef CONFIG_X86_32
5:	movl %edx,4(%_ASM_CX)
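putuser.S takes a cheaper route than getuser.S: rather than adding ASM_CLAC before each ret, the shared EXIT macro grows the CLAC, covering the store routines and the error path (bad_put_user exits through the same macro) in one edit. The same single-exit idea in illustrative C; the names and the addr_limit parameter are stand-ins, not the real interface:

	#include <errno.h>

	static void stac(void) { /* STAC */ }
	static void clac(void) { /* CLAC */ }

	#define PUT_USER_EXIT(ret)	do { clac(); return (ret); } while (0)

	static long put_user_1_sketch(unsigned char val, unsigned char *uaddr,
				      unsigned long addr_limit)
	{
		if ((unsigned long)uaddr >= addr_limit)
			PUT_USER_EXIT(-EFAULT);	/* bad_put_user */
		stac();				/* ASM_STAC, after the check */
		*uaddr = val;			/* 1: movb %al,(%_ASM_CX) */
		PUT_USER_EXIT(0);		/* EXIT: ASM_CLAC ; ret */
	}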
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 1781b2f950e2..98f6d6b68f5a 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -42,10 +42,11 @@ do { \
 	int __d0; \
 	might_fault(); \
 	__asm__ __volatile__( \
+		ASM_STAC "\n" \
 		"0:	rep; stosl\n" \
 		"	movl %2,%0\n" \
 		"1:	rep; stosb\n" \
-		"2:\n" \
+		"2: " ASM_CLAC "\n" \
 		".section .fixup,\"ax\"\n" \
 		"3:	lea 0(%2,%0,4),%0\n" \
 		"	jmp 2b\n" \
@@ -626,10 +627,12 @@ survive:
 		return n;
 	}
 #endif
+	stac();
 	if (movsl_is_ok(to, from, n))
 		__copy_user(to, from, n);
 	else
 		n = __copy_user_intel(to, from, n);
+	clac();
 	return n;
 }
 EXPORT_SYMBOL(__copy_to_user_ll);
@@ -637,10 +640,12 @@ EXPORT_SYMBOL(__copy_to_user_ll);
 unsigned long __copy_from_user_ll(void *to, const void __user *from,
 					unsigned long n)
 {
+	stac();
 	if (movsl_is_ok(to, from, n))
 		__copy_user_zeroing(to, from, n);
 	else
 		n = __copy_user_zeroing_intel(to, from, n);
+	clac();
 	return n;
 }
 EXPORT_SYMBOL(__copy_from_user_ll);
@@ -648,11 +653,13 @@ EXPORT_SYMBOL(__copy_from_user_ll);
 unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
 					 unsigned long n)
 {
+	stac();
 	if (movsl_is_ok(to, from, n))
 		__copy_user(to, from, n);
 	else
 		n = __copy_user_intel((void __user *)to,
 				      (const void *)from, n);
+	clac();
 	return n;
 }
 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
@@ -660,6 +667,7 @@ EXPORT_SYMBOL(__copy_from_user_ll_nozero);
 unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
 					unsigned long n)
 {
+	stac();
 #ifdef CONFIG_X86_INTEL_USERCOPY
 	if (n > 64 && cpu_has_xmm2)
 		n = __copy_user_zeroing_intel_nocache(to, from, n);
@@ -668,6 +676,7 @@ unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
 #else
 	__copy_user_zeroing(to, from, n);
 #endif
+	clac();
 	return n;
 }
 EXPORT_SYMBOL(__copy_from_user_ll_nocache);
@@ -675,6 +684,7 @@ EXPORT_SYMBOL(__copy_from_user_ll_nocache);
 unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
 						 unsigned long n)
 {
+	stac();
 #ifdef CONFIG_X86_INTEL_USERCOPY
 	if (n > 64 && cpu_has_xmm2)
 		n = __copy_user_intel_nocache(to, from, n);
@@ -683,6 +693,7 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
 #else
 	__copy_user(to, from, n);
 #endif
+	clac();
 	return n;
 }
 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
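Two things are worth noting in usercopy_32.c. In the C functions, one stac()/clac() pair brackets the whole operation, so whichever copier runs (__copy_user, __copy_user_intel, or the nocache variants) executes inside a single open window. In the __do_clear_user asm template, the CLAC is spliced onto the 2: label — the common exit — so the fixup's jmp 2b clears AC on the fault path as well. A self-contained sketch of the C-level bracketing; movsl_is_ok_sketch() and the memcpy() calls are stand-ins for the real copiers:

	#include <string.h>

	static void stac(void) { /* STAC */ }
	static void clac(void) { /* CLAC */ }

	static int movsl_is_ok_sketch(const void *a, const void *b,
				      unsigned long n)
	{
		/* stand-in for the real alignment heuristic */
		return (((unsigned long)a | (unsigned long)b | n) & 3) == 0;
	}

	static unsigned long copy_to_user_ll_sketch(void *to, const void *from,
						    unsigned long n)
	{
		stac();				/* one window, either copier */
		if (movsl_is_ok_sketch(to, from, n))
			memcpy(to, from, n);	/* __copy_user() stand-in */
		else
			memcpy(to, from, n);	/* __copy_user_intel() stand-in */
		clac();
		return 0;			/* bytes left uncopied */
	}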
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index e5b130bc2d0e..05928aae911e 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
 	might_fault();
 	/* no memory constraint because it doesn't change any memory gcc knows
 	   about */
+	stac();
 	asm volatile(
 		"	testq %[size8],%[size8]\n"
 		"	jz 4f\n"
@@ -40,6 +41,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
 		: [size8] "=&c"(size), [dst] "=&D" (__d0)
 		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
 		  [zero] "r" (0UL), [eight] "r" (8UL));
+	clac();
 	return size;
 }
 EXPORT_SYMBOL(__clear_user);
@@ -82,5 +84,6 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
 	for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
 		if (__put_user_nocheck(c, to++, sizeof(char)))
 			break;
+	clac();
 	return len;
 }
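copy_user_handle_tail() is the one place that gets a clac() with no matching stac(): it is only reached from the .fixup paths of the copy_user_*.S routines above, which faulted after ASM_STAC, so the user-access window is still open when the byte-at-a-time cleanup starts and must be closed exactly once, at the end. An illustrative sketch of that shape (the real function stops at the faulting byte via __get_user_nocheck/__put_user_nocheck rather than copying blindly):

	static void clac(void) { /* CLAC: close the still-open window */ }

	static unsigned long handle_tail_sketch(char *to, char *from,
						unsigned int len)
	{
		/* AC is already set here, courtesy of the copy routine
		 * that faulted partway through and jumped to this handler */
		while (len) {
			*to++ = *from++;	/* real code would stop at
						 * the faulting byte */
			len--;
		}
		clac();				/* the only CLAC on this path */
		return len;			/* bytes left uncopied */
	}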