about summary refs log tree commit diff stats
path: root/arch/x86/lib
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2019-03-01 09:24:33 -0500
committerIngo Molnar <mingo@kernel.org>2019-04-03 03:36:29 -0400
commit3693ca81151eacd498675baae56abede577e8b31 (patch)
tree3a5a28311c024636af112d9d89dcdcb723ea960b /arch/x86/lib
parent8f4faed01e3015955801c8ef066ec7fd7a8b3902 (diff)
x86/uaccess: Move copy_user_handle_tail() into asm
By writing the function in asm we avoid cross object code flow and objtool no longer gets confused about a 'stray' CLAC. Also; the asm version is actually _simpler_. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Borislav Petkov <bp@alien8.de> Cc: Josh Poimboeuf <jpoimboe@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/lib')
-rw-r--r--arch/x86/lib/copy_user_64.S48
-rw-r--r--arch/x86/lib/usercopy_64.c20
2 files changed, 48 insertions, 20 deletions
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index db4e5aa0858b..b2f1822084ae 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -16,6 +16,30 @@
16#include <asm/smap.h> 16#include <asm/smap.h>
17#include <asm/export.h> 17#include <asm/export.h>
18 18
19.macro ALIGN_DESTINATION
20 /* check for bad alignment of destination */
21 movl %edi,%ecx
22 andl $7,%ecx
23 jz 102f /* already aligned */
24 subl $8,%ecx
25 negl %ecx
26 subl %ecx,%edx
27100: movb (%rsi),%al
28101: movb %al,(%rdi)
29 incq %rsi
30 incq %rdi
31 decl %ecx
32 jnz 100b
33102:
34 .section .fixup,"ax"
35103: addl %ecx,%edx /* ecx is zerorest also */
36 jmp copy_user_handle_tail
37 .previous
38
39 _ASM_EXTABLE_UA(100b, 103b)
40 _ASM_EXTABLE_UA(101b, 103b)
41 .endm
42
19/* 43/*
20 * copy_user_generic_unrolled - memory copy with exception handling. 44 * copy_user_generic_unrolled - memory copy with exception handling.
21 * This version is for CPUs like P4 that don't have efficient micro 45 * This version is for CPUs like P4 that don't have efficient micro
@@ -194,6 +218,30 @@ ENDPROC(copy_user_enhanced_fast_string)
194EXPORT_SYMBOL(copy_user_enhanced_fast_string) 218EXPORT_SYMBOL(copy_user_enhanced_fast_string)
195 219
196/* 220/*
221 * Try to copy last bytes and clear the rest if needed.
222 * Since protection fault in copy_from/to_user is not a normal situation,
223 * it is not necessary to optimize tail handling.
224 *
225 * Input:
226 * rdi destination
227 * rsi source
228 * rdx count
229 *
230 * Output:
231 * eax uncopied bytes or 0 if successful.
232 */
233ALIGN;
234copy_user_handle_tail:
235 movl %edx,%ecx
2361: rep movsb
2372: mov %ecx,%eax
238 ASM_CLAC
239 ret
240
241 _ASM_EXTABLE_UA(1b, 2b)
242ENDPROC(copy_user_handle_tail)
243
244/*
197 * copy_user_nocache - Uncached memory copy with exception handling 245 * copy_user_nocache - Uncached memory copy with exception handling
198 * This will force destination out of cache for more performance. 246 * This will force destination out of cache for more performance.
199 * 247 *
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index ee42bb0cbeb3..9952a01cad24 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -55,26 +55,6 @@ unsigned long clear_user(void __user *to, unsigned long n)
55EXPORT_SYMBOL(clear_user); 55EXPORT_SYMBOL(clear_user);
56 56
57/* 57/*
58 * Try to copy last bytes and clear the rest if needed.
59 * Since protection fault in copy_from/to_user is not a normal situation,
60 * it is not necessary to optimize tail handling.
61 */
62__visible unsigned long
63copy_user_handle_tail(char *to, char *from, unsigned len)
64{
65 for (; len; --len, to++) {
66 char c;
67
68 if (__get_user_nocheck(c, from++, sizeof(char)))
69 break;
70 if (__put_user_nocheck(c, to, sizeof(char)))
71 break;
72 }
73 clac();
74 return len;
75}
76
77/*
78 * Similar to copy_user_handle_tail, probe for the write fault point, 58 * Similar to copy_user_handle_tail, probe for the write fault point,
79 * but reuse __memcpy_mcsafe in case a new read error is encountered. 59 * but reuse __memcpy_mcsafe in case a new read error is encountered.
80 * clac() is handled in _copy_to_iter_mcsafe(). 60 * clac() is handled in _copy_to_iter_mcsafe().