author    Dan Williams <dan.j.williams@intel.com>  2018-05-03 20:06:26 -0400
committer Ingo Molnar <mingo@kernel.org>           2018-05-15 02:32:42 -0400
commit    12c89130a56ae8e8d85db753d70333c4ee0ea835 (patch)
tree      b3ec091a965a9a0674c005e6d5b0df429c5c70a3
parent    60622d68227d6d71fdfba5fb39f7f3d44cdd8815 (diff)
x86/asm/memcpy_mcsafe: Add write-protection-fault handling
In preparation for using memcpy_mcsafe() to handle user copies it needs
to be able to handle write-protection faults while writing user pages.
Add MMU-fault handlers alongside the machine-check exception handlers.

Note that the machine check fault exception handling makes assumptions
about source buffer alignment and poison alignment. In the write fault
case, given the destination buffer is arbitrarily aligned, it needs a
separate / additional fault handling approach. The mcsafe_handle_tail()
helper is reused. The @limit argument is set to @len since there is no
safety concern about retriggering an MMU fault, and this simplifies the
assembly.

Co-developed-by: Tony Luck <tony.luck@intel.com>
Reported-by: Mika Penttilä <mika.penttila@nextfour.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: hch@lst.de
Cc: linux-fsdevel@vger.kernel.org
Cc: linux-nvdimm@lists.01.org
Link: http://lkml.kernel.org/r/152539238635.31796.14056325365122961778.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
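For orientation before the diff: memcpy_mcsafe() returns 0 on success or the number of bytes not copied, and a user-copy caller must open SMAP around the access (the in-diff comment below defers clac() to _copy_to_iter_mcsafe(), which is outside this patch). A minimal sketch of such a wrapper, with a hypothetical name and assuming the existing __uaccess_begin()/__uaccess_end() helpers; this is not code from this patch:

/*
 * Hypothetical wrapper sketch, not part of this patch: user copies
 * need SMAP opened around the access, and memcpy_mcsafe() reports
 * the bytes it could not copy so callers can handle short transfers.
 */
static __must_check unsigned long
copy_to_user_mcsafe_sketch(void __user *to, const void *from, unsigned len)
{
	unsigned long ret;

	__uaccess_begin();		/* stac(): open user access */
	ret = memcpy_mcsafe((__force void *)to, from, len);
	__uaccess_end();		/* clac(): close user access */

	return ret;			/* 0 on success, else bytes remaining */
}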
-rw-r--r--  arch/x86/include/asm/uaccess_64.h |  3 +++
-rw-r--r--  arch/x86/lib/memcpy_64.S          | 14 ++++++++++++++
-rw-r--r--  arch/x86/lib/usercopy_64.c        | 21 +++++++++++++++++++++
3 files changed, 38 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 62546b3a398e..c63efc07891f 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -194,4 +194,7 @@ __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
 unsigned long
 copy_user_handle_tail(char *to, char *from, unsigned len);
 
+unsigned long
+mcsafe_handle_tail(char *to, char *from, unsigned len);
+
 #endif /* _ASM_X86_UACCESS_64_H */
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index f01a88391c98..c3b527a9f95d 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -265,9 +265,23 @@ EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
 	mov	%ecx, %eax
 	ret
 
+	/*
+	 * For write fault handling, given the destination is unaligned,
+	 * we handle faults on multi-byte writes with a byte-by-byte
+	 * copy up to the write-protected page.
+	 */
+.E_write_words:
+	shll	$3, %ecx
+	addl	%edx, %ecx
+	movl	%ecx, %edx
+	jmp	mcsafe_handle_tail
+
 	.previous
 
 	_ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes)
 	_ASM_EXTABLE_FAULT(.L_read_words, .E_read_words)
 	_ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes)
+	_ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
+	_ASM_EXTABLE(.L_write_words, .E_write_words)
+	_ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
 #endif
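To make the fixup arithmetic easier to follow: at the .L_write_words fault point, %ecx holds the count of 8-byte words not yet written and %edx the trailing-byte count, while %rdi/%rsi already point at the faulting position (register roles inferred from the surrounding routine). The new .E_write_words handler is, in illustrative C with invented names:

/*
 * Illustrative C equivalent of the .E_write_words fixup above;
 * the names are invented, the assembly is authoritative.
 */
static unsigned long E_write_words(char *to, char *from,
				   unsigned words_left, unsigned trail_bytes)
{
	/* shll $3, %ecx; addl %edx, %ecx: bytes still to be copied */
	unsigned len = (words_left << 3) + trail_bytes;

	/* movl %ecx, %edx; jmp mcsafe_handle_tail: retry byte-by-byte */
	return mcsafe_handle_tail(to, from, len);
}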
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 75d3776123cc..7ebc9901dd05 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -75,6 +75,27 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
 	return len;
 }
 
+/*
+ * Similar to copy_user_handle_tail, probe for the write fault point,
+ * but reuse __memcpy_mcsafe in case a new read error is encountered.
+ * clac() is handled in _copy_to_iter_mcsafe().
+ */
+__visible unsigned long
+mcsafe_handle_tail(char *to, char *from, unsigned len)
+{
+	for (; len; --len, to++, from++) {
+		/*
+		 * Call the assembly routine back directly since
+		 * memcpy_mcsafe() may silently fallback to memcpy.
+		 */
+		unsigned long rem = __memcpy_mcsafe(to, from, 1);
+
+		if (rem)
+			break;
+	}
+	return len;
+}
+
 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
 /**
  * clean_cache_range - write back a cache range with CLWB
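A note on why the loop above calls __memcpy_mcsafe() directly: the memcpy_mcsafe() wrapper can compile down to a plain memcpy() when machine-check recovery is not enabled, which would defeat the per-byte fault probing. The wrapper is roughly shaped like this (a simplified sketch of the definition in arch/x86/include/asm/string_64.h, not code from this patch):

/*
 * Rough sketch of the memcpy_mcsafe() wrapper shape; mcsafe_key is a
 * static key enabled when the CPU supports recoverable machine
 * checks. Simplified for illustration.
 */
static __always_inline __must_check unsigned long
memcpy_mcsafe(void *dst, const void *src, size_t cnt)
{
#ifdef CONFIG_X86_MCE
	if (static_branch_unlikely(&mcsafe_key))
		return __memcpy_mcsafe(dst, src, cnt);
#endif
	memcpy(dst, src, cnt);	/* no fault handling on this path */
	return 0;
}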