author		Tony Luck <tony.luck@intel.com>		2016-09-01 14:39:33 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2016-09-05 05:47:31 -0400
commit		9a6fb28a355d2609ace4dab4e6425442c647894d (patch)
tree		7c66135ce9e31daa18d5486e17c2f44ab3cfefd5
parent		3637efb00864f465baebd49464e58319fd295b65 (diff)
x86/mce: Improve memcpy_mcsafe()
Use the mcsafe_key defined in the previous patch to make decisions on which
copy function to use. We can't use the FEATURE bit any more because PCI
quirks run too late to affect the patching of code. So we use a static key.

Turn memcpy_mcsafe() into an inline function to make life easier for
callers. The assembly code that actually does the copy is now named
memcpy_mcsafe_unrolled().

Signed-off-by: Tony Luck <tony.luck@intel.com>
Acked-by: Borislav Petkov <bp@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Boris Petkov <bp@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/bfde2fc774e94f53d91b70a4321c85a0d33e7118.1472754712.git.tony.luck@intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
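[ For context, a rough sketch of the other half of the mechanism: mcsafe_key
itself and the PCI quirk that flips it live in the parent patch
(3637efb00864), not here, and the device ID, config offset and capability bit
below are placeholders rather than anything taken from that patch. ]

/*
 * Sketch only: a boot-time PCI quirk discovers machine check recovery
 * support and enables the static key; the branch site patched by the key
 * is the inline memcpy_mcsafe() added in the string_64.h hunk below.
 */
#include <linux/jump_label.h>
#include <linux/pci.h>

DEFINE_STATIC_KEY_FALSE(mcsafe_key);	/* pairs with the DECLARE_ in string_64.h */

static void quirk_enable_mcsafe(struct pci_dev *pdev)
{
	u32 cap;

	/* Hypothetical capability check -- the real quirk logic differs. */
	if (!pci_read_config_dword(pdev, 0x84, &cap) && (cap & 0x10))
		static_branch_enable(&mcsafe_key);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, quirk_enable_mcsafe);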
-rw-r--r--	arch/x86/include/asm/pmem.h		 5
-rw-r--r--	arch/x86/include/asm/string_64.h	16
-rw-r--r--	arch/x86/kernel/x8664_ksyms_64.c	 2
-rw-r--r--	arch/x86/lib/memcpy_64.S		 6
4 files changed, 20 insertions, 9 deletions
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index 643eba42d620..2c1ebeb4d737 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -46,10 +46,7 @@ static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
 
 static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
 {
-	if (static_cpu_has(X86_FEATURE_MCE_RECOVERY))
-		return memcpy_mcsafe(dst, src, n);
-	memcpy(dst, src, n);
-	return 0;
+	return memcpy_mcsafe(dst, src, n);
 }
 
 /**
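With the X86_FEATURE_MCE_RECOVERY test gone, arch_memcpy_from_pmem() forwards
unconditionally to memcpy_mcsafe(), which now carries its own gating. A caller
only has to propagate the return code; a minimal sketch (the helper name and
the -EFAULT to -EIO mapping are illustrative, not from this patch):

#include <linux/errno.h>
#include <linux/pmem.h>
#include <linux/types.h>

/* Hypothetical pmem read helper consuming the memcpy_mcsafe() result. */
static int read_from_pmem(void *dst, const void *pmem_addr, size_t len)
{
	int rc = arch_memcpy_from_pmem(dst, pmem_addr, len);

	/* A machine check on the source shows up as -EFAULT; report it as -EIO. */
	return rc ? -EIO : 0;
}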
diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
index 877a1dfbf770..a164862d77e3 100644
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -79,6 +79,7 @@ int strcmp(const char *cs, const char *ct);
 #define memset(s, c, n) __memset(s, c, n)
 #endif
 
+__must_check int memcpy_mcsafe_unrolled(void *dst, const void *src, size_t cnt);
 DECLARE_STATIC_KEY_FALSE(mcsafe_key);
 
 /**
@@ -89,10 +90,23 @@ DECLARE_STATIC_KEY_FALSE(mcsafe_key);
  * @cnt: number of bytes to copy
  *
  * Low level memory copy function that catches machine checks
+ * We only call into the "safe" function on systems that can
+ * actually do machine check recovery. Everyone else can just
+ * use memcpy().
  *
  * Return 0 for success, -EFAULT for fail
  */
-int memcpy_mcsafe(void *dst, const void *src, size_t cnt);
+static __always_inline __must_check int
+memcpy_mcsafe(void *dst, const void *src, size_t cnt)
+{
+#ifdef CONFIG_X86_MCE
+	if (static_branch_unlikely(&mcsafe_key))
+		return memcpy_mcsafe_unrolled(dst, src, cnt);
+	else
+#endif
+		memcpy(dst, src, cnt);
+	return 0;
+}
 
 #endif /* __KERNEL__ */
 
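Note that the `else` sits inside the #ifdef on purpose: with CONFIG_X86_MCE=n
the preprocessor drops the static-key branch entirely and the inline collapses
to the plain fallback, roughly:

/* Effective body when CONFIG_X86_MCE is not set (sketch of the preprocessed form). */
static __always_inline __must_check int
memcpy_mcsafe(void *dst, const void *src, size_t cnt)
{
	memcpy(dst, src, cnt);
	return 0;
}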
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index 95e49f6e4fc3..b2cee3d19477 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -38,7 +38,7 @@ EXPORT_SYMBOL(__copy_user_nocache);
 EXPORT_SYMBOL(_copy_from_user);
 EXPORT_SYMBOL(_copy_to_user);
 
-EXPORT_SYMBOL_GPL(memcpy_mcsafe);
+EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled);
 
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(clear_page);
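Because memcpy_mcsafe() is now inlined into its callers, modules (e.g. the
nvdimm/pmem drivers) end up referencing memcpy_mcsafe_unrolled() directly,
which is why the GPL export moves to that symbol. A hedged sketch of such a
modular caller (module and buffer names are made up):

#include <linux/module.h>
#include <linux/string.h>

static char demo_dst[64];
static char demo_src[64] = "data that might sit in poisoned memory";

static int __init mcsafe_demo_init(void)
{
	/* Expands to a memcpy_mcsafe_unrolled() call once mcsafe_key is enabled. */
	if (memcpy_mcsafe(demo_dst, demo_src, sizeof(demo_src)))
		pr_warn("mcsafe_demo: source read hit a machine check\n");
	return 0;
}
module_init(mcsafe_demo_init);
MODULE_LICENSE("GPL");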
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 2ec0b0abbfaa..49e6ebac7e73 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -181,11 +181,11 @@ ENDPROC(memcpy_orig)
 
 #ifndef CONFIG_UML
 /*
- * memcpy_mcsafe - memory copy with machine check exception handling
+ * memcpy_mcsafe_unrolled - memory copy with machine check exception handling
  * Note that we only catch machine checks when reading the source addresses.
  * Writes to target are posted and don't generate machine checks.
  */
-ENTRY(memcpy_mcsafe)
+ENTRY(memcpy_mcsafe_unrolled)
 	cmpl $8, %edx
 	/* Less than 8 bytes? Go to byte copy loop */
 	jb .L_no_whole_words
@@ -273,7 +273,7 @@ ENTRY(memcpy_mcsafe)
 .L_done_memcpy_trap:
 	xorq %rax, %rax
 	ret
-ENDPROC(memcpy_mcsafe)
+ENDPROC(memcpy_mcsafe_unrolled)
 
 	.section .fixup, "ax"
 	/* Return -EFAULT for any failure */
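As the comment above the routine says, only loads from the source are
recovered; a machine check makes the copy bail out through the fixup section
with -EFAULT, so the destination may hold a partial copy. A sketch of that
caller-visible contract (the wrapper and its names are illustrative, not part
of this patch):

#include <linux/errno.h>
#include <linux/string.h>

/* Illustrative wrapper: on -EFAULT the destination is only partially written. */
static int copy_possibly_poisoned(void *dst, const void *src, size_t len)
{
	int rc = memcpy_mcsafe_unrolled(dst, src, len);

	if (rc == -EFAULT)
		memset(dst, 0, len);	/* discard the partial copy */
	return rc;
}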