author     Tony Luck <tony.luck@intel.com>  2016-09-01 14:39:33 -0400
committer  Thomas Gleixner <tglx@linutronix.de>  2016-09-05 05:47:31 -0400
commit     9a6fb28a355d2609ace4dab4e6425442c647894d (patch)
tree       7c66135ce9e31daa18d5486e17c2f44ab3cfefd5 /arch/x86/lib
parent     3637efb00864f465baebd49464e58319fd295b65 (diff)
x86/mce: Improve memcpy_mcsafe()
Use the mcsafe_key defined in the previous patch to make decisions on which copy function to use. We can't use the FEATURE bit any more because PCI quirks run too late to affect the patching of code. So we use a static key.

Turn memcpy_mcsafe() into an inline function to make life easier for callers. The assembly code that actually does the copy is now named memcpy_mcsafe_unrolled().

Signed-off-by: Tony Luck <tony.luck@intel.com>
Acked-by: Borislav Petkov <bp@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Boris Petkov <bp@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/bfde2fc774e94f53d91b70a4321c85a0d33e7118.1472754712.git.tony.luck@intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
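The inline dispatch described above is not part of the hunk shown below (it lives elsewhere in the series, e.g. arch/x86/include/asm/string_64.h). As a rough sketch only, assuming the mcsafe_key static key and the memcpy_mcsafe_unrolled() routine introduced by this series, the wrapper could look like this; the surrounding declarations are an approximation, not the exact upstream code:

/* Sketch: mcsafe_key is flipped on by a PCI quirk at boot, which is why a
 * CPU FEATURE bit (patched via alternatives much earlier) cannot be used.
 */
DECLARE_STATIC_KEY_FALSE(mcsafe_key);

/* The unrolled copy in memcpy_64.S: returns 0, or -EFAULT if a machine
 * check was taken while reading the source.
 */
__must_check int memcpy_mcsafe_unrolled(void *dst, const void *src, size_t cnt);

static __always_inline __must_check int
memcpy_mcsafe(void *dst, const void *src, size_t cnt)
{
#ifdef CONFIG_X86_MCE
	/* Only take the recoverable (slower, unrolled) path when the
	 * platform quirk has enabled the static key.
	 */
	if (static_branch_unlikely(&mcsafe_key))
		return memcpy_mcsafe_unrolled(dst, src, cnt);
#endif
	memcpy(dst, src, cnt);
	return 0;
}

A caller can then simply test the return value and fall back to its error-recovery path when -EFAULT comes back.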
Diffstat (limited to 'arch/x86/lib')
-rw-r--r--  arch/x86/lib/memcpy_64.S  6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 2ec0b0abbfaa..49e6ebac7e73 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -181,11 +181,11 @@ ENDPROC(memcpy_orig)
 
 #ifndef CONFIG_UML
 /*
- * memcpy_mcsafe - memory copy with machine check exception handling
+ * memcpy_mcsafe_unrolled - memory copy with machine check exception handling
  * Note that we only catch machine checks when reading the source addresses.
  * Writes to target are posted and don't generate machine checks.
  */
-ENTRY(memcpy_mcsafe)
+ENTRY(memcpy_mcsafe_unrolled)
 	cmpl $8, %edx
 	/* Less than 8 bytes? Go to byte copy loop */
 	jb .L_no_whole_words
@@ -273,7 +273,7 @@ ENTRY(memcpy_mcsafe)
 .L_done_memcpy_trap:
 	xorq %rax, %rax
 	ret
-ENDPROC(memcpy_mcsafe)
+ENDPROC(memcpy_mcsafe_unrolled)
 
 	.section .fixup, "ax"
 	/* Return -EFAULT for any failure */