author		Borislav Petkov <bp@suse.de>	2015-11-05 10:57:56 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2015-11-07 04:45:02 -0500
commit		04633df0c43d710e5f696b06539c100898678235 (patch)
tree		f89040bf51f76c386da00dd8a18312c5bf959d70
parent		68accac392d859d24adcf1be3a90e41f978bd54c (diff)
x86/cpu: Call verify_cpu() after having entered long mode too
When we get loaded by a 64-bit bootloader, the kernel entry point is
startup_64 in head_64.S. We don't trust any and all bootloaders because
some will fiddle with CPU configuration, so we go ahead and massage each
CPU into sanity again.

For example, some Dell BIOSes have this XD disable feature which sets
IA32_MISC_ENABLE[34] and disables NX. This might be some dumb workaround
for other OSes but Linux sure doesn't need it.

A similar thing is present in the Surface 3 firmware - see
https://bugzilla.kernel.org/show_bug.cgi?id=106051 - which sets this bit
only on the BSP:

  # rdmsr -a 0x1a0
  400850089
  850089
  850089
  850089

I know, right?! There's not even an off switch in there.

So fix all those cases by sanitizing the 64-bit entry point too. For
that, make verify_cpu() callable in 64-bit mode also.

Requested-and-debugged-by: "H. Peter Anvin" <hpa@zytor.com>
Reported-and-tested-by: Bastien Nocera <bugzilla@hadess.net>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: stable@vger.kernel.org
Link: http://lkml.kernel.org/r/1446739076-21303-1-git-send-email-bp@alien8.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
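The rdmsr output above shows the problem: 0x400850089 has bit 34 set (the
leading 4 is bit 34, i.e. 0x4 << 32), so XD is disabled on the BSP only.
As a rough illustration of what sanitizing that bit involves - this is a
sketch only, not the code this patch adds (the real logic lives in
verify_cpu.S) - clearing IA32_MISC_ENABLE[34] looks roughly like:

	/* Sketch only: clear IA32_MISC_ENABLE[34] (XD disable) so NX works.
	 * rdmsr returns the MSR in %edx:%eax; bit 34 of the MSR is bit 2 of %edx. */
	movl	$0x1a0, %ecx		# MSR_IA32_MISC_ENABLE
	rdmsr
	btrl	$2, %edx		# clear bit 34, CF = old bit value
	jnc	1f			# already clear, nothing to write back
	wrmsr
1: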
-rw-r--r--	arch/x86/kernel/head_64.S	 8
-rw-r--r--	arch/x86/kernel/verify_cpu.S	12
2 files changed, 15 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 1d40ca8a73f2..ffdc0e860390 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -65,6 +65,9 @@ startup_64:
 	 * tables and then reload them.
 	 */
 
+	/* Sanitize CPU configuration */
+	call verify_cpu
+
 	/*
 	 * Compute the delta between the address I am compiled to run at and the
 	 * address I am actually running at.
@@ -174,6 +177,9 @@ ENTRY(secondary_startup_64)
 	 * after the boot processor executes this code.
 	 */
 
+	/* Sanitize CPU configuration */
+	call verify_cpu
+
 	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
 1:
 
@@ -288,6 +294,8 @@ ENTRY(secondary_startup_64)
 	pushq	%rax		# target address in negative space
 	lretq
 
+#include "verify_cpu.S"
+
 #ifdef CONFIG_HOTPLUG_CPU
 /*
  * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
index b9242bacbe59..4cf401f581e7 100644
--- a/arch/x86/kernel/verify_cpu.S
+++ b/arch/x86/kernel/verify_cpu.S
@@ -34,10 +34,11 @@
 #include <asm/msr-index.h>
 
 verify_cpu:
-	pushfl				# Save caller passed flags
-	pushl	$0			# Kill any dangerous flags
-	popfl
+	pushf				# Save caller passed flags
+	push	$0			# Kill any dangerous flags
+	popf
 
+#ifndef __x86_64__
 	pushfl				# standard way to check for cpuid
 	popl	%eax
 	movl	%eax,%ebx
@@ -48,6 +49,7 @@ verify_cpu:
 	popl	%eax
 	cmpl	%eax,%ebx
 	jz	verify_cpu_no_longmode	# cpu has no cpuid
+#endif
 
 	movl	$0x0,%eax		# See if cpuid 1 is implemented
 	cpuid
@@ -130,10 +132,10 @@ verify_cpu_sse_test:
 	jmp	verify_cpu_sse_test	# try again
 
 verify_cpu_no_longmode:
-	popfl				# Restore caller passed flags
+	popf				# Restore caller passed flags
 	movl	$1,%eax
 	ret
 verify_cpu_sse_ok:
-	popfl				# Restore caller passed flags
+	popf				# Restore caller passed flags
 	xorl	%eax, %eax
 	ret
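For reference, the last hunk preserves verify_cpu's calling convention:
it returns 0 in %eax when the CPU passes all checks and 1 when long mode
is missing. A hypothetical 32-bit call site could check it like this
(the no_longmode label is illustrative, not part of this patch):

	call	verify_cpu
	testl	%eax, %eax
	jnz	no_longmode		# hypothetical: print an error and halt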