author    Sai Praneeth <sai.praneeth.prakhya@intel.com>  2018-08-01 14:42:25 -0400
committer Thomas Gleixner <tglx@linutronix.de>           2018-08-03 06:50:34 -0400
commit    706d51681d636a0c4a5ef53395ec3b803e45ed4d (patch)
tree      8b9b6176ebfedbd6394d1d2121e83ddf97178760
parent    fdf82a7856b32d905c39afc85e34364491e46346 (diff)
x86/speculation: Support Enhanced IBRS on future CPUs
Future Intel processors will support "Enhanced IBRS" which is an "always
on" mode, i.e. the IBRS bit in the SPEC_CTRL MSR is enabled once and never
disabled.

From the specification [1]:

 "With enhanced IBRS, the predicted targets of indirect branches executed
  cannot be controlled by software that was executed in a less privileged
  predictor mode or on another logical processor. As a result, software
  operating on a processor with enhanced IBRS need not use WRMSR to set
  IA32_SPEC_CTRL.IBRS after every transition to a more privileged
  predictor mode. Software can isolate predictor modes effectively simply
  by setting the bit once. Software need not disable enhanced IBRS prior
  to entering a sleep state such as MWAIT or HLT."

If Enhanced IBRS is supported by the processor then use it as the
preferred spectre v2 mitigation mechanism instead of Retpoline. Intel's
Retpoline white paper [2] states:

 "Retpoline is known to be an effective branch target injection (Spectre
  variant 2) mitigation on Intel processors belonging to family 6
  (enumerated by the CPUID instruction) that do not have support for
  enhanced IBRS. On processors that support enhanced IBRS, it should be
  used for mitigation instead of retpoline."

The reason why Enhanced IBRS is the recommended mitigation on processors
which support it is that these processors also support CET which provides
a defense against ROP attacks. Retpoline is very similar to ROP techniques
and might trigger false positives in the CET defense.

If Enhanced IBRS is selected as the mitigation technique for spectre v2,
the IBRS bit in the SPEC_CTRL MSR is set once at boot time and never
cleared. The kernel also has to make sure that the IBRS bit remains set
after VMEXIT because the guest might have cleared the bit. This is already
covered by the existing x86_spec_ctrl_set_guest() and
x86_spec_ctrl_restore_host() speculation control functions.

Enhanced IBRS still requires IBPB for full mitigation.

[1] Speculative-Execution-Side-Channel-Mitigations.pdf
[2] Retpoline-A-Branch-Target-Injection-Mitigation.pdf
Both documents are available at:
https://bugzilla.kernel.org/show_bug.cgi?id=199511

Originally-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Tim C Chen <tim.c.chen@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Ravi Shankar <ravi.v.shankar@intel.com>
Link: https://lkml.kernel.org/r/1533148945-24095-1-git-send-email-sai.praneeth.prakhya@intel.com
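In short, the change boils down to two steps, sketched below. This is a
condensed sketch drawn from the hunks that follow, not a standalone
program (kernel context is assumed); every identifier appears in the
diff.

	/*
	 * 1) Detection in cpu_set_bug_bits(): the IBRS_ALL bit reported in
	 *    the IA32_ARCH_CAPABILITIES MSR is turned into a synthetic CPU
	 *    feature bit.
	 */
	if (ia32_cap & ARCH_CAP_IBRS_ALL)
		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);

	/*
	 * 2) Selection in spectre_v2_select_mitigation(): prefer Enhanced
	 *    IBRS over retpoline. The bit is set once and kept in
	 *    x86_spec_ctrl_base, so the existing VMEXIT restore path writes
	 *    it back if a guest cleared it.
	 */
	if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
		mode = SPECTRE_V2_IBRS_ENHANCED;
		x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
	}

When this mode is active, toggling IBRS around firmware calls is skipped
as well, since Enhanced IBRS already covers firmware; see the comment
change in the bugs.c hunk below.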
-rw-r--r--  arch/x86/include/asm/cpufeatures.h    |  1
-rw-r--r--  arch/x86/include/asm/nospec-branch.h  |  1
-rw-r--r--  arch/x86/kernel/cpu/bugs.c            | 20
-rw-r--r--  arch/x86/kernel/cpu/common.c          |  3
4 files changed, 23 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 5701f5cecd31..2687cd8e8d58 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -219,6 +219,7 @@
 #define X86_FEATURE_IBPB		( 7*32+26) /* Indirect Branch Prediction Barrier */
 #define X86_FEATURE_STIBP		( 7*32+27) /* Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ZEN			( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
+#define X86_FEATURE_IBRS_ENHANCED	( 7*32+29) /* Enhanced IBRS */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW		( 8*32+ 0) /* Intel TPR Shadow */
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index c99082e2ef13..fd2a8c1b88bc 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -214,6 +214,7 @@ enum spectre_v2_mitigation {
 	SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
 	SPECTRE_V2_RETPOLINE_GENERIC,
 	SPECTRE_V2_RETPOLINE_AMD,
+	SPECTRE_V2_IBRS_ENHANCED,
 };
 
 /* The Speculative Store Bypass disable variants */
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index bc8c43b22460..405a9a61bb89 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -130,6 +130,7 @@ static const char *spectre_v2_strings[] = {
 	[SPECTRE_V2_RETPOLINE_MINIMAL_AMD]	= "Vulnerable: Minimal AMD ASM retpoline",
 	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
 	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
+	[SPECTRE_V2_IBRS_ENHANCED]		= "Mitigation: Enhanced IBRS",
 };
 
 #undef pr_fmt
@@ -332,6 +333,13 @@ static void __init spectre_v2_select_mitigation(void)
 
 	case SPECTRE_V2_CMD_FORCE:
 	case SPECTRE_V2_CMD_AUTO:
+		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
+			mode = SPECTRE_V2_IBRS_ENHANCED;
+			/* Force it so VMEXIT will restore correctly */
+			x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
+			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+			goto specv2_set_mode;
+		}
 		if (IS_ENABLED(CONFIG_RETPOLINE))
 			goto retpoline_auto;
 		break;
@@ -369,6 +377,7 @@ retpoline_auto:
 		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
 	}
 
+specv2_set_mode:
 	spectre_v2_enabled = mode;
 	pr_info("%s\n", spectre_v2_strings[mode]);
 
@@ -391,9 +400,16 @@ retpoline_auto:
 
 	/*
 	 * Retpoline means the kernel is safe because it has no indirect
-	 * branches. But firmware isn't, so use IBRS to protect that.
+	 * branches. Enhanced IBRS protects firmware too, so, enable restricted
+	 * speculation around firmware calls only when Enhanced IBRS isn't
+	 * supported.
+	 *
+	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
+	 * the user might select retpoline on the kernel command line and if
+	 * the CPU supports Enhanced IBRS, kernel might un-intentionally not
+	 * enable IBRS around firmware calls.
 	 */
-	if (boot_cpu_has(X86_FEATURE_IBRS)) {
+	if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
 		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
 		pr_info("Enabling Restricted Speculation for firmware calls\n");
 	}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 43a927eb9c09..df28e931d732 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1005,6 +1005,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	    !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
 
+	if (ia32_cap & ARCH_CAP_IBRS_ALL)
+		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+
 	if (x86_match_cpu(cpu_no_meltdown))
 		return;
 