about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorJiri Kosina <jkosina@suse.cz>2018-09-25 08:38:55 -0400
committerThomas Gleixner <tglx@linutronix.de>2018-09-26 08:26:52 -0400
commit53c613fe6349994f023245519265999eed75957f (patch)
treeb4cc05de3a6421aa9dadc5de257371c9cb677674
parentdbfe2953f63c640463c630746cd5d9de8b2f63ae (diff)
x86/speculation: Enable cross-hyperthread spectre v2 STIBP mitigation
STIBP is a feature provided by certain Intel ucodes / CPUs. This feature (once enabled) prevents cross-hyperthread control of decisions made by indirect branch predictors. Enable this feature if - the CPU is vulnerable to spectre v2 - the CPU supports SMT and has SMT siblings online - spectre_v2 mitigation autoselection is enabled (default) After some previous discussion, this leaves STIBP on all the time, as wrmsr on crossing kernel boundary is a no-no. This could perhaps later be a bit more optimized (like disabling it in NOHZ, experiment with disabling it in idle, etc) if needed. Note that the synchronization of the mask manipulation via newly added spec_ctrl_mutex is currently not strictly needed, as the only updater is already being serialized by cpu_add_remove_lock, but let's make this a little bit more future-proof. Signed-off-by: Jiri Kosina <jkosina@suse.cz> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Josh Poimboeuf <jpoimboe@redhat.com> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: "WoodhouseDavid" <dwmw@amazon.co.uk> Cc: Andi Kleen <ak@linux.intel.com> Cc: Tim Chen <tim.c.chen@linux.intel.com> Cc: "SchauflerCasey" <casey.schaufler@intel.com> Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/nycvar.YFH.7.76.1809251438240.15880@cbobk.fhfr.pm
-rw-r--r--arch/x86/kernel/cpu/bugs.c57
-rw-r--r--kernel/cpu.c11
2 files changed, 61 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 40bdaea97fe7..53eb14a65610 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -35,12 +35,10 @@ static void __init spectre_v2_select_mitigation(void);
35static void __init ssb_select_mitigation(void); 35static void __init ssb_select_mitigation(void);
36static void __init l1tf_select_mitigation(void); 36static void __init l1tf_select_mitigation(void);
37 37
38/* 38/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
39 * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any 39u64 x86_spec_ctrl_base;
40 * writes to SPEC_CTRL contain whatever reserved bits have been set.
41 */
42u64 __ro_after_init x86_spec_ctrl_base;
43EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); 40EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
41static DEFINE_MUTEX(spec_ctrl_mutex);
44 42
45/* 43/*
46 * The vendor and possibly platform specific bits which can be modified in 44 * The vendor and possibly platform specific bits which can be modified in
@@ -325,6 +323,46 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
325 return cmd; 323 return cmd;
326} 324}
327 325
326static bool stibp_needed(void)
327{
328 if (spectre_v2_enabled == SPECTRE_V2_NONE)
329 return false;
330
331 if (!boot_cpu_has(X86_FEATURE_STIBP))
332 return false;
333
334 return true;
335}
336
337static void update_stibp_msr(void *info)
338{
339 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
340}
341
342void arch_smt_update(void)
343{
344 u64 mask;
345
346 if (!stibp_needed())
347 return;
348
349 mutex_lock(&spec_ctrl_mutex);
350 mask = x86_spec_ctrl_base;
351 if (cpu_smt_control == CPU_SMT_ENABLED)
352 mask |= SPEC_CTRL_STIBP;
353 else
354 mask &= ~SPEC_CTRL_STIBP;
355
356 if (mask != x86_spec_ctrl_base) {
357 pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
358 cpu_smt_control == CPU_SMT_ENABLED ?
359 "Enabling" : "Disabling");
360 x86_spec_ctrl_base = mask;
361 on_each_cpu(update_stibp_msr, NULL, 1);
362 }
363 mutex_unlock(&spec_ctrl_mutex);
364}
365
328static void __init spectre_v2_select_mitigation(void) 366static void __init spectre_v2_select_mitigation(void)
329{ 367{
330 enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); 368 enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -424,6 +462,9 @@ specv2_set_mode:
424 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); 462 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
425 pr_info("Enabling Restricted Speculation for firmware calls\n"); 463 pr_info("Enabling Restricted Speculation for firmware calls\n");
426 } 464 }
465
466 /* Enable STIBP if appropriate */
467 arch_smt_update();
427} 468}
428 469
429#undef pr_fmt 470#undef pr_fmt
@@ -814,6 +855,8 @@ static ssize_t l1tf_show_state(char *buf)
814static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, 855static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
815 char *buf, unsigned int bug) 856 char *buf, unsigned int bug)
816{ 857{
858 int ret;
859
817 if (!boot_cpu_has_bug(bug)) 860 if (!boot_cpu_has_bug(bug))
818 return sprintf(buf, "Not affected\n"); 861 return sprintf(buf, "Not affected\n");
819 862
@@ -831,10 +874,12 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
831 return sprintf(buf, "Mitigation: __user pointer sanitization\n"); 874 return sprintf(buf, "Mitigation: __user pointer sanitization\n");
832 875
833 case X86_BUG_SPECTRE_V2: 876 case X86_BUG_SPECTRE_V2:
834 return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], 877 ret = sprintf(buf, "%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
835 boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", 878 boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
836 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", 879 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
880 (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "",
837 spectre_v2_module_string()); 881 spectre_v2_module_string());
882 return ret;
838 883
839 case X86_BUG_SPEC_STORE_BYPASS: 884 case X86_BUG_SPEC_STORE_BYPASS:
840 return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); 885 return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index aa7fe85ad62e..2fb49916ea56 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -2025,6 +2025,12 @@ static void cpuhp_online_cpu_device(unsigned int cpu)
2025 kobject_uevent(&dev->kobj, KOBJ_ONLINE); 2025 kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2026} 2026}
2027 2027
2028/*
2029 * Architectures that need SMT-specific errata handling during SMT hotplug
2030 * should override this.
2031 */
2032void __weak arch_smt_update(void) { };
2033
2028static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) 2034static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2029{ 2035{
2030 int cpu, ret = 0; 2036 int cpu, ret = 0;
@@ -2051,8 +2057,10 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2051 */ 2057 */
2052 cpuhp_offline_cpu_device(cpu); 2058 cpuhp_offline_cpu_device(cpu);
2053 } 2059 }
2054 if (!ret) 2060 if (!ret) {
2055 cpu_smt_control = ctrlval; 2061 cpu_smt_control = ctrlval;
2062 arch_smt_update();
2063 }
2056 cpu_maps_update_done(); 2064 cpu_maps_update_done();
2057 return ret; 2065 return ret;
2058} 2066}
@@ -2063,6 +2071,7 @@ static int cpuhp_smt_enable(void)
2063 2071
2064 cpu_maps_update_begin(); 2072 cpu_maps_update_begin();
2065 cpu_smt_control = CPU_SMT_ENABLED; 2073 cpu_smt_control = CPU_SMT_ENABLED;
2074 arch_smt_update();
2066 for_each_present_cpu(cpu) { 2075 for_each_present_cpu(cpu) {
2067 /* Skip online CPUs and CPUs on offline nodes */ 2076 /* Skip online CPUs and CPUs on offline nodes */
2068 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) 2077 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))