author		Thomas Gleixner <tglx@linutronix.de>	2018-05-10 14:21:36 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2018-05-17 11:09:17 -0400
commit		52817587e706686fcdb27f14c1b000c92f266c96
tree		c28b0f3ac505e6a598b43622a9a05dcedf8d7c7c
parent		7eb8956a7fec3c1f0abc2a5517dada99ccc8a961
x86/cpufeatures: Disentangle SSBD enumeration
The SSBD enumeration is, similar to the other bits, magically shared
between Intel and AMD, though the mechanisms are different.

Make X86_FEATURE_SSBD synthetic and set it depending on the vendor
specific features or family dependent setup.

Change the Intel bit to X86_FEATURE_SPEC_CTRL_SSBD to denote that SSBD
is controlled via MSR_SPEC_CTRL and fix up the usage sites.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 arch/x86/include/asm/cpufeatures.h |  7 +++----
 arch/x86/kernel/cpu/amd.c          |  7 +------
 arch/x86/kernel/cpu/bugs.c         | 10 +++++-----
 arch/x86/kernel/cpu/common.c       |  3 +++
 arch/x86/kernel/cpu/intel.c        |  1 +
 arch/x86/kernel/process.c          |  2 +-
 6 files changed, 14 insertions(+), 16 deletions(-)
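
The net enumeration flow the commit message describes, condensed from the
hunks below into one place (an illustration only: unrelated feature bits,
the family-dependent LS_CFG bit selection, and surrounding code are
omitted):

	/* Intel: CPUID enumerates X86_FEATURE_SPEC_CTRL_SSBD (18*32+31);
	 * common code maps it to the synthetic, vendor-neutral bit. */
	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
		set_cpu_cap(c, X86_FEATURE_SSBD);

	/* AMD: no CPUID bit on these families; if MSR_AMD64_LS_CFG is
	 * readable, force the LS_CFG-specific bit plus the synthetic bit. */
	if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
		setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
		setup_force_cpu_cap(X86_FEATURE_SSBD);
		x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
	}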
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 7d34eb0d3715..61c34c1a525c 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -207,15 +207,14 @@
 #define X86_FEATURE_INTEL_PPIN		( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_CDP_L2		( 7*32+15) /* Code and Data Prioritization L2 */
 #define X86_FEATURE_MSR_SPEC_CTRL	( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
-
+#define X86_FEATURE_SSBD		( 7*32+17) /* Speculative Store Bypass Disable */
 #define X86_FEATURE_MBA			( 7*32+18) /* Memory Bandwidth Allocation */
 #define X86_FEATURE_RSB_CTXSW		( 7*32+19) /* "" Fill RSB on context switches */
 #define X86_FEATURE_SEV			( 7*32+20) /* AMD Secure Encrypted Virtualization */
-
 #define X86_FEATURE_USE_IBPB		( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
 #define X86_FEATURE_USE_IBRS_FW		( 7*32+22) /* "" Use IBRS during runtime firmware calls */
 #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE	( 7*32+23) /* "" Disable Speculative Store Bypass. */
-#define X86_FEATURE_AMD_SSBD		( 7*32+24) /* "" AMD SSBD implementation */
+#define X86_FEATURE_LS_CFG_SSBD		( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */
 #define X86_FEATURE_IBRS		( 7*32+25) /* Indirect Branch Restricted Speculation */
 #define X86_FEATURE_IBPB		( 7*32+26) /* Indirect Branch Prediction Barrier */
 #define X86_FEATURE_STIBP		( 7*32+27) /* Single Thread Indirect Branch Predictors */
@@ -339,7 +338,7 @@
 #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ARCH_CAPABILITIES	(18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
-#define X86_FEATURE_SSBD		(18*32+31) /* Speculative Store Bypass Disable */
+#define X86_FEATURE_SPEC_CTRL_SSBD	(18*32+31) /* "" Speculative Store Bypass Disable */
 
 /*
  * BUG word(s)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 7bde990b0385..2d2d8985654b 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -570,8 +570,8 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 	 * avoid RMW. If that faults, do not enable SSBD.
 	 */
 	if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+		setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
 		setup_force_cpu_cap(X86_FEATURE_SSBD);
-		setup_force_cpu_cap(X86_FEATURE_AMD_SSBD);
 		x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
 	}
 }
@@ -919,11 +919,6 @@ static void init_amd(struct cpuinfo_x86 *c)
 	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
 	if (!cpu_has(c, X86_FEATURE_XENPV))
 		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
-
-	if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) {
-		set_cpu_cap(c, X86_FEATURE_SSBD);
-		set_cpu_cap(c, X86_FEATURE_AMD_SSBD);
-	}
 }
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 316cb24092a3..7ebd6373fc31 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -159,8 +159,8 @@ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
 	if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
 		return;
 
-	/* Intel controls SSB in MSR_SPEC_CTRL */
-	if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
+	/* SSBD controlled in MSR_SPEC_CTRL */
+	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
 		host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 
 	if (host != guest_spec_ctrl)
@@ -176,8 +176,8 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
 	if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
 		return;
 
-	/* Intel controls SSB in MSR_SPEC_CTRL */
-	if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
+	/* SSBD controlled in MSR_SPEC_CTRL */
+	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
 		host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 
 	if (host != guest_spec_ctrl)
@@ -189,7 +189,7 @@ static void x86_amd_ssb_disable(void)
 {
 	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
 
-	if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
+	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
 		wrmsrl(MSR_AMD64_LS_CFG, msrval);
 }
 
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index af54dbe2df9a..68282514c025 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -767,6 +767,9 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
 	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
 		set_cpu_cap(c, X86_FEATURE_STIBP);
 
+	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
+		set_cpu_cap(c, X86_FEATURE_SSBD);
+
 	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
 		set_cpu_cap(c, X86_FEATURE_IBRS);
 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index dd37244c587a..577e7f7ae273 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -191,6 +191,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 		setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
 		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
 		setup_clear_cpu_cap(X86_FEATURE_SSBD);
+		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
 	}
 
 	/*
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index b77a091bf3b8..d71ef7eaa7ef 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -283,7 +283,7 @@ static __always_inline void __speculative_store_bypass_update(unsigned long tifn
 {
 	u64 msr;
 
-	if (static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+	if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
 		msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
 		wrmsrl(MSR_AMD64_LS_CFG, msr);
 	} else {
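
For context, the ssbd_tif_to_*() helpers used in the hunks above translate
the per-task TIF_SSBD flag into the vendor-specific MSR bit. Sketched
roughly as they lived in arch/x86/include/asm/spec-ctrl.h around this
series (reconstructed for illustration, not quoted from this patch):

	static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
	{
		/* Shift TIF_SSBD down to the SSBD bit position in
		 * MSR_SPEC_CTRL (the X86_FEATURE_SPEC_CTRL_SSBD path). */
		return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
	}

	static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
	{
		/* The LS_CFG bit position is family-dependent, so a mask
		 * computed at boot is used instead of a fixed shift. */
		return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
	}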