author		Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2018-05-09 15:41:38 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2018-05-09 15:41:38 -0400
commit		9f65fb29374ee37856dbad847b4e121aab72b510 (patch)
tree		be99b8bc2090f8c5b92f0c4be65eea4e6d6f1510
parent		f21b53b20c754021935ea43364dbf53778eeba32 (diff)
x86/bugs: Rename _RDS to _SSBD
Intel collateral will reference the SSB mitigation bit in IA32_SPEC_CTL[2]
as SSBD (Speculative Store Bypass Disable).

Hence changing it.

It is unclear yet what the MSR_IA32_ARCH_CAPABILITIES (0x10a) Bit(4) name
is going to be. Following the rename it would be SSBD_NO but that rolls out
to Speculative Store Bypass Disable No.

Also fixed the missing space in X86_FEATURE_AMD_SSBD.

[ tglx: Fixup x86_amd_rds_enable() and rds_tif_to_amd_ls_cfg() as well ]

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
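Illustrative sketch, not part of this patch: the renamed control is bit 2 of MSR_IA32_SPEC_CTRL, so with the definitions from the msr-index.h hunk below the bit is composed and applied like this:

	#define SPEC_CTRL_SSBD_SHIFT	2
	#define SPEC_CTRL_SSBD		(1 << SPEC_CTRL_SSBD_SHIFT)	/* 0x4, i.e. IA32_SPEC_CTRL[2] */

	/* Engaging the mitigation on Intel then ORs the bit into the cached
	 * base value, as __ssb_select_mitigation() does in the bugs.c hunk below. */
	x86_spec_ctrl_base |= SPEC_CTRL_SSBD;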
-rw-r--r--	arch/x86/include/asm/cpufeatures.h	4
-rw-r--r--	arch/x86/include/asm/msr-index.h	10
-rw-r--r--	arch/x86/include/asm/spec-ctrl.h	12
-rw-r--r--	arch/x86/include/asm/thread_info.h	6
-rw-r--r--	arch/x86/kernel/cpu/amd.c	14
-rw-r--r--	arch/x86/kernel/cpu/bugs.c	36
-rw-r--r--	arch/x86/kernel/cpu/common.c	2
-rw-r--r--	arch/x86/kernel/cpu/intel.c	2
-rw-r--r--	arch/x86/kernel/process.c	8
-rw-r--r--	arch/x86/kvm/cpuid.c	2
-rw-r--r--	arch/x86/kvm/vmx.c	6
11 files changed, 51 insertions, 51 deletions
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index b2464c1787df..4e1c747acbf8 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -215,7 +215,7 @@
 #define X86_FEATURE_USE_IBPB		( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
 #define X86_FEATURE_USE_IBRS_FW		( 7*32+22) /* "" Use IBRS during runtime firmware calls */
 #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE	( 7*32+23) /* "" Disable Speculative Store Bypass. */
-#define X86_FEATURE_AMD_RDS		(7*32+24)  /* "" AMD RDS implementation */
+#define X86_FEATURE_AMD_SSBD		( 7*32+24) /* "" AMD SSBD implementation */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW		( 8*32+ 0) /* Intel TPR Shadow */
@@ -336,7 +336,7 @@
 #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ARCH_CAPABILITIES	(18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
-#define X86_FEATURE_RDS			(18*32+31) /* Reduced Data Speculation */
+#define X86_FEATURE_SSBD		(18*32+31) /* Speculative Store Bypass Disable */
 
 /*
  * BUG word(s)
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 810f50bb338d..0da3ca260b06 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -42,8 +42,8 @@
 #define MSR_IA32_SPEC_CTRL		0x00000048 /* Speculation Control */
 #define SPEC_CTRL_IBRS			(1 << 0)   /* Indirect Branch Restricted Speculation */
 #define SPEC_CTRL_STIBP			(1 << 1)   /* Single Thread Indirect Branch Predictors */
-#define SPEC_CTRL_RDS_SHIFT		2	   /* Reduced Data Speculation bit */
-#define SPEC_CTRL_RDS			(1 << SPEC_CTRL_RDS_SHIFT)  /* Reduced Data Speculation */
+#define SPEC_CTRL_SSBD_SHIFT		2	   /* Speculative Store Bypass Disable bit */
+#define SPEC_CTRL_SSBD			(1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
 
 #define MSR_IA32_PRED_CMD		0x00000049 /* Prediction Command */
 #define PRED_CMD_IBPB			(1 << 0)   /* Indirect Branch Prediction Barrier */
@@ -70,10 +70,10 @@
 #define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
 #define ARCH_CAP_RDCL_NO		(1 << 0)   /* Not susceptible to Meltdown */
 #define ARCH_CAP_IBRS_ALL		(1 << 1)   /* Enhanced IBRS support */
-#define ARCH_CAP_RDS_NO			(1 << 4)   /*
-						    * Not susceptible to Speculative Store Bypass
-						    * attack, so no Reduced Data Speculation control
-						    * required.
-						    */
+#define ARCH_CAP_SSBD_NO		(1 << 4)   /*
+						    * Not susceptible to Speculative Store Bypass
+						    * attack, so no Speculative Store Bypass
+						    * control required.
+						    */
 
 #define MSR_IA32_BBL_CR_CTL		0x00000119
diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
index 45ef00ad5105..dc21209790bf 100644
--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -17,20 +17,20 @@ extern void x86_spec_ctrl_restore_host(u64);
 
 /* AMD specific Speculative Store Bypass MSR data */
 extern u64 x86_amd_ls_cfg_base;
-extern u64 x86_amd_ls_cfg_rds_mask;
+extern u64 x86_amd_ls_cfg_ssbd_mask;
 
 /* The Intel SPEC CTRL MSR base value cache */
 extern u64 x86_spec_ctrl_base;
 
-static inline u64 rds_tif_to_spec_ctrl(u64 tifn)
+static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
 {
-	BUILD_BUG_ON(TIF_RDS < SPEC_CTRL_RDS_SHIFT);
-	return (tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT);
+	BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
+	return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
 }
 
-static inline u64 rds_tif_to_amd_ls_cfg(u64 tifn)
+static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
 {
-	return (tifn & _TIF_RDS) ? x86_amd_ls_cfg_rds_mask : 0ULL;
+	return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
 }
 
 extern void speculative_store_bypass_update(void);
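Worked example (illustration only, not part of the patch): with TIF_SSBD == 5 from the thread_info.h hunk below and SPEC_CTRL_SSBD_SHIFT == 2 from msr-index.h above, ssbd_tif_to_spec_ctrl() simply shifts the thread-flag bit down into the MSR bit position:

	u64 tifn = 1ULL << 5;				/* _TIF_SSBD set for the task */
	u64 ctrl = (tifn & (1ULL << 5)) >> (5 - 2);
	/* ctrl == 0x4 == SPEC_CTRL_SSBD; the BUILD_BUG_ON above guarantees the
	 * shift count (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT) can never go negative. */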
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index e5c26cc59619..2ff2a30a264f 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -79,7 +79,7 @@ struct thread_info {
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_SINGLESTEP		4	/* reenable singlestep on user return*/
-#define TIF_RDS			5	/* Reduced data speculation */
+#define TIF_SSBD		5	/* Reduced data speculation */
 #define TIF_SYSCALL_EMU		6	/* syscall emulation active */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SECCOMP		8	/* secure computing */
@@ -106,7 +106,7 @@ struct thread_info {
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
-#define _TIF_RDS		(1 << TIF_RDS)
+#define _TIF_SSBD		(1 << TIF_SSBD)
 #define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
@@ -146,7 +146,7 @@ struct thread_info {
 
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW							\
-	(_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_RDS)
+	(_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
 
 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 18efc33a8d2e..7bde990b0385 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -567,12 +567,12 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 		}
 		/*
 		 * Try to cache the base value so further operations can
-		 * avoid RMW. If that faults, do not enable RDS.
+		 * avoid RMW. If that faults, do not enable SSBD.
 		 */
 		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
-			setup_force_cpu_cap(X86_FEATURE_RDS);
-			setup_force_cpu_cap(X86_FEATURE_AMD_RDS);
-			x86_amd_ls_cfg_rds_mask = 1ULL << bit;
+			setup_force_cpu_cap(X86_FEATURE_SSBD);
+			setup_force_cpu_cap(X86_FEATURE_AMD_SSBD);
+			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
 		}
 	}
 }
@@ -920,9 +920,9 @@ static void init_amd(struct cpuinfo_x86 *c)
 	if (!cpu_has(c, X86_FEATURE_XENPV))
 		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
 
-	if (boot_cpu_has(X86_FEATURE_AMD_RDS)) {
-		set_cpu_cap(c, X86_FEATURE_RDS);
-		set_cpu_cap(c, X86_FEATURE_AMD_RDS);
+	if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) {
+		set_cpu_cap(c, X86_FEATURE_SSBD);
+		set_cpu_cap(c, X86_FEATURE_AMD_SSBD);
 	}
 }
 
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 563d8e54c863..09b116b7f3bf 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -45,10 +45,10 @@ static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;
 
 /*
  * AMD specific MSR info for Speculative Store Bypass control.
- * x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu().
+ * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
  */
 u64 __ro_after_init x86_amd_ls_cfg_base;
-u64 __ro_after_init x86_amd_ls_cfg_rds_mask;
+u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
 
 void __init check_bugs(void)
 {
@@ -146,7 +146,7 @@ u64 x86_spec_ctrl_get_default(void)
 	u64 msrval = x86_spec_ctrl_base;
 
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-		msrval |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+		msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 	return msrval;
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
@@ -159,7 +159,7 @@ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
 		return;
 
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-		host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+		host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 
 	if (host != guest_spec_ctrl)
 		wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
@@ -174,18 +174,18 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
 		return;
 
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-		host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+		host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 
 	if (host != guest_spec_ctrl)
 		wrmsrl(MSR_IA32_SPEC_CTRL, host);
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
 
-static void x86_amd_rds_enable(void)
+static void x86_amd_ssb_disable(void)
 {
-	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask;
+	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
 
-	if (boot_cpu_has(X86_FEATURE_AMD_RDS))
+	if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
 		wrmsrl(MSR_AMD64_LS_CFG, msrval);
 }
 
@@ -473,7 +473,7 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
 	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
 	enum ssb_mitigation_cmd cmd;
 
-	if (!boot_cpu_has(X86_FEATURE_RDS))
+	if (!boot_cpu_has(X86_FEATURE_SSBD))
 		return mode;
 
 	cmd = ssb_parse_cmdline();
@@ -507,7 +507,7 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
 	/*
 	 * We have three CPU feature flags that are in play here:
 	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
-	 *  - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass
+	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
 	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
 	 */
 	if (mode == SPEC_STORE_BYPASS_DISABLE) {
@@ -518,12 +518,12 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
 		 */
 		switch (boot_cpu_data.x86_vendor) {
 		case X86_VENDOR_INTEL:
-			x86_spec_ctrl_base |= SPEC_CTRL_RDS;
-			x86_spec_ctrl_mask &= ~SPEC_CTRL_RDS;
-			x86_spec_ctrl_set(SPEC_CTRL_RDS);
+			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
+			x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD;
+			x86_spec_ctrl_set(SPEC_CTRL_SSBD);
 			break;
 		case X86_VENDOR_AMD:
-			x86_amd_rds_enable();
+			x86_amd_ssb_disable();
 			break;
 		}
 	}
@@ -556,16 +556,16 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
 		if (task_spec_ssb_force_disable(task))
 			return -EPERM;
 		task_clear_spec_ssb_disable(task);
-		update = test_and_clear_tsk_thread_flag(task, TIF_RDS);
+		update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
 		break;
 	case PR_SPEC_DISABLE:
 		task_set_spec_ssb_disable(task);
-		update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
+		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
 		break;
 	case PR_SPEC_FORCE_DISABLE:
 		task_set_spec_ssb_disable(task);
 		task_set_spec_ssb_force_disable(task);
-		update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
+		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
 		break;
 	default:
 		return -ERANGE;
@@ -635,7 +635,7 @@ void x86_spec_ctrl_setup_ap(void)
 		x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
 
 	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
-		x86_amd_rds_enable();
+		x86_amd_ssb_disable();
 }
 
 #ifdef CONFIG_SYSFS
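Usage sketch (illustration only, not part of the patch): the per-task path that ssb_prctl_set() handles above is reached from userspace via prctl(2), using the speculation-control constants pulled in through <sys/prctl.h>:

	#include <sys/prctl.h>

	/* Ask the kernel to disable Speculative Store Bypass for this task;
	 * this ends up setting TIF_SSBD through ssb_prctl_set(). */
	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);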
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index e0517bcee446..9fbb388fadac 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -959,7 +959,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
 
 	if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
-	   !(ia32_cap & ARCH_CAP_RDS_NO))
+	   !(ia32_cap & ARCH_CAP_SSBD_NO))
 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
 
 	if (x86_match_cpu(cpu_no_speculation))
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index ef3f9c01c274..0eab6c89c8d9 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -189,7 +189,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 		setup_clear_cpu_cap(X86_FEATURE_STIBP);
 		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
 		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
-		setup_clear_cpu_cap(X86_FEATURE_RDS);
+		setup_clear_cpu_cap(X86_FEATURE_SSBD);
 	}
 
 	/*
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 397342725046..b77a091bf3b8 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -283,11 +283,11 @@ static __always_inline void __speculative_store_bypass_update(unsigned long tifn
 {
 	u64 msr;
 
-	if (static_cpu_has(X86_FEATURE_AMD_RDS)) {
-		msr = x86_amd_ls_cfg_base | rds_tif_to_amd_ls_cfg(tifn);
+	if (static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+		msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
 		wrmsrl(MSR_AMD64_LS_CFG, msr);
 	} else {
-		msr = x86_spec_ctrl_base | rds_tif_to_spec_ctrl(tifn);
+		msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
 		wrmsrl(MSR_IA32_SPEC_CTRL, msr);
 	}
 }
@@ -329,7 +329,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 	if ((tifp ^ tifn) & _TIF_NOCPUID)
 		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
 
-	if ((tifp ^ tifn) & _TIF_RDS)
+	if ((tifp ^ tifn) & _TIF_SSBD)
 		__speculative_store_bypass_update(tifn);
 }
 
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 376ac9a2a2b9..865c9a769864 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -407,7 +407,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 
 	/* cpuid 7.0.edx*/
 	const u32 kvm_cpuid_7_0_edx_x86_features =
-		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(RDS) |
+		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(SSBD) |
 		F(ARCH_CAPABILITIES);
 
 	/* all calls to cpuid_count() should be made on the same cpu */
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 16a111e44691..9b8d80bf3889 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3525,7 +3525,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (!msr_info->host_initiated &&
 		    !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
 		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
-		    !guest_cpuid_has(vcpu, X86_FEATURE_RDS))
+		    !guest_cpuid_has(vcpu, X86_FEATURE_SSBD))
 			return 1;
 
 		msr_info->data = to_vmx(vcpu)->spec_ctrl;
@@ -3645,11 +3645,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (!msr_info->host_initiated &&
 		    !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
 		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
-		    !guest_cpuid_has(vcpu, X86_FEATURE_RDS))
+		    !guest_cpuid_has(vcpu, X86_FEATURE_SSBD))
 			return 1;
 
 		/* The STIBP bit doesn't fault even if it's not advertised */
-		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_RDS))
+		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
 			return 1;
 
 		vmx->spec_ctrl = data;