diff options

-rw-r--r--  arch/x86/include/asm/nospec-branch.h  16
-rw-r--r--  arch/x86/include/asm/spec-ctrl.h       3
-rw-r--r--  arch/x86/kernel/cpu/bugs.c            11
3 files changed, 6 insertions, 24 deletions
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index bc258e644e5e..8d9deec00de9 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -217,16 +217,7 @@ enum spectre_v2_mitigation {
 	SPECTRE_V2_IBRS,
 };
 
-/*
- * The Intel specification for the SPEC_CTRL MSR requires that we
- * preserve any already set reserved bits at boot time (e.g. for
- * future additions that this kernel is not currently aware of).
- * We then set any additional mitigation bits that we want
- * ourselves and always use this as the base for SPEC_CTRL.
- * We also use this when handling guest entry/exit as below.
- */
 extern void x86_spec_ctrl_set(u64);
-extern u64 x86_spec_ctrl_get_default(void);
 
 /* The Speculative Store Bypass disable variants */
 enum ssb_mitigation {
@@ -278,6 +269,9 @@ static inline void indirect_branch_prediction_barrier(void)
 	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
 }
 
+/* The Intel SPEC CTRL MSR base value cache */
+extern u64 x86_spec_ctrl_base;
+
 /*
  * With retpoline, we must use IBRS to restrict branch prediction
  * before calling into firmware.
@@ -286,7 +280,7 @@ static inline void indirect_branch_prediction_barrier(void)
  */
 #define firmware_restrict_branch_speculation_start()		\
 do {								\
-	u64 val = x86_spec_ctrl_get_default() | SPEC_CTRL_IBRS;	\
+	u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;		\
 								\
 	preempt_disable();					\
 	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,		\
@@ -295,7 +289,7 @@ do {								\
 
 #define firmware_restrict_branch_speculation_end()		\
 do {								\
-	u64 val = x86_spec_ctrl_get_default();			\
+	u64 val = x86_spec_ctrl_base;				\
 								\
 	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,		\
 			      X86_FEATURE_USE_IBRS_FW);		\
diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
index 9cecbe5e57ee..763d49710329 100644
--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -47,9 +47,6 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
 extern u64 x86_amd_ls_cfg_base;
 extern u64 x86_amd_ls_cfg_ssbd_mask;
 
-/* The Intel SPEC CTRL MSR base value cache */
-extern u64 x86_spec_ctrl_base;
-
 static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
 {
 	BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index feb7d597c265..00f51deba493 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -36,6 +36,7 @@ static void __init ssb_select_mitigation(void);
  * writes to SPEC_CTRL contain whatever reserved bits have been set.
  */
 u64 __ro_after_init x86_spec_ctrl_base;
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
 
 /*
  * The vendor and possibly platform specific bits which can be modified in
@@ -141,16 +142,6 @@ void x86_spec_ctrl_set(u64 val)
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
 
-u64 x86_spec_ctrl_get_default(void)
-{
-	u64 msrval = x86_spec_ctrl_base;
-
-	if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
-		msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
-	return msrval;
-}
-EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
-
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {