aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2018-04-29 09:21:42 -0400
committerThomas Gleixner <tglx@linutronix.de>2018-05-03 07:55:50 -0400
commit885f82bfbc6fefb6664ea27965c3ab9ac4194b8c (patch)
tree513fe3316a1be1de813ee01a90be689f3e542365
parentb617cfc858161140d69cc0b5cc211996b557a1c7 (diff)
x86/process: Allow runtime control of Speculative Store Bypass
The Speculative Store Bypass vulnerability can be mitigated with the Reduced Data Speculation (RDS) feature. To allow finer grained control of this eventually expensive mitigation a per task mitigation control is required. Add a new TIF_RDS flag and put it into the group of TIF flags which are evaluated for mismatch in switch_to(). If these bits differ in the previous and the next task, then the slow path function __switch_to_xtra() is invoked. Implement the TIF_RDS dependent mitigation control in the slow path. If the prctl for controlling Speculative Store Bypass is disabled or no task uses the prctl then there is no overhead in the switch_to() fast path. Update the KVM related speculation control functions to take TIF_RDS into account as well. Based on a patch from Tim Chen. Completely rewritten. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Reviewed-by: Ingo Molnar <mingo@kernel.org> Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-rw-r--r--arch/x86/include/asm/msr-index.h3
-rw-r--r--arch/x86/include/asm/spec-ctrl.h17
-rw-r--r--arch/x86/include/asm/thread_info.h4
-rw-r--r--arch/x86/kernel/cpu/bugs.c26
-rw-r--r--arch/x86/kernel/process.c22
5 files changed, 65 insertions, 7 deletions
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 21e1a6df9907..810f50bb338d 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -42,7 +42,8 @@
42#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */ 42#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
43#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */ 43#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
44#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */ 44#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
45#define SPEC_CTRL_RDS (1 << 2) /* Reduced Data Speculation */ 45#define SPEC_CTRL_RDS_SHIFT 2 /* Reduced Data Speculation bit */
46#define SPEC_CTRL_RDS (1 << SPEC_CTRL_RDS_SHIFT) /* Reduced Data Speculation */
46 47
47#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ 48#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
48#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */ 49#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
index 3ad64420a06e..45ef00ad5105 100644
--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -2,6 +2,7 @@
2#ifndef _ASM_X86_SPECCTRL_H_ 2#ifndef _ASM_X86_SPECCTRL_H_
3#define _ASM_X86_SPECCTRL_H_ 3#define _ASM_X86_SPECCTRL_H_
4 4
5#include <linux/thread_info.h>
5#include <asm/nospec-branch.h> 6#include <asm/nospec-branch.h>
6 7
7/* 8/*
@@ -18,4 +19,20 @@ extern void x86_spec_ctrl_restore_host(u64);
18extern u64 x86_amd_ls_cfg_base; 19extern u64 x86_amd_ls_cfg_base;
19extern u64 x86_amd_ls_cfg_rds_mask; 20extern u64 x86_amd_ls_cfg_rds_mask;
20 21
22/* The Intel SPEC CTRL MSR base value cache */
23extern u64 x86_spec_ctrl_base;
24
25static inline u64 rds_tif_to_spec_ctrl(u64 tifn)
26{
27 BUILD_BUG_ON(TIF_RDS < SPEC_CTRL_RDS_SHIFT);
28 return (tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT);
29}
30
31static inline u64 rds_tif_to_amd_ls_cfg(u64 tifn)
32{
33 return (tifn & _TIF_RDS) ? x86_amd_ls_cfg_rds_mask : 0ULL;
34}
35
36extern void speculative_store_bypass_update(void);
37
21#endif 38#endif
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index a5d9521bb2cb..e5c26cc59619 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -79,6 +79,7 @@ struct thread_info {
79#define TIF_SIGPENDING 2 /* signal pending */ 79#define TIF_SIGPENDING 2 /* signal pending */
80#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 80#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
81#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ 81#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
82#define TIF_RDS 5 /* Reduced data speculation */
82#define TIF_SYSCALL_EMU 6 /* syscall emulation active */ 83#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
83#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ 84#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
84#define TIF_SECCOMP 8 /* secure computing */ 85#define TIF_SECCOMP 8 /* secure computing */
@@ -105,6 +106,7 @@ struct thread_info {
105#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 106#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
106#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 107#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
107#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) 108#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
109#define _TIF_RDS (1 << TIF_RDS)
108#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) 110#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
109#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) 111#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
110#define _TIF_SECCOMP (1 << TIF_SECCOMP) 112#define _TIF_SECCOMP (1 << TIF_SECCOMP)
@@ -144,7 +146,7 @@ struct thread_info {
144 146
145/* flags to check in __switch_to() */ 147/* flags to check in __switch_to() */
146#define _TIF_WORK_CTXSW \ 148#define _TIF_WORK_CTXSW \
147 (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP) 149 (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_RDS)
148 150
149#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) 151#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
150#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) 152#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index ec171873167a..2bc109d0f8ae 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -33,7 +33,7 @@ static void __init ssb_select_mitigation(void);
33 * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any 33 * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
34 * writes to SPEC_CTRL contain whatever reserved bits have been set. 34 * writes to SPEC_CTRL contain whatever reserved bits have been set.
35 */ 35 */
36static u64 __ro_after_init x86_spec_ctrl_base; 36u64 __ro_after_init x86_spec_ctrl_base;
37 37
38/* 38/*
39 * The vendor and possibly platform specific bits which can be modified in 39 * The vendor and possibly platform specific bits which can be modified in
@@ -140,25 +140,41 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
140 140
141u64 x86_spec_ctrl_get_default(void) 141u64 x86_spec_ctrl_get_default(void)
142{ 142{
143 return x86_spec_ctrl_base; 143 u64 msrval = x86_spec_ctrl_base;
144
145 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
146 msrval |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
147 return msrval;
144} 148}
145EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default); 149EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
146 150
147void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl) 151void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
148{ 152{
153 u64 host = x86_spec_ctrl_base;
154
149 if (!boot_cpu_has(X86_FEATURE_IBRS)) 155 if (!boot_cpu_has(X86_FEATURE_IBRS))
150 return; 156 return;
151 if (x86_spec_ctrl_base != guest_spec_ctrl) 157
158 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
159 host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
160
161 if (host != guest_spec_ctrl)
152 wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl); 162 wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
153} 163}
154EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest); 164EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
155 165
156void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl) 166void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
157{ 167{
168 u64 host = x86_spec_ctrl_base;
169
158 if (!boot_cpu_has(X86_FEATURE_IBRS)) 170 if (!boot_cpu_has(X86_FEATURE_IBRS))
159 return; 171 return;
160 if (x86_spec_ctrl_base != guest_spec_ctrl) 172
161 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); 173 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
174 host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
175
176 if (host != guest_spec_ctrl)
177 wrmsrl(MSR_IA32_SPEC_CTRL, host);
162} 178}
163EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host); 179EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
164 180
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 03408b942adb..397342725046 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -38,6 +38,7 @@
38#include <asm/switch_to.h> 38#include <asm/switch_to.h>
39#include <asm/desc.h> 39#include <asm/desc.h>
40#include <asm/prctl.h> 40#include <asm/prctl.h>
41#include <asm/spec-ctrl.h>
41 42
42/* 43/*
43 * per-CPU TSS segments. Threads are completely 'soft' on Linux, 44 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -278,6 +279,24 @@ static inline void switch_to_bitmap(struct tss_struct *tss,
278 } 279 }
279} 280}
280 281
282static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
283{
284 u64 msr;
285
286 if (static_cpu_has(X86_FEATURE_AMD_RDS)) {
287 msr = x86_amd_ls_cfg_base | rds_tif_to_amd_ls_cfg(tifn);
288 wrmsrl(MSR_AMD64_LS_CFG, msr);
289 } else {
290 msr = x86_spec_ctrl_base | rds_tif_to_spec_ctrl(tifn);
291 wrmsrl(MSR_IA32_SPEC_CTRL, msr);
292 }
293}
294
295void speculative_store_bypass_update(void)
296{
297 __speculative_store_bypass_update(current_thread_info()->flags);
298}
299
281void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, 300void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
282 struct tss_struct *tss) 301 struct tss_struct *tss)
283{ 302{
@@ -309,6 +328,9 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
309 328
310 if ((tifp ^ tifn) & _TIF_NOCPUID) 329 if ((tifp ^ tifn) & _TIF_NOCPUID)
311 set_cpuid_faulting(!!(tifn & _TIF_NOCPUID)); 330 set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
331
332 if ((tifp ^ tifn) & _TIF_RDS)
333 __speculative_store_bypass_update(tifn);
312} 334}
313 335
314/* 336/*