Diffstat (limited to 'arch/x86/kernel/cpu/bugs.c')
-rw-r--r--  arch/x86/kernel/cpu/bugs.c  221
1 file changed, 221 insertions, 0 deletions
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index ba0b2424c9b0..390b3dc3d438 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -10,6 +10,10 @@
  */
 #include <linux/init.h>
 #include <linux/utsname.h>
+#include <linux/cpu.h>
+
+#include <asm/nospec-branch.h>
+#include <asm/cmdline.h>
 #include <asm/bugs.h>
 #include <asm/processor.h>
 #include <asm/processor-flags.h>
@@ -19,6 +23,9 @@
 #include <asm/alternative.h>
 #include <asm/pgtable.h>
 #include <asm/set_memory.h>
+#include <asm/intel-family.h>
+
+static void __init spectre_v2_select_mitigation(void);
 
 void __init check_bugs(void)
 {
@@ -29,6 +36,9 @@ void __init check_bugs(void)
 		print_cpu_info(&boot_cpu_data);
 	}
 
+	/* Select the proper spectre mitigation before patching alternatives */
+	spectre_v2_select_mitigation();
+
 #ifdef CONFIG_X86_32
 	/*
 	 * Check whether we are able to run this kernel safely on SMP.
@@ -60,3 +70,214 @@ void __init check_bugs(void)
 	set_memory_4k((unsigned long)__va(0), 1);
 #endif
 }
+
+/* The kernel command line selection */
+enum spectre_v2_mitigation_cmd {
+	SPECTRE_V2_CMD_NONE,
+	SPECTRE_V2_CMD_AUTO,
+	SPECTRE_V2_CMD_FORCE,
+	SPECTRE_V2_CMD_RETPOLINE,
+	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
+	SPECTRE_V2_CMD_RETPOLINE_AMD,
+};
+
+static const char *spectre_v2_strings[] = {
+	[SPECTRE_V2_NONE] = "Vulnerable",
+	[SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline",
+	[SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline",
+	[SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
+	[SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
+};
+
+#undef pr_fmt
+#define pr_fmt(fmt) "Spectre V2 mitigation: " fmt
+
+static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
+
+static void __init spec2_print_if_insecure(const char *reason)
+{
+	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+		pr_info("%s\n", reason);
+}
+
+static void __init spec2_print_if_secure(const char *reason)
+{
+	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+		pr_info("%s\n", reason);
+}
+
+static inline bool retp_compiler(void)
+{
+	return __is_defined(RETPOLINE);
+}
+
+static inline bool match_option(const char *arg, int arglen, const char *opt)
+{
+	int len = strlen(opt);
+
+	return len == arglen && !strncmp(arg, opt, len);
+}
+
+static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
+{
+	char arg[20];
+	int ret;
+
+	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
+				  sizeof(arg));
+	if (ret > 0) {
+		if (match_option(arg, ret, "off")) {
+			goto disable;
+		} else if (match_option(arg, ret, "on")) {
+			spec2_print_if_secure("force enabled on command line.");
+			return SPECTRE_V2_CMD_FORCE;
+		} else if (match_option(arg, ret, "retpoline")) {
+			spec2_print_if_insecure("retpoline selected on command line.");
+			return SPECTRE_V2_CMD_RETPOLINE;
+		} else if (match_option(arg, ret, "retpoline,amd")) {
+			if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
+				pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
+				return SPECTRE_V2_CMD_AUTO;
+			}
+			spec2_print_if_insecure("AMD retpoline selected on command line.");
+			return SPECTRE_V2_CMD_RETPOLINE_AMD;
+		} else if (match_option(arg, ret, "retpoline,generic")) {
+			spec2_print_if_insecure("generic retpoline selected on command line.");
+			return SPECTRE_V2_CMD_RETPOLINE_GENERIC;
+		} else if (match_option(arg, ret, "auto")) {
+			return SPECTRE_V2_CMD_AUTO;
+		}
+	}
+
+	if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
+		return SPECTRE_V2_CMD_AUTO;
+disable:
+	spec2_print_if_insecure("disabled on command line.");
+	return SPECTRE_V2_CMD_NONE;
+}
+
+/* Check for Skylake-like CPUs (for RSB handling) */
+static bool __init is_skylake_era(void)
+{
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+	    boot_cpu_data.x86 == 6) {
+		switch (boot_cpu_data.x86_model) {
+		case INTEL_FAM6_SKYLAKE_MOBILE:
+		case INTEL_FAM6_SKYLAKE_DESKTOP:
+		case INTEL_FAM6_SKYLAKE_X:
+		case INTEL_FAM6_KABYLAKE_MOBILE:
+		case INTEL_FAM6_KABYLAKE_DESKTOP:
+			return true;
+		}
+	}
+	return false;
+}
+
+static void __init spectre_v2_select_mitigation(void)
+{
+	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
+	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
+
+	/*
+	 * If the CPU is not affected and the command line mode is NONE or AUTO
+	 * then nothing to do.
+	 */
+	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
+	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
+		return;
+
+	switch (cmd) {
+	case SPECTRE_V2_CMD_NONE:
+		return;
+
+	case SPECTRE_V2_CMD_FORCE:
+		/* FALLTHRU */
+	case SPECTRE_V2_CMD_AUTO:
+		goto retpoline_auto;
+
+	case SPECTRE_V2_CMD_RETPOLINE_AMD:
+		if (IS_ENABLED(CONFIG_RETPOLINE))
+			goto retpoline_amd;
+		break;
+	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
+		if (IS_ENABLED(CONFIG_RETPOLINE))
+			goto retpoline_generic;
+		break;
+	case SPECTRE_V2_CMD_RETPOLINE:
+		if (IS_ENABLED(CONFIG_RETPOLINE))
+			goto retpoline_auto;
+		break;
+	}
+	pr_err("kernel not compiled with retpoline; no mitigation available!");
+	return;
+
+retpoline_auto:
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+	retpoline_amd:
+		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
+			pr_err("LFENCE not serializing. Switching to generic retpoline\n");
+			goto retpoline_generic;
+		}
+		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
+					 SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
+		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
+		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+	} else {
+	retpoline_generic:
+		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
+					 SPECTRE_V2_RETPOLINE_MINIMAL;
+		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+	}
+
+	spectre_v2_enabled = mode;
+	pr_info("%s\n", spectre_v2_strings[mode]);
+
+	/*
+	 * If neither SMEP nor KPTI is available, there is a risk of
+	 * hitting userspace addresses in the RSB after a context switch
+	 * from a shallow call stack to a deeper one. To prevent this fill
+	 * the entire RSB, even when using IBRS.
+	 *
+	 * Skylake era CPUs have a separate issue with *underflow* of the
+	 * RSB, when they will predict 'ret' targets from the generic BTB.
+	 * The proper mitigation for this is IBRS. If IBRS is not supported
+	 * or deactivated in favour of retpolines the RSB fill on context
+	 * switch is required.
+	 */
+	if ((!boot_cpu_has(X86_FEATURE_PTI) &&
+	     !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
+		setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+		pr_info("Filling RSB on context switch\n");
+	}
+}
+
+#undef pr_fmt
+
+#ifdef CONFIG_SYSFS
+ssize_t cpu_show_meltdown(struct device *dev,
+			  struct device_attribute *attr, char *buf)
+{
+	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
+		return sprintf(buf, "Not affected\n");
+	if (boot_cpu_has(X86_FEATURE_PTI))
+		return sprintf(buf, "Mitigation: PTI\n");
+	return sprintf(buf, "Vulnerable\n");
+}
+
+ssize_t cpu_show_spectre_v1(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
+		return sprintf(buf, "Not affected\n");
+	return sprintf(buf, "Vulnerable\n");
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+		return sprintf(buf, "Not affected\n");
+
+	return sprintf(buf, "%s\n", spectre_v2_strings[spectre_v2_enabled]);
+}
+#endif
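
Editor's note: the cpu_show_*() handlers added above only format a status string; a quick way to exercise them from userspace is to read the vulnerability attributes back. The minimal sketch below is not part of the patch; it assumes the handlers are exposed as /sys/devices/system/cpu/vulnerabilities/{meltdown,spectre_v1,spectre_v2} by the companion drivers/base/cpu.c change in this series, and it simply prints whatever string the kernel reports.

/*
 * Userspace sketch (assumption: sysfs attributes wired up under
 * /sys/devices/system/cpu/vulnerabilities/ as in the rest of this series).
 */
#include <stdio.h>

int main(void)
{
	static const char *files[] = { "meltdown", "spectre_v1", "spectre_v2" };
	char path[128], line[256];
	FILE *f;
	unsigned int i;

	for (i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/vulnerabilities/%s", files[i]);
		f = fopen(path, "r");
		if (!f) {
			perror(path);	/* older kernel: attribute not present */
			continue;
		}
		if (fgets(line, sizeof(line), f))
			printf("%s: %s", files[i], line);	/* line keeps its '\n' */
		fclose(f);
	}
	return 0;
}

On a kernel built with CONFIG_RETPOLINE and booted with one of the options parsed above (e.g. spectre_v2=retpoline), the spectre_v2 file is expected to report one of the spectre_v2_strings[] entries, such as "Mitigation: Full generic retpoline".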