author     Andy Lutomirski <luto@kernel.org>    2016-04-26 15:23:26 -0400
committer  Ingo Molnar <mingo@kernel.org>       2016-04-29 05:56:41 -0400
commit     45e876f794e8e566bf827c25ef0791875081724f (patch)
tree       0c725250ba4073785997ea5a74bdb1f7e84cb9d7
parent     f005f5d860e0231fe212cfda8c1a3148b99609f4 (diff)
x86/segments/64: When loadsegment(fs, ...) fails, clear the base
On AMD CPUs, a failed loadsegment currently may not clear the FS
base. Fix it.
While we're at it, prevent loadsegment(gs, xyz) from even compiling
on 64-bit kernels. It shouldn't be used.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/a084c1b93b7b1408b58d3fd0b5d6e47da8e7d7cf.1461698311.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
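
[ Illustrative note, not part of the patch: a minimal sketch of the
  caller-visible contract this change establishes on 64-bit kernels.
  The caller and selector below are hypothetical. ]

	static void example_load_user_fs(unsigned short sel)
	{
		/*
		 * Expands to __loadsegment_fs(sel).  If the MOV into %fs
		 * faults, the ex_handler_clear_fs() fixup runs and FS is
		 * left as the null selector with a zero base, on Intel
		 * and AMD alike.
		 */
		loadsegment(fs, sel);

		/*
		 * loadsegment(gs, sel) no longer compiles on 64-bit
		 * kernels; load_gs_index() must be used instead.
		 */
	}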
-rw-r--r--  arch/x86/include/asm/segment.h  42
-rw-r--r--  arch/x86/kernel/cpu/common.c     2
-rw-r--r--  arch/x86/mm/extable.c           10
3 files changed, 50 insertions, 4 deletions
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 7d5a1929d76b..e1a4afd20223 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -2,6 +2,7 @@
 #define _ASM_X86_SEGMENT_H
 
 #include <linux/const.h>
+#include <asm/alternative.h>
 
 /*
  * Constructor for a conventional segment GDT (or LDT) entry.
@@ -249,10 +250,13 @@ extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDL
 #endif
 
 /*
- * Load a segment. Fall back on loading the zero
- * segment if something goes wrong..
+ * Load a segment. Fall back on loading the zero segment if something goes
+ * wrong.  This variant assumes that loading zero fully clears the segment.
+ * This is always the case on Intel CPUs and, even on 64-bit AMD CPUs, any
+ * failure to fully clear the cached descriptor is only observable for
+ * FS and GS.
  */
-#define loadsegment(seg, value)						\
+#define __loadsegment_simple(seg, value)				\
 do {									\
 	unsigned short __val = (value);					\
 									\
@@ -269,6 +273,38 @@ do {									\
 		     : "+r" (__val) : : "memory");			\
 } while (0)
 
+#define __loadsegment_ss(value) __loadsegment_simple(ss, (value))
+#define __loadsegment_ds(value) __loadsegment_simple(ds, (value))
+#define __loadsegment_es(value) __loadsegment_simple(es, (value))
+
+#ifdef CONFIG_X86_32
+
+/*
+ * On 32-bit systems, the hidden parts of FS and GS are unobservable if
+ * the selector is NULL, so there's no funny business here.
+ */
+#define __loadsegment_fs(value) __loadsegment_simple(fs, (value))
+#define __loadsegment_gs(value) __loadsegment_simple(gs, (value))
+
+#else
+
+static inline void __loadsegment_fs(unsigned short value)
+{
+	asm volatile("						\n"
+		     "1:	movw %0, %%fs			\n"
+		     "2:					\n"
+
+		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_clear_fs)
+
+		     : : "rm" (value) : "memory");
+}
+
+/* __loadsegment_gs is intentionally undefined.  Use load_gs_index instead. */
+
+#endif
+
+#define loadsegment(seg, value) __loadsegment_ ## seg (value)
+
 /*
  * Save a segment register away:
  */
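
[ Illustrative note, not part of the patch: the dispatch above relies on
  token pasting -- loadsegment(seg, value) expands to __loadsegment_<seg>(value),
  so leaving __loadsegment_gs() undefined turns loadsegment(gs, ...) into a
  build error on 64-bit kernels.  A stand-alone sketch of the same pattern,
  with invented names, purely for illustration: ]

	#include <stdio.h>

	/* One handler per "register"; __load_gs is deliberately missing. */
	static void __load_fs(unsigned short v) { printf("fs <- %#x\n", v); }
	static void __load_ds(unsigned short v) { printf("ds <- %#x\n", v); }

	/* Token-pasting dispatch, same shape as loadsegment(). */
	#define load(seg, value) __load_ ## seg(value)

	int main(void)
	{
		load(fs, 0x2b);		/* calls __load_fs(0x2b) */
		load(ds, 0x2b);		/* calls __load_ds(0x2b) */
		/* load(gs, 0x2b); would fail to build: no __load_gs() */
		return 0;
	}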
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 6bfa36de6d9f..088106140c4b 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -430,7 +430,7 @@ void load_percpu_segment(int cpu)
 #ifdef CONFIG_X86_32
 	loadsegment(fs, __KERNEL_PERCPU);
 #else
-	loadsegment(gs, 0);
+	__loadsegment_simple(gs, 0);
 	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
 #endif
 	load_stack_canary_segment();
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index aaeda3ffaafe..4bb53b89f3c5 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -70,6 +70,16 @@ bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
 }
 EXPORT_SYMBOL(ex_handler_wrmsr_unsafe);
 
+bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
+			 struct pt_regs *regs, int trapnr)
+{
+	if (static_cpu_has(X86_BUG_NULL_SEG))
+		asm volatile ("mov %0, %%fs" : : "rm" (__USER_DS));
+	asm volatile ("mov %0, %%fs" : : "rm" (0));
+	return ex_handler_default(fixup, regs, trapnr);
+}
+EXPORT_SYMBOL(ex_handler_clear_fs);
+
 bool ex_has_fault_handler(unsigned long ip)
 {
 	const struct exception_table_entry *e;
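
[ Illustrative note, not part of the patch: on CPUs with X86_BUG_NULL_SEG,
  loading the null selector does not clear the cached FS base, so the fixup
  first loads __USER_DS (whose base is 0) and then the null selector, which
  guarantees a zero base either way.  Below is a simplified, user-space model
  of how an exception-table entry dispatches to a custom handler such as
  ex_handler_clear_fs(); the real struct exception_table_entry stores relative
  offsets and encodes the handler differently. ]

	#include <stdbool.h>
	#include <stdio.h>

	struct pt_regs { unsigned long ip; };

	struct exception_table_entry {
		unsigned long insn;	/* address of the faulting MOV (label 1:) */
		unsigned long fixup;	/* where to resume (label 2:) */
		bool (*handler)(const struct exception_table_entry *e,
				struct pt_regs *regs, int trapnr);
	};

	static bool ex_handler_default(const struct exception_table_entry *e,
				       struct pt_regs *regs, int trapnr)
	{
		regs->ip = e->fixup;	/* skip past the faulting instruction */
		return true;
	}

	static bool model_clear_fs(const struct exception_table_entry *e,
				   struct pt_regs *regs, int trapnr)
	{
		puts("clear FS: load __USER_DS, then the null selector");
		return ex_handler_default(e, regs, trapnr);
	}

	int main(void)
	{
		struct exception_table_entry e = { 0x1000, 0x1004, model_clear_fs };
		struct pt_regs regs = { .ip = 0x1000 };

		/* What fixup_exception() does, in essence, on a fault here: */
		if (e.handler(&e, &regs, 13))
			printf("resumed at %#lx\n", regs.ip);
		return 0;
	}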