author	Andy Lutomirski <luto@kernel.org>	2016-04-26 15:23:26 -0400
committer	Ingo Molnar <mingo@kernel.org>	2016-04-29 05:56:41 -0400
commit	45e876f794e8e566bf827c25ef0791875081724f (patch)
tree	0c725250ba4073785997ea5a74bdb1f7e84cb9d7
parent	f005f5d860e0231fe212cfda8c1a3148b99609f4 (diff)
x86/segments/64: When loadsegment(fs, ...) fails, clear the base
On AMD CPUs, a failed loadsegment currently may not clear the FS base. Fix it.

While we're at it, prevent loadsegment(gs, xyz) from even compiling on 64-bit kernels. It shouldn't be used.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/a084c1b93b7b1408b58d3fd0b5d6e47da8e7d7cf.1461698311.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/x86/include/asm/segment.h	42
-rw-r--r--	arch/x86/kernel/cpu/common.c	2
-rw-r--r--	arch/x86/mm/extable.c	10
3 files changed, 50 insertions, 4 deletions
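The heart of the change is the new dispatch macro in segment.h: loadsegment(seg, value) token-pastes the register name into a per-register helper, so each selector register can get its own fixup strategy, and loadsegment(gs, ...) simply has nothing to expand to on 64-bit. A minimal user-space sketch of that dispatch pattern (illustration only, not kernel code; the printf bodies stand in for the real movw-plus-fixup asm):

#include <stdio.h>

/* Stand-ins for the real "movw %0, %%seg" plus exception fixup. */
static inline void __loadsegment_ds(unsigned short value)
{
	printf("load ds <- %#hx\n", value);
}

static inline void __loadsegment_fs(unsigned short value)
{
	printf("load fs <- %#hx\n", value);
}

/* __loadsegment_gs intentionally not defined, as in the patch. */

/* Token pasting picks the helper from the register name. */
#define loadsegment(seg, value) __loadsegment_ ## seg (value)

int main(void)
{
	loadsegment(ds, 0x2b);	/* expands to __loadsegment_ds(0x2b) */
	loadsegment(fs, 0x2b);	/* expands to __loadsegment_fs(0x2b) */
	/* loadsegment(gs, 0); would fail to build: no __loadsegment_gs */
	return 0;
}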
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 7d5a1929d76b..e1a4afd20223 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -2,6 +2,7 @@
 #define _ASM_X86_SEGMENT_H
 
 #include <linux/const.h>
+#include <asm/alternative.h>
 
 /*
  * Constructor for a conventional segment GDT (or LDT) entry.
@@ -249,10 +250,13 @@ extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDL
 #endif
 
 /*
- * Load a segment. Fall back on loading the zero
- * segment if something goes wrong..
+ * Load a segment. Fall back on loading the zero segment if something goes
+ * wrong. This variant assumes that loading zero fully clears the segment.
+ * This is always the case on Intel CPUs and, even on 64-bit AMD CPUs, any
+ * failure to fully clear the cached descriptor is only observable for
+ * FS and GS.
  */
-#define loadsegment(seg, value)					\
+#define __loadsegment_simple(seg, value)			\
 do {								\
 	unsigned short __val = (value);				\
 								\
@@ -269,6 +273,38 @@ do { \
269 : "+r" (__val) : : "memory"); \ 273 : "+r" (__val) : : "memory"); \
270} while (0) 274} while (0)
271 275
276#define __loadsegment_ss(value) __loadsegment_simple(ss, (value))
277#define __loadsegment_ds(value) __loadsegment_simple(ds, (value))
278#define __loadsegment_es(value) __loadsegment_simple(es, (value))
279
280#ifdef CONFIG_X86_32
281
282/*
283 * On 32-bit systems, the hidden parts of FS and GS are unobservable if
284 * the selector is NULL, so there's no funny business here.
285 */
286#define __loadsegment_fs(value) __loadsegment_simple(fs, (value))
287#define __loadsegment_gs(value) __loadsegment_simple(gs, (value))
288
289#else
290
291static inline void __loadsegment_fs(unsigned short value)
292{
293 asm volatile(" \n"
294 "1: movw %0, %%fs \n"
295 "2: \n"
296
297 _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_clear_fs)
298
299 : : "rm" (value) : "memory");
300}
301
302/* __loadsegment_gs is intentionally undefined. Use load_gs_index instead. */
303
304#endif
305
306#define loadsegment(seg, value) __loadsegment_ ## seg (value)
307
272/* 308/*
273 * Save a segment register away: 309 * Save a segment register away:
274 */ 310 */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 6bfa36de6d9f..088106140c4b 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -430,7 +430,7 @@ void load_percpu_segment(int cpu)
 #ifdef CONFIG_X86_32
 	loadsegment(fs, __KERNEL_PERCPU);
 #else
-	loadsegment(gs, 0);
+	__loadsegment_simple(gs, 0);
 	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
 #endif
 	load_stack_canary_segment();
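The 64-bit branch above switches to __loadsegment_simple(gs, 0), presumably for two reasons visible in the patch: loadsegment(gs, ...) no longer compiles on 64-bit once __loadsegment_gs is left undefined, and the simple variant's caveat about a possibly stale cached base does not matter here because the very next line rewrites MSR_GS_BASE anyway.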
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index aaeda3ffaafe..4bb53b89f3c5 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -70,6 +70,16 @@ bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
 }
 EXPORT_SYMBOL(ex_handler_wrmsr_unsafe);
 
+bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
+			 struct pt_regs *regs, int trapnr)
+{
+	if (static_cpu_has(X86_BUG_NULL_SEG))
+		asm volatile ("mov %0, %%fs" : : "rm" (__USER_DS));
+	asm volatile ("mov %0, %%fs" : : "rm" (0));
+	return ex_handler_default(fixup, regs, trapnr);
+}
+EXPORT_SYMBOL(ex_handler_clear_fs);
+
 bool ex_has_fault_handler(unsigned long ip)
 {
 	const struct exception_table_entry *e;
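Putting the pieces together: if the movw at label 1 in __loadsegment_fs() faults, the fault path looks the faulting address up in the exception table, finds the entry emitted by _ASM_EXTABLE_HANDLE, and calls ex_handler_clear_fs(). On CPUs with X86_BUG_NULL_SEG, where writing a null selector leaves the cached FS base intact, the handler first loads __USER_DS and then 0 so the base really ends up cleared, and ex_handler_default() resumes execution at label 2. A heavily simplified user-space model of that dispatch (assumed, simplified types and made-up addresses, not the real kernel structures):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pt_regs { unsigned long ip; };

struct exception_table_entry {
	unsigned long insn;	/* address of the instruction that may fault */
	unsigned long fixup;	/* where to resume (label 2 in the patch)    */
	bool (*handler)(const struct exception_table_entry *e,
			struct pt_regs *regs, int trapnr);
};

static bool ex_handler_default(const struct exception_table_entry *e,
			       struct pt_regs *regs, int trapnr)
{
	regs->ip = e->fixup;		/* skip past the faulting movw */
	return true;			/* fault handled, no oops      */
}

static bool ex_handler_clear_fs(const struct exception_table_entry *e,
				struct pt_regs *regs, int trapnr)
{
	/* Stand-in for: load __USER_DS into FS on X86_BUG_NULL_SEG CPUs,
	 * then load 0, so the cached FS base is really cleared. */
	printf("clearing FS base before resuming\n");
	return ex_handler_default(e, regs, trapnr);
}

static bool fixup_exception(const struct exception_table_entry *table,
			    size_t n, struct pt_regs *regs, int trapnr)
{
	for (size_t i = 0; i < n; i++)
		if (table[i].insn == regs->ip)
			return table[i].handler(&table[i], regs, trapnr);
	return false;			/* no fixup entry: fatal fault */
}

int main(void)
{
	const struct exception_table_entry table[] = {
		/* hypothetical addresses for labels 1 and 2 */
		{ .insn = 0x1000, .fixup = 0x1003, .handler = ex_handler_clear_fs },
	};
	struct pt_regs regs = { .ip = 0x1000 };	/* pretend the movw faulted */

	if (fixup_exception(table, 1, &regs, 13))	/* 13 == #GP */
		printf("resumed at %#lx\n", regs.ip);
	return 0;
}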