author		Andy Lutomirski <luto@kernel.org>	2017-07-26 10:16:30 -0400
committer	Ingo Molnar <mingo@kernel.org>	2017-07-27 03:12:57 -0400
commit		a632375764aa25c97b78beb56c71b0ba59d1cf83 (patch)
tree		367b9197b23b4010591750bff661656f7bce7a6e
parent		81d387190039c14edac8de2b3ec789beb899afd9 (diff)
x86/ldt/64: Refresh DS and ES when modify_ldt changes an entry
On x86_32, modify_ldt() implicitly refreshes the cached DS and ES
segments because they are refreshed on return to usermode.

On x86_64, they're not refreshed on return to usermode. To improve
determinism and match x86_32's behavior, refresh them when we update
the LDT.

This avoids a situation in which DS points to a descriptor that has been
changed but the old cached segment descriptor persists until the next
reschedule. If this happens, then the user-visible state will change
nondeterministically some time after modify_ldt() returns, which is
unfortunate.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Chang Seok <chang.seok.bae@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
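For illustration, here is a minimal userspace sketch of the situation the commit message describes; it is not part of the patch, and the helper name install_ldt_entry, the use of LDT slot 0, and the base/limit values are made up. A thread installs an LDT entry, loads DS with a selector referencing it, and then changes the entry with another modify_ldt() call; without this change the stale descriptor cached in DS on x86_64 could linger until the next reschedule, whereas with it flush_ldt() reloads DS and ES during the modify_ldt() call itself.

/*
 * Hypothetical userspace sketch -- not from the patch.  Helper name, LDT
 * slot and base/limit values are illustrative.
 */
#include <asm/ldt.h>		/* struct user_desc */
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Write LDT slot 0 via modify_ldt(); 0x11 is the "write entry" function. */
static long install_ldt_entry(unsigned int base, unsigned int limit)
{
	struct user_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.entry_number = 0;
	desc.base_addr = base;
	desc.limit = limit;
	desc.seg_32bit = 1;

	return syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc));
}

int main(void)
{
	/* Selector for LDT slot 0: index 0, TI = 1 (LDT), RPL = 3. */
	unsigned int sel = (0 << 3) | 0x4 | 0x3;

	install_ldt_entry(0x0, 0xffff);
	asm volatile("movl %0, %%ds" : : "r" (sel));	/* DS now caches slot 0 */

	/*
	 * Change the descriptor that DS refers to.  With this patch,
	 * flush_ldt() reloads DS (and ES) during this call, so the cached
	 * descriptor is replaced immediately rather than at the next
	 * reschedule.
	 */
	install_ldt_entry(0x1000, 0xffff);

	return 0;
}

In 64-bit code DS does not affect address calculation, so the observable difference is only about when the stale cached descriptor is replaced, which is exactly what the patch makes deterministic.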
arch/x86/kernel/ldt.c | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index a870910c8565..f0e64db18ac8 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -21,6 +21,25 @@
 #include <asm/mmu_context.h>
 #include <asm/syscalls.h>
 
+static void refresh_ldt_segments(void)
+{
+#ifdef CONFIG_X86_64
+	unsigned short sel;
+
+	/*
+	 * Make sure that the cached DS and ES descriptors match the updated
+	 * LDT.
+	 */
+	savesegment(ds, sel);
+	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
+		loadsegment(ds, sel);
+
+	savesegment(es, sel);
+	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
+		loadsegment(es, sel);
+#endif
+}
+
 /* context.lock is held for us, so we don't need any locking. */
 static void flush_ldt(void *__mm)
 {
@@ -32,6 +51,8 @@ static void flush_ldt(void *__mm)
 
 	pc = &mm->context;
 	set_ldt(pc->ldt->entries, pc->ldt->nr_entries);
+
+	refresh_ldt_segments();
 }
 
 /* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
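One note on the test in refresh_ldt_segments(): a segment selector is laid out as index:13 | TI:1 | RPL:2, and bit 2, the table indicator, is what distinguishes GDT selectors from LDT selectors, so only selectors with that bit set need reloading after an LDT change. Below is a standalone sketch of that check, with the constants written out as defined in arch/x86/include/asm/segment.h; the function name here is illustrative.

#include <stdbool.h>

#define SEGMENT_TI_MASK	0x4	/* bit 2: table indicator              */
#define SEGMENT_LDT	0x4	/* TI set: descriptor is in the LDT    */
#define SEGMENT_GDT	0x0	/* TI clear: descriptor is in the GDT  */

/* True for LDT selectors such as 0x07 (slot 0, RPL 3); false for GDT ones. */
static bool selector_uses_ldt(unsigned short sel)
{
	return (sel & SEGMENT_TI_MASK) == SEGMENT_LDT;
}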