author     Zachary Amsden <zach@vmware.com>          2005-09-03 18:56:39 -0400
committer  Linus Torvalds <torvalds@evo.osdl.org>    2005-09-05 03:06:11 -0400
commit     e7a2ff593c0e48b130434dee4d2fd3452a850e6f (patch)
tree       89bca4c0a0818d1e217c76866f62c1b133a425d7 /arch
parent     2f2984eb4afb2a4298e3186cb49cc7e88dd6d929 (diff)
[PATCH] i386: load_tls() fix
Subtle fix: load_TLS() has been moved to after the %fs and %gs segments are
saved, to avoid creating non-reversible segments. The old ordering could
conceivably cause a bug if the kernel ever needed to save and restore %fs/%gs
from the NMI handler; it currently does not, but this is the safest approach
to avoiding %fs/%gs corruption. SMIs are safe, since an SMI saves the hidden
descriptor state.
Signed-off-by: Zachary Amsden <zach@vmware.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
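As a side note on what "saving %fs and %gs" means here: it is a plain mov of
the visible selector value to memory; the hidden descriptor state is not
saved, which is why the selector must still refer to a valid descriptor when
it is later reloaded. The following is a minimal user-space sketch (my own
file and variable names, not kernel code) of the same instruction the removed
inline asm used and that savesegment() wraps:

/* Minimal user-space sketch (x86): save the %fs/%gs selector values to
 * memory, as the removed inline asm in __switch_to() did.
 * Assumed build: gcc demo.c -o demo */
#include <stdio.h>

int main(void)
{
        unsigned int fs_sel = 0, gs_sel = 0;

        /* Same form as the removed kernel lines:
         *   asm volatile("mov %%fs,%0":"=m" (prev->fs));
         * Only the 16-bit selector is stored; the descriptor's hidden
         * (cached) state stays in the CPU. */
        asm volatile("mov %%fs,%0" : "=m" (fs_sel));
        asm volatile("mov %%gs,%0" : "=m" (gs_sel));

        printf("fs selector: %#x, gs selector: %#x\n", fs_sel, gs_sel);
        return 0;
}

On 32-bit Linux the %gs value printed is typically the glibc TLS selector; the
point here is only that the save side is a single store, while the delicate
part of the patch below is when that store happens relative to load_TLS().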
Diffstat (limited to 'arch')
-rw-r--r--  arch/i386/kernel/process.c  19
1 file changed, 12 insertions, 7 deletions
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 761d4ed47ef3..9d94995e9672 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -678,21 +678,26 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	__unlazy_fpu(prev_p);
 
 	/*
-	 * Reload esp0, LDT and the page table pointer:
+	 * Reload esp0.
 	 */
 	load_esp0(tss, next);
 
 	/*
-	 * Load the per-thread Thread-Local Storage descriptor.
+	 * Save away %fs and %gs. No need to save %es and %ds, as
+	 * those are always kernel segments while inside the kernel.
+	 * Doing this before setting the new TLS descriptors avoids
+	 * the situation where we temporarily have non-reloadable
+	 * segments in %fs and %gs. This could be an issue if the
+	 * NMI handler ever used %fs or %gs (it does not today), or
+	 * if the kernel is running inside of a hypervisor layer.
 	 */
-	load_TLS(next, cpu);
+	savesegment(fs, prev->fs);
+	savesegment(gs, prev->gs);
 
 	/*
-	 * Save away %fs and %gs. No need to save %es and %ds, as
-	 * those are always kernel segments while inside the kernel.
+	 * Load the per-thread Thread-Local Storage descriptor.
 	 */
-	asm volatile("mov %%fs,%0":"=m" (prev->fs));
-	asm volatile("mov %%gs,%0":"=m" (prev->gs));
+	load_TLS(next, cpu);
 
 	/*
 	 * Restore %fs and %gs if needed.
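The savesegment() macro used on the new lines is not defined in this hunk; as
a hedged reference (quoted from memory of the i386 headers of this era, so the
exact operand constraint may differ), it is essentially a stringified wrapper
around the same instruction, roughly:

/* Assumed shape of the macro, include/asm-i386/system.h circa 2.6.13;
 * the real definition may use a different operand constraint. */
#define savesegment(seg, value) \
        asm volatile("mov %%" #seg ",%0" : "=m" (value))

In other words, the two new savesegment() calls generate the same stores as
the removed asm volatile lines, just written through a common helper.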