author     Linus Torvalds <torvalds@linux-foundation.org>   2012-02-16 22:11:15 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-02-16 22:11:15 -0500
commit     4903062b5485f0e2c286a23b44c9b59d9b017d53
tree       c521dd28c5aa409dcd76ca8a522886fa3c272a31 /arch/x86/kernel/process_64.c
parent     b3b0870ef3ffed72b92415423da864f440f57ad6
i387: move AMD K7/K8 fpu fxsave/fxrstor workaround from save to restore
The AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
pending. In order to not leak FIP state from one process to another, we
need to do a floating point load after the fxsave of the old process,
and before the fxrstor of the new FPU state. That resets the state to
the (uninteresting) kernel load, rather than some potentially sensitive
user information.
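The workaround itself is a dummy x87 load. In the kernel it is patched in via an alternative keyed on the X86_FEATURE_FXSAVE_LEAK CPU feature bit; the C sketch below strips out the alternatives machinery and shows only the bare pattern (the helper name and the safe_address variable are illustrative, not the kernel's exact code):

    /*
     * Sketch of the K7/K8 FDP/FIP/FOP leak workaround, run between the
     * fxsave of the old task and the fxrstor of the new one.  "emms"
     * empties the x87 register stack, and the dummy "fildl" then loads
     * FIP/FDP/FOP with the addresses of this kernel instruction and a
     * boring kernel variable, so no user pointers survive to leak into
     * the next task's fxsave image.  No clobbers are declared because
     * the whole x87 state is about to be overwritten by fxrstor anyway.
     */
    static unsigned long safe_address;  /* any cache-hot kernel variable */

    static inline void clear_fpu_leak_state(void)
    {
            asm volatile("emms\n\t"
                         "fildl %0"
                         : : "m" (safe_address));
    }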
We used to do this directly after the FPU state save, but that is
actually very inconvenient, since it
(a) corrupts what is potentially perfectly good FPU state that we might
want to lazily avoid restoring later and
(b) on x86-64 it resulted in a very annoying ordering constraint, where
"__unlazy_fpu()" in the task switch needs to be delayed until after
the DS segment has been reloaded just to get the new DS value.
Coupling it to the fxrstor instead of the fxsave automatically avoids
both of these issues, and also ensures that we only do it when actually
necessary (the FP state after a save may never actually get used). It's
simply a much more natural place for the leaked state cleanup.
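Concretely, "coupling it to the fxrstor" means the cleanup moves into the restore helper. A hedged sketch, assuming the era's fxrstor_checking() helper and struct fpu layout (the function name here is made up; the real change lands in the i387 code and does not look literally like this):

    /*
     * Sketch: with the cleanup on the restore side, it runs only when
     * new FPU state is actually about to be loaded, and the image just
     * written by fxsave stays intact for possible lazy reuse.
     */
    static inline int fpu_restore_sketch(struct fpu *fpu)
    {
            clear_fpu_leak_state();  /* dummy load from the sketch above */
            return fxrstor_checking(&fpu->state->fxsave);
    }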
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/x86/kernel/process_64.c')
-rw-r--r--  arch/x86/kernel/process_64.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 992b4e542bc3..753e803f7197 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -387,6 +387,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
 	unsigned fsindex, gsindex;
 
+	__unlazy_fpu(prev_p);
+
 	/*
 	 * Reload esp0, LDT and the page table pointer:
 	 */
@@ -415,9 +417,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
 	load_TLS(next, cpu);
 
-	/* Must be after DS reload */
-	__unlazy_fpu(prev_p);
-
 	/*
 	 * Leave lazy mode, flushing any hypercalls made here.
 	 * This must be done before restoring TLS segments so