author    Linus Torvalds <torvalds@linux-foundation.org>  2012-02-16 22:11:15 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-02-16 22:11:15 -0500
commit    4903062b5485f0e2c286a23b44c9b59d9b017d53 (patch)
tree      c521dd28c5aa409dcd76ca8a522886fa3c272a31
parent    b3b0870ef3ffed72b92415423da864f440f57ad6 (diff)
i387: move AMD K7/K8 fpu fxsave/fxrstor workaround from save to restore
The AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is pending. In order to not leak FIP state from one process to another, we need to do a floating point load after the fxsave of the old process, and before the fxrstor of the new FPU state. That resets the state to the (uninteresting) kernel load, rather than some potentially sensitive user information.

We used to do this directly after the FPU state save, but that is actually very inconvenient, since it (a) corrupts what is potentially perfectly good FPU state that we might want to lazily avoid restoring later and (b) on x86-64 it resulted in a very annoying ordering constraint, where "__unlazy_fpu()" in the task switch needs to be delayed until after the DS segment has been reloaded just to get the new DS value.

Coupling it to the fxrstor instead of the fxsave automatically avoids both of these issues, and also ensures that we only do it when actually necessary (the FP state after a save may never actually get used). It's simply a much more natural place for the leaked state cleanup.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
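For illustration, here is a minimal userspace sketch of the scrub the patch keeps relying on, now applied on the restore side. This is not the kernel code: the names safe_addr and scrub_x87_pointers are made up for the example, and in the kernel the emms/fildl pair is only patched in (via alternative_input() and X86_FEATURE_FXSAVE_LEAK) on the affected AMD CPUs, immediately before the fxrstor of the next task's state.

/* Hedged sketch, not the kernel implementation; names are illustrative. */
#include <stdint.h>
#include <stdio.h>

static int32_t safe_addr;	/* any readable, already-cached word will do */

static void scrub_x87_pointers(void)
{
	/*
	 * "emms" tags the whole x87 register stack empty, and the dummy
	 * "fildl" load then sets FIP/FDP/FOP from *this* context,
	 * overwriting whatever the previous task left behind.
	 */
	asm volatile("emms\n\t"
		     "fildl %0\n\t"
		     "fstp %%st(0)"	/* pop the dummy value again */
		     : : "m" (safe_addr) : "st");
}

int main(void)
{
	/* In the kernel this would sit between the fxsave of the old task
	   and the fxrstor of the new one; here we only show it runs. */
	scrub_x87_pointers();
	printf("scrubbed x87 FIP/FDP/FOP with emms + fildl\n");
	return 0;
}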
-rw-r--r--  arch/x86/include/asm/i387.h   19
-rw-r--r--  arch/x86/kernel/process_64.c   5
-rw-r--r--  arch/x86/kernel/traps.c       14
3 files changed, 16 insertions, 22 deletions
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 86974c72d0d0..01b115d86770 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -211,15 +211,6 @@ static inline void fpu_fxsave(struct fpu *fpu)
 
 #endif	/* CONFIG_X86_64 */
 
-/* We need a safe address that is cheap to find and that is already
-   in L1 during context switch. The best choices are unfortunately
-   different for UP and SMP */
-#ifdef CONFIG_SMP
-#define safe_address (__per_cpu_offset[0])
-#else
-#define safe_address (__get_cpu_var(kernel_cpustat).cpustat[CPUTIME_USER])
-#endif
-
 /*
  * These must be called with preempt disabled
  */
@@ -243,16 +234,6 @@ static inline void fpu_save_init(struct fpu *fpu)
 
 	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
 		asm volatile("fnclex");
-
-	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
-	   is pending.  Clear the x87 state here by setting it to fixed
-	   values. safe_address is a random variable that should be in L1 */
-	alternative_input(
-		ASM_NOP8 ASM_NOP2,
-		"emms\n\t"		/* clear stack tags */
-		"fildl %P[addr]",	/* set F?P to defined value */
-		X86_FEATURE_FXSAVE_LEAK,
-		[addr] "m" (safe_address));
 }
 
 static inline void __save_init_fpu(struct task_struct *tsk)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 992b4e542bc3..753e803f7197 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -387,6 +387,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
 	unsigned fsindex, gsindex;
 
+	__unlazy_fpu(prev_p);
+
 	/*
 	 * Reload esp0, LDT and the page table pointer:
 	 */
@@ -415,9 +417,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
 	load_TLS(next, cpu);
 
-	/* Must be after DS reload */
-	__unlazy_fpu(prev_p);
-
 	/*
 	 * Leave lazy mode, flushing any hypercalls made here.
 	 * This must be done before restoring TLS segments so
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 5afe824c66e5..4d42300dcd2c 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -585,6 +585,10 @@ void math_state_restore(void)
 	struct thread_info *thread = current_thread_info();
 	struct task_struct *tsk = thread->task;
 
+	/* We need a safe address that is cheap to find and that is already
+	   in L1. We just brought in "thread->task", so use that */
+#define safe_address (thread->task)
+
 	if (!tsk_used_math(tsk)) {
 		local_irq_enable();
 		/*
@@ -602,6 +606,16 @@ void math_state_restore(void)
 
 	__thread_fpu_begin(thread);
 
+	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
+	   is pending.  Clear the x87 state here by setting it to fixed
+	   values. safe_address is a random variable that should be in L1 */
+	alternative_input(
+		ASM_NOP8 ASM_NOP2,
+		"emms\n\t"		/* clear stack tags */
+		"fildl %P[addr]",	/* set F?P to defined value */
+		X86_FEATURE_FXSAVE_LEAK,
+		[addr] "m" (safe_address));
+
 	/*
 	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
 	 */