author    Linus Torvalds <torvalds@linux-foundation.org>  2012-02-19 14:48:44 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-02-20 13:58:28 -0500
commit    80ab6f1e8c981b1b6604b2f22e36c917526235cd (patch)
tree      dfd8c2e909d614bc230aa87c0ea5742cf8510c57 /arch
parent    cea20ca3f3181fc36788a15bc65d1062b96a0a6c (diff)
i387: use 'restore_fpu_checking()' directly in task switching code
This inlines what is usually just a couple of instructions, but more
importantly it also fixes the theoretical error case (can that FPU
restore really ever fail? Maybe we should remove the checking).

We can't start sending signals from within the scheduler, we're much too
deep in the kernel and are holding the runqueue lock etc. So don't
bother even trying.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/i387.h | 17
-rw-r--r--  arch/x86/kernel/traps.c     | 40
2 files changed, 22 insertions(+), 35 deletions(-)
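For context, the prepare/finish pair being changed here is called from
__switch_to() during a task switch. A minimal sketch of that call site,
loosely based on arch/x86/kernel/process_32.c and process_64.c of this
era (all unrelated switching work elided):

	struct task_struct *
	__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
	{
		fpu_switch_t fpu;

		/* Save prev_p's FPU state if it owns the FPU, and decide
		   whether next_p's state should be preloaded eagerly. */
		fpu = switch_fpu_prepare(prev_p, next_p);

		/* ... stack, TLS, segments and the rest switched here ... */

		/* If a preload was requested, restore next_p's FPU registers;
		   after this patch, switch_fpu_finish() does the restore by
		   calling restore_fpu_checking() directly. */
		switch_fpu_finish(next_p, fpu);

		return prev_p;
	}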
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 8df95849721d..74c607b37e87 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -29,7 +29,6 @@ extern unsigned int sig_xstate_size;
 extern void fpu_init(void);
 extern void mxcsr_feature_mask_init(void);
 extern int init_fpu(struct task_struct *child);
-extern void __math_state_restore(struct task_struct *);
 extern void math_state_restore(void);
 extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
 
@@ -269,6 +268,16 @@ static inline int fpu_restore_checking(struct fpu *fpu)
 
 static inline int restore_fpu_checking(struct task_struct *tsk)
 {
+	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
+	   is pending.  Clear the x87 state here by setting it to fixed
+	   values. "m" is a random variable that should be in L1 */
+	alternative_input(
+		ASM_NOP8 ASM_NOP2,
+		"emms\n\t"		/* clear stack tags */
+		"fildl %P[addr]",	/* set F?P to defined value */
+		X86_FEATURE_FXSAVE_LEAK,
+		[addr] "m" (tsk->thread.has_fpu));
+
 	return fpu_restore_checking(&tsk->thread.fpu);
 }
 
@@ -378,8 +387,10 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
  */
 static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
 {
-	if (fpu.preload)
-		__math_state_restore(new);
+	if (fpu.preload) {
+		if (unlikely(restore_fpu_checking(new)))
+			__thread_fpu_end(new);
+	}
 }
 
 /*
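As for the parenthetical "can that FPU restore really ever fail?":
fpu_restore_checking() bottoms out in an FXRSTOR (or FRSTOR) instruction
that can fault, e.g. on a save image with reserved MXCSR bits set, and
the fault is converted into an error return via the kernel's exception
table. A simplified sketch of that pattern (the i387.h of this era
differs in details such as the rex64/fxsaveq handling on 64-bit):

	static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
	{
		int err;

		/* If FXRSTOR at label 1 faults, the fixup at label 3 runs
		   instead of an oops and reports failure to the caller. */
		asm volatile("1:	fxrstor %[fx]\n\t"
			     "2:\n"
			     ".section .fixup,\"ax\"\n"
			     "3:	movl $-1, %[err]\n"
			     "	jmp 2b\n"
			     ".previous\n"
			     _ASM_EXTABLE(1b, 3b)
			     : [err] "=r" (err)
			     : [fx] "m" (*fx), "0" (0));
		return err;
	}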
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 77da5b475ad2..4bbe04d96744 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -571,37 +571,6 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
 }
 
 /*
- * This gets called with the process already owning the
- * FPU state, and with CR0.TS cleared. It just needs to
- * restore the FPU register state.
- */
-void __math_state_restore(struct task_struct *tsk)
-{
-	/* We need a safe address that is cheap to find and that is already
-	   in L1. We've just brought in "tsk->thread.has_fpu", so use that */
-#define safe_address (tsk->thread.has_fpu)
-
-	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
-	   is pending. Clear the x87 state here by setting it to fixed
-	   values. safe_address is a random variable that should be in L1 */
-	alternative_input(
-		ASM_NOP8 ASM_NOP2,
-		"emms\n\t"		/* clear stack tags */
-		"fildl %P[addr]",	/* set F?P to defined value */
-		X86_FEATURE_FXSAVE_LEAK,
-		[addr] "m" (safe_address));
-
-	/*
-	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
-	 */
-	if (unlikely(restore_fpu_checking(tsk))) {
-		__thread_fpu_end(tsk);
-		force_sig(SIGSEGV, tsk);
-		return;
-	}
-}
-
-/*
  * 'math_state_restore()' saves the current math information in the
  * old math state array, and gets the new ones from the current task
  *
@@ -631,7 +600,14 @@
 	}
 
 	__thread_fpu_begin(tsk);
-	__math_state_restore(tsk);
+	/*
+	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
+	 */
+	if (unlikely(restore_fpu_checking(tsk))) {
+		__thread_fpu_end(tsk);
+		force_sig(SIGSEGV, tsk);
+		return;
+	}
 
 	tsk->fpu_counter++;
 }
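Dropping the state with __thread_fpu_end() is enough in the switch path
because it leaves CR0.TS set: the next FPU instruction the task executes
traps with a device-not-available fault into math_state_restore(), which
runs in a context where force_sig() is allowed. Roughly, per the helpers
in the i387.h of this era:

	static inline void __thread_fpu_end(struct task_struct *tsk)
	{
		__thread_clear_has_fpu(tsk);	/* task no longer owns the FPU */
		stts();		/* set CR0.TS: next FPU insn raises #NM */
	}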