path: root/arch/x86/kernel/traps.c
author		Linus Torvalds <torvalds@linux-foundation.org>	2012-02-16 18:45:23 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-02-16 18:45:23 -0500
commit		b3b0870ef3ffed72b92415423da864f440f57ad6 (patch)
tree		b3e128019581669d44e6634d3b1bfb169c73598d /arch/x86/kernel/traps.c
parent		6d59d7a9f5b723a7ac1925c136e93ec83c0c3043 (diff)
i387: do not preload FPU state at task switch time
Yes, taking the trap to re-load the FPU/MMX state is expensive, but so is spending several days looking for a bug in the state save/restore code. And the preload code has some rather subtle interactions with both paravirtualization support and segment state restore, so it's not nearly as simple as it should be.

Also, now that we no longer necessarily depend on a single bit (i.e. TS_USEDFPU) for keeping track of the state of the FPU, we might be able to do better. If we are really switching between two processes that keep touching the FP state, save/restore is inevitable, but in the case of having one process that does most of the FPU usage, we may actually be able to do much better than the preloading.

In particular, we may be able to keep track of which CPU the process ran on last, and also per CPU keep track of which process' FP state that CPU has. For modern CPUs that don't destroy the FPU contents at save time, that would allow us to do a lazy restore by just re-enabling the existing FPU state - with no restore cost at all!

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
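[Editor's note] As a rough illustration of the lazy-restore idea sketched in that last paragraph, the check at context switch might look something like the following. This is a hypothetical sketch only: fpu_owner_task, last_cpu and fpu_lazy_restore are assumed names for illustration, not part of this commit.

/*
 * Sketch: track which task's FPU state each CPU currently holds,
 * and which CPU each task last ran on (assumed names).
 */
DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);

static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
{
	/*
	 * If no other task has used the FPU on this CPU since 'new'
	 * last ran here, the FPU registers still hold 'new's state:
	 * just clearing CR0.TS re-enables it, with no restore cost.
	 */
	return this_cpu_read(fpu_owner_task) == new &&
		cpu == new->thread.fpu.last_cpu;
}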
Diffstat (limited to 'arch/x86/kernel/traps.c')
-rw-r--r--	arch/x86/kernel/traps.c	35
1 file changed, 11 insertions(+), 24 deletions(-)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index fc676e44c77..5afe824c66e 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -571,28 +571,6 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
 }
 
 /*
- * __math_state_restore assumes that cr0.TS is already clear and the
- * fpu state is all ready for use.  Used during context switch.
- */
-void __math_state_restore(void)
-{
-	struct thread_info *thread = current_thread_info();
-	struct task_struct *tsk = thread->task;
-
-	/*
-	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
-	 */
-	if (unlikely(restore_fpu_checking(tsk))) {
-		stts();
-		force_sig(SIGSEGV, tsk);
-		return;
-	}
-
-	__thread_set_has_fpu(thread);	/* clts in caller! */
-	tsk->fpu_counter++;
-}
-
-/*
  * 'math_state_restore()' saves the current math information in the
  * old math state array, and gets the new ones from the current task
  *
@@ -622,9 +600,18 @@ void math_state_restore(void)
 		local_irq_disable();
 	}
 
-	clts();				/* Allow maths ops (or we recurse) */
+	__thread_fpu_begin(thread);
 
-	__math_state_restore();
+	/*
+	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
+	 */
+	if (unlikely(restore_fpu_checking(tsk))) {
+		__thread_fpu_end(thread);
+		force_sig(SIGSEGV, tsk);
+		return;
+	}
+
+	tsk->fpu_counter++;
 }
 EXPORT_SYMBOL_GPL(math_state_restore);
 
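[Editor's note] For context on the trap that now bears the reload cost: math_state_restore() runs from the device-not-available (#NM) exception, so after this change a task that touches the FPU following a context switch always takes one trap plus the restore above. In traps.c of this era the entry point looks roughly like the following simplified sketch (CONFIG_MATH_EMULATION handling omitted; verify against the actual source):

dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error_code)
{
	math_state_restore(); /* interrupts still off */
#ifdef CONFIG_X86_32
	conditional_sti(regs);
#endif
}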