author		Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2009-04-24 03:40:59 -0400
committer	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2009-06-17 16:21:25 -0400
commit		e6e9cac8c3417b43498b243c1f8f11780e157168 (patch)
tree		4a3a2995f8d930f7baaf19b827043c6f9bf62db3	/arch/x86/kernel/traps.c
parent		3fe0344faf7fdcb158bd5c1a9aec960a8d70c8e8 (diff)
x86: split out core __math_state_restore
Split the core fpu state restoration out into __math_state_restore, which
assumes that cr0.TS is clear and that the fpu context has been initialized.

This will be used during context switch. There are two reasons this is
desirable:

- There's a small clarification: when __switch_to() calls math_state_restore,
  it relies on the fact that tsk_used_math() returns true, and so will never
  do a blocking init_fpu(). __math_state_restore() does not have (or need)
  that logic, so the question never arises.

- It allows the clts() to be moved earlier in __switch_to() so it can be
  performed while cpu context updates are batched (will be done in a later
  patch).

[ Impact: refactor code to make reuse cleaner; no functional change ]

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Alok Kataria <akataria@vmware.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
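To make the second point concrete, here is a minimal, hypothetical sketch (not part of this patch) of how a context-switch path could take advantage of the split; sketch_fpu_preload() and the preload_fpu flag are illustrative names, not code from the kernel tree:

/*
 * Illustrative sketch only, not this patch.  The point of the split is
 * that a context-switch path can clear cr0.TS early, batched with its
 * other cpu state updates, and later call __math_state_restore(), which
 * no longer touches TS or re-checks tsk_used_math().  Note that
 * __math_state_restore() acts on current, so in a real __switch_to()
 * it would run after current has been switched over to the incoming
 * task.  "preload_fpu" is a hypothetical flag derived from
 * tsk_used_math() and fpu_counter for the incoming task.
 */
static void sketch_fpu_preload(int preload_fpu)
{
	if (preload_fpu)
		clts();			/* batched with other cpu state updates */

	/* ... other batched cpu context updates ... */

	if (preload_fpu)
		__math_state_restore();	/* assumes TS clear, fpu context valid */
}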
Diffstat (limited to 'arch/x86/kernel/traps.c')
 arch/x86/kernel/traps.c | 33 +++++++++++++++++++++++----------
 1 file changed, 23 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 5f935f0d5861..71b91669ad19 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -814,6 +814,28 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
 }
 
 /*
+ * __math_state_restore assumes that cr0.TS is already clear and the
+ * fpu state is all ready for use.  Used during context switch.
+ */
+void __math_state_restore(void)
+{
+	struct thread_info *thread = current_thread_info();
+	struct task_struct *tsk = thread->task;
+
+	/*
+	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
+	 */
+	if (unlikely(restore_fpu_checking(tsk))) {
+		stts();
+		force_sig(SIGSEGV, tsk);
+		return;
+	}
+
+	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
+	tsk->fpu_counter++;
+}
+
+/*
  * 'math_state_restore()' saves the current math information in the
  * old math state array, and gets the new ones from the current task
  *
@@ -844,17 +866,8 @@ asmlinkage void math_state_restore(void)
 	}
 
 	clts();				/* Allow maths ops (or we recurse) */
-	/*
-	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
-	 */
-	if (unlikely(restore_fpu_checking(tsk))) {
-		stts();
-		force_sig(SIGSEGV, tsk);
-		return;
-	}
 
-	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
-	tsk->fpu_counter++;
+	__math_state_restore();
 }
 EXPORT_SYMBOL_GPL(math_state_restore);
 