about summary refs log tree commit diff stats
path: root/arch/x86_64/kernel/process.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86_64/kernel/process.c')
-rw-r--r--  arch/x86_64/kernel/process.c  10
1 files changed, 10 insertions, 0 deletions
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 6fbd19564e4e..9e9a70e50c72 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -552,6 +552,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
552 int cpu = smp_processor_id(); 552 int cpu = smp_processor_id();
553 struct tss_struct *tss = &per_cpu(init_tss, cpu); 553 struct tss_struct *tss = &per_cpu(init_tss, cpu);
554 554
555 /* we're going to use this soon, after a few expensive things */
556 if (next_p->fpu_counter>5)
557 prefetch(&next->i387.fxsave);
558
555 /* 559 /*
556 * Reload esp0, LDT and the page table pointer: 560 * Reload esp0, LDT and the page table pointer:
557 */ 561 */
@@ -629,6 +633,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
629 || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) 633 || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP))
630 __switch_to_xtra(prev_p, next_p, tss); 634 __switch_to_xtra(prev_p, next_p, tss);
631 635
636 /* If the task has used fpu the last 5 timeslices, just do a full
637 * restore of the math state immediately to avoid the trap; the
638 * chances of needing FPU soon are obviously high now
639 */
640 if (next_p->fpu_counter>5)
641 math_state_restore();
632 return prev_p; 642 return prev_p;
633} 643}
634 644