Diffstat (limited to 'arch/x86/kernel/process_32.c')
-rw-r--r--	arch/x86/kernel/process_32.c	30
1 file changed, 16 insertions(+), 14 deletions(-)
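
In short, the patch computes a preload_fpu flag early in __switch_to(), issues clts() while CPU state updates are still being batched, and performs the actual FPU restore via __math_state_restore() only after arch_end_context_switch() has flushed any lazy hypercalls. A condensed control-flow sketch of the reordered FPU path (not compilable on its own; it uses only the calls visible in the diff, with the unrelated stack/TLS/segment work elided):

	/* decide up front whether next_p's FPU state is worth preloading */
	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;

	__unlazy_fpu(prev_p);			/* save prev_p's FPU state if it was in use */

	if (preload_fpu)
		prefetch(next->xstate);		/* warm the cache before the expensive work */

	/* ... esp0, TLS descriptors, __switch_to_xtra() ... */

	if (preload_fpu)
		clts();				/* clear CR0.TS while updates are still batched */

	arch_end_context_switch(next_p);	/* leave lazy mode, flush batched hypercalls */

	if (preload_fpu)
		__math_state_restore();		/* restore next_p's FPU state with TS already clear */
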
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 00a8fe4c58bb..209e74801763 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -63,9 +63,6 @@
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
-DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
-EXPORT_PER_CPU_SYMBOL(current_task);
-
 /*
  * Return saved PC of a blocked thread.
  */
@@ -361,14 +358,21 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 				 *next = &next_p->thread;
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
+	bool preload_fpu;
 
 	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
-	__unlazy_fpu(prev_p);
+	/*
+	 * If the task has used fpu the last 5 timeslices, just do a full
+	 * restore of the math state immediately to avoid the trap; the
+	 * chances of needing FPU soon are obviously high now
+	 */
+	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
 
+	__unlazy_fpu(prev_p);
 
 	/* we're going to use this soon, after a few expensive things */
-	if (next_p->fpu_counter > 5)
+	if (preload_fpu)
 		prefetch(next->xstate);
 
 	/*
@@ -409,6 +413,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
 		__switch_to_xtra(prev_p, next_p, tss);
 
+	/* If we're going to preload the fpu context, make sure clts
+	   is run while we're batching the cpu state updates. */
+	if (preload_fpu)
+		clts();
+
 	/*
 	 * Leave lazy mode, flushing any hypercalls made here.
 	 * This must be done before restoring TLS segments so
@@ -418,15 +427,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 */
 	arch_end_context_switch(next_p);
 
-	/* If the task has used fpu the last 5 timeslices, just do a full
-	 * restore of the math state immediately to avoid the trap; the
-	 * chances of needing FPU soon are obviously high now
-	 *
-	 * tsk_used_math() checks prevent calling math_state_restore(),
-	 * which can sleep in the case of !tsk_used_math()
-	 */
-	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
-		math_state_restore();
+	if (preload_fpu)
+		__math_state_restore();
 
 	/*
 	 * Restore %gs if needed (which is common)