Diffstat (limited to 'arch/x86/kernel/process_64.c'):

 arch/x86/kernel/process_64.c | 30 +++++-------------------------
 1 file changed, 5 insertions(+), 25 deletions(-)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 9b9fe4a85c87..cfa5c90c01db 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -286,6 +286,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 
 	set_tsk_thread_flag(p, TIF_FORK);
 
+	p->fpu_counter = 0;
 	p->thread.io_bitmap_ptr = NULL;
 
 	savesegment(gs, p->thread.gsindex);
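The lone insertion in copy_thread() zeroes the child's fpu_counter explicitly. That counter drives the preload heuristic visible in the hunks below (fpu_counter > 5 means the task is FPU-hot), and since the child's task_struct starts out as a copy of the parent's, resetting it ensures a freshly forked thread is not treated as FPU-hot before it has executed a single FPU instruction.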
@@ -386,18 +387,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
 	unsigned fsindex, gsindex;
-	bool preload_fpu;
+	fpu_switch_t fpu;
 
-	/*
-	 * If the task has used fpu the last 5 timeslices, just do a full
-	 * restore of the math state immediately to avoid the trap; the
-	 * chances of needing FPU soon are obviously high now
-	 */
-	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
-
-	/* we're going to use this soon, after a few expensive things */
-	if (preload_fpu)
-		prefetch(next->fpu.state);
+	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
 
 	/*
 	 * Reload esp0, LDT and the page table pointer:
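Both halves of the old open-coded logic now live behind a prepare/finish pair shared with the 32-bit path. switch_fpu_prepare() makes the preload decision (the same five-timeslice heuristic the removed comment describes), saves the outgoing task's state, and hands the decision back in the fpu_switch_t cookie. Below is a minimal sketch of its likely shape, not the exact body: the real helper is a static inline in a header (arch/x86/include/asm/i387.h in kernels of this era), the cpu argument feeds a "state still live in this CPU's registers" lazy-restore check elided here, and helper names such as __thread_has_fpu() and __thread_fpu_begin() are from that series but may differ in detail.

	typedef struct { int preload; } fpu_switch_t;

	static inline fpu_switch_t
	switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
	{
		fpu_switch_t fpu;

		/* Same heuristic the removed comment describes: preload
		 * if the task used the FPU in its last five timeslices. */
		fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;

		if (__thread_has_fpu(old)) {
			/* Save the outgoing state; this subsumes the
			 * "__unlazy_fpu(prev_p)" call removed below. */
			__save_init_fpu(old);
			__thread_clear_has_fpu(old);

			/* Don't toggle CR0.TS if we reload right away
			 * (this subsumes the removed clts() dance). */
			if (fpu.preload) {
				__thread_set_has_fpu(new);
				prefetch(new->thread.fpu.state);
			} else
				stts();
		} else {
			old->fpu_counter = 0;
			if (fpu.preload) {
				prefetch(new->thread.fpu.state);
				__thread_fpu_begin(new);	/* clts() */
			}
		}
		return fpu;
	}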
@@ -427,13 +419,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
 	load_TLS(next, cpu);
 
-	/* Must be after DS reload */
-	__unlazy_fpu(prev_p);
-
-	/* Make sure cpu is ready for new context */
-	if (preload_fpu)
-		clts();
-
 	/*
 	 * Leave lazy mode, flushing any hypercalls made here.
 	 * This must be done before restoring TLS segments so
@@ -474,6 +459,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
 	prev->gsindex = gsindex;
 
+	switch_fpu_finish(next_p, fpu);
+
 	/*
 	 * Switch the PDA and FPU contexts.
 	 */
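switch_fpu_finish() is the delayed second half of the handshake: it runs once the incoming task's stack, TLS and segment registers are in place, and performs the eager restore that the removed __math_state_restore() call at the bottom of this function used to do. Again a hedged sketch rather than the exact body:

	static inline void
	switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
	{
		/* Restore now rather than waiting for the #NM
		 * device-not-available trap. */
		if (fpu.preload) {
			if (unlikely(restore_fpu_checking(new)))
				__thread_fpu_end(new);	/* restore failed */
		}
	}

Splitting the work this way keeps the expensive restore out of the middle of the segment reloads while still avoiding the trap for FPU-hot tasks, which is exactly what the removed inline code was trying to do by hand.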
@@ -492,13 +479,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
 		__switch_to_xtra(prev_p, next_p, tss);
 
-	/*
-	 * Preload the FPU context, now that we've determined that the
-	 * task is likely to be using it.
-	 */
-	if (preload_fpu)
-		__math_state_restore();
-
 	return prev_p;
 }
 