Diffstat (limited to 'arch/x86/kernel/process_64.c')
-rw-r--r--   arch/x86/kernel/process_64.c   36
1 file changed, 21 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index ebefb5407b9d..ad535b683170 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -55,9 +55,6 @@
 
 asmlinkage extern void ret_from_fork(void);
 
-DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
-EXPORT_PER_CPU_SYMBOL(current_task);
-
 DEFINE_PER_CPU(unsigned long, old_rsp);
 static DEFINE_PER_CPU(unsigned char, is_idle);
 
@@ -386,9 +383,17 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
 	unsigned fsindex, gsindex;
+	bool preload_fpu;
+
+	/*
+	 * If the task has used fpu the last 5 timeslices, just do a full
+	 * restore of the math state immediately to avoid the trap; the
+	 * chances of needing FPU soon are obviously high now
+	 */
+	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
 
 	/* we're going to use this soon, after a few expensive things */
-	if (next_p->fpu_counter > 5)
+	if (preload_fpu)
 		prefetch(next->xstate);
 
 	/*
@@ -419,6 +424,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
 	load_TLS(next, cpu);
 
+	/* Must be after DS reload */
+	unlazy_fpu(prev_p);
+
+	/* Make sure cpu is ready for new context */
+	if (preload_fpu)
+		clts();
+
 	/*
 	 * Leave lazy mode, flushing any hypercalls made here.
 	 * This must be done before restoring TLS segments so
@@ -459,9 +471,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
 	prev->gsindex = gsindex;
 
-	/* Must be after DS reload */
-	unlazy_fpu(prev_p);
-
 	/*
 	 * Switch the PDA and FPU contexts.
 	 */
@@ -480,15 +489,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
 		__switch_to_xtra(prev_p, next_p, tss);
 
-	/* If the task has used fpu the last 5 timeslices, just do a full
-	 * restore of the math state immediately to avoid the trap; the
-	 * chances of needing FPU soon are obviously high now
-	 *
-	 * tsk_used_math() checks prevent calling math_state_restore(),
-	 * which can sleep in the case of !tsk_used_math()
+	/*
+	 * Preload the FPU context, now that we've determined that the
+	 * task is likely to be using it.
 	 */
-	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
-		math_state_restore();
+	if (preload_fpu)
+		__math_state_restore();
 	return prev_p;
 }
 
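
The net effect of the hunks above is an ordering change in __switch_to(): decide up front whether the incoming task is an FPU-heavy user (tsk_used_math() and fpu_counter > 5), prefetch its extended state, flush the outgoing task's FPU state with unlazy_fpu() right after the segment reloads, clear CR0.TS with clts() so the eager restore will not trap, and only then call __math_state_restore(). Below is a minimal user-space sketch of just the preload heuristic, for readers who want to see the decision in isolation; struct task, should_preload_fpu() and context_switch() are illustrative stand-ins, not the kernel's definitions, and only the names quoted in the comments come from the diff.

/*
 * Minimal sketch of the "preload FPU after more than 5 FPU-using
 * timeslices" heuristic from the patch above. Everything here is an
 * illustrative stand-in for the kernel structures.
 */
#include <stdbool.h>
#include <stdio.h>

struct task {
	const char *name;
	bool used_math;            /* stand-in for tsk_used_math(t)        */
	unsigned char fpu_counter; /* consecutive timeslices using the FPU */
};

/* Decided once, at the top of the context switch, as the patch does. */
static bool should_preload_fpu(const struct task *next)
{
	return next->used_math && next->fpu_counter > 5;
}

static void context_switch(const struct task *prev, const struct task *next)
{
	bool preload_fpu = should_preload_fpu(next);

	/*
	 * Kernel ordering after the patch (names quoted from the diff):
	 *   1. prefetch(next->xstate) if preloading
	 *   2. reload segments / load_TLS()
	 *   3. unlazy_fpu(prev)   -- must be after DS reload
	 *   4. clts() if preloading, so the restore won't trap
	 *   5. __math_state_restore() if preloading
	 */
	printf("switch %s -> %s: %s FPU state\n",
	       prev->name, next->name,
	       preload_fpu ? "eagerly restoring" : "lazily faulting in");
}

int main(void)
{
	struct task shell = { "shell", true, 1 };
	struct task blas  = { "blas",  true, 9 };

	context_switch(&shell, &blas);  /* heavy FPU user -> preload       */
	context_switch(&blas, &shell);  /* light FPU user -> lazy restore  */
	return 0;
}

The reason the diff moves unlazy_fpu() ahead of the preload, per its own comments, is that the previous task's state must be flushed (and CR0.TS cleared) before __math_state_restore() runs, so the eager restore does not take a device-not-available trap; tasks below the threshold keep the old lazy behaviour and fault their state in on first FPU use.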