path: root/arch/x86/kernel/process_64.c
author		Ingo Molnar <mingo@elte.hu>	2009-09-15 06:18:15 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-09-15 06:18:15 -0400
commit		dca2d6ac09d9ef59ff46820d4f0c94b08a671202 (patch)
tree		fdec753b842dad09e3a4151954fab3eb5c43500d	/arch/x86/kernel/process_64.c
parent		d6a65dffb30d8636b1e5d4c201564ef401a246cf (diff)
parent		18240904960a39e582ced8ba8ececb10b8c22dd3 (diff)
Merge branch 'linus' into tracing/hw-breakpoints
Conflicts:
	arch/x86/kernel/process_64.c

Semantic conflict fixed in:
	arch/x86/kvm/x86.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/process_64.c')
-rw-r--r--	arch/x86/kernel/process_64.c	36
1 file changed, 21 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 89c46f1259d..72edac026a7 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -57,9 +57,6 @@
 
 asmlinkage extern void ret_from_fork(void);
 
-DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
-EXPORT_PER_CPU_SYMBOL(current_task);
-
 DEFINE_PER_CPU(unsigned long, old_rsp);
 static DEFINE_PER_CPU(unsigned char, is_idle);
 
@@ -399,9 +396,17 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
 	unsigned fsindex, gsindex;
+	bool preload_fpu;
+
+	/*
+	 * If the task has used fpu the last 5 timeslices, just do a full
+	 * restore of the math state immediately to avoid the trap; the
+	 * chances of needing FPU soon are obviously high now
+	 */
+	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
 
 	/* we're going to use this soon, after a few expensive things */
-	if (next_p->fpu_counter > 5)
+	if (preload_fpu)
 		prefetch(next->xstate);
 
 	/*
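The new preload_fpu flag folds the eager-restore heuristic into one decision taken at the top of __switch_to(): a task that has used math and whose fpu_counter shows FPU use in more than five consecutive timeslices is assumed to need the FPU again right away, so its state is prefetched and restored up front rather than on the first device-not-available trap. A minimal, stand-alone sketch of that decision follows; the struct and helper are illustrative stand-ins, not kernel code.

/* Illustrative sketch only; field names mirror the kernel's, but the
 * struct is trimmed down to the two values the heuristic reads. */
struct demo_task {
	unsigned char fpu_counter;	/* consecutive timeslices that used the FPU */
	int           used_math;	/* has this task ever touched the FPU? */
};

/* Mirrors: preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5; */
static int should_preload_fpu(const struct demo_task *next)
{
	return next->used_math && next->fpu_counter > 5;
}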
@@ -432,6 +437,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
 	load_TLS(next, cpu);
 
+	/* Must be after DS reload */
+	unlazy_fpu(prev_p);
+
+	/* Make sure cpu is ready for new context */
+	if (preload_fpu)
+		clts();
+
 	/*
 	 * Leave lazy mode, flushing any hypercalls made here.
 	 * This must be done before restoring TLS segments so
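The reordering in this hunk is the heart of the change: prev's FPU state is saved (unlazy_fpu()) right after the data segments are reloaded, and when next's state will be preloaded, CR0.TS is cleared (clts()) at the same point, before the paravirt lazy-mode flush just below, so the eager restore at the end of the switch will not fault. A hedged, stand-alone sketch of that ordering; the stub functions are hypothetical stand-ins for the kernel primitives named in the comments.

/* Hypothetical stand-ins so the new ordering can be read in isolation. */
static void save_prev_fpu(void)	{ /* unlazy_fpu(prev_p): stash prev's FPU state */ }
static void allow_fpu_use(void)	{ /* clts(): clear CR0.TS so FPU insns don't trap */ }
static void load_next_fpu(void)	{ /* __math_state_restore(): load next's FPU state */ }

static void switch_fpu_context(int preload_fpu)
{
	save_prev_fpu();		/* must come after the DS segment reload */
	if (preload_fpu)
		allow_fpu_use();	/* done before the lazy-mode flush below */

	/* ... leave lazy paravirt mode, switch segments, stacks, per-cpu data ... */

	if (preload_fpu)
		load_next_fpu();	/* eager restore; no #NM trap since TS is clear */
}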
@@ -472,9 +484,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
 	prev->gsindex = gsindex;
 
-	/* Must be after DS reload */
-	unlazy_fpu(prev_p);
-
 	/*
 	 * Switch the PDA and FPU contexts.
 	 */
@@ -493,15 +502,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
 		__switch_to_xtra(prev_p, next_p, tss);
 
-	/* If the task has used fpu the last 5 timeslices, just do a full
-	 * restore of the math state immediately to avoid the trap; the
-	 * chances of needing FPU soon are obviously high now
-	 *
-	 * tsk_used_math() checks prevent calling math_state_restore(),
-	 * which can sleep in the case of !tsk_used_math()
-	 */
-	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
-		math_state_restore();
+	/*
+	 * Preload the FPU context, now that we've determined that the
+	 * task is likely to be using it.
+	 */
+	if (preload_fpu)
+		__math_state_restore();
 	/*
 	 * There's a problem with moving the arch_install_thread_hw_breakpoint()
 	 * call before current is updated. Suppose a kernel breakpoint is
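The switch from math_state_restore() to __math_state_restore() matters because, in kernels of this vintage, the outer helper re-checks tsk_used_math() (and may sleep initialising a fresh FPU context, as the removed comment warned) and clears TS itself; on this path preload_fpu already guarantees the task has used math and clts() ran earlier in the switch, so only the register reload is needed. A rough, runnable model of that split under those assumptions; the names in comments refer to kernel functions, the C itself is only illustrative.

#include <stdio.h>

static void restore_fpu_registers(void)	/* ~ __math_state_restore() */
{
	printf("reload FPU registers, mark task as FPU owner\n");
}

static void math_state_restore_model(int used_math, int ts_cleared)
{
	if (!used_math)
		printf("init_fpu(): allocate fresh state (may sleep!)\n");
	if (!ts_cleared)
		printf("clts(): clear CR0.TS\n");
	restore_fpu_registers();
}

int main(void)
{
	/* trap path (#NM): nothing is known yet, the full helper runs */
	math_state_restore_model(0, 0);
	/* switch path: preload_fpu proved used_math, clts() already ran,
	 * so the bare register reload is sufficient */
	restore_fpu_registers();
	return 0;
}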