Diffstat (limited to 'arch/x86/kernel/process_32.c')
-rw-r--r--   arch/x86/kernel/process_32.c   30
1 file changed, 16 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 59f4524984af..4cf79567cdab 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -61,9 +61,6 @@
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
-DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
-EXPORT_PER_CPU_SYMBOL(current_task);
-
 /*
  * Return saved PC of a blocked thread.
  */
@@ -350,14 +347,21 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 				 *next = &next_p->thread;
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
+	bool preload_fpu;
 
 	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
-	__unlazy_fpu(prev_p);
+	/*
+	 * If the task has used fpu the last 5 timeslices, just do a full
+	 * restore of the math state immediately to avoid the trap; the
+	 * chances of needing FPU soon are obviously high now
+	 */
+	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
 
+	__unlazy_fpu(prev_p);
 
 	/* we're going to use this soon, after a few expensive things */
-	if (next_p->fpu_counter > 5)
+	if (preload_fpu)
 		prefetch(next->xstate);
 
 	/*
@@ -398,6 +402,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
 		__switch_to_xtra(prev_p, next_p, tss);
 
+	/* If we're going to preload the fpu context, make sure clts
+	   is run while we're batching the cpu state updates. */
+	if (preload_fpu)
+		clts();
+
 	/*
 	 * Leave lazy mode, flushing any hypercalls made here.
 	 * This must be done before restoring TLS segments so
@@ -407,15 +416,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 */
 	arch_end_context_switch(next_p);
 
-	/* If the task has used fpu the last 5 timeslices, just do a full
-	 * restore of the math state immediately to avoid the trap; the
-	 * chances of needing FPU soon are obviously high now
-	 *
-	 * tsk_used_math() checks prevent calling math_state_restore(),
-	 * which can sleep in the case of !tsk_used_math()
-	 */
-	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
-		math_state_restore();
+	if (preload_fpu)
+		__math_state_restore();
 
 	/*
 	 * Restore %gs if needed (which is common)
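Taken together, the hunks move the FPU "preload" decision to the top of __switch_to() and push the expensive steps (clearing CR0.TS with clts(), restoring the math state) into the region whose CPU state updates are still being batched before arch_end_context_switch(). Below is a compressed, hedged sketch of the resulting ordering; it is not the verbatim kernel source, it elides everything unrelated to the FPU (marked "..."), and all identifiers are taken from the diff above.

	/* Sketch only: FPU-related ordering in __switch_to() after this patch. */
	bool preload_fpu;

	/* Decide eagerly: next_p must own math state and have used the FPU
	 * in each of its last 5 timeslices. */
	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;

	__unlazy_fpu(prev_p);			/* save prev's live FPU state */

	if (preload_fpu)
		prefetch(next->xstate);		/* warm the cache early */

	/* ... stack, TLS, debug register and I/O bitmap switching
	 *     (__switch_to_xtra) ... */

	if (preload_fpu)
		clts();				/* clear CR0.TS while CPU state
						 * updates are still batched */

	arch_end_context_switch(next_p);	/* leave lazy mode, flush any
						 * hypercalls made above */

	if (preload_fpu)
		__math_state_restore();		/* restore now, avoiding the
						 * later math trap */

	/* ... %gs restore, return ... */

As the new comment in the third hunk states, the reason clts() runs before arch_end_context_switch() is so that, when the CPU state updates are being batched (e.g. under a paravirtualized guest in lazy mode), the TS-clearing operation is folded into the same flush rather than issued separately.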