Diffstat (limited to 'arch/x86/kernel/process_32.c')
-rw-r--r--	arch/x86/kernel/process_32.c	40
1 file changed, 24 insertions, 16 deletions
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 59f4524984af..075580b35682 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -58,12 +58,10 @@
 #include <asm/idle.h>
 #include <asm/syscalls.h>
 #include <asm/ds.h>
+#include <asm/debugreg.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
-DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
-EXPORT_PER_CPU_SYMBOL(current_task);
-
 /*
  * Return saved PC of a blocked thread.
  */
@@ -137,7 +135,7 @@ void __show_regs(struct pt_regs *regs, int all)
 		ss = regs->ss & 0xffff;
 		gs = get_user_gs(regs);
 	} else {
-		sp = (unsigned long) (&regs->sp);
+		sp = kernel_stack_pointer(regs);
 		savesegment(ss, ss);
 		savesegment(gs, gs);
 	}
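On 32-bit, a trap that does not change privilege level pushes no SS:ESP, so the saved pt_regs frame is truncated and the interrupted kernel stack pointer is simply the address just past that frame. The open-coded cast being removed computed exactly that; kernel_stack_pointer() (the x86 ptrace.h helper) names the same idea so callers stop depending on pt_regs layout directly. A minimal sketch of the concept, not the kernel's actual definition:

	#include <asm/ptrace.h>

	/* Illustrative only: what the helper conceptually computes on 32-bit.
	 * With no SS:ESP pushed by the CPU, the pre-trap stack pointer is the
	 * address immediately after the truncated register frame. */
	static inline unsigned long sketch_kernel_stack_pointer(struct pt_regs *regs)
	{
		return (unsigned long)&regs->sp;
	}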
@@ -190,7 +188,7 @@ void __show_regs(struct pt_regs *regs, int all)
 
 void show_regs(struct pt_regs *regs)
 {
-	__show_regs(regs, 1);
+	show_registers(regs);
 	show_trace(NULL, regs, &regs->sp, regs->bp);
 }
 
@@ -262,7 +260,12 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 
 	task_user_gs(p) = get_user_gs(regs);
 
+	p->thread.io_bitmap_ptr = NULL;
 	tsk = current;
+	err = -ENOMEM;
+
+	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
+
 	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
 		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
 						  IO_BITMAP_BYTES, GFP_KERNEL);
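Setting p->thread.io_bitmap_ptr to NULL, presetting err to -ENOMEM, and zeroing ptrace_bps before the first allocation follows the usual kernel error-handling shape: once everything starts from a known-empty state, any later failure can fall through to one cleanup path that frees only what is non-NULL. A small self-contained sketch of that pattern in plain C (the struct and helper names here are made up for illustration, they are not from process_32.c):

	#include <errno.h>
	#include <stdlib.h>
	#include <string.h>

	struct child_state {
		void *io_bitmap;	/* stands in for thread.io_bitmap_ptr */
		void *bps[4];		/* stands in for thread.ptrace_bps[]  */
	};

	/* Start from a known-empty state so every failure can use the same
	 * cleanup; returns 0 on success or a negative errno value. */
	static int setup_child_state(struct child_state *c, int want_bitmap)
	{
		int err = -ENOMEM;

		c->io_bitmap = NULL;
		memset(c->bps, 0, sizeof(c->bps));

		if (want_bitmap) {
			c->io_bitmap = malloc(128);
			if (!c->io_bitmap)
				return err;	/* nothing else was allocated yet */
		}
		return 0;
	}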
@@ -350,14 +353,21 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 				 *next = &next_p->thread;
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
+	bool preload_fpu;
 
 	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
-	__unlazy_fpu(prev_p);
+	/*
+	 * If the task has used fpu the last 5 timeslices, just do a full
+	 * restore of the math state immediately to avoid the trap; the
+	 * chances of needing FPU soon are obviously high now
+	 */
+	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
 
+	__unlazy_fpu(prev_p);
 
 	/* we're going to use this soon, after a few expensive things */
-	if (next_p->fpu_counter > 5)
+	if (preload_fpu)
 		prefetch(next->xstate);
 
 	/*
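preload_fpu names the heuristic that was previously spread across two sites: eagerly restore the FPU state at switch time only when the incoming task has ever used the FPU (tsk_used_math()) and has used it in more than five consecutive timeslices (fpu_counter); everything else keeps the lazy path and takes the #NM trap on first use. A hedged stand-alone sketch of that decision (the struct below is a stand-in, not task_struct):

	#include <stdbool.h>

	/* Stand-in for the two fields the heuristic actually reads. */
	struct fpu_usage {
		bool used_math;			/* tsk_used_math() analogue         */
		unsigned char fpu_counter;	/* consecutive timeslices using FPU */
	};

	/* Preload only for clearly FPU-hot tasks; cold tasks keep the cheap
	 * lazy restore triggered by the first #NM fault after the switch. */
	static bool should_preload_fpu(const struct fpu_usage *next)
	{
		return next->used_math && next->fpu_counter > 5;
	}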
@@ -398,6 +408,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
 		__switch_to_xtra(prev_p, next_p, tss);
 
+	/* If we're going to preload the fpu context, make sure clts
+	   is run while we're batching the cpu state updates. */
+	if (preload_fpu)
+		clts();
+
 	/*
 	 * Leave lazy mode, flushing any hypercalls made here.
 	 * This must be done before restoring TLS segments so
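clts() clears CR0.TS, the flag that makes the next FPU/SSE instruction fault with #NM. Issuing it here, before arch_end_context_switch() flushes any batched paravirt state, lets the TS update ride along with the rest of the CPU-state writes instead of costing a separate trap or hypercall later. On bare metal the helper is just the CLTS instruction; a sketch of that native form (the name is prefixed to mark it as illustrative):

	/* Illustrative native form: clear CR0.TS so FPU/SSE instructions no
	 * longer raise #NM; paravirt kernels route this through a pv-ops hook
	 * that can be batched with other pending state updates. */
	static inline void sketch_native_clts(void)
	{
		asm volatile("clts");
	}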
@@ -407,15 +422,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 */
 	arch_end_context_switch(next_p);
 
-	/* If the task has used fpu the last 5 timeslices, just do a full
-	 * restore of the math state immediately to avoid the trap; the
-	 * chances of needing FPU soon are obviously high now
-	 *
-	 * tsk_used_math() checks prevent calling math_state_restore(),
-	 * which can sleep in the case of !tsk_used_math()
-	 */
-	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
-		math_state_restore();
+	if (preload_fpu)
+		__math_state_restore();
 
 	/*
 	 * Restore %gs if needed (which is common)
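The removed block called math_state_restore(), which also covers the first-use case and, as the old comment notes, can sleep when !tsk_used_math(). On this path preload_fpu already implies tsk_used_math() and clts() has run above, so the lighter __math_state_restore(), which only reloads the saved registers, is enough. A compilable sketch of how the two calls are assumed to relate; every helper below is a stand-in, only the two *_math_state_restore names come from the diff:

	#include <stdbool.h>

	static bool used_math;			/* stand-in for tsk_used_math()           */
	static void init_fpu_area(void)   { used_math = true; }  /* may sleep in reality */
	static void clear_ts(void)        { }	/* clts(): allow FPU ops without #NM      */
	static void reload_fpu_regs(void) { }	/* copy saved FPU state back to hardware  */

	/* Assumed split: the double-underscore variant only reloads registers... */
	static void sketch__math_state_restore(void)
	{
		reload_fpu_regs();
	}

	/* ...while the full variant wraps it with first-use setup and TS
	 * clearing, both already handled on the eager switch path above,
	 * which is why that path can call the lighter variant directly. */
	static void sketch_math_state_restore(void)
	{
		if (!used_math)
			init_fpu_area();
		clear_ts();
		sketch__math_state_restore();
	}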