Diffstat (limited to 'arch/i386/kernel/process.c')
-rw-r--r--  arch/i386/kernel/process.c  17
1 file changed, 9 insertions, 8 deletions
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index b2203e21acb3..96e3ea6b17c7 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -400,11 +400,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
 	int err;
 
 	childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
-	*childregs = *regs;
-	childregs->eax = 0;
-	childregs->esp = esp;
-
-	p->thread.esp = (unsigned long) childregs;
 	/*
 	 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 	 * This is necessary to guarantee that the entire "struct pt_regs"
@@ -415,7 +410,13 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
415 * "struct pt_regs" is possible, but they may contain the 410 * "struct pt_regs" is possible, but they may contain the
416 * completely wrong values. 411 * completely wrong values.
417 */ 412 */
418 p->thread.esp0 = (unsigned long) (childregs+1) - 8; 413 childregs = (struct pt_regs *) ((unsigned long) childregs - 8);
414 *childregs = *regs;
415 childregs->eax = 0;
416 childregs->esp = esp;
417
418 p->thread.esp = (unsigned long) childregs;
419 p->thread.esp0 = (unsigned long) (childregs+1);
419 420
420 p->thread.eip = (unsigned long) ret_from_fork; 421 p->thread.eip = (unsigned long) ret_from_fork;
421 422
@@ -611,8 +612,8 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	 * Save away %fs and %gs. No need to save %es and %ds, as
 	 * those are always kernel segments while inside the kernel.
 	 */
-	asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
-	asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
+	asm volatile("mov %%fs,%0":"=m" (prev->fs));
+	asm volatile("mov %%gs,%0":"=m" (prev->gs));
 
 	/*
 	 * Restore %fs and %gs if needed.
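
Not part of the commit, only an illustration of the copy_thread() hunks above: the patch slides the child's pt_regs frame down by the 8 reserved bytes before copying *regs into it, and then sets thread.esp0 to the end of that frame (childregs+1, i.e. 8 bytes below the stack top) instead of subtracting 8 afterwards, so the copied frame no longer extends above esp0 into the reserve. The user-space sketch below replays that pointer arithmetic with an assumed 8 KB THREAD_SIZE and a stand-in pt_regs (both hypothetical, not the kernel's definitions).

/* Illustrative only: fake THREAD_SIZE and pt_regs stand-ins, not the kernel's. */
#include <stdio.h>
#include <stdlib.h>

#define THREAD_SIZE	(8 * 1024)		/* assumed 8 KB kernel stack */

struct pt_regs { unsigned long gpr[16]; };	/* stand-in frame, size is arbitrary */

int main(void)
{
	void *stack = malloc(THREAD_SIZE);	/* plays the role of p->thread_info */
	unsigned long top = THREAD_SIZE + (unsigned long) stack;

	/* Old placement: the frame ends exactly at the stack top. */
	struct pt_regs *childregs = ((struct pt_regs *) top) - 1;

	/* New placement: slide the whole frame down by the 8 reserved bytes. */
	childregs = (struct pt_regs *) ((unsigned long) childregs - 8);
	unsigned long esp0 = (unsigned long) (childregs + 1);

	printf("stack top    : %#lx\n", top);
	printf("childregs    : %#lx  (top - sizeof(struct pt_regs) - 8)\n",
	       (unsigned long) childregs);
	printf("thread.esp0  : %#lx  (frame ends here, 8 bytes below the top)\n", esp0);
	printf("reserve above: %lu bytes\n", top - esp0);

	free(stack);
	return 0;
}

With the old layout the last 8 bytes of the copied pt_regs sat above esp0, inside the reserve the in-code comment describes; with the new layout the whole frame ends at esp0, so the reserved bytes on top of the ring0 stack sit entirely above the saved registers.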