about summary refs log tree commit diff stats
path: root/arch/x86/kernel/process_32.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/kernel/process_32.c')
-rw-r--r--  arch/x86/kernel/process_32.c  |  59
1 file changed, 33 insertions(+), 26 deletions(-)
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index a546f55c77b4..fec79ad85dc6 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -11,6 +11,7 @@
 
 #include <stdarg.h>
 
+#include <linux/stackprotector.h>
 #include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
@@ -66,9 +67,6 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
 EXPORT_PER_CPU_SYMBOL(current_task);
 
-DEFINE_PER_CPU(int, cpu_number);
-EXPORT_PER_CPU_SYMBOL(cpu_number);
-
 /*
  * Return saved PC of a blocked thread.
  */
@@ -94,6 +92,15 @@ void cpu_idle(void)
 {
 	int cpu = smp_processor_id();
 
+	/*
+	 * If we're the non-boot CPU, nothing set the stack canary up
+	 * for us. CPU0 already has it initialized but no harm in
+	 * doing it again. This is a good place for updating it, as
+	 * we wont ever return from this function (so the invalid
+	 * canaries already on the stack wont ever trigger).
+	 */
+	boot_init_stack_canary();
+
 	current_thread_info()->status |= TS_POLLING;
 
 	/* endless idle loop with no priority at all */
@@ -111,7 +118,6 @@ void cpu_idle(void)
 				play_dead();
 
 			local_irq_disable();
-			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
 			/* Don't trace irqs off for idle */
 			stop_critical_timings();
 			pm_idle();
@@ -135,7 +141,7 @@ void __show_regs(struct pt_regs *regs, int all)
 	if (user_mode_vm(regs)) {
 		sp = regs->sp;
 		ss = regs->ss & 0xffff;
-		savesegment(gs, gs);
+		gs = get_user_gs(regs);
 	} else {
 		sp = (unsigned long) (&regs->sp);
 		savesegment(ss, ss);
@@ -216,6 +222,7 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 	regs.ds = __USER_DS;
 	regs.es = __USER_DS;
 	regs.fs = __KERNEL_PERCPU;
+	regs.gs = __KERNEL_STACK_CANARY;
 	regs.orig_ax = -1;
 	regs.ip = (unsigned long) kernel_thread_helper;
 	regs.cs = __KERNEL_CS | get_kernel_rpl();
@@ -308,7 +315,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 
 	p->thread.ip = (unsigned long) ret_from_fork;
 
-	savesegment(gs, p->thread.gs);
+	task_user_gs(p) = get_user_gs(regs);
 
 	tsk = current;
 	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
@@ -346,7 +353,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 void
 start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 {
-	__asm__("movl %0, %%gs" : : "r"(0));
+	set_user_gs(regs, 0);
 	regs->fs = 0;
 	set_fs(USER_DS);
 	regs->ds = __USER_DS;
@@ -543,7 +550,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * used %fs or %gs (it does not today), or if the kernel is
 	 * running inside of a hypervisor layer.
 	 */
-	savesegment(gs, prev->gs);
+	lazy_save_gs(prev->gs);
 
 	/*
 	 * Load the per-thread Thread-Local Storage descriptor.
@@ -589,31 +596,31 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * Restore %gs if needed (which is common)
 	 */
 	if (prev->gs | next->gs)
-		loadsegment(gs, next->gs);
+		lazy_load_gs(next->gs);
 
-	x86_write_percpu(current_task, next_p);
+	percpu_write(current_task, next_p);
 
 	return prev_p;
 }
 
-asmlinkage int sys_fork(struct pt_regs regs)
+int sys_fork(struct pt_regs *regs)
 {
-	return do_fork(SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
+	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
 }
 
-asmlinkage int sys_clone(struct pt_regs regs)
+int sys_clone(struct pt_regs *regs)
 {
 	unsigned long clone_flags;
 	unsigned long newsp;
 	int __user *parent_tidptr, *child_tidptr;
 
-	clone_flags = regs.bx;
-	newsp = regs.cx;
-	parent_tidptr = (int __user *)regs.dx;
-	child_tidptr = (int __user *)regs.di;
+	clone_flags = regs->bx;
+	newsp = regs->cx;
+	parent_tidptr = (int __user *)regs->dx;
+	child_tidptr = (int __user *)regs->di;
 	if (!newsp)
-		newsp = regs.sp;
-	return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
+		newsp = regs->sp;
+	return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr);
 }
 
 /*
@@ -626,27 +633,27 @@ asmlinkage int sys_clone(struct pt_regs regs)
  * do not have enough call-clobbered registers to hold all
  * the information you need.
  */
-asmlinkage int sys_vfork(struct pt_regs regs)
+int sys_vfork(struct pt_regs *regs)
 {
-	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
+	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0, NULL, NULL);
 }
 
 /*
  * sys_execve() executes a new program.
  */
-asmlinkage int sys_execve(struct pt_regs regs)
+int sys_execve(struct pt_regs *regs)
 {
 	int error;
 	char *filename;
 
-	filename = getname((char __user *) regs.bx);
+	filename = getname((char __user *) regs->bx);
 	error = PTR_ERR(filename);
 	if (IS_ERR(filename))
 		goto out;
 	error = do_execve(filename,
-			  (char __user * __user *) regs.cx,
-			  (char __user * __user *) regs.dx,
-			  &regs);
+			  (char __user * __user *) regs->cx,
+			  (char __user * __user *) regs->dx,
+			  regs);
 	if (error == 0) {
 		/* Make sure we don't return using sysenter.. */
 		set_thread_flag(TIF_IRET);