path: root/arch/x86/kernel/process_64.c
author    Ingo Molnar <mingo@elte.hu>  2008-10-15 07:46:29 -0400
committer Ingo Molnar <mingo@elte.hu>  2008-10-15 07:46:29 -0400
commit    b2aaf8f74cdc84a9182f6cabf198b7763bcb9d40 (patch)
tree      53ccb1c2c14751fe69cf93102e76e97021f6df07 /arch/x86/kernel/process_64.c
parent    4f962d4d65923d7b722192e729840cfb79af0a5a (diff)
parent    278429cff8809958d25415ba0ed32b59866ab1a8 (diff)
Merge branch 'linus' into stackprotector

Conflicts:
	arch/x86/kernel/Makefile
	include/asm-x86/pda.h
Diffstat (limited to 'arch/x86/kernel/process_64.c')
 -rw-r--r--  arch/x86/kernel/process_64.c | 338
 1 file changed, 168 insertions(+), 170 deletions(-)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index f73cfbc2c281..749d5f888d4d 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -38,11 +38,11 @@
 #include <linux/kdebug.h>
 #include <linux/tick.h>
 #include <linux/prctl.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
 
-#include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
-#include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
 #include <asm/mmu_context.h>
@@ -52,20 +52,12 @@
 #include <asm/proto.h>
 #include <asm/ia32.h>
 #include <asm/idle.h>
+#include <asm/syscalls.h>
 
 asmlinkage extern void ret_from_fork(void);
 
 unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
 
-unsigned long boot_option_idle_override = 0;
-EXPORT_SYMBOL(boot_option_idle_override);
-
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
 static ATOMIC_NOTIFIER_HEAD(idle_notifier);
 
 void idle_notifier_register(struct notifier_block *n)
@@ -95,48 +87,12 @@ void exit_idle(void)
 	__exit_idle();
 }
 
-/*
- * We use this if we don't have any better
- * idle routine..
- */
-void default_idle(void)
-{
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we
-	 * test NEED_RESCHED:
-	 */
-	smp_mb();
-	if (!need_resched())
-		safe_halt();	/* enables interrupts racelessly */
-	else
-		local_irq_enable();
-	current_thread_info()->status |= TS_POLLING;
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-DECLARE_PER_CPU(int, cpu_state);
-
-#include <asm/nmi.h>
-/* We halt the CPU with physical CPU hotplug */
-static inline void play_dead(void)
-{
-	idle_task_exit();
-	wbinvd();
-	mb();
-	/* Ack it */
-	__get_cpu_var(cpu_state) = CPU_DEAD;
-
-	local_irq_disable();
-	while (1)
-		halt();
-}
-#else
+#ifndef CONFIG_SMP
 static inline void play_dead(void)
 {
 	BUG();
 }
-#endif /* CONFIG_HOTPLUG_CPU */
+#endif
 
 /*
  * The idle thread. There's no useful work to be
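Worth noting: default_idle() and the CPU-hotplug play_dead() removed above were not dropped from the kernel; in this merge window they were consolidated into shared x86 code (arch/x86/kernel/process.c and smpboot.c, respectively), leaving only the !CONFIG_SMP BUG() stub here. The TS_POLLING handshake in the removed code is the instructive part: the flag must be cleared, with a full barrier, before need_resched() is re-checked, or a remote wakeup can be lost. A minimal userspace model of that publish-then-check pattern (illustrative sketch only; the atomics and names below are not kernel API):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_bool polling = true;	/* stands in for TS_POLLING   */
	static atomic_bool need_work;		/* stands in for NEED_RESCHED */

	static void idle_once(void)
	{
		/* Stop advertising "I poll, no wakeup IPI needed"... */
		atomic_store(&polling, false);
		/* ...and make that visible before the re-check (this is
		 * the smp_mb() in the removed default_idle()): */
		atomic_thread_fence(memory_order_seq_cst);
		if (!atomic_load(&need_work)) {
			/* kernel: safe_halt() -- sti;hlt, so enabling
			 * interrupts and halting cannot be separated */
		}
		atomic_store(&polling, true);
	}

A waker that observes polling == true skips the expensive IPI; the barrier is what makes that optimization safe.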
@@ -160,14 +116,11 @@ void cpu_idle(void)
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_stop_sched_tick();
+		tick_nohz_stop_sched_tick(1);
 		while (!need_resched()) {
-			void (*idle)(void);
 
 			rmb();
-			idle = pm_idle;
-			if (!idle)
-				idle = default_idle;
+
 			if (cpu_is_offline(smp_processor_id()))
 				play_dead();
 			/*
@@ -177,7 +130,10 @@ void cpu_idle(void)
 			 */
 			local_irq_disable();
 			enter_idle();
-			idle();
+			/* Don't trace irqs off for idle */
+			stop_critical_timings();
+			pm_idle();
+			start_critical_timings();
 			/* In many cases the interrupt that ended idle
 			   has already called exit_idle. But some idle
 			   loops can be woken up without interrupt. */
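The NULL check on the local idle pointer is gone because the fallback selection moved to boot-time code along with default_idle(); by the time cpu_idle() runs, pm_idle is expected to be populated. The stop/start_critical_timings() bracket tells the irqsoff latency tracer not to account the halted interval as an interrupts-off latency. The dispatch itself is an ordinary guarded function pointer; a sketch with hypothetical names:

	/* Select an implementation once at boot, then call through the
	 * pointer on every idle entry -- the shape of pm_idle above. */
	typedef void (*idle_fn)(void);

	static void halt_idle(void) { /* e.g. sti; hlt */ }

	static idle_fn cur_idle = halt_idle;	/* set during setup, never NULL */

	static void idle_body(void)
	{
		/* stop_critical_timings();   -- hide the halt from irqsoff */
		cur_idle();
		/* start_critical_timings();  -- resume latency accounting  */
	}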
@@ -192,7 +148,7 @@ void cpu_idle(void)
 }
 
 /* Prints also some state that isn't saved in the pt_regs */
-void __show_regs(struct pt_regs * regs)
+void __show_regs(struct pt_regs *regs, int all)
 {
 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
 	unsigned long d0, d1, d2, d3, d6, d7;
@@ -201,60 +157,65 @@ void __show_regs(struct pt_regs * regs)
 
 	printk("\n");
 	print_modules();
-	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
+	printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s\n",
 		current->pid, current->comm, print_tainted(),
 		init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
 		init_utsname()->version);
-	printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
+	printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
 	printk_address(regs->ip, 1);
-	printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->sp,
-		regs->flags);
-	printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
+	printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
+			regs->sp, regs->flags);
+	printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
 	       regs->ax, regs->bx, regs->cx);
-	printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
+	printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
 	       regs->dx, regs->si, regs->di);
-	printk("RBP: %016lx R08: %016lx R09: %016lx\n",
+	printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
 	       regs->bp, regs->r8, regs->r9);
-	printk("R10: %016lx R11: %016lx R12: %016lx\n",
+	printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
 	       regs->r10, regs->r11, regs->r12);
-	printk("R13: %016lx R14: %016lx R15: %016lx\n",
+	printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
 	       regs->r13, regs->r14, regs->r15);
 
 	asm("movl %%ds,%0" : "=r" (ds));
 	asm("movl %%cs,%0" : "=r" (cs));
 	asm("movl %%es,%0" : "=r" (es));
 	asm("movl %%fs,%0" : "=r" (fsindex));
 	asm("movl %%gs,%0" : "=r" (gsindex));
 
 	rdmsrl(MSR_FS_BASE, fs);
 	rdmsrl(MSR_GS_BASE, gs);
 	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
+
+	if (!all)
+		return;
 
 	cr0 = read_cr0();
 	cr2 = read_cr2();
 	cr3 = read_cr3();
 	cr4 = read_cr4();
 
-	printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
-	       fs,fsindex,gs,gsindex,shadowgs);
-	printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
-	printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
+	printk(KERN_INFO "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
+	       fs, fsindex, gs, gsindex, shadowgs);
+	printk(KERN_INFO "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
+			es, cr0);
+	printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
+			cr4);
 
 	get_debugreg(d0, 0);
 	get_debugreg(d1, 1);
 	get_debugreg(d2, 2);
-	printk("DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
+	printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
 	get_debugreg(d3, 3);
 	get_debugreg(d6, 6);
 	get_debugreg(d7, 7);
-	printk("DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
+	printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
 }
 
 void show_regs(struct pt_regs *regs)
 {
-	printk("CPU %d:", smp_processor_id());
-	__show_regs(regs);
+	printk(KERN_INFO "CPU %d:", smp_processor_id());
+	__show_regs(regs, 1);
 	show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
 }
 
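The new `all` parameter lets callers skip the second half of the dump: reading CR0-CR4 and DR0-DR7 is the costly part and is not always wanted. The early return sits right after the MSR reads, so a caller passing 0 still gets the general-purpose registers and segment bases. Usage as this diff establishes it:

	__show_regs(regs, 1);	/* full dump, incl. control/debug registers
				 * -- what show_regs() does above          */
	__show_regs(regs, 0);	/* GP registers and segment state only;
				 * returns before read_cr0() etc.         */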
@@ -279,6 +240,14 @@ void exit_thread(void)
279 t->io_bitmap_max = 0; 240 t->io_bitmap_max = 0;
280 put_cpu(); 241 put_cpu();
281 } 242 }
243#ifdef CONFIG_X86_DS
244 /* Free any DS contexts that have not been properly released. */
245 if (unlikely(t->ds_ctx)) {
246 /* we clear debugctl to make sure DS is not used. */
247 update_debugctlmsr(0);
248 ds_free(t->ds_ctx);
249 }
250#endif /* CONFIG_X86_DS */
282} 251}
283 252
284void flush_thread(void) 253void flush_thread(void)
@@ -354,10 +323,10 @@ void prepare_to_copy(struct task_struct *tsk)
 
 int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 		unsigned long unused,
-	struct task_struct * p, struct pt_regs * regs)
+	struct task_struct *p, struct pt_regs *regs)
 {
 	int err;
-	struct pt_regs * childregs;
+	struct pt_regs *childregs;
 	struct task_struct *me = current;
 
 	childregs = ((struct pt_regs *)
@@ -378,10 +347,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 	p->thread.fs = me->thread.fs;
 	p->thread.gs = me->thread.gs;
 
-	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
-	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
-	asm("mov %%es,%0" : "=m" (p->thread.es));
-	asm("mov %%ds,%0" : "=m" (p->thread.ds));
+	savesegment(gs, p->thread.gsindex);
+	savesegment(fs, p->thread.fsindex);
+	savesegment(es, p->thread.es);
+	savesegment(ds, p->thread.ds);
 
 	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
 		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
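savesegment() replaces the four open-coded asm statements with one shared idiom. In kernels of this era it lived in asm-x86/system.h and expanded to roughly the following (paraphrased from memory; check the tree for the exact definition):

	/* Store segment register #seg into a C lvalue; the "memory"
	 * clobber keeps the read ordered against surrounding accesses. */
	#define savesegment(seg, value) \
		asm("mov %%" #seg ",%0" : "=r" (value) : : "memory")

	/* Usage, as in copy_thread() above: */
	savesegment(gs, p->thread.gsindex);

Beyond brevity, routing every selector read through one macro keeps the asm constraints correct in a single place.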
@@ -402,10 +371,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 		if (test_thread_flag(TIF_IA32))
 			err = do_set_thread_area(p, -1,
 				(struct user_desc __user *)childregs->si, 0);
 		else
 #endif
 			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
 		if (err)
 			goto out;
 	}
 	err = 0;
@@ -420,7 +389,9 @@ out:
420void 389void
421start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) 390start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
422{ 391{
423 asm volatile("movl %0, %%fs; movl %0, %%es; movl %0, %%ds" :: "r"(0)); 392 loadsegment(fs, 0);
393 loadsegment(es, 0);
394 loadsegment(ds, 0);
424 load_gs_index(0); 395 load_gs_index(0);
425 regs->ip = new_ip; 396 regs->ip = new_ip;
426 regs->sp = new_sp; 397 regs->sp = new_sp;
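The loadsegment() side is more than syntax: loading a selector can raise #GP if the value fails descriptor checks, so the macro wraps the mov in an exception-table fixup that falls back to the null selector instead of oopsing. A simplified sketch of the era's definition (the real one uses the _ASM_EXTABLE helper; treat this as an approximation):

	#define loadsegment(seg, value)				\
		asm volatile("\n"				\
			"1:\tmovl %k0,%%" #seg "\n"		\
			"2:\n"					\
			".section .fixup,\"ax\"\n"		\
			"3:\tmovl %k1,%%" #seg "\n\t"		\
			"jmp 2b\n"				\
			".previous\n"				\
			_ASM_EXTABLE(1b, 3b)			\
			: : "r" (value), "r" (0))

So the three loadsegment() calls in start_thread() are not just prettier than the old combined asm; each load is individually covered by a fixup entry.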
@@ -510,13 +481,27 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
510 next = &next_p->thread; 481 next = &next_p->thread;
511 482
512 debugctl = prev->debugctlmsr; 483 debugctl = prev->debugctlmsr;
513 if (next->ds_area_msr != prev->ds_area_msr) { 484
514 /* we clear debugctl to make sure DS 485#ifdef CONFIG_X86_DS
515 * is not in use when we change it */ 486 {
516 debugctl = 0; 487 unsigned long ds_prev = 0, ds_next = 0;
517 update_debugctlmsr(0); 488
518 wrmsrl(MSR_IA32_DS_AREA, next->ds_area_msr); 489 if (prev->ds_ctx)
490 ds_prev = (unsigned long)prev->ds_ctx->ds;
491 if (next->ds_ctx)
492 ds_next = (unsigned long)next->ds_ctx->ds;
493
494 if (ds_next != ds_prev) {
495 /*
496 * We clear debugctl to make sure DS
497 * is not in use when we change it:
498 */
499 debugctl = 0;
500 update_debugctlmsr(0);
501 wrmsrl(MSR_IA32_DS_AREA, ds_next);
502 }
519 } 503 }
504#endif /* CONFIG_X86_DS */
520 505
521 if (next->debugctlmsr != debugctl) 506 if (next->debugctlmsr != debugctl)
522 update_debugctlmsr(next->debugctlmsr); 507 update_debugctlmsr(next->debugctlmsr);
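The DS save area pointer moves out of thread_struct (the old ds_area_msr) into an optional per-task ds_ctx, compiled in only with CONFIG_X86_DS. The ordering constraint is unchanged from the removed code: MSR_IA32_DS_AREA must not be rewritten while DEBUGCTL still has BTS/PEBS enable bits set, so DEBUGCTL is zeroed first and the update_debugctlmsr(next->debugctlmsr) at the bottom re-arms tracing. The protocol, isolated into a sketch (hypothetical helper name; the diff open-codes this inline):

	static void switch_ds_area(unsigned long ds_prev, unsigned long ds_next,
				   unsigned long prev_debugctl,
				   unsigned long next_debugctl)
	{
		unsigned long cur = prev_debugctl;	/* MSR's current value */

		if (ds_next != ds_prev) {
			cur = 0;
			update_debugctlmsr(0);			/* quiesce BTS/PEBS */
			wrmsrl(MSR_IA32_DS_AREA, ds_next);	/* swap the buffer  */
		}
		if (next_debugctl != cur)
			update_debugctlmsr(next_debugctl);	/* re-arm if needed */
	}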
@@ -554,13 +539,13 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
 		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
 	}
 
-#ifdef X86_BTS
+#ifdef CONFIG_X86_PTRACE_BTS
 	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
 		ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
 
 	if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
 		ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
-#endif
+#endif /* CONFIG_X86_PTRACE_BTS */
 }
 
 /*
@@ -575,13 +560,14 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
 struct task_struct *
 __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
-	struct thread_struct *prev = &prev_p->thread,
-				 *next = &next_p->thread;
+	struct thread_struct *prev = &prev_p->thread;
+	struct thread_struct *next = &next_p->thread;
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
+	unsigned fsindex, gsindex;
 
 	/* we're going to use this soon, after a few expensive things */
-	if (next_p->fpu_counter>5)
+	if (next_p->fpu_counter > 5)
 		prefetch(next->xstate);
 
 	/*
@@ -589,69 +575,82 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
589 */ 575 */
590 load_sp0(tss, next); 576 load_sp0(tss, next);
591 577
592 /* 578 /*
593 * Switch DS and ES. 579 * Switch DS and ES.
594 * This won't pick up thread selector changes, but I guess that is ok. 580 * This won't pick up thread selector changes, but I guess that is ok.
595 */ 581 */
596 asm volatile("mov %%es,%0" : "=m" (prev->es)); 582 savesegment(es, prev->es);
597 if (unlikely(next->es | prev->es)) 583 if (unlikely(next->es | prev->es))
598 loadsegment(es, next->es); 584 loadsegment(es, next->es);
599 585
600 asm volatile ("mov %%ds,%0" : "=m" (prev->ds)); 586 savesegment(ds, prev->ds);
601 if (unlikely(next->ds | prev->ds)) 587 if (unlikely(next->ds | prev->ds))
602 loadsegment(ds, next->ds); 588 loadsegment(ds, next->ds);
603 589
590
591 /* We must save %fs and %gs before load_TLS() because
592 * %fs and %gs may be cleared by load_TLS().
593 *
594 * (e.g. xen_load_tls())
595 */
596 savesegment(fs, fsindex);
597 savesegment(gs, gsindex);
598
604 load_TLS(next, cpu); 599 load_TLS(next, cpu);
605 600
606 /* 601 /*
602 * Leave lazy mode, flushing any hypercalls made here.
603 * This must be done before restoring TLS segments so
604 * the GDT and LDT are properly updated, and must be
605 * done before math_state_restore, so the TS bit is up
606 * to date.
607 */
608 arch_leave_lazy_cpu_mode();
609
610 /*
607 * Switch FS and GS. 611 * Switch FS and GS.
612 *
613 * Segment register != 0 always requires a reload. Also
614 * reload when it has changed. When prev process used 64bit
615 * base always reload to avoid an information leak.
608 */ 616 */
609 { 617 if (unlikely(fsindex | next->fsindex | prev->fs)) {
610 unsigned fsindex; 618 loadsegment(fs, next->fsindex);
611 asm volatile("movl %%fs,%0" : "=r" (fsindex)); 619 /*
612 /* segment register != 0 always requires a reload. 620 * Check if the user used a selector != 0; if yes
613 also reload when it has changed. 621 * clear 64bit base, since overloaded base is always
614 when prev process used 64bit base always reload 622 * mapped to the Null selector
615 to avoid an information leak. */ 623 */
616 if (unlikely(fsindex | next->fsindex | prev->fs)) { 624 if (fsindex)
617 loadsegment(fs, next->fsindex); 625 prev->fs = 0;
618 /* check if the user used a selector != 0
619 * if yes clear 64bit base, since overloaded base
620 * is always mapped to the Null selector
621 */
622 if (fsindex)
623 prev->fs = 0;
624 }
625 /* when next process has a 64bit base use it */
626 if (next->fs)
627 wrmsrl(MSR_FS_BASE, next->fs);
628 prev->fsindex = fsindex;
629 } 626 }
630 { 627 /* when next process has a 64bit base use it */
631 unsigned gsindex; 628 if (next->fs)
632 asm volatile("movl %%gs,%0" : "=r" (gsindex)); 629 wrmsrl(MSR_FS_BASE, next->fs);
633 if (unlikely(gsindex | next->gsindex | prev->gs)) { 630 prev->fsindex = fsindex;
634 load_gs_index(next->gsindex); 631
635 if (gsindex) 632 if (unlikely(gsindex | next->gsindex | prev->gs)) {
636 prev->gs = 0; 633 load_gs_index(next->gsindex);
637 } 634 if (gsindex)
638 if (next->gs) 635 prev->gs = 0;
639 wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
640 prev->gsindex = gsindex;
641 } 636 }
637 if (next->gs)
638 wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
639 prev->gsindex = gsindex;
642 640
643 /* Must be after DS reload */ 641 /* Must be after DS reload */
644 unlazy_fpu(prev_p); 642 unlazy_fpu(prev_p);
645 643
646 /* 644 /*
647 * Switch the PDA and FPU contexts. 645 * Switch the PDA and FPU contexts.
648 */ 646 */
649 prev->usersp = read_pda(oldrsp); 647 prev->usersp = read_pda(oldrsp);
650 write_pda(oldrsp, next->usersp); 648 write_pda(oldrsp, next->usersp);
651 write_pda(pcurrent, next_p); 649 write_pda(pcurrent, next_p);
652 650
653 write_pda(kernelstack, 651 write_pda(kernelstack,
654 (unsigned long)task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET); 652 (unsigned long)task_stack_page(next_p) +
653 THREAD_SIZE - PDA_STACKOFFSET);
655#ifdef CONFIG_CC_STACKPROTECTOR 654#ifdef CONFIG_CC_STACKPROTECTOR
656 /* 655 /*
657 * Build time only check to make sure the stack_canary is at 656 * Build time only check to make sure the stack_canary is at
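Three separable changes land in this hunk. First, %fs and %gs are now saved before load_TLS(), because a paravirtualized load_TLS() (e.g. xen_load_tls()) may clear them. Second, arch_leave_lazy_cpu_mode() flushes any hypercalls batched in lazy mode before the segment reloads depend on an up-to-date GDT/LDT. Third, the fs/gs blocks lose a nesting level since fsindex/gsindex are now function-scope locals. The reload test itself is unchanged and worth decoding; a condensed model:

	/* Model of the FS reload decision in __switch_to() above. */
	static inline int fs_needs_reload(unsigned cur_sel, unsigned next_sel,
					  unsigned long prev_base)
	{
		/* A non-null selector on either side forces a reload, and
		 * so does a leftover 64-bit base in prev: it must be wiped
		 * so the next task cannot observe it (information leak). */
		return cur_sel | next_sel | (prev_base != 0);
	}

Only when all three terms are zero is skipping the comparatively slow segment load provably safe, hence the single OR-ed unlikely() branch.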
@@ -687,7 +686,7 @@ long sys_execve(char __user *name, char __user * __user *argv,
 		  char __user * __user *envp, struct pt_regs *regs)
 {
 	long error;
-	char * filename;
+	char *filename;
 
 	filename = getname(name);
 	error = PTR_ERR(filename);
@@ -745,55 +744,55 @@ asmlinkage long sys_vfork(struct pt_regs *regs)
 unsigned long get_wchan(struct task_struct *p)
 {
 	unsigned long stack;
-	u64 fp,ip;
+	u64 fp, ip;
 	int count = 0;
 
-	if (!p || p == current || p->state==TASK_RUNNING)
+	if (!p || p == current || p->state == TASK_RUNNING)
 		return 0;
 	stack = (unsigned long)task_stack_page(p);
-	if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE)
+	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
 		return 0;
 	fp = *(u64 *)(p->thread.sp);
 	do {
 		if (fp < (unsigned long)stack ||
-		    fp > (unsigned long)stack+THREAD_SIZE)
+		    fp >= (unsigned long)stack+THREAD_SIZE)
 			return 0;
 		ip = *(u64 *)(fp+8);
 		if (!in_sched_functions(ip))
 			return ip;
 		fp = *(u64 *)fp;
 	} while (count++ < 16);
 	return 0;
 }
 
 long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 {
 	int ret = 0;
 	int doit = task == current;
 	int cpu;
 
 	switch (code) {
 	case ARCH_SET_GS:
 		if (addr >= TASK_SIZE_OF(task))
 			return -EPERM;
 		cpu = get_cpu();
 		/* handle small bases via the GDT because that's faster to
 		   switch. */
 		if (addr <= 0xffffffff) {
 			set_32bit_tls(task, GS_TLS, addr);
 			if (doit) {
 				load_TLS(&task->thread, cpu);
 				load_gs_index(GS_TLS_SEL);
 			}
 			task->thread.gsindex = GS_TLS_SEL;
 			task->thread.gs = 0;
 		} else {
 			task->thread.gsindex = 0;
 			task->thread.gs = addr;
 			if (doit) {
 				load_gs_index(0);
 				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
 			}
 		}
 		put_cpu();
 		break;
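Two details in this hunk deserve a note. The get_wchan() comparisons tighten from > to >=: valid on-stack addresses are the half-open range [stack, stack+THREAD_SIZE), so a frame pointer sitting exactly at the upper bound is out of bounds and must stop the walk. And ARCH_SET_GS picks a mechanism by base width: a base that fits in 32 bits goes into a GDT TLS slot (cheap, reloaded by load_TLS() on every switch), while a full 64-bit base needs MSR_KERNEL_GS_BASE behind a null selector. The same machinery is reachable from userspace via arch_prctl(2); a small probe, assuming x86-64 Linux with the usual ARCH_* codes:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#define ARCH_SET_GS 0x1001
	#define ARCH_GET_GS 0x1004

	int main(void)
	{
		static unsigned long slot;	/* something to point GS at */
		unsigned long base = 0;

		/* Exercises the ARCH_SET_GS case above: addresses above
		 * 4 GiB take the wrmsrl() path, small ones the GDT path. */
		syscall(SYS_arch_prctl, ARCH_SET_GS, (unsigned long)&slot);
		syscall(SYS_arch_prctl, ARCH_GET_GS, &base);
		printf("GS base = %#lx (&slot = %p)\n", base, (void *)&slot);
		return 0;
	}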
@@ -809,7 +808,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 		set_32bit_tls(task, FS_TLS, addr);
 		if (doit) {
 			load_TLS(&task->thread, cpu);
-			asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
+			loadsegment(fs, FS_TLS_SEL);
 		}
 		task->thread.fsindex = FS_TLS_SEL;
 		task->thread.fs = 0;
@@ -819,7 +818,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 		if (doit) {
 			/* set the selector to 0 to not confuse
 			   __switch_to */
-			asm volatile("movl %0,%%fs" :: "r" (0));
+			loadsegment(fs, 0);
 			ret = checking_wrmsrl(MSR_FS_BASE, addr);
 		}
 	}
@@ -842,13 +841,12 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 		if (task->thread.gsindex == GS_TLS_SEL)
 			base = read_32bit_tls(task, GS_TLS);
 		else if (doit) {
-			asm("movl %%gs,%0" : "=r" (gsindex));
+			savesegment(gs, gsindex);
 			if (gsindex)
 				rdmsrl(MSR_KERNEL_GS_BASE, base);
 			else
 				base = task->thread.gs;
-		}
-		else
+		} else
 			base = task->thread.gs;
 		ret = put_user(base, (unsigned long __user *)addr);
 		break;