path: root/arch/x86/kernel/process_64.c
author	Ingo Molnar <mingo@elte.hu>	2008-07-18 13:53:16 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-18 13:53:16 -0400
commit	9b610fda0df5d0f0b0c64242e37441ad1b384aac (patch)
tree	0ea14b15f2e6546f37fe18d8ac3dc83077ec0e55 /arch/x86/kernel/process_64.c
parent	b8f8c3cf0a4ac0632ec3f0e15e9dc0c29de917af (diff)
parent	5b664cb235e97afbf34db9c4d77f08ebd725335e (diff)
Merge branch 'linus' into timers/nohz
Diffstat (limited to 'arch/x86/kernel/process_64.c')
-rw-r--r--	arch/x86/kernel/process_64.c	94
1 files changed, 43 insertions, 51 deletions
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index c0a5c2a687e6..9a10c1897921 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -56,15 +56,6 @@ asmlinkage extern void ret_from_fork(void);
 
 unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
 
-unsigned long boot_option_idle_override = 0;
-EXPORT_SYMBOL(boot_option_idle_override);
-
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
 static ATOMIC_NOTIFIER_HEAD(idle_notifier);
 
 void idle_notifier_register(struct notifier_block *n)
@@ -94,25 +85,6 @@ void exit_idle(void)
 	__exit_idle();
 }
 
-/*
- * We use this if we don't have any better
- * idle routine..
- */
-void default_idle(void)
-{
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we
-	 * test NEED_RESCHED:
-	 */
-	smp_mb();
-	if (!need_resched())
-		safe_halt();	/* enables interrupts racelessly */
-	else
-		local_irq_enable();
-	current_thread_info()->status |= TS_POLLING;
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 DECLARE_PER_CPU(int, cpu_state);
 
@@ -150,12 +122,9 @@ void cpu_idle(void)
 	while (1) {
 		tick_nohz_stop_sched_tick(1);
 		while (!need_resched()) {
-			void (*idle)(void);
 
 			rmb();
-			idle = pm_idle;
-			if (!idle)
-				idle = default_idle;
+
 			if (cpu_is_offline(smp_processor_id()))
 				play_dead();
 			/*
@@ -165,7 +134,10 @@ void cpu_idle(void)
 			 */
 			local_irq_disable();
 			enter_idle();
-			idle();
+			/* Don't trace irqs off for idle */
+			stop_critical_timings();
+			pm_idle();
+			start_critical_timings();
 			/* In many cases the interrupt that ended idle
 			   has already called exit_idle. But some idle
 			   loops can be woken up without interrupt. */
@@ -294,6 +266,7 @@ void flush_thread(void)
 	/*
 	 * Forget coprocessor state..
 	 */
+	tsk->fpu_counter = 0;
 	clear_fpu(tsk);
 	clear_used_math();
 }
@@ -365,10 +338,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 	p->thread.fs = me->thread.fs;
 	p->thread.gs = me->thread.gs;
 
-	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
-	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
-	asm("mov %%es,%0" : "=m" (p->thread.es));
-	asm("mov %%ds,%0" : "=m" (p->thread.ds));
+	savesegment(gs, p->thread.gsindex);
+	savesegment(fs, p->thread.fsindex);
+	savesegment(es, p->thread.es);
+	savesegment(ds, p->thread.ds);
 
 	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
 		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
@@ -407,7 +380,9 @@ out:
 void
 start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 {
-	asm volatile("movl %0, %%fs; movl %0, %%es; movl %0, %%ds" :: "r"(0));
+	loadsegment(fs, 0);
+	loadsegment(es, 0);
+	loadsegment(ds, 0);
 	load_gs_index(0);
 	regs->ip = new_ip;
 	regs->sp = new_sp;
@@ -566,6 +541,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 				 *next = &next_p->thread;
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
+	unsigned fsindex, gsindex;
 
 	/* we're going to use this soon, after a few expensive things */
 	if (next_p->fpu_counter>5)
@@ -580,22 +556,38 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * Switch DS and ES.
 	 * This won't pick up thread selector changes, but I guess that is ok.
 	 */
-	asm volatile("mov %%es,%0" : "=m" (prev->es));
+	savesegment(es, prev->es);
 	if (unlikely(next->es | prev->es))
 		loadsegment(es, next->es);
 
-	asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
+	savesegment(ds, prev->ds);
 	if (unlikely(next->ds | prev->ds))
 		loadsegment(ds, next->ds);
 
+
+	/* We must save %fs and %gs before load_TLS() because
+	 * %fs and %gs may be cleared by load_TLS().
+	 *
+	 * (e.g. xen_load_tls())
+	 */
+	savesegment(fs, fsindex);
+	savesegment(gs, gsindex);
+
 	load_TLS(next, cpu);
 
+	/*
+	 * Leave lazy mode, flushing any hypercalls made here.
+	 * This must be done before restoring TLS segments so
+	 * the GDT and LDT are properly updated, and must be
+	 * done before math_state_restore, so the TS bit is up
+	 * to date.
+	 */
+	arch_leave_lazy_cpu_mode();
+
 	/*
 	 * Switch FS and GS.
 	 */
 	{
-		unsigned fsindex;
-		asm volatile("movl %%fs,%0" : "=r" (fsindex));
 		/* segment register != 0 always requires a reload.
 		   also reload when it has changed.
 		   when prev process used 64bit base always reload
@@ -613,10 +605,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		if (next->fs)
 			wrmsrl(MSR_FS_BASE, next->fs);
 		prev->fsindex = fsindex;
-	}
-	{
-		unsigned gsindex;
-		asm volatile("movl %%gs,%0" : "=r" (gsindex));
+
 		if (unlikely(gsindex | next->gsindex | prev->gs)) {
 			load_gs_index(next->gsindex);
 			if (gsindex)
@@ -658,8 +647,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	/* If the task has used fpu the last 5 timeslices, just do a full
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
+	 *
+	 * tsk_used_math() checks prevent calling math_state_restore(),
+	 * which can sleep in the case of !tsk_used_math()
 	 */
-	if (next_p->fpu_counter>5)
+	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
 		math_state_restore();
 	return prev_p;
 }
@@ -794,7 +786,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 		set_32bit_tls(task, FS_TLS, addr);
 		if (doit) {
 			load_TLS(&task->thread, cpu);
-			asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
+			loadsegment(fs, FS_TLS_SEL);
 		}
 		task->thread.fsindex = FS_TLS_SEL;
 		task->thread.fs = 0;
@@ -804,7 +796,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 		if (doit) {
 			/* set the selector to 0 to not confuse
 			   __switch_to */
-			asm volatile("movl %0,%%fs" :: "r" (0));
+			loadsegment(fs, 0);
 			ret = checking_wrmsrl(MSR_FS_BASE, addr);
 		}
 	}
@@ -827,7 +819,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 		if (task->thread.gsindex == GS_TLS_SEL)
 			base = read_32bit_tls(task, GS_TLS);
 		else if (doit) {
-			asm("movl %%gs,%0" : "=r" (gsindex));
+			savesegment(gs, gsindex);
 			if (gsindex)
 				rdmsrl(MSR_KERNEL_GS_BASE, base);
 			else
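
For reference, the savesegment()/loadsegment() helpers that this diff substitutes for the open-coded asm are macros from the x86 system headers (include/asm-x86/system.h in this era of the tree). A simplified sketch of roughly what they expand to is shown below; it is illustrative only, and the real loadsegment() additionally carries an exception-table fixup so that a faulting segment load is recovered by clearing the selector:

/* Simplified, illustrative sketch -- not the exact kernel definitions. */

/* Read a segment register into a C variable. */
#define savesegment(seg, value) \
	asm("mov %%" #seg ",%0" : "=r" (value) : : "memory")

/* Load a selector value into a segment register. */
#define loadsegment(seg, value) \
	asm volatile("movl %k0,%%" #seg : : "r" (value))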