Diffstat (limited to 'arch/x86/kernel/process_64.c')

 arch/x86/kernel/process_64.c | 88 ++++++++++++++++++++++++++++++++++++++--------------------------------------------------
 1 file changed, 38 insertions(+), 50 deletions(-)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index c6eb5c91e5f6..a8e53626ac9a 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -56,15 +56,6 @@ asmlinkage extern void ret_from_fork(void);
 
 unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
 
-unsigned long boot_option_idle_override = 0;
-EXPORT_SYMBOL(boot_option_idle_override);
-
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
 static ATOMIC_NOTIFIER_HEAD(idle_notifier);
 
 void idle_notifier_register(struct notifier_block *n)
@@ -94,25 +85,6 @@ void exit_idle(void)
 	__exit_idle();
 }
 
-/*
- * We use this if we don't have any better
- * idle routine..
- */
-void default_idle(void)
-{
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we
-	 * test NEED_RESCHED:
-	 */
-	smp_mb();
-	if (!need_resched())
-		safe_halt();	/* enables interrupts racelessly */
-	else
-		local_irq_enable();
-	current_thread_info()->status |= TS_POLLING;
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 DECLARE_PER_CPU(int, cpu_state);
 
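The default_idle() body deleted above is not lost: this change reads as part of unifying the 32-bit and 64-bit idle paths, with the shared copy presumably kept in arch/x86/kernel/process.c (an assumption; the destination is not visible in this diff). Its "enables interrupts racelessly" trick deserves a note. A minimal sketch of the underlying halt helper as it appears in this era's headers:

    static inline void native_safe_halt(void)
    {
    	/* STI takes effect only after the following instruction,
    	 * so no interrupt can slip in between enabling IRQs and
    	 * entering HLT; any wakeup is guaranteed to break HLT. */
    	asm volatile("sti; hlt" : : : "memory");
    }

This is why the removed code could clear TS_POLLING, do smp_mb(), re-check need_resched() and only then halt, without ever losing a wakeup.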
@@ -150,12 +122,9 @@ void cpu_idle(void)
 	while (1) {
 		tick_nohz_stop_sched_tick();
 		while (!need_resched()) {
-			void (*idle)(void);
 
 			rmb();
-			idle = pm_idle;
-			if (!idle)
-				idle = default_idle;
+
 			if (cpu_is_offline(smp_processor_id()))
 				play_dead();
 			/*
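With the NULL check gone, cpu_idle() now calls pm_idle() unconditionally, so whatever common code owns the pointer must populate it before the idle loop first runs. A sketch of that contract, with a hypothetical helper name (the real initialization lives outside this file):

    void (*pm_idle)(void);			/* now defined once, in shared code */

    static void __init setup_pm_idle(void)	/* hypothetical name */
    {
    	if (!pm_idle)
    		pm_idle = default_idle;		/* HLT-based fallback */
    }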
@@ -165,7 +134,10 @@ void cpu_idle(void)
 			 */
 			local_irq_disable();
 			enter_idle();
-			idle();
+			/* Don't trace irqs off for idle */
+			stop_critical_timings();
+			pm_idle();
+			start_critical_timings();
 			/* In many cases the interrupt that ended idle
 			   has already called exit_idle. But some idle
 			   loops can be woken up without interrupt. */
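The stop_critical_timings()/start_critical_timings() pair added around pm_idle() exists for the irqsoff latency tracer: without it, every HLT sleep would be booked as one enormous interrupts-off critical section and drown out real latencies. When the tracers are compiled out, both calls reduce to no-ops; roughly (the exact config symbols are from memory and may differ):

    #if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
    extern void stop_critical_timings(void);
    extern void start_critical_timings(void);
    #else
    # define stop_critical_timings()	do { } while (0)
    # define start_critical_timings()	do { } while (0)
    #endif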
@@ -366,10 +338,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 	p->thread.fs = me->thread.fs;
 	p->thread.gs = me->thread.gs;
 
-	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
-	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
-	asm("mov %%es,%0" : "=m" (p->thread.es));
-	asm("mov %%ds,%0" : "=m" (p->thread.ds));
+	savesegment(gs, p->thread.gsindex);
+	savesegment(fs, p->thread.fsindex);
+	savesegment(es, p->thread.es);
+	savesegment(ds, p->thread.ds);
 
 	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
 		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
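savesegment() replaces four slightly different open-coded asm statements with the one canonical macro. A sketch of its expansion in this era's <asm/system.h> (constraints quoted from memory):

    #define savesegment(seg, value)				\
    	asm("mov %%" #seg ",%0" : "=r" (value) : : "memory")

Beyond uniformity, the "memory" clobber keeps the compiler from reordering the selector read across the surrounding thread-copy code, which the bare "=m" variants above did not spell out.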
@@ -408,7 +380,9 @@ out:
 void
 start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 {
-	asm volatile("movl %0, %%fs; movl %0, %%es; movl %0, %%ds" :: "r"(0));
+	loadsegment(fs, 0);
+	loadsegment(es, 0);
+	loadsegment(ds, 0);
 	load_gs_index(0);
 	regs->ip = new_ip;
 	regs->sp = new_sp;
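loadsegment() is more than mov %0,%%seg: it wraps the load in an exception-table fixup so that a bad selector faults into a recovery stub that loads 0 instead of oopsing the kernel. A simplified sketch of the idea (the real macro's constraints and clobbers differ in detail):

    #define loadsegment(seg, value)			\
    	asm volatile("\n"				\
    		     "1:\tmovl %k0,%%" #seg "\n"	\
    		     "2:\n"				\
    		     ".section .fixup,\"ax\"\n"		\
    		     "3:\tmovl %k1,%k0\n\t"		\
    		     "jmp 2b\n"				\
    		     ".previous\n"			\
    		     _ASM_EXTABLE(1b, 3b)		\
    		     : : "r" (value), "r" (0))

For start_thread() the three explicit calls also read better than the old one-liner that reused a single "r"(0) operand for all three segment moves.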
@@ -567,6 +541,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	    *next = &next_p->thread;
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
+	unsigned fsindex, gsindex;
 
 	/* we're going to use this soon, after a few expensive things */
 	if (next_p->fpu_counter>5)
@@ -581,22 +556,38 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * Switch DS and ES.
 	 * This won't pick up thread selector changes, but I guess that is ok.
 	 */
-	asm volatile("mov %%es,%0" : "=m" (prev->es));
+	savesegment(es, prev->es);
 	if (unlikely(next->es | prev->es))
 		loadsegment(es, next->es);
 
-	asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
+	savesegment(ds, prev->ds);
 	if (unlikely(next->ds | prev->ds))
 		loadsegment(ds, next->ds);
 
+
+	/* We must save %fs and %gs before load_TLS() because
+	 * %fs and %gs may be cleared by load_TLS().
+	 *
+	 * (e.g. xen_load_tls())
+	 */
+	savesegment(fs, fsindex);
+	savesegment(gs, gsindex);
+
 	load_TLS(next, cpu);
 
+	/*
+	 * Leave lazy mode, flushing any hypercalls made here.
+	 * This must be done before restoring TLS segments so
+	 * the GDT and LDT are properly updated, and must be
+	 * done before math_state_restore, so the TS bit is up
+	 * to date.
+	 */
+	arch_leave_lazy_cpu_mode();
+
 	/*
 	 * Switch FS and GS.
 	 */
 	{
-		unsigned fsindex;
-		asm volatile("movl %%fs,%0" : "=r" (fsindex));
 		/* segment register != 0 always requires a reload.
 		   also reload when it has changed.
 		   when prev process used 64bit base always reload
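The fsindex/gsindex locals hoisted to the top of __switch_to() in the earlier hunk exist precisely for this ordering. Reading %fs or %gs after load_TLS() would, under Xen, observe the selectors that xen_load_tls() just zeroed and record them into prev's saved state, corrupting the context to be restored later. Condensed, the required sequence is:

    savesegment(fs, fsindex);	/* 1: snapshot prev's live selectors    */
    savesegment(gs, gsindex);
    load_TLS(next, cpu);	/* 2: may zero %fs/%gs under Xen        */
    arch_leave_lazy_cpu_mode();	/* 3: flush batched hypercalls so the   */
    				/*    new GDT/LDT are actually in place */
    /* only now reload %fs/%gs from next's state */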
@@ -614,10 +605,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		if (next->fs)
 			wrmsrl(MSR_FS_BASE, next->fs);
 		prev->fsindex = fsindex;
-	}
-	{
-		unsigned gsindex;
-		asm volatile("movl %%gs,%0" : "=r" (gsindex));
+
 		if (unlikely(gsindex | next->gsindex | prev->gs)) {
 			load_gs_index(next->gsindex);
 			if (gsindex)
@@ -798,7 +786,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 		set_32bit_tls(task, FS_TLS, addr);
 		if (doit) {
 			load_TLS(&task->thread, cpu);
-			asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
+			loadsegment(fs, FS_TLS_SEL);
 		}
 		task->thread.fsindex = FS_TLS_SEL;
 		task->thread.fs = 0;
@@ -808,7 +796,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 		if (doit) {
 			/* set the selector to 0 to not confuse
 			   __switch_to */
-			asm volatile("movl %0,%%fs" :: "r" (0));
+			loadsegment(fs, 0);
 			ret = checking_wrmsrl(MSR_FS_BASE, addr);
 		}
 	}
@@ -831,7 +819,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 		if (task->thread.gsindex == GS_TLS_SEL)
 			base = read_32bit_tls(task, GS_TLS);
 		else if (doit) {
-			asm("movl %%gs,%0" : "=r" (gsindex));
+			savesegment(gs, gsindex);
 			if (gsindex)
 				rdmsrl(MSR_KERNEL_GS_BASE, base);
 			else
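For context, all three do_arch_prctl() hunks are reachable from userspace through the arch_prctl(2) syscall. A minimal sketch exercising the ARCH_GET_GS path patched above (glibc of this era ships no wrapper, so a raw syscall is assumed):

    #include <asm/prctl.h>		/* ARCH_GET_GS */
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdio.h>

    int main(void)
    {
    	unsigned long base = 0;

    	/* ends up in do_arch_prctl() -> savesegment(gs, gsindex) */
    	if (syscall(SYS_arch_prctl, ARCH_GET_GS, &base) == 0)
    		printf("gs base = %#lx\n", base);
    	return 0;
    }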