Diffstat (limited to 'arch/x86/kernel/process_64.c')
 -rw-r--r--  arch/x86/kernel/process_64.c  |  85
 1 file changed, 35 insertions(+), 50 deletions(-)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index c6eb5c91e5f6..db5eb963e4df 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -56,15 +56,6 @@ asmlinkage extern void ret_from_fork(void);
 
 unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
 
-unsigned long boot_option_idle_override = 0;
-EXPORT_SYMBOL(boot_option_idle_override);
-
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
 static ATOMIC_NOTIFIER_HEAD(idle_notifier);
 
 void idle_notifier_register(struct notifier_block *n)
@@ -94,25 +85,6 @@ void exit_idle(void)
 	__exit_idle();
 }
 
-/*
- * We use this if we don't have any better
- * idle routine..
- */
-void default_idle(void)
-{
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we
-	 * test NEED_RESCHED:
-	 */
-	smp_mb();
-	if (!need_resched())
-		safe_halt();	/* enables interrupts racelessly */
-	else
-		local_irq_enable();
-	current_thread_info()->status |= TS_POLLING;
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 DECLARE_PER_CPU(int, cpu_state);
 
@@ -150,12 +122,9 @@ void cpu_idle(void)
 	while (1) {
 		tick_nohz_stop_sched_tick();
 		while (!need_resched()) {
-			void (*idle)(void);
 
 			rmb();
-			idle = pm_idle;
-			if (!idle)
-				idle = default_idle;
+
 			if (cpu_is_offline(smp_processor_id()))
 				play_dead();
 			/*
@@ -165,7 +134,7 @@ void cpu_idle(void)
 			 */
 			local_irq_disable();
 			enter_idle();
-			idle();
+			pm_idle();
 			/* In many cases the interrupt that ended idle
 			   has already called exit_idle. But some idle
 			   loops can be woken up without interrupt. */
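
Note: after this change cpu_idle() calls pm_idle() unconditionally, so the NULL check and default_idle() fallback removed above must now be guaranteed by the shared x86 code that owns pm_idle. A minimal sketch of the invariant that code has to provide (illustrative only, not taken from this patch; the helper name and placement are assumptions):

	/* somewhere in the common x86 process/idle code */
	void (*pm_idle)(void);
	EXPORT_SYMBOL(pm_idle);

	static void pick_idle_routine(void)	/* hypothetical helper */
	{
		if (!pm_idle)
			pm_idle = default_idle;	/* same fallback the removed lines open-coded */
	}

The only point that matters for this hunk is that pm_idle is non-NULL before any CPU enters cpu_idle().
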
@@ -366,10 +335,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 	p->thread.fs = me->thread.fs;
 	p->thread.gs = me->thread.gs;
 
-	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
-	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
-	asm("mov %%es,%0" : "=m" (p->thread.es));
-	asm("mov %%ds,%0" : "=m" (p->thread.ds));
+	savesegment(gs, p->thread.gsindex);
+	savesegment(fs, p->thread.fsindex);
+	savesegment(es, p->thread.es);
+	savesegment(ds, p->thread.ds);
 
 	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
 		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
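
Note: the open-coded "mov %%gs,%0" sequences above are replaced by savesegment(), the helper from <asm/system.h>. Roughly (paraphrased from that era's header, so treat it as a sketch rather than the exact definition):

	/* read a segment selector into 'value' without modifying the register */
	#define savesegment(seg, value) \
		asm("mov %%" #seg ",%0" : "=r" (value) : : "memory")

It is functionally equivalent to the removed asm, but uses an "=r" output instead of forcing a memory operand and keeps all segment-register reads behind one definition.
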
@@ -408,7 +377,9 @@ out:
 void
 start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 {
-	asm volatile("movl %0, %%fs; movl %0, %%es; movl %0, %%ds" :: "r"(0));
+	loadsegment(fs, 0);
+	loadsegment(es, 0);
+	loadsegment(ds, 0);
 	load_gs_index(0);
 	regs->ip = new_ip;
 	regs->sp = new_sp;
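
Note: likewise, the combined "movl %0, %%fs; movl %0, %%es; movl %0, %%ds" is split into individual loadsegment() calls. loadsegment() is more than a bare mov: the <asm/system.h> version wraps the load in an exception-table fixup so a faulting selector load falls back to the null selector instead of oopsing. A deliberately simplified sketch (the real macro adds .fixup/.previous and _ASM_EXTABLE entries; details paraphrased, not verbatim):

	/* simplified: real macro retries with the null selector on a fault */
	#define loadsegment(seg, value) \
		asm volatile("movl %k0,%%" #seg : : "r" (value) : "memory")

Routing these writes through one macro also gives a single place to maintain the constraints and the fault handling.
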
@@ -567,6 +538,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 				 *next = &next_p->thread;
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
+	unsigned fsindex, gsindex;
 
 	/* we're going to use this soon, after a few expensive things */
 	if (next_p->fpu_counter>5)
@@ -581,22 +553,38 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * Switch DS and ES.
 	 * This won't pick up thread selector changes, but I guess that is ok.
 	 */
-	asm volatile("mov %%es,%0" : "=m" (prev->es));
+	savesegment(es, prev->es);
 	if (unlikely(next->es | prev->es))
 		loadsegment(es, next->es);
 
-	asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
+	savesegment(ds, prev->ds);
 	if (unlikely(next->ds | prev->ds))
 		loadsegment(ds, next->ds);
 
+
+	/* We must save %fs and %gs before load_TLS() because
+	 * %fs and %gs may be cleared by load_TLS().
+	 *
+	 * (e.g. xen_load_tls())
+	 */
+	savesegment(fs, fsindex);
+	savesegment(gs, gsindex);
+
 	load_TLS(next, cpu);
 
+	/*
+	 * Leave lazy mode, flushing any hypercalls made here.
+	 * This must be done before restoring TLS segments so
+	 * the GDT and LDT are properly updated, and must be
+	 * done before math_state_restore, so the TS bit is up
+	 * to date.
+	 */
+	arch_leave_lazy_cpu_mode();
+
 	/*
 	 * Switch FS and GS.
 	 */
 	{
-		unsigned fsindex;
-		asm volatile("movl %%fs,%0" : "=r" (fsindex));
 		/* segment register != 0 always requires a reload.
 		   also reload when it has changed.
 		   when prev process used 64bit base always reload
@@ -614,10 +602,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		if (next->fs)
 			wrmsrl(MSR_FS_BASE, next->fs);
 		prev->fsindex = fsindex;
-	}
-	{
-		unsigned gsindex;
-		asm volatile("movl %%gs,%0" : "=r" (gsindex));
+
 		if (unlikely(gsindex | next->gsindex | prev->gs)) {
 			load_gs_index(next->gsindex);
 			if (gsindex)
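
Note: taken together, the __switch_to() hunks above amount to this ordering (condensed from the new side of the diff, not new code):

	savesegment(fs, fsindex);	/* save before load_TLS(): it may clear %fs/%gs, */
	savesegment(gs, gsindex);	/* e.g. via xen_load_tls() */
	load_TLS(next, cpu);
	arch_leave_lazy_cpu_mode();	/* flush pending hypercalls before the reloads below */
	/* then reload fs/gs from next->fsindex / next->gsindex as before,
	   using the fsindex/gsindex values saved above */

Declaring fsindex and gsindex at the top of the function (the earlier @@ -567,6 +538,7 @@ hunk) is what lets the fs and gs reload blocks share one pair of saved selectors instead of each reading the registers after load_TLS() has potentially clobbered them.
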
@@ -798,7 +783,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 		set_32bit_tls(task, FS_TLS, addr);
 		if (doit) {
 			load_TLS(&task->thread, cpu);
-			asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
+			loadsegment(fs, FS_TLS_SEL);
 		}
 		task->thread.fsindex = FS_TLS_SEL;
 		task->thread.fs = 0;
@@ -808,7 +793,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 		if (doit) {
 			/* set the selector to 0 to not confuse
 			   __switch_to */
-			asm volatile("movl %0,%%fs" :: "r" (0));
+			loadsegment(fs, 0);
 			ret = checking_wrmsrl(MSR_FS_BASE, addr);
 		}
 	}
@@ -831,7 +816,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 		if (task->thread.gsindex == GS_TLS_SEL)
 			base = read_32bit_tls(task, GS_TLS);
 		else if (doit) {
-			asm("movl %%gs,%0" : "=r" (gsindex));
+			savesegment(gs, gsindex);
 			if (gsindex)
 				rdmsrl(MSR_KERNEL_GS_BASE, base);
 			else