author      Ingo Molnar <mingo@elte.hu>    2008-07-25 05:37:07 -0400
committer   Ingo Molnar <mingo@elte.hu>    2008-07-25 05:37:07 -0400
commit      0e2f65ee30eee2db054f7fd73f462c5da33ec963
tree        26c61eb7745da0c0d9135e9d12088f570cb8530d  /arch/x86/kernel/process_64.c
parent      da7878d75b8520c9ae00d27dfbbce546a7bfdfbb
parent      fb2e405fc1fc8b20d9c78eaa1c7fd5a297efde43
Merge branch 'linus' into x86/pebs

Conflicts:
        arch/x86/Kconfig.cpu
        arch/x86/kernel/cpu/intel.c
        arch/x86/kernel/setup_64.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/process_64.c')
-rw-r--r--   arch/x86/kernel/process_64.c   142
1 file changed, 65 insertions, 77 deletions
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index a4ad0d7ea621..91ffce47af8e 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -56,15 +56,6 @@ asmlinkage extern void ret_from_fork(void);
 
 unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
 
-unsigned long boot_option_idle_override = 0;
-EXPORT_SYMBOL(boot_option_idle_override);
-
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
 static ATOMIC_NOTIFIER_HEAD(idle_notifier);
 
 void idle_notifier_register(struct notifier_block *n)
@@ -94,25 +85,6 @@ void exit_idle(void)
         __exit_idle();
 }
 
-/*
- * We use this if we don't have any better
- * idle routine..
- */
-void default_idle(void)
-{
-        current_thread_info()->status &= ~TS_POLLING;
-        /*
-         * TS_POLLING-cleared state must be visible before we
-         * test NEED_RESCHED:
-         */
-        smp_mb();
-        if (!need_resched())
-                safe_halt();    /* enables interrupts racelessly */
-        else
-                local_irq_enable();
-        current_thread_info()->status |= TS_POLLING;
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 DECLARE_PER_CPU(int, cpu_state);
 
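A note on the deletion above: default_idle() encodes a lost-wakeup protocol. TS_POLLING is cleared, the clearing is made globally visible with smp_mb(), and only then is need_resched() re-checked before halting, because the waker side only sends a reschedule IPI to a CPU that is not advertising TS_POLLING. Below is a self-contained userspace model of that handshake; the names are invented for the demo, with C11 atomics standing in for TS_POLLING, TIF_NEED_RESCHED and smp_mb(). It is a sketch of the protocol, not kernel code:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int polling = 1;       /* models TS_POLLING */
static atomic_int need_resched = 0;  /* models TIF_NEED_RESCHED */

static void waker_side(void)
{
        atomic_store(&need_resched, 1);
        atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
        if (!atomic_load(&polling))
                puts("waker: target not polling, would send IPI");
        else
                puts("waker: target polls, it will see the flag itself");
}

static void idle_side(void)
{
        atomic_store(&polling, 0);
        atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
        if (!atomic_load(&need_resched))
                puts("idle: would hlt now");            /* safe_halt() */
        else
                puts("idle: wakeup pending, skip hlt");
        atomic_store(&polling, 1);
}

int main(void)
{
        waker_side();   /* runs before the idle CPU commits to hlt... */
        idle_side();    /* ...so the re-check catches the pending wakeup */
        return 0;
}

Without the two fences, both sides could pass their checks on stale values and the wakeup would be lost while the CPU sits in hlt.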
@@ -148,14 +120,11 @@ void cpu_idle(void)
         current_thread_info()->status |= TS_POLLING;
         /* endless idle loop with no priority at all */
         while (1) {
-                tick_nohz_stop_sched_tick();
+                tick_nohz_stop_sched_tick(1);
                 while (!need_resched()) {
-                        void (*idle)(void);
 
                         rmb();
-                        idle = pm_idle;
-                        if (!idle)
-                                idle = default_idle;
+
                         if (cpu_is_offline(smp_processor_id()))
                                 play_dead();
                         /*
@@ -165,7 +134,10 @@ void cpu_idle(void)
                          */
                         local_irq_disable();
                         enter_idle();
-                        idle();
+                        /* Don't trace irqs off for idle */
+                        stop_critical_timings();
+                        pm_idle();
+                        start_critical_timings();
                         /* In many cases the interrupt that ended idle
                            has already called exit_idle. But some idle
                            loops can be woken up without interrupt. */
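Two things changed in the loop body above. First, stop_critical_timings()/start_critical_timings() bracket the halt so the irqsoff latency tracer does not account time spent idle as an interrupts-off critical section. Second, the "if (!idle) idle = default_idle;" fallback is gone and pm_idle() is called unconditionally, which is only safe if boot code installs a non-NULL pm_idle before cpu_idle() runs; the first hunk suggests pm_idle and default_idle moved to shared x86 code (presumably the unified arch/x86/kernel/process.c, which is outside this diffstat). Conceptually, the moved fallback amounts to the following sketch; the function name is hypothetical and not part of this diff:

extern void (*pm_idle)(void);   /* now defined in shared x86 code */
extern void default_idle(void);

/* Hypothetical name; the real selection logic also knows about
 * mwait/poll idle variants. */
void pick_idle_routine_sketch(void)
{
        if (!pm_idle)
                pm_idle = default_idle;
}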
@@ -374,10 +346,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
         p->thread.fs = me->thread.fs;
         p->thread.gs = me->thread.gs;
 
-        asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
-        asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
-        asm("mov %%es,%0" : "=m" (p->thread.es));
-        asm("mov %%ds,%0" : "=m" (p->thread.ds));
+        savesegment(gs, p->thread.gsindex);
+        savesegment(fs, p->thread.fsindex);
+        savesegment(es, p->thread.es);
+        savesegment(ds, p->thread.ds);
 
         if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
                 p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
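Throughout this diff the open-coded segment moves become savesegment()/loadsegment(). These are pre-existing macros from this era's include/asm-x86/system.h, not helpers added by the merge. Sketched from memory below, so verify against the actual header; the substantive differences from the replaced asm are the "=r" register constraint instead of "=m", an explicit memory clobber, and, in loadsegment(), an exception-table fixup so a faulting selector load falls back to 0 instead of oopsing:

/* Approximate, from memory -- not copied from this tree. */
#define savesegment(seg, value) \
        asm("mov %%" #seg ",%0" : "=r" (value) : : "memory")

#define loadsegment(seg, value)                         \
        asm volatile("\n"                               \
                "1:\tmovl %k0,%%" #seg "\n"             \
                "2:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "3:\tmovl %k1,%%" #seg "\n\t"           \
                "jmp 2b\n"                              \
                ".previous\n"                           \
                _ASM_EXTABLE(1b, 3b)                    \
                : : "r" (value), "r" (0))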
@@ -416,7 +388,9 @@ out:
 void
 start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 {
-        asm volatile("movl %0, %%fs; movl %0, %%es; movl %0, %%ds" :: "r"(0));
+        loadsegment(fs, 0);
+        loadsegment(es, 0);
+        loadsegment(ds, 0);
         load_gs_index(0);
         regs->ip = new_ip;
         regs->sp = new_sp;
@@ -585,10 +559,11 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
 struct task_struct *
 __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
-        struct thread_struct *prev = &prev_p->thread,
-                             *next = &next_p->thread;
+        struct thread_struct *prev = &prev_p->thread;
+        struct thread_struct *next = &next_p->thread;
         int cpu = smp_processor_id();
         struct tss_struct *tss = &per_cpu(init_tss, cpu);
+        unsigned fsindex, gsindex;
 
         /* we're going to use this soon, after a few expensive things */
         if (next_p->fpu_counter>5)
@@ -603,52 +578,64 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
          * Switch DS and ES.
          * This won't pick up thread selector changes, but I guess that is ok.
          */
-        asm volatile("mov %%es,%0" : "=m" (prev->es));
+        savesegment(es, prev->es);
         if (unlikely(next->es | prev->es))
                 loadsegment(es, next->es);
 
-        asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
+        savesegment(ds, prev->ds);
         if (unlikely(next->ds | prev->ds))
                 loadsegment(ds, next->ds);
 
+
+        /* We must save %fs and %gs before load_TLS() because
+         * %fs and %gs may be cleared by load_TLS().
+         *
+         * (e.g. xen_load_tls())
+         */
+        savesegment(fs, fsindex);
+        savesegment(gs, gsindex);
+
         load_TLS(next, cpu);
 
+        /*
+         * Leave lazy mode, flushing any hypercalls made here.
+         * This must be done before restoring TLS segments so
+         * the GDT and LDT are properly updated, and must be
+         * done before math_state_restore, so the TS bit is up
+         * to date.
+         */
+        arch_leave_lazy_cpu_mode();
+
         /*
          * Switch FS and GS.
+         *
+         * Segment register != 0 always requires a reload. Also
+         * reload when it has changed. When prev process used 64bit
+         * base always reload to avoid an information leak.
          */
-        {
-                unsigned fsindex;
-                asm volatile("movl %%fs,%0" : "=r" (fsindex));
-                /* segment register != 0 always requires a reload.
-                   also reload when it has changed.
-                   when prev process used 64bit base always reload
-                   to avoid an information leak. */
-                if (unlikely(fsindex | next->fsindex | prev->fs)) {
-                        loadsegment(fs, next->fsindex);
-                        /* check if the user used a selector != 0
-                         * if yes clear 64bit base, since overloaded base
-                         * is always mapped to the Null selector
-                         */
-                        if (fsindex)
+        if (unlikely(fsindex | next->fsindex | prev->fs)) {
+                loadsegment(fs, next->fsindex);
+                /*
+                 * Check if the user used a selector != 0; if yes
+                 * clear 64bit base, since overloaded base is always
+                 * mapped to the Null selector
+                 */
+                if (fsindex)
                         prev->fs = 0;
-                }
-                /* when next process has a 64bit base use it */
-                if (next->fs)
-                        wrmsrl(MSR_FS_BASE, next->fs);
-                prev->fsindex = fsindex;
         }
-        {
-                unsigned gsindex;
-                asm volatile("movl %%gs,%0" : "=r" (gsindex));
-                if (unlikely(gsindex | next->gsindex | prev->gs)) {
-                        load_gs_index(next->gsindex);
-                        if (gsindex)
+        /* when next process has a 64bit base use it */
+        if (next->fs)
+                wrmsrl(MSR_FS_BASE, next->fs);
+        prev->fsindex = fsindex;
+
+        if (unlikely(gsindex | next->gsindex | prev->gs)) {
+                load_gs_index(next->gsindex);
+                if (gsindex)
                         prev->gs = 0;
-                }
-                if (next->gs)
-                        wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
-                prev->gsindex = gsindex;
         }
+        if (next->gs)
+                wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
+        prev->gsindex = gsindex;
 
         /* Must be after DS reload */
         unlazy_fpu(prev_p);
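Two easy-to-misread points in the rewritten FS/GS switch above. The live selectors are now sampled into fsindex/gsindex before load_TLS(), since a paravirtualized TLS load may clear %fs/%gs (the new comment cites xen_load_tls()); sampling them afterwards would lose the user's selector. And the reload tests OR three values rather than comparing each: a single branch on "any nonzero". A minimal standalone illustration of that test's shape, with names invented for the demo:

#include <stdio.h>

/* Mirrors "if (unlikely(fsindex | next->fsindex | prev->fs))":
 * reload if the live selector, the incoming selector, or the
 * outgoing task's 64-bit base is nonzero. */
static int fs_reload_needed(unsigned cur_sel, unsigned next_sel,
                            unsigned long prev_base)
{
        return (cur_sel | next_sel | prev_base) != 0;
}

int main(void)
{
        printf("%d\n", fs_reload_needed(0, 0, 0));        /* 0: nothing to do */
        printf("%d\n", fs_reload_needed(0, 0, 0x7f0000)); /* 1: must not leak prev's base */
        return 0;
}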
@@ -661,7 +648,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         write_pda(pcurrent, next_p);
 
         write_pda(kernelstack,
-                  (unsigned long)task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
+                  (unsigned long)task_stack_page(next_p) +
+                  THREAD_SIZE - PDA_STACKOFFSET);
 #ifdef CONFIG_CC_STACKPROTECTOR
         write_pda(stack_canary, next_p->stack_canary);
         /*
@@ -820,7 +808,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
                 set_32bit_tls(task, FS_TLS, addr);
                 if (doit) {
                         load_TLS(&task->thread, cpu);
-                        asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
+                        loadsegment(fs, FS_TLS_SEL);
                 }
                 task->thread.fsindex = FS_TLS_SEL;
                 task->thread.fs = 0;
@@ -830,7 +818,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
                 if (doit) {
                         /* set the selector to 0 to not confuse
                            __switch_to */
-                        asm volatile("movl %0,%%fs" :: "r" (0));
+                        loadsegment(fs, 0);
                         ret = checking_wrmsrl(MSR_FS_BASE, addr);
                 }
         }
@@ -853,7 +841,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
                 if (task->thread.gsindex == GS_TLS_SEL)
                         base = read_32bit_tls(task, GS_TLS);
                 else if (doit) {
-                        asm("movl %%gs,%0" : "=r" (gsindex));
+                        savesegment(gs, gsindex);
                         if (gsindex)
                                 rdmsrl(MSR_KERNEL_GS_BASE, base);
                         else
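For context, the bookkeeping in do_arch_prctl() above is the kernel side of the arch_prctl(2) ABI, so the FS/GS bases it maintains can be observed from userspace. A small sketch using the raw syscall (this era's glibc shipped no wrapper); the constants come from <asm/prctl.h>:

/* Build on x86-64 Linux. */
#include <asm/prctl.h>      /* ARCH_GET_FS, ARCH_GET_GS */
#include <sys/syscall.h>    /* SYS_arch_prctl */
#include <unistd.h>
#include <stdio.h>

int main(void)
{
        unsigned long fs_base = 0, gs_base = 0;

        if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fs_base) == 0 &&
            syscall(SYS_arch_prctl, ARCH_GET_GS, &gs_base) == 0)
                printf("FS base %#lx, GS base %#lx\n", fs_base, gs_base);
        return 0;
}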