author    | Linus Torvalds <torvalds@woody.osdl.org> | 2006-12-07 11:59:11 -0500
committer | Linus Torvalds <torvalds@woody.osdl.org> | 2006-12-07 11:59:11 -0500
commit    | 4522d58275f124105819723e24e912c8e5bf3cdd (patch)
tree      | b92c29014fadffe049c1925676037f0092b8d112 /arch/i386/kernel/process.c
parent    | 6cf24f031bc97cb5a7c9df3b6e73c45b628b2b28 (diff)
parent    | 64a26a731235b59c9d73bbe82c1f896d57400d37 (diff)
Merge branch 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6
* 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6: (156 commits)
[PATCH] x86-64: Export smp_call_function_single
[PATCH] i386: Clean up smp_tune_scheduling()
[PATCH] unwinder: move .eh_frame to RODATA
[PATCH] unwinder: fully support linker generated .eh_frame_hdr section
[PATCH] x86-64: don't use set_irq_regs()
[PATCH] x86-64: check vector in setup_ioapic_dest to verify if need setup_IO_APIC_irq
[PATCH] x86-64: Make ix86 default to HIGHMEM4G instead of NOHIGHMEM
[PATCH] i386: replace kmalloc+memset with kzalloc
[PATCH] x86-64: remove remaining pc98 code
[PATCH] x86-64: remove unused variable
[PATCH] x86-64: Fix constraints in atomic_add_return()
[PATCH] x86-64: fix asm constraints in i386 atomic_add_return
[PATCH] x86-64: Correct documentation for bzImage protocol v2.05
[PATCH] x86-64: replace kmalloc+memset with kzalloc in MTRR code
[PATCH] x86-64: Fix numaq build error
[PATCH] x86-64: include/asm-x86_64/cpufeature.h isn't a userspace header
[PATCH] unwinder: Add debugging output to the Dwarf2 unwinder
[PATCH] x86-64: Clarify error message in GART code
[PATCH] x86-64: Fix interrupt race in idle callback (3rd try)
[PATCH] x86-64: Remove unwind stack pointer alignment forcing again
...
Fixed conflict in include/linux/uaccess.h manually
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/i386/kernel/process.c')
-rw-r--r-- | arch/i386/kernel/process.c | 81
1 file changed, 37 insertions(+), 44 deletions(-)
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index dd53c58f64f1..99308510a17c 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -56,6 +56,7 @@
 
 #include <asm/tlbflush.h>
 #include <asm/cpu.h>
+#include <asm/pda.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
@@ -99,22 +100,18 @@ EXPORT_SYMBOL(enable_hlt);
  */
 void default_idle(void)
 {
-	local_irq_enable();
-
 	if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
 		current_thread_info()->status &= ~TS_POLLING;
 		smp_mb__after_clear_bit();
-		while (!need_resched()) {
-			local_irq_disable();
-			if (!need_resched())
-				safe_halt();
-			else
-				local_irq_enable();
-		}
+		local_irq_disable();
+		if (!need_resched())
+			safe_halt();	/* enables interrupts racelessly */
+		else
+			local_irq_enable();
 		current_thread_info()->status |= TS_POLLING;
 	} else {
-		while (!need_resched())
-			cpu_relax();
+		/* loop is done by the caller */
+		cpu_relax();
 	}
 }
 #ifdef CONFIG_APM_MODULE
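The default_idle() hunk above closes a classic wakeup race: the old code could observe !need_resched() with interrupts enabled and then halt, leaving a window in which an interrupt marks the task as needing reschedule while the CPU enters hlt anyway. The fix tests need_resched() with interrupts disabled and halts via safe_halt(), which on x86 is sti; hlt: sti delays interrupt recognition by one instruction, so re-enabling interrupts and halting are effectively atomic. A minimal sketch of the pattern, assuming x86 GCC inline asm and an illustrative flag variable (this is ring-0 code, not the kernel's exact helpers):

```c
/* Illustrative sketch, not the kernel's exact code. */
static inline void safe_halt_sketch(void)
{
	/* STI delays interrupt recognition until after the next
	 * instruction, so no interrupt can fire between re-enabling
	 * and HLT: the CPU either halts first or wakes immediately. */
	__asm__ __volatile__("sti; hlt" : : : "memory");
}

static void idle_once_sketch(volatile int *need_resched_flag)
{
	__asm__ __volatile__("cli" : : : "memory");	/* close the race window */
	if (!*need_resched_flag)
		safe_halt_sketch();	/* halt; IRQs re-enabled atomically */
	else
		__asm__ __volatile__("sti" : : : "memory");
}
```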
@@ -128,14 +125,7 @@ EXPORT_SYMBOL(default_idle);
  */
 static void poll_idle (void)
 {
-	local_irq_enable();
-
-	asm volatile(
-		"2:"
-		"testl %0, %1;"
-		"rep; nop;"
-		"je 2b;"
-		: : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags));
+	cpu_relax();
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
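poll_idle() shrinks to a single cpu_relax() because the need_resched() loop now lives in the generic idle caller; the open-coded testl/rep; nop loop was duplicating it. On x86, cpu_relax() boils down to the PAUSE hint, a sketch of which follows (assuming x86):

```c
/* Sketch: the x86 PAUSE hint behind cpu_relax(). "rep; nop"
 * decodes as PAUSE on PAUSE-aware CPUs and as a plain NOP on
 * older ones, so it is safe everywhere while de-pipelining and
 * power-easing spin-wait loops. */
static inline void cpu_relax_sketch(void)
{
	__asm__ __volatile__("rep; nop" : : : "memory");
}
```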
@@ -256,8 +246,7 @@ void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 static void mwait_idle(void)
 {
 	local_irq_enable();
-	while (!need_resched())
-		mwait_idle_with_hints(0, 0);
+	mwait_idle_with_hints(0, 0);
 }
 
 void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
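mwait_idle() likewise loses its private loop for the same reason. mwait_idle_with_hints() is built on the MONITOR/MWAIT pair: MONITOR arms a write-watch on an address range (the thread flags word that carries TIF_NEED_RESCHED) and MWAIT idles until that range is written or an interrupt arrives. A rough sketch of the instruction pair, with a hypothetical wrapper name rather than the kernel's helper:

```c
/* Sketch of MONITOR/MWAIT (requires CPUID.1:ECX.MONITOR and a
 * recent assembler; illustrative, not the kernel's helper). */
static inline void mwait_pair_sketch(const void *watch,
				     unsigned long eax, unsigned long ecx)
{
	/* arm the monitor on the cache line containing *watch */
	__asm__ __volatile__("monitor" : : "a" (watch), "c" (0), "d" (0));
	/* idle until that line is written or an interrupt fires */
	__asm__ __volatile__("mwait" : : "a" (eax), "c" (ecx));
}
```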
@@ -314,8 +303,8 @@ void show_regs(struct pt_regs * regs)
 		regs->eax,regs->ebx,regs->ecx,regs->edx);
 	printk("ESI: %08lx EDI: %08lx EBP: %08lx",
 		regs->esi, regs->edi, regs->ebp);
-	printk(" DS: %04x ES: %04x\n",
-		0xffff & regs->xds,0xffff & regs->xes);
+	printk(" DS: %04x ES: %04x GS: %04x\n",
+		0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xgs);
 
 	cr0 = read_cr0();
 	cr2 = read_cr2();
@@ -346,6 +335,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 
 	regs.xds = __USER_DS;
 	regs.xes = __USER_DS;
+	regs.xgs = __KERNEL_PDA;
 	regs.orig_eax = -1;
 	regs.eip = (unsigned long) kernel_thread_helper;
 	regs.xcs = __KERNEL_CS | get_kernel_rpl();
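The xgs/__KERNEL_PDA changes in this and the following hunks come from the i386 PDA conversion: the kernel now keeps %gs loaded with a dedicated segment (__KERNEL_PDA) whose base points at a per-CPU "processor data area", and the user's %gs is saved into pt_regs (regs->xgs) on kernel entry rather than in the thread struct. Per-CPU values such as the current task pointer then become a single segment-relative load, which is what write_pda(pcurrent, next_p) later in this diff keeps up to date. A simplified sketch of the access pattern, where the struct layout and names are assumptions for illustration:

```c
/* Sketch of %gs-relative per-CPU data (i386-style PDA).
 * Layout and names are illustrative, not the kernel's exact PDA. */
struct task_struct_sketch;

struct pda_sketch {
	struct task_struct_sketch *pcurrent;	/* offset 0: current task */
};

static inline struct task_struct_sketch *read_pcurrent_sketch(void)
{
	struct task_struct_sketch *task;
	/* one segment-relative load instead of deriving "current"
	 * from the kernel stack pointer */
	__asm__ __volatile__("movl %%gs:0, %0" : "=r" (task));
	return task;
}
```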
@@ -431,7 +421,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
 	p->thread.eip = (unsigned long) ret_from_fork;
 
 	savesegment(fs,p->thread.fs);
-	savesegment(gs,p->thread.gs);
 
 	tsk = current;
 	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
@@ -508,7 +497,7 @@ void dump_thread(struct pt_regs * regs, struct user * dump)
 	dump->regs.ds = regs->xds;
 	dump->regs.es = regs->xes;
 	savesegment(fs,dump->regs.fs);
-	savesegment(gs,dump->regs.gs);
+	dump->regs.gs = regs->xgs;
 	dump->regs.orig_eax = regs->orig_eax;
 	dump->regs.eip = regs->eip;
 	dump->regs.cs = regs->xcs;
@@ -648,22 +637,27 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 
 	__unlazy_fpu(prev_p);
 
+
+	/* we're going to use this soon, after a few expensive things */
+	if (next_p->fpu_counter > 5)
+		prefetch(&next->i387.fxsave);
+
 	/*
 	 * Reload esp0.
 	 */
 	load_esp0(tss, next);
 
 	/*
-	 * Save away %fs and %gs. No need to save %es and %ds, as
-	 * those are always kernel segments while inside the kernel.
-	 * Doing this before setting the new TLS descriptors avoids
-	 * the situation where we temporarily have non-reloadable
-	 * segments in %fs and %gs. This could be an issue if the
-	 * NMI handler ever used %fs or %gs (it does not today), or
-	 * if the kernel is running inside of a hypervisor layer.
+	 * Save away %fs. No need to save %gs, as it was saved on the
+	 * stack on entry. No need to save %es and %ds, as those are
+	 * always kernel segments while inside the kernel. Doing this
+	 * before setting the new TLS descriptors avoids the situation
+	 * where we temporarily have non-reloadable segments in %fs
+	 * and %gs. This could be an issue if the NMI handler ever
+	 * used %fs or %gs (it does not today), or if the kernel is
+	 * running inside of a hypervisor layer.
 	 */
 	savesegment(fs, prev->fs);
-	savesegment(gs, prev->gs);
 
 	/*
 	 * Load the per-thread Thread-Local Storage descriptor.
@@ -671,22 +665,14 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	load_TLS(next, cpu);
 
 	/*
-	 * Restore %fs and %gs if needed.
+	 * Restore %fs if needed.
 	 *
-	 * Glibc normally makes %fs be zero, and %gs is one of
-	 * the TLS segments.
+	 * Glibc normally makes %fs be zero.
 	 */
 	if (unlikely(prev->fs | next->fs))
 		loadsegment(fs, next->fs);
 
-	if (prev->gs | next->gs)
-		loadsegment(gs, next->gs);
-
-	/*
-	 * Restore IOPL if needed.
-	 */
-	if (unlikely(prev->iopl != next->iopl))
-		set_iopl_mask(next->iopl);
+	write_pda(pcurrent, next_p);
 
 	/*
 	 * Now maybe handle debug registers and/or IO bitmaps
@@ -697,6 +683,13 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 
 	disable_tsc(prev_p, next_p);
 
+	/* If the task has used fpu the last 5 timeslices, just do a full
+	 * restore of the math state immediately to avoid the trap; the
+	 * chances of needing FPU soon are obviously high now
+	 */
+	if (next_p->fpu_counter > 5)
+		math_state_restore();
+
 	return prev_p;
 }
 
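The two fpu_counter hunks in __switch_to() implement an adaptive eager-FPU policy: when the incoming task has used the FPU in each of its last five timeslices, its save area is prefetched early in the switch and math_state_restore() is called up front, trading an unconditional restore for the device-not-available (#NM) trap that purely lazy FPU switching would take on first use. A userspace model of the heuristic, where the threshold and the fpu_counter name mirror the diff and the rest is illustrative:

```c
/* Userspace model of the adaptive lazy/eager FPU heuristic. */
#include <stdbool.h>
#include <stdio.h>

struct task_model {
	unsigned char fpu_counter;	/* consecutive timeslices with FPU use */
};

/* Timeslice accounting: extend or reset the streak. */
static void account_fpu_use(struct task_model *t, bool used_fpu)
{
	t->fpu_counter = used_fpu ? t->fpu_counter + 1 : 0;
}

/* Context-switch decision: eager restore skips the later #NM trap. */
static bool restore_eagerly(const struct task_model *t)
{
	return t->fpu_counter > 5;
}

int main(void)
{
	struct task_model t = { 0 };

	for (int slice = 0; slice < 8; slice++) {
		account_fpu_use(&t, true);
		printf("slice %d: fpu_counter=%u eager=%d\n",
		       slice, t.fpu_counter, restore_eagerly(&t));
	}
	return 0;
}
```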