author:    David Woodhouse <dwmw2@infradead.org>  2007-01-17 18:34:51 -0500
committer: David Woodhouse <dwmw2@infradead.org>  2007-01-17 18:34:51 -0500
commit:    9cdf083f981b8d37b3212400a359368661385099 (patch)
tree:      aa15a6a08ad87e650dea40fb59b3180bef0d345b /arch/i386/kernel/process.c
parent:    e499e01d234a31d59679b7b1e1cf628d917ba49a (diff)
parent:    a8b3485287731978899ced11f24628c927890e78 (diff)

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'arch/i386/kernel/process.c')
-rw-r--r--  arch/i386/kernel/process.c | 88
 1 file changed, 43 insertions(+), 45 deletions(-)
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index dd53c58f64f1..c641056233a6 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -56,6 +56,7 @@
 
 #include <asm/tlbflush.h>
 #include <asm/cpu.h>
+#include <asm/pda.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
@@ -99,22 +100,23 @@ EXPORT_SYMBOL(enable_hlt);
  */
 void default_idle(void)
 {
-        local_irq_enable();
-
         if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
                 current_thread_info()->status &= ~TS_POLLING;
-                smp_mb__after_clear_bit();
-                while (!need_resched()) {
-                        local_irq_disable();
-                        if (!need_resched())
-                                safe_halt();
-                        else
-                                local_irq_enable();
-                }
+                /*
+                 * TS_POLLING-cleared state must be visible before we
+                 * test NEED_RESCHED:
+                 */
+                smp_mb();
+
+                local_irq_disable();
+                if (!need_resched())
+                        safe_halt();    /* enables interrupts racelessly */
+                else
+                        local_irq_enable();
                 current_thread_info()->status |= TS_POLLING;
         } else {
-                while (!need_resched())
-                        cpu_relax();
+                /* loop is done by the caller */
+                cpu_relax();
         }
 }
 #ifdef CONFIG_APM_MODULE
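
The idle routines above no longer spin on need_resched() themselves; each is now invoked once per iteration of the idle loop in the caller. A minimal sketch of that caller-side shape, assuming the usual kernel primitives (the helper name and the simplified scheduling sequence are illustrative, not the exact cpu_idle() body):

/*
 * Sketch only: the need_resched() polling moves out of the
 * individual idle routines into the caller, which invokes the
 * selected routine once per loop iteration.
 */
static void cpu_idle_sketch(void (*idle)(void))
{
        while (1) {
                while (!need_resched())
                        idle();         /* default_idle, poll_idle, ... */
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}

The smp_mb() after clearing TS_POLLING pairs with the scheduler's check of that flag: a remote CPU deciding whether it may skip the wakeup IPI must observe the cleared bit before this CPU tests need_resched(), otherwise the task could halt with a wakeup pending.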
@@ -128,14 +130,7 @@ EXPORT_SYMBOL(default_idle);
  */
 static void poll_idle (void)
 {
-        local_irq_enable();
-
-        asm volatile(
-                "2:"
-                "testl %0, %1;"
-                "rep; nop;"
-                "je 2b;"
-                : : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags));
+        cpu_relax();
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -256,8 +251,7 @@ void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 static void mwait_idle(void)
 {
         local_irq_enable();
-        while (!need_resched())
-                mwait_idle_with_hints(0, 0);
+        mwait_idle_with_hints(0, 0);
 }
 
 void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
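
mwait_idle() likewise drops its private loop and performs a single MONITOR/MWAIT step per call. A rough sketch of the sequence such a step relies on, assuming the kernel's __monitor()/__mwait() wrappers (this shows the general pattern, not the verbatim body of mwait_idle_with_hints()):

/*
 * Illustrative MONITOR/MWAIT idle step: arm the monitor on this
 * task's flags word, re-check need_resched() to close the race
 * with a flag set just before arming, then MWAIT until a write
 * to the monitored line (or an interrupt) wakes the CPU.
 */
static void mwait_idle_step_sketch(unsigned long eax, unsigned long ecx)
{
        __monitor(&current_thread_info()->flags, 0, 0);
        smp_mb();                       /* monitor armed before the test */
        if (!need_resched())
                __mwait(eax, ecx);      /* sleep until flags are written */
}

Because MWAIT wakes on any write to the monitored cache line, a set_tsk_need_resched() from another CPU brings the idle CPU straight back without an IPI.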
@@ -314,8 +308,8 @@ void show_regs(struct pt_regs * regs)
                 regs->eax,regs->ebx,regs->ecx,regs->edx);
         printk("ESI: %08lx EDI: %08lx EBP: %08lx",
                 regs->esi, regs->edi, regs->ebp);
-        printk(" DS: %04x ES: %04x\n",
-                0xffff & regs->xds,0xffff & regs->xes);
+        printk(" DS: %04x ES: %04x GS: %04x\n",
+                0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xgs);
 
         cr0 = read_cr0();
         cr2 = read_cr2();
@@ -346,6 +340,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 
         regs.xds = __USER_DS;
         regs.xes = __USER_DS;
+        regs.xgs = __KERNEL_PDA;
         regs.orig_eax = -1;
         regs.eip = (unsigned long) kernel_thread_helper;
         regs.xcs = __KERNEL_CS | get_kernel_rpl();
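
This hunk is part of the i386 per-CPU data area (PDA) work: while in the kernel, %gs now holds the __KERNEL_PDA selector, so a freshly created kernel thread must start with it loaded. The idea behind the read_pda()/write_pda() accessors in asm/pda.h is a single %gs-relative move; a simplified sketch with a hypothetical one-field PDA (the real struct i386_pda and its macros are more general):

#include <stddef.h>

struct task_struct;

struct i386_pda_sketch {
        struct task_struct *pcurrent;   /* task running on this CPU */
};

static inline struct task_struct *read_pcurrent_sketch(void)
{
        struct task_struct *p;
        /* one gs-prefixed load; no per-CPU array indexing needed */
        asm("movl %%gs:%c1, %0"
            : "=r" (p)
            : "i" (offsetof(struct i386_pda_sketch, pcurrent)));
        return p;
}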
@@ -431,7 +426,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
         p->thread.eip = (unsigned long) ret_from_fork;
 
         savesegment(fs,p->thread.fs);
-        savesegment(gs,p->thread.gs);
 
         tsk = current;
         if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
@@ -508,7 +502,7 @@ void dump_thread(struct pt_regs * regs, struct user * dump)
         dump->regs.ds = regs->xds;
         dump->regs.es = regs->xes;
         savesegment(fs,dump->regs.fs);
-        savesegment(gs,dump->regs.gs);
+        dump->regs.gs = regs->xgs;
         dump->regs.orig_eax = regs->orig_eax;
         dump->regs.eip = regs->eip;
         dump->regs.cs = regs->xcs;
@@ -648,22 +642,27 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
         __unlazy_fpu(prev_p);
 
+
+        /* we're going to use this soon, after a few expensive things */
+        if (next_p->fpu_counter > 5)
+                prefetch(&next->i387.fxsave);
+
         /*
          * Reload esp0.
          */
         load_esp0(tss, next);
 
         /*
-         * Save away %fs and %gs. No need to save %es and %ds, as
-         * those are always kernel segments while inside the kernel.
-         * Doing this before setting the new TLS descriptors avoids
-         * the situation where we temporarily have non-reloadable
-         * segments in %fs and %gs. This could be an issue if the
-         * NMI handler ever used %fs or %gs (it does not today), or
-         * if the kernel is running inside of a hypervisor layer.
+         * Save away %fs. No need to save %gs, as it was saved on the
+         * stack on entry. No need to save %es and %ds, as those are
+         * always kernel segments while inside the kernel. Doing this
+         * before setting the new TLS descriptors avoids the situation
+         * where we temporarily have non-reloadable segments in %fs
+         * and %gs. This could be an issue if the NMI handler ever
+         * used %fs or %gs (it does not today), or if the kernel is
+         * running inside of a hypervisor layer.
          */
         savesegment(fs, prev->fs);
-        savesegment(gs, prev->gs);
 
         /*
          * Load the per-thread Thread-Local Storage descriptor.
@@ -671,22 +670,14 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         load_TLS(next, cpu);
 
         /*
-         * Restore %fs and %gs if needed.
+         * Restore %fs if needed.
          *
-         * Glibc normally makes %fs be zero, and %gs is one of
-         * the TLS segments.
+         * Glibc normally makes %fs be zero.
          */
         if (unlikely(prev->fs | next->fs))
                 loadsegment(fs, next->fs);
 
-        if (prev->gs | next->gs)
-                loadsegment(gs, next->gs);
-
-        /*
-         * Restore IOPL if needed.
-         */
-        if (unlikely(prev->iopl != next->iopl))
-                set_iopl_mask(next->iopl);
+        write_pda(pcurrent, next_p);
 
         /*
          * Now maybe handle debug registers and/or IO bitmaps
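
The write_pda(pcurrent, next_p) line is the other half of the PDA change: instead of reloading %gs with a per-thread TLS selector at every switch, the context switch updates the PDA's pcurrent field, and `current` becomes a single PDA read. The store side, in the same sketch style as above (hypothetical helper name, mirroring read_pcurrent_sketch()):

static inline void write_pcurrent_sketch(struct task_struct *next_p)
{
        /* gs-prefixed store into the hypothetical one-field PDA */
        asm("movl %0, %%gs:%c1"
            : /* no outputs */
            : "r" (next_p),
              "i" (offsetof(struct i386_pda_sketch, pcurrent))
            : "memory");
}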
@@ -697,6 +688,13 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
         disable_tsc(prev_p, next_p);
 
+        /* If the task has used fpu the last 5 timeslices, just do a full
+         * restore of the math state immediately to avoid the trap; the
+         * chances of needing FPU soon are obviously high now
+         */
+        if (next_p->fpu_counter > 5)
+                math_state_restore();
+
         return prev_p;
 }
 
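
The two FPU hunks implement the "sleazy FPU" optimization: a task whose fpu_counter shows sustained FPU use gets its math state restored eagerly during the switch (with the fxsave area prefetched early), instead of paying a device-not-available trap on first use. A self-contained toy model of the policy follows; the threshold matches __switch_to(), while the point where the kernel actually increments the counter (math_state_restore()) is only mimicked here:

#include <stdio.h>

struct task_sketch {
        unsigned char fpu_counter;      /* consecutive FPU-using timeslices */
};

static int eager_restore(const struct task_sketch *t)
{
        return t->fpu_counter > 5;      /* same threshold as the hunk above */
}

int main(void)
{
        struct task_sketch t = { 0 };
        for (int slice = 1; slice <= 8; slice++) {
                t.fpu_counter++;        /* task touched the FPU this slice */
                printf("slice %d: %s restore\n", slice,
                       eager_restore(&t) ? "eager" : "lazy (trap)");
        }
        return 0;
}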