Diffstat (limited to 'arch/i386/kernel/process.c')
-rw-r--r--  arch/i386/kernel/process.c  37
1 file changed, 21 insertions(+), 16 deletions(-)
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 393a67d5d94..61999479b7a 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -39,6 +39,7 @@
 #include <linux/random.h>
 #include <linux/personality.h>
 #include <linux/tick.h>
+#include <linux/percpu.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -57,7 +58,6 @@
 
 #include <asm/tlbflush.h>
 #include <asm/cpu.h>
-#include <asm/pda.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
@@ -66,6 +66,12 @@ static int hlt_counter;
 unsigned long boot_option_idle_override = 0;
 EXPORT_SYMBOL(boot_option_idle_override);
 
+DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
+EXPORT_PER_CPU_SYMBOL(current_task);
+
+DEFINE_PER_CPU(int, cpu_number);
+EXPORT_PER_CPU_SYMBOL(cpu_number);
+
 /*
  * Return saved PC of a blocked thread.
  */
@@ -272,25 +278,24 @@ void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
         }
 }
 
-static int __init idle_setup (char *str)
+static int __init idle_setup(char *str)
 {
-        if (!strncmp(str, "poll", 4)) {
+        if (!strcmp(str, "poll")) {
                 printk("using polling idle threads.\n");
                 pm_idle = poll_idle;
 #ifdef CONFIG_X86_SMP
                 if (smp_num_siblings > 1)
                         printk("WARNING: polling idle and HT enabled, performance may degrade.\n");
 #endif
-        } else if (!strncmp(str, "halt", 4)) {
-                printk("using halt in idle threads.\n");
-                pm_idle = default_idle;
-        }
+        } else if (!strcmp(str, "mwait"))
+                force_mwait = 1;
+        else
+                return -1;
 
         boot_option_idle_override = 1;
-        return 1;
+        return 0;
 }
-
-__setup("idle=", idle_setup);
+early_param("idle", idle_setup);
 
 void show_regs(struct pt_regs * regs)
 {
@@ -343,7 +348,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 
         regs.xds = __USER_DS;
         regs.xes = __USER_DS;
-        regs.xfs = __KERNEL_PDA;
+        regs.xfs = __KERNEL_PERCPU;
         regs.orig_eax = -1;
         regs.eip = (unsigned long) kernel_thread_helper;
         regs.xcs = __KERNEL_CS | get_kernel_rpl();
@@ -376,7 +381,7 @@ void exit_thread(void)
                 t->io_bitmap_max = 0;
                 tss->io_bitmap_owner = NULL;
                 tss->io_bitmap_max = 0;
-                tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
+                tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
                 put_cpu();
         }
 }
@@ -555,7 +560,7 @@ static noinline void __switch_to_xtra(struct task_struct *next_p,
                  * Disable the bitmap via an invalid offset. We still cache
                  * the previous bitmap owner and the IO bitmap contents:
                  */
-                tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
+                tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
                 return;
         }
 
@@ -565,7 +570,7 @@ static noinline void __switch_to_xtra(struct task_struct *next_p,
                  * matches the next task, we dont have to do anything but
                  * to set a valid offset in the TSS:
                  */
-                tss->io_bitmap_base = IO_BITMAP_OFFSET;
+                tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
                 return;
         }
         /*
@@ -577,7 +582,7 @@ static noinline void __switch_to_xtra(struct task_struct *next_p,
          * redundant copies when the currently switched task does not
          * perform any I/O during its timeslice.
          */
-        tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
+        tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
 }
 
 /*
@@ -712,7 +717,7 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
         if (prev->gs | next->gs)
                 loadsegment(gs, next->gs);
 
-        write_pda(pcurrent, next_p);
+        x86_write_percpu(current_task, next_p);
 
         return prev_p;
 }