path: root/arch/x86/kernel/smpboot_32.c
author	Glauber de Oliveira Costa <gcosta@redhat.com>	2008-03-19 13:25:45 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-04-17 11:41:02 -0400
commit	365c894c65b98da944992199ea24206f531674de (patch)
tree	5d3048f9e101c36d47aa823f282c865dcffa6dbe /arch/x86/kernel/smpboot_32.c
parent	ddd10ecfa231c88382fc2f10a3120d2ad8e92381 (diff)
x86: use create_idle struct in do_boot_cpu
Use a new worker, with the help of the create_idle struct, to fork the idle thread. We now have two workers, the first of them triggered by __smp_prepare_cpu. But the latter is going away soon.

Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
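In essence, the patch uses the familiar "fork on keventd, signal a completion" idiom: bundle the target CPU, a work item and a completion into one struct, let the worker call fork_idle() from keventd context, and have the boot path wait on the completion. Below is a minimal sketch of that idiom, written against the workqueue/completion API of this kernel era (INIT_WORK, schedule_work, keventd_up, current_is_keventd, fork_idle); the wrapper fork_idle_for() is hypothetical and only groups the steps that the patch open-codes inside do_boot_cpu(), and annotations such as __cpuinit are omitted.

/*
 * Sketch only: mirrors the create_idle pattern from the patch below.
 * fork_idle_for() is a hypothetical helper, not part of the patch.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/err.h>

struct create_idle {
	struct work_struct work;	/* queued on keventd */
	struct task_struct *idle;	/* result of fork_idle() */
	struct completion done;		/* signalled once the fork is done */
	int cpu;			/* CPU the idle thread belongs to */
};

static void do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle = container_of(work, struct create_idle, work);

	/* Fork from keventd context so the caller's task is not the parent. */
	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}

static struct task_struct *fork_idle_for(int cpu)
{
	struct create_idle c_idle = {
		.cpu  = cpu,
		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
	};

	INIT_WORK(&c_idle.work, do_fork_idle);

	if (!keventd_up() || current_is_keventd())
		c_idle.work.func(&c_idle.work);	/* keventd not usable: run inline */
	else {
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
	}

	return c_idle.idle;	/* may be an ERR_PTR(); the caller checks it */
}

The same structure appears, minus the wrapper, in the do_boot_cpu() hunks of the diff that follows.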
Diffstat (limited to 'arch/x86/kernel/smpboot_32.c')
-rw-r--r--	arch/x86/kernel/smpboot_32.c	86
1 files changed, 59 insertions, 27 deletions
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index c30abed08923..fc1eb5255f66 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -79,6 +79,24 @@ static void map_cpu_to_logical_apicid(void);
 /* State of each CPU. */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
+/* Store all idle threads, this can be reused instead of creating
+* a new thread. Also avoids complicated thread destroy functionality
+* for idle threads.
+*/
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
+ * removed after init for !CONFIG_HOTPLUG_CPU.
+ */
+static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
+#define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
+#define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
+#else
+struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
+#define get_idle_for_cpu(x)      (idle_thread_array[(x)])
+#define set_idle_for_cpu(x, p)   (idle_thread_array[(x)] = (p))
+#endif
+
 static atomic_t init_deasserted;
 
 static void __cpuinit smp_callin(void)
@@ -513,30 +531,21 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
 
 extern cpumask_t cpu_initialized;
 
-#ifdef CONFIG_HOTPLUG_CPU
-static struct task_struct * __cpuinitdata cpu_idle_tasks[NR_CPUS];
-static inline struct task_struct * __cpuinit alloc_idle_task(int cpu)
-{
+struct create_idle {
+	struct work_struct work;
 	struct task_struct *idle;
+	struct completion done;
+	int cpu;
+};
 
-	if ((idle = cpu_idle_tasks[cpu]) != NULL) {
-		/* initialize thread_struct.  we really want to avoid destroy
-		 * idle tread
-		 */
-		idle->thread.sp = (unsigned long)task_pt_regs(idle);
-		init_idle(idle, cpu);
-		return idle;
-	}
-	idle = fork_idle(cpu);
+static void __cpuinit do_fork_idle(struct work_struct *work)
+{
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);
 
-	if (!IS_ERR(idle))
-		cpu_idle_tasks[cpu] = idle;
-	return idle;
-}
-#else
-#define alloc_idle_task(cpu) fork_idle(cpu)
-#endif
-
+	c_idle->idle = fork_idle(c_idle->cpu);
+	complete(&c_idle->done);
+}
 static int __cpuinit do_boot_cpu(int apicid, int cpu)
 /*
  * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
@@ -544,11 +553,15 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
  * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
  */
 {
-	struct task_struct *idle;
 	unsigned long boot_error;
 	int timeout;
 	unsigned long start_eip;
 	unsigned short nmi_high = 0, nmi_low = 0;
+	struct create_idle c_idle = {
+		.cpu = cpu,
+		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
+	};
+	INIT_WORK(&c_idle.work, do_fork_idle);
 
 	/*
 	 * Save current MTRR state in case it was changed since early boot
@@ -556,19 +569,38 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 	 */
 	mtrr_save_state();
 
+	c_idle.idle = get_idle_for_cpu(cpu);
+
 	/*
 	 * We can't use kernel_thread since we must avoid to
 	 * reschedule the child.
 	 */
-	idle = alloc_idle_task(cpu);
-	if (IS_ERR(idle))
-		panic("failed fork for CPU %d", cpu);
+	if (c_idle.idle) {
+		c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
+			(THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
+		init_idle(c_idle.idle, cpu);
+		goto do_rest;
+	}
+
+	if (!keventd_up() || current_is_keventd())
+		c_idle.work.func(&c_idle.work);
+	else {
+		schedule_work(&c_idle.work);
+		wait_for_completion(&c_idle.done);
+	}
+
+	if (IS_ERR(c_idle.idle)) {
+		printk(KERN_ERR "failed fork for CPU %d\n", cpu);
+		return PTR_ERR(c_idle.idle);
+	}
 
+	set_idle_for_cpu(cpu, c_idle.idle);
+do_rest:
+	per_cpu(current_task, cpu) = c_idle.idle;
 	init_gdt(cpu);
-	per_cpu(current_task, cpu) = idle;
 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 
-	idle->thread.ip = (unsigned long) start_secondary;
+	c_idle.idle->thread.ip = (unsigned long) start_secondary;
 	/* start_eip had better be page-aligned! */
 	start_eip = setup_trampoline();
 
@@ -577,7 +609,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 	/* So we see what's up */
 	printk("Booting processor %d/%d ip %lx\n", cpu, apicid, start_eip);
 	/* Stack for startup_32 can be just as for start_secondary onwards */
-	stack_start.sp = (void *) idle->thread.sp;
+	stack_start.sp = (void *) c_idle.idle->thread.sp;
 
 	irq_ctx_init(cpu);
 