commit 7eb43a6d232bfa46464b501cd1987ec2d705d8cf
tree 38758c752b1f8839e3cc5b317b41ba39e392a9f8
parent 5cdaf1834f43b0edc4a3aa683aa4ec98f6bfe8a7
author Thomas Gleixner <tglx@linutronix.de> 2012-04-20 09:05:48 -0400
committer Thomas Gleixner <tglx@linutronix.de> 2012-04-26 06:06:10 -0400
x86: Use generic idle thread allocation
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: x86@kernel.org
Link: http://lkml.kernel.org/r/20120420124557.246929343@linutronix.de
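
[Context: selecting GENERIC_SMP_IDLE_THREAD means the core kernel now allocates one idle task per CPU, caches it across hotplug, and hands it to the arch via native_cpu_up()/xen_cpu_up(), which is why the private idle_thread_array and the fork-idle-via-workqueue machinery are deleted below. A minimal user-space sketch of the caching idea; loosely modeled on the generic smpboot code of this series, with illustrative names and types rather than the kernel's exact implementation:

/*
 * Sketch of a per-CPU idle-task cache, as enabled by
 * GENERIC_SMP_IDLE_THREAD (illustrative, not kernel code).
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct task { int cpu; };

/* One cached idle task per CPU: allocated once, then reused on
 * every subsequent online of that CPU. */
static struct task *idle_threads[NR_CPUS];

/* Stand-in for fork_idle(): create the idle task for a CPU. */
static struct task *fork_idle(int cpu)
{
	struct task *t = malloc(sizeof(*t));
	if (t)
		t->cpu = cpu;
	return t;
}

/* Core idea: generic code hands the arch a ready-made idle task,
 * so no architecture needs its own idle_thread_array. */
static struct task *idle_thread_get(int cpu)
{
	if (!idle_threads[cpu])
		idle_threads[cpu] = fork_idle(cpu);
	return idle_threads[cpu];
}

int main(void)
{
	/* Onlining CPU 1 twice returns the same cached idle task. */
	struct task *first = idle_thread_get(1);
	struct task *again = idle_thread_get(1);
	printf("idle task reused: %s\n", first == again ? "yes" : "no");
	return 0;
}

The diffstat shows the payoff: the arch drops its own copy of this bookkeeping.]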
 arch/x86/Kconfig           |  1 +
 arch/x86/include/asm/smp.h |  1 +
 arch/x86/kernel/smpboot.c  | 81 +++-----------
 arch/x86/xen/smp.c         | 15 +----
 4 files changed, 14 insertions(+), 84 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 1d14cc6b79ad..046bf4bd2510 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -82,6 +82,7 @@ config X86
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select GENERIC_IOMAP
 	select DCACHE_WORD_ACCESS if !DEBUG_PAGEALLOC
+	select GENERIC_SMP_IDLE_THREAD
 
 config INSTRUCTION_DECODER
 	def_bool (KPROBES || PERF_EVENTS)
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index f3ed33811c23..f8cbc6f20e31 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -164,6 +164,7 @@ int wbinvd_on_all_cpus(void);
 
 void native_send_call_func_ipi(const struct cpumask *mask);
 void native_send_call_func_single_ipi(int cpu);
+void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
 
 void smp_store_cpu_info(int id);
 #define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index def235bf7594..3acaf51dfddb 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -76,20 +76,8 @@
 /* State of each CPU */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
-/* Store all idle threads, this can be reused instead of creating
- * a new thread. Also avoids complicated thread destroy functionality
- * for idle threads.
- */
 #ifdef CONFIG_HOTPLUG_CPU
 /*
- * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
- * removed after init for !CONFIG_HOTPLUG_CPU.
- */
-static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
-#define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
-#define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
-
-/*
  * We need this for trampoline_base protection from concurrent accesses when
  * off- and onlining cores wildly.
  */
@@ -97,20 +85,16 @@ static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
 
 void cpu_hotplug_driver_lock(void)
 {
 	mutex_lock(&x86_cpu_hotplug_driver_mutex);
 }
 
 void cpu_hotplug_driver_unlock(void)
 {
 	mutex_unlock(&x86_cpu_hotplug_driver_mutex);
 }
 
 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
 ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
-#else
-static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
-#define get_idle_for_cpu(x)      (idle_thread_array[(x)])
-#define set_idle_for_cpu(x, p)   (idle_thread_array[(x)] = (p))
 #endif
 
 /* Number of siblings per CPU package */
@@ -618,22 +602,6 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 	return (send_status | accept_status);
 }
 
-struct create_idle {
-	struct work_struct work;
-	struct task_struct *idle;
-	struct completion done;
-	int cpu;
-};
-
-static void __cpuinit do_fork_idle(struct work_struct *work)
-{
-	struct create_idle *c_idle =
-		container_of(work, struct create_idle, work);
-
-	c_idle->idle = fork_idle(c_idle->cpu);
-	complete(&c_idle->done);
-}
-
 /* reduce the number of lines printed when booting a large cpu count system */
 static void __cpuinit announce_cpu(int cpu, int apicid)
 {
@@ -660,58 +628,31 @@ static void __cpuinit announce_cpu(int cpu, int apicid)
  * Returns zero if CPU booted OK, else error code from
  * ->wakeup_secondary_cpu.
  */
-static int __cpuinit do_boot_cpu(int apicid, int cpu)
+static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 {
 	unsigned long boot_error = 0;
 	unsigned long start_ip;
 	int timeout;
-	struct create_idle c_idle = {
-		.cpu	= cpu,
-		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
-	};
-
-	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
 
 	alternatives_smp_switch(1);
 
-	c_idle.idle = get_idle_for_cpu(cpu);
-
-	/*
-	 * We can't use kernel_thread since we must avoid to
-	 * reschedule the child.
-	 */
-	if (c_idle.idle) {
-		c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
-			(THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
-		init_idle(c_idle.idle, cpu);
-		goto do_rest;
-	}
-
-	schedule_work(&c_idle.work);
-	wait_for_completion(&c_idle.done);
-
-	if (IS_ERR(c_idle.idle)) {
-		printk("failed fork for CPU %d\n", cpu);
-		destroy_work_on_stack(&c_idle.work);
-		return PTR_ERR(c_idle.idle);
-	}
-
-	set_idle_for_cpu(cpu, c_idle.idle);
-do_rest:
-	per_cpu(current_task, cpu) = c_idle.idle;
+	idle->thread.sp = (unsigned long) (((struct pt_regs *)
+			  (THREAD_SIZE + task_stack_page(idle))) - 1);
+	per_cpu(current_task, cpu) = idle;
+
 #ifdef CONFIG_X86_32
 	/* Stack for startup_32 can be just as for start_secondary onwards */
 	irq_ctx_init(cpu);
 #else
-	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
+	clear_tsk_thread_flag(idle, TIF_FORK);
 	initial_gs = per_cpu_offset(cpu);
 	per_cpu(kernel_stack, cpu) =
-		(unsigned long)task_stack_page(c_idle.idle) -
+		(unsigned long)task_stack_page(idle) -
 		KERNEL_STACK_OFFSET + THREAD_SIZE;
 #endif
 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 	initial_code = (unsigned long)start_secondary;
-	stack_start  = c_idle.idle->thread.sp;
+	stack_start  = idle->thread.sp;
 
 	/* start_ip had better be page-aligned! */
 	start_ip = trampoline_address();
@@ -813,8 +754,6 @@ do_rest:
 		 */
 		smpboot_restore_warm_reset_vector();
 	}
-
-	destroy_work_on_stack(&c_idle.work);
 	return boot_error;
 }
 
@@ -851,7 +790,7 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
-	err = do_boot_cpu(apicid, cpu);
+	err = do_boot_cpu(apicid, cpu, tidle);
 	if (err) {
 		pr_debug("do_boot_cpu failed %d\n", err);
 		return -EIO;
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 64d3bbce0b36..8f44cc1a9291 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -250,18 +250,8 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 		set_cpu_possible(cpu, false);
 	}
 
-	for_each_possible_cpu (cpu) {
-		struct task_struct *idle;
-
-		if (cpu == 0)
-			continue;
-
-		idle = fork_idle(cpu);
-		if (IS_ERR(idle))
-			panic("failed fork for CPU %d", cpu);
-
+	for_each_possible_cpu(cpu)
 		set_cpu_present(cpu, true);
-	}
 }
 
 static int __cpuinit
@@ -331,9 +321,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	return 0;
 }
 
-static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *tidle)
+static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 {
-	struct task_struct *idle = idle_task(cpu);
 	int rc;
 
 	per_cpu(current_task, cpu) = idle;