author		Suresh Siddha <suresh.b.siddha@intel.com>	2012-04-20 20:08:50 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2012-05-03 13:32:34 -0400
commit		3bb5d2ee396aabaa4e318f17e94d13e2ee0e5a88 (patch)
tree		723fd419cbbc6874b3303d11a439303f3c4ff46c /kernel
parent		9a1347237492f273f84ec39962b5806c70b2806a (diff)
smp, idle: Allocate idle thread for each possible cpu during boot
percpu areas are already allocated during boot for each possible cpu.
percpu idle threads can be considered as an extension of the percpu
areas, and allocate them for each possible cpu during boot.

This will eliminate the need for workqueue based idle thread allocation.

In future we can move the idle thread area into the percpu area too.

[ tglx: Moved the loop into smpboot.c and added an error check when
  the init code failed to allocate an idle thread for a cpu which
  should be onlined ]

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: venki@google.com
Link: http://lkml.kernel.org/r/1334966930.28674.245.camel@sbsiddha-desk.sc.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
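The pattern described above, allocating every idle thread up front so that CPU online becomes a pure lookup, can be sketched outside the kernel. The following is a minimal userspace C program, not kernel code: fake_fork_idle(), NR_POSSIBLE, boot_idle_threads_init() and hotplug_cpu_up() are hypothetical stand-ins for fork_idle(), the possible-cpu mask, idle_threads_init() and _cpu_up() in the diff below.

	/* Minimal userspace sketch of the boot-time allocation scheme.
	 * Not kernel code; all names here are illustrative stand-ins. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <errno.h>

	#define NR_POSSIBLE 8

	static void *idle_threads[NR_POSSIBLE];	/* models the per-cpu pointer */

	static void *fake_fork_idle(unsigned int cpu)
	{
		return malloc(64);		/* stands in for fork_idle(cpu) */
	}

	/* Boot path, mirroring idle_threads_init()/idle_init(): allocate an
	 * idle thread for every possible cpu except the boot cpu. */
	static void boot_idle_threads_init(void)
	{
		unsigned int cpu;

		for (cpu = 1; cpu < NR_POSSIBLE; cpu++)
			if (!idle_threads[cpu])
				idle_threads[cpu] = fake_fork_idle(cpu);
	}

	/* Hotplug path, mirroring idle_thread_get(): a pure lookup that
	 * fails if the boot-time allocation did not succeed. */
	static int hotplug_cpu_up(unsigned int cpu)
	{
		if (cpu >= NR_POSSIBLE || !idle_threads[cpu])
			return -ENOMEM;
		/* ... init_idle() and arch-specific bringup would go here ... */
		return 0;
	}

	int main(void)
	{
		idle_threads[0] = malloc(64);	/* boot cpu: the kernel uses `current` */
		boot_idle_threads_init();
		printf("cpu_up(3) -> %d\n", hotplug_cpu_up(3));
		return 0;
	}

Note how no allocation happens on the hotplug path; that is exactly what lets the patch delete the workqueue-based idle_thread_create() machinery from kernel/smpboot.c.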
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cpu.c	9
-rw-r--r--	kernel/smp.c	4
-rw-r--r--	kernel/smpboot.c	72
-rw-r--r--	kernel/smpboot.h	2
4 files changed, 31 insertions(+), 56 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 05c46bae5e55..0e6353cf147a 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -297,15 +297,18 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
 	int ret, nr_calls = 0;
 	void *hcpu = (void *)(long)cpu;
 	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
+	struct task_struct *idle;
 
 	if (cpu_online(cpu) || !cpu_present(cpu))
 		return -EINVAL;
 
 	cpu_hotplug_begin();
 
-	ret = smpboot_prepare(cpu);
-	if (ret)
+	idle = idle_thread_get(cpu);
+	if (IS_ERR(idle)) {
+		ret = PTR_ERR(idle);
 		goto out;
+	}
 
 	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
 	if (ret) {
@@ -316,7 +319,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
 	}
 
 	/* Arch-specific enabling code. */
-	ret = __cpu_up(cpu, idle_thread_get(cpu));
+	ret = __cpu_up(cpu, idle);
 	if (ret != 0)
 		goto out_notify;
 	BUG_ON(!cpu_online(cpu));
diff --git a/kernel/smp.c b/kernel/smp.c
index 2f8b10ecf759..a61294c07f3f 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -13,6 +13,8 @@
 #include <linux/smp.h>
 #include <linux/cpu.h>
 
+#include "smpboot.h"
+
 #ifdef CONFIG_USE_GENERIC_SMP_HELPERS
 static struct {
 	struct list_head	queue;
@@ -669,6 +671,8 @@ void __init smp_init(void)
 {
 	unsigned int cpu;
 
+	idle_threads_init();
+
 	/* FIXME: This should be done in userspace --RR */
 	for_each_present_cpu(cpu) {
 		if (num_online_cpus() >= setup_max_cpus)
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index ed1576981801..e1a797e028a3 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -6,64 +6,42 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/percpu.h>
-#include <linux/workqueue.h>
 
 #include "smpboot.h"
 
 #ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
-struct create_idle {
-	struct work_struct	work;
-	struct task_struct	*idle;
-	struct completion	done;
-	unsigned int cpu;
-};
-
-static void __cpuinit do_fork_idle(struct work_struct *work)
-{
-	struct create_idle *c = container_of(work, struct create_idle, work);
-
-	c->idle = fork_idle(c->cpu);
-	complete(&c->done);
-}
-
-static struct task_struct * __cpuinit idle_thread_create(unsigned int cpu)
-{
-	struct create_idle c_idle = {
-		.cpu	= cpu,
-		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
-	};
-
-	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
-	schedule_work(&c_idle.work);
-	wait_for_completion(&c_idle.done);
-	destroy_work_on_stack(&c_idle.work);
-	return c_idle.idle;
-}
-
 /*
  * For the hotplug case we keep the task structs around and reuse
  * them.
  */
 static DEFINE_PER_CPU(struct task_struct *, idle_threads);
 
-static inline struct task_struct *get_idle_for_cpu(unsigned int cpu)
+struct task_struct * __cpuinit idle_thread_get(unsigned int cpu)
 {
 	struct task_struct *tsk = per_cpu(idle_threads, cpu);
 
 	if (!tsk)
-		return idle_thread_create(cpu);
+		return ERR_PTR(-ENOMEM);
 	init_idle(tsk, cpu);
 	return tsk;
 }
 
-struct task_struct * __cpuinit idle_thread_get(unsigned int cpu)
+void __init idle_thread_set_boot_cpu(void)
 {
-	return per_cpu(idle_threads, cpu);
+	per_cpu(idle_threads, smp_processor_id()) = current;
 }
 
-void __init idle_thread_set_boot_cpu(void)
+static inline void idle_init(unsigned int cpu)
 {
-	per_cpu(idle_threads, smp_processor_id()) = current;
+	struct task_struct *tsk = per_cpu(idle_threads, cpu);
+
+	if (!tsk) {
+		tsk = fork_idle(cpu);
+		if (IS_ERR(tsk))
+			pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
+		else
+			per_cpu(idle_threads, cpu) = tsk;
+	}
 }
 
 /**
@@ -72,25 +50,13 @@ void __init idle_thread_set_boot_cpu(void)
  *
  * Creates the thread if it does not exist.
  */
-static int __cpuinit idle_thread_init(unsigned int cpu)
+void __init idle_threads_init(void)
 {
-	struct task_struct *idle = get_idle_for_cpu(cpu);
+	unsigned int cpu;
 
-	if (IS_ERR(idle)) {
-		printk(KERN_ERR "failed fork for CPU %u\n", cpu);
-		return PTR_ERR(idle);
+	for_each_possible_cpu(cpu) {
+		if (cpu != smp_processor_id())
+			idle_init(cpu);
 	}
-	per_cpu(idle_threads, cpu) = idle;
-	return 0;
 }
-#else
-static inline int idle_thread_init(unsigned int cpu) { return 0; }
 #endif
-
-/**
- * smpboot_prepare - generic smpboot preparation
- */
-int __cpuinit smpboot_prepare(unsigned int cpu)
-{
-	return idle_thread_init(cpu);
-}
diff --git a/kernel/smpboot.h b/kernel/smpboot.h
index 7943bbbab917..4cfbcb8a8362 100644
--- a/kernel/smpboot.h
+++ b/kernel/smpboot.h
@@ -8,9 +8,11 @@ int smpboot_prepare(unsigned int cpu);
 #ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
 struct task_struct *idle_thread_get(unsigned int cpu);
 void idle_thread_set_boot_cpu(void);
+void idle_threads_init(void);
 #else
 static inline struct task_struct *idle_thread_get(unsigned int cpu) { return NULL; }
 static inline void idle_thread_set_boot_cpu(void) { }
+static inline void idle_threads_init(unsigned int cpu) { }
 #endif
 
 #endif
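The error check added to _cpu_up() relies on the kernel's ERR_PTR convention from include/linux/err.h: a negative errno is encoded in the top 4095 values of the pointer range, so a single return value can carry either a valid pointer or an error code. The following is a self-contained userspace re-creation of that convention; demo_thread_get() is a hypothetical stand-in for idle_thread_get(), not part of the patch.

	/* Userspace re-creation of the ERR_PTR convention; the macros
	 * mirror include/linux/err.h, the rest is illustrative. */
	#include <stdio.h>
	#include <errno.h>

	#define MAX_ERRNO	4095

	static inline void *ERR_PTR(long error)     { return (void *)error; }
	static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	static int pool[2];

	/* Valid pointer on success, ERR_PTR(-ENOMEM) when nothing was
	 * allocated, just like idle_thread_get() after this patch. */
	static void *demo_thread_get(unsigned int cpu)
	{
		if (cpu >= 2)
			return ERR_PTR(-ENOMEM);
		return &pool[cpu];
	}

	int main(void)
	{
		unsigned int cpu;

		for (cpu = 0; cpu < 3; cpu++) {
			void *t = demo_thread_get(cpu);

			if (IS_ERR(t))	/* the check _cpu_up() now performs */
				printf("cpu %u: error %ld\n", cpu, PTR_ERR(t));
			else
				printf("cpu %u: ok\n", cpu);
		}
		return 0;
	}

This is why _cpu_up() can turn the returned pointer straight into an error value with ret = PTR_ERR(idle) and bail out before any notifiers run.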