path: root/kernel/smpboot.c
author    Suresh Siddha <suresh.b.siddha@intel.com>  2012-04-20 20:08:50 -0400
committer Thomas Gleixner <tglx@linutronix.de>       2012-05-03 13:32:34 -0400
commit    3bb5d2ee396aabaa4e318f17e94d13e2ee0e5a88 (patch)
tree      723fd419cbbc6874b3303d11a439303f3c4ff46c /kernel/smpboot.c
parent    9a1347237492f273f84ec39962b5806c70b2806a (diff)
smp, idle: Allocate idle thread for each possible cpu during boot
percpu areas are already allocated during boot for each possible cpu.
percpu idle threads can be considered as an extension of the percpu areas,
and allocate them for each possible cpu during boot.

This will eliminate the need for workqueue based idle thread allocation.
In future we can move the idle thread area into the percpu area too.

[ tglx: Moved the loop into smpboot.c and added an error check when
  the init code failed to allocate an idle thread for a cpu which
  should be onlined ]

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: venki@google.com
Link: http://lkml.kernel.org/r/1334966930.28674.245.camel@sbsiddha-desk.sc.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
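The flow this enables is straightforward: during early boot the boot CPU
registers its own task as its idle thread, then an idle thread is forked
up front for every other possible cpu, while sleeping allocations are
still cheap and allowed. The sketch below illustrates that flow; the
wrapper name boot_time_idle_setup() is hypothetical, and the real call
sites live outside kernel/smpboot.c (and outside this diffstat-limited
view).

	/*
	 * Illustrative sketch only: boot_time_idle_setup() is a
	 * hypothetical wrapper; the real call sites are in the early
	 * boot/scheduler-init path, not in kernel/smpboot.c.
	 */
	#include <linux/init.h>
	#include <linux/sched.h>
	#include "smpboot.h"	/* idle_thread_set_boot_cpu(), idle_threads_init() */

	static void __init boot_time_idle_setup(void)
	{
		/* Boot CPU is already running: reuse 'current' as its idle task. */
		idle_thread_set_boot_cpu();

		/* fork_idle() once for every other possible cpu, before hotplug. */
		idle_threads_init();
	}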
Diffstat (limited to 'kernel/smpboot.c')
-rw-r--r--  kernel/smpboot.c | 72
1 file changed, 19 insertions(+), 53 deletions(-)
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index ed1576981801..e1a797e028a3 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -6,64 +6,42 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/percpu.h>
-#include <linux/workqueue.h>
 
 #include "smpboot.h"
 
 #ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
-struct create_idle {
-	struct work_struct work;
-	struct task_struct *idle;
-	struct completion done;
-	unsigned int cpu;
-};
-
-static void __cpuinit do_fork_idle(struct work_struct *work)
-{
-	struct create_idle *c = container_of(work, struct create_idle, work);
-
-	c->idle = fork_idle(c->cpu);
-	complete(&c->done);
-}
-
-static struct task_struct * __cpuinit idle_thread_create(unsigned int cpu)
-{
-	struct create_idle c_idle = {
-		.cpu = cpu,
-		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
-	};
-
-	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
-	schedule_work(&c_idle.work);
-	wait_for_completion(&c_idle.done);
-	destroy_work_on_stack(&c_idle.work);
-	return c_idle.idle;
-}
-
 /*
  * For the hotplug case we keep the task structs around and reuse
  * them.
  */
 static DEFINE_PER_CPU(struct task_struct *, idle_threads);
 
-static inline struct task_struct *get_idle_for_cpu(unsigned int cpu)
+struct task_struct * __cpuinit idle_thread_get(unsigned int cpu)
 {
 	struct task_struct *tsk = per_cpu(idle_threads, cpu);
 
 	if (!tsk)
-		return idle_thread_create(cpu);
+		return ERR_PTR(-ENOMEM);
 	init_idle(tsk, cpu);
 	return tsk;
 }
 
-struct task_struct * __cpuinit idle_thread_get(unsigned int cpu)
+void __init idle_thread_set_boot_cpu(void)
 {
-	return per_cpu(idle_threads, cpu);
+	per_cpu(idle_threads, smp_processor_id()) = current;
 }
 
-void __init idle_thread_set_boot_cpu(void)
+static inline void idle_init(unsigned int cpu)
 {
-	per_cpu(idle_threads, smp_processor_id()) = current;
+	struct task_struct *tsk = per_cpu(idle_threads, cpu);
+
+	if (!tsk) {
+		tsk = fork_idle(cpu);
+		if (IS_ERR(tsk))
+			pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
+		else
+			per_cpu(idle_threads, cpu) = tsk;
+	}
 }
 
 /**
@@ -72,25 +50,13 @@ void __init idle_thread_set_boot_cpu(void)
  *
  * Creates the thread if it does not exist.
  */
-static int __cpuinit idle_thread_init(unsigned int cpu)
+void __init idle_threads_init(void)
 {
-	struct task_struct *idle = get_idle_for_cpu(cpu);
+	unsigned int cpu;
 
-	if (IS_ERR(idle)) {
-		printk(KERN_ERR "failed fork for CPU %u\n", cpu);
-		return PTR_ERR(idle);
-	}
-	per_cpu(idle_threads, cpu) = idle;
-	return 0;
+	for_each_possible_cpu(cpu) {
+		if (cpu != smp_processor_id())
+			idle_init(cpu);
+	}
 }
-#else
-static inline int idle_thread_init(unsigned int cpu) { return 0; }
 #endif
-
-/**
- * smpboot_prepare - generic smpboot preparation
- */
-int __cpuinit smpboot_prepare(unsigned int cpu)
-{
-	return idle_thread_init(cpu);
-}
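Note the changed contract: idle_thread_get() no longer creates anything,
so a hotplug-time failure can only mean the boot-time fork_idle() already
failed (and was reported by idle_init()). A caller simply propagates the
ERR_PTR. A minimal sketch of such a consumer follows; the function name
is hypothetical and stands in for the real bringup path in kernel/cpu.c:

	/*
	 * Hypothetical consumer of the new contract; the real user is
	 * the CPU bringup path in kernel/cpu.c.
	 */
	#include <linux/err.h>
	#include <linux/sched.h>
	#include "smpboot.h"	/* idle_thread_get() */

	static int bringup_cpu_sketch(unsigned int cpu)
	{
		struct task_struct *idle = idle_thread_get(cpu);

		if (IS_ERR(idle))
			return PTR_ERR(idle);	/* boot-time allocation failed */

		/* ... arch-specific __cpu_up(cpu, idle) would follow ... */
		return 0;
	}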