author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2011-03-07 22:40:04 -0500
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2011-04-01 00:37:34 -0400
commit		c56e58537d504706954a06570b4034c04e5b7500 (patch)
tree		403843557d3eb29fffcc06de2f88f16ec1ffea08 /arch/powerpc/kernel/smp.c
parent		105765f451d3ff007bb4ae3761e825686d9615db (diff)
powerpc/smp: Create idle threads on demand and properly reset them
Instead of creating idle threads at boot for all possible CPUs, we create
them on demand, like x86 or ARM, and we properly call init_idle() to
re-initialize an idle thread when a CPU that was unplugged is plugged back
in.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
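The heart of the change is a per-CPU cache of idle task_structs: fork_idle()
runs only the first time a CPU comes online, and init_idle() resets the cached
task whenever an unplugged CPU is plugged back in. A minimal sketch of that
pattern, independent of the powerpc specifics (illustrative only;
ensure_idle_thread() is a hypothetical name, and the patch's actual
implementation is create_idle() in the diff below):

#include <linux/err.h>
#include <linux/sched.h>	/* fork_idle(), init_idle() */
#include <linux/threads.h>	/* NR_CPUS */

static struct task_struct *idle_cache[NR_CPUS];

/* Hypothetical helper: guarantee that @cpu has a usable idle thread. */
static int ensure_idle_thread(unsigned int cpu)
{
	struct task_struct *idle = idle_cache[cpu];

	if (!idle) {
		/* First online of this CPU: fork a fresh idle task */
		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			return PTR_ERR(idle);
		idle_cache[cpu] = idle;
	} else {
		/* Re-plug after a hot-unplug: recycle and reset it */
		init_idle(idle, cpu);
	}
	return 0;
}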
Diffstat (limited to 'arch/powerpc/kernel/smp.c')
 arch/powerpc/kernel/smp.c | 100 ++++++++++++++++++++++++++++++++----------
 1 file changed, 78 insertions(+), 22 deletions(-)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index d7f8cc18ae05..54faff91b805 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -57,6 +57,25 @@
 #define DBG(fmt...)
 #endif
 
+
+/* Store all idle threads; these can be reused instead of creating
+ * new threads. This also avoids complicated thread-destroy
+ * functionality for idle threads.
+ */
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
+ * removed after init for !CONFIG_HOTPLUG_CPU.
+ */
+static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
+#define get_idle_for_cpu(x)	(per_cpu(idle_thread_array, x))
+#define set_idle_for_cpu(x, p)	(per_cpu(idle_thread_array, x) = (p))
+#else
+static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
+#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
+#define set_idle_for_cpu(x, p)	(idle_thread_array[(x)] = (p))
+#endif
+
 struct thread_info *secondary_ti;
 
 DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
@@ -238,23 +257,6 @@ static void __devinit smp_store_cpu_info(int id)
 	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
 }
 
-static void __init smp_create_idle(unsigned int cpu)
-{
-	struct task_struct *p;
-
-	/* create a process for the processor */
-	p = fork_idle(cpu);
-	if (IS_ERR(p))
-		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-#ifdef CONFIG_PPC64
-	paca[cpu].__current = p;
-	paca[cpu].kstack = (unsigned long) task_thread_info(p)
-		+ THREAD_SIZE - STACK_FRAME_OVERHEAD;
-#endif
-	current_set[cpu] = task_thread_info(p);
-	task_thread_info(p)->cpu = cpu;
-}
-
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned int cpu;
@@ -288,10 +290,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		max_cpus = NR_CPUS;
 	else
 		max_cpus = 1;
-
-	for_each_possible_cpu(cpu)
-		if (cpu != boot_cpuid)
-			smp_create_idle(cpu);
 }
 
 void __devinit smp_prepare_boot_cpu(void)
@@ -355,9 +353,62 @@ void generic_set_cpu_dead(unsigned int cpu)
 }
 #endif
 
+struct create_idle {
+	struct work_struct work;
+	struct task_struct *idle;
+	struct completion done;
+	int cpu;
+};
+
+static void __cpuinit do_fork_idle(struct work_struct *work)
+{
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);
+
+	c_idle->idle = fork_idle(c_idle->cpu);
+	complete(&c_idle->done);
+}
+
+static int __cpuinit create_idle(unsigned int cpu)
+{
+	struct thread_info *ti;
+	struct create_idle c_idle = {
+		.cpu	= cpu,
+		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
+	};
+	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
+
+	c_idle.idle = get_idle_for_cpu(cpu);
+
+	/* We can't use kernel_thread since we must avoid
+	 * rescheduling the child. We use a workqueue because
+	 * we want to fork from a kernel thread, not whatever
+	 * userspace process happens to be trying to online us.
+	 */
+	if (!c_idle.idle) {
+		schedule_work(&c_idle.work);
+		wait_for_completion(&c_idle.done);
+	} else
+		init_idle(c_idle.idle, cpu);
+	if (IS_ERR(c_idle.idle)) {
+		pr_err("Failed fork for CPU %u: %li\n", cpu, PTR_ERR(c_idle.idle));
+		return PTR_ERR(c_idle.idle);
+	}
+	ti = task_thread_info(c_idle.idle);
+
+#ifdef CONFIG_PPC64
+	paca[cpu].__current = c_idle.idle;
+	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
+#endif
+	ti->cpu = cpu;
+	current_set[cpu] = ti;
+
+	return 0;
+}
+
 int __cpuinit __cpu_up(unsigned int cpu)
 {
-	int c;
+	int rc, c;
 
 	secondary_ti = current_set[cpu];
 
@@ -365,6 +416,11 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
 		return -EINVAL;
 
+	/* Make sure we have an idle thread */
+	rc = create_idle(cpu);
+	if (rc)
+		return rc;
+
 	/* Make sure callin-map entry is 0 (can be left over from a
 	 * previous CPU hotplug)
 	 */
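
Note the mechanism create_idle() uses for the first-time fork: it bounces
fork_idle() through a workqueue so that the idle task is forked from a kernel
thread rather than from whatever userspace process happens to be onlining the
CPU, then blocks on a completion until the work has run. The same on-stack
work + completion idiom, reduced to a skeleton (illustrative only; apart from
the kernel APIs, the struct and function names here are made up):

#include <linux/completion.h>
#include <linux/kernel.h>	/* container_of() */
#include <linux/workqueue.h>

/* Made-up context struct mirroring struct create_idle above. */
struct kthread_call {
	struct work_struct work;
	struct completion done;
	long result;
};

static void kthread_call_fn(struct work_struct *work)
{
	struct kthread_call *call =
		container_of(work, struct kthread_call, work);

	call->result = 42;	/* stand-in for fork_idle(cpu) */
	complete(&call->done);	/* wake up the waiter */
}

static long call_in_kernel_thread(void)	/* hypothetical wrapper */
{
	struct kthread_call call = {
		.done = COMPLETION_INITIALIZER_ONSTACK(call.done),
	};
	INIT_WORK_ONSTACK(&call.work, kthread_call_fn);

	/* Queue on the shared kernel workqueue; a kernel worker
	 * thread, not the caller, executes kthread_call_fn(). */
	schedule_work(&call.work);

	/* Blocking here also keeps the on-stack work item alive
	 * until the handler is done with it. */
	wait_for_completion(&call.done);

	return call.result;
}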