author    Thomas Gleixner <tglx@linutronix.de>  2012-04-20 09:05:48 -0400
committer Thomas Gleixner <tglx@linutronix.de>  2012-04-26 06:06:10 -0400
commit    17e32eacc3543c25a4377bb7ce54026e38db7d20
tree      94c34a3d75a126642a372fd30e4431c234c74e52 /arch
parent    7eb43a6d232bfa46464b501cd1987ec2d705d8cf
powerpc: Use generic idle thread allocation
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Link: http://lkml.kernel.org/r/20120420124557.311212868@linutronix.de
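
For context, the generic facility this patch opts in to lives in kernel/smpboot.c, added earlier in the same series. The sketch below shows the rough shape of that per-cpu idle-thread cache as of this kernel generation; it is illustrative rather than a verbatim copy, and error handling is trimmed.

/*
 * Rough sketch of the generic idle-thread cache in kernel/smpboot.c
 * (illustrative, not verbatim).
 */
static DEFINE_PER_CPU(struct task_struct *, idle_threads);

/* Hand out the cached idle task for a CPU, re-initialized for reuse. */
struct task_struct *idle_thread_get(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk)
		return ERR_PTR(-ENOMEM);
	init_idle(tsk, cpu);
	return tsk;
}

/* Fork each CPU's idle task once at boot; hotplug cycles reuse it. */
static inline void idle_init(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk) {
		tsk = fork_idle(cpu);
		if (IS_ERR(tsk))
			pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
		else
			per_cpu(idle_threads, cpu) = tsk;
	}
}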
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/Kconfig      |  1
-rw-r--r--  arch/powerpc/kernel/smp.c | 74
2 files changed, 6 insertions, 69 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index feab3bad6d0..c8155350836 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -144,6 +144,7 @@ config PPC
 	select HAVE_BPF_JIT if (PPC64 && NET)
 	select HAVE_ARCH_JUMP_LABEL
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
+	select GENERIC_SMP_IDLE_THREAD
 
 config EARLY_PRINTK
 	bool
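
Selecting GENERIC_SMP_IDLE_THREAD means the core hotplug path, not the architecture, now allocates the idle task and hands it to __cpu_up(). A simplified sketch of the consumer side in kernel/cpu.c, assumed shape for this kernel generation, with locking and error paths omitted:

/* Simplified: how _cpu_up() feeds the generic idle task to the arch. */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	struct task_struct *idle;

	idle = idle_thread_get(cpu);	/* cached, see kernel/smpboot.c */
	if (IS_ERR(idle))
		return PTR_ERR(idle);

	/* The architecture only wires the task up and kicks the CPU. */
	return __cpu_up(cpu, idle);
}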
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index d38030fb347..e4cb34322de 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -57,27 +57,9 @@
 #define DBG(fmt...)
 #endif
 
-
-/* Store all idle threads, this can be reused instead of creating
-* a new thread. Also avoids complicated thread destroy functionality
-* for idle threads.
-*/
 #ifdef CONFIG_HOTPLUG_CPU
-/*
- * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
- * removed after init for !CONFIG_HOTPLUG_CPU.
- */
-static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
-#define get_idle_for_cpu(x)	(per_cpu(idle_thread_array, x))
-#define set_idle_for_cpu(x, p)	(per_cpu(idle_thread_array, x) = (p))
-
 /* State of each CPU during hotplug phases */
 static DEFINE_PER_CPU(int, cpu_state) = { 0 };
-
-#else
-static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
-#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
-#define set_idle_for_cpu(x, p)	(idle_thread_array[(x)] = (p))
 #endif
 
 struct thread_info *secondary_ti;
@@ -429,57 +411,16 @@ int generic_check_cpu_restart(unsigned int cpu)
 }
 #endif
 
-struct create_idle {
-	struct work_struct work;
-	struct task_struct *idle;
-	struct completion done;
-	int cpu;
-};
-
-static void __cpuinit do_fork_idle(struct work_struct *work)
+static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
 {
-	struct create_idle *c_idle =
-		container_of(work, struct create_idle, work);
-
-	c_idle->idle = fork_idle(c_idle->cpu);
-	complete(&c_idle->done);
-}
-
-static int __cpuinit create_idle(unsigned int cpu)
-{
-	struct thread_info *ti;
-	struct create_idle c_idle = {
-		.cpu	= cpu,
-		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
-	};
-	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
-
-	c_idle.idle = get_idle_for_cpu(cpu);
-
-	/* We can't use kernel_thread since we must avoid to
-	 * reschedule the child. We use a workqueue because
-	 * we want to fork from a kernel thread, not whatever
-	 * userspace process happens to be trying to online us.
-	 */
-	if (!c_idle.idle) {
-		schedule_work(&c_idle.work);
-		wait_for_completion(&c_idle.done);
-	} else
-		init_idle(c_idle.idle, cpu);
-	if (IS_ERR(c_idle.idle)) {
-		pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle));
-		return PTR_ERR(c_idle.idle);
-	}
-	ti = task_thread_info(c_idle.idle);
+	struct thread_info *ti = task_thread_info(idle);
 
 #ifdef CONFIG_PPC64
-	paca[cpu].__current = c_idle.idle;
+	paca[cpu].__current = idle;
 	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
 #endif
 	ti->cpu = cpu;
-	current_set[cpu] = ti;
-
-	return 0;
+	secondary_ti = current_set[cpu] = ti;
 }
 
 int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
@@ -490,12 +431,7 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
 	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
 		return -EINVAL;
 
-	/* Make sure we have an idle thread */
-	rc = create_idle(cpu);
-	if (rc)
-		return rc;
-
-	secondary_ti = current_set[cpu];
+	cpu_idle_thread_init(cpu, tidle);
 
 	/* Make sure callin-map entry is 0 (can be leftover a CPU
 	 * hotplug
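
Worth noting: the new cpu_idle_thread_init() also folds in the secondary_ti assignment that __cpu_up() used to do separately. On powerpc the secondary CPU's early startup code reads secondary_ti to locate its thread_info and kernel stack, so keeping that store next to current_set[cpu] puts all of the idle-task wiring in one place.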