path: root/arch/powerpc/kernel
author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2011-09-19 13:44:49 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2011-09-20 01:53:24 -0400
commit		fb82b83970a32263698e54a8779d2ce88cd3b060 (patch)
tree		3f396a3225fa7eae960d9033926b00e751327577 /arch/powerpc/kernel
parent		b8bb922c680b7e21af2268bc8eec93b17f5b9ab8 (diff)
powerpc/smp: More generic support for "soft hotplug"
This adds more generic support for doing CPU hotplug with a simple
idle loop and no actual reset of the processors. The generic
smp_generic_kick_cpu() does the hotplug bringup trick if the PACA
shows that the CPU has already been started at boot, and we provide
an accessor for the CPU state.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
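The "accessor for the CPU state" mentioned above is generic_check_cpu_restart(), added at the bottom of the diff. As a rough sketch of how it is meant to be consumed (illustrative only: the function name example_soft_unplug_loop and the HMT_low()/HMT_medium() low-power wait are placeholders, not part of this patch), a platform's offline path could park a soft-unplugged CPU and let smp_generic_kick_cpu() wake it back up:

/*
 * Illustrative sketch: a platform CPU-offline loop that parks a
 * "soft unplugged" CPU until smp_generic_kick_cpu() asks for it back.
 */
static void example_soft_unplug_loop(void)
{
	unsigned int cpu = smp_processor_id();

	/* Tell the generic hotplug code this CPU is down. */
	generic_set_cpu_dead(cpu);

	/*
	 * Spin (in a low-power state) until smp_generic_kick_cpu()
	 * sets cpu_state to CPU_UP_PREPARE and sends a reschedule IPI.
	 */
	while (!generic_check_cpu_restart(cpu))
		HMT_low();	/* placeholder for the platform's idle/cede */
	HMT_medium();

	/* From here the CPU re-enters the normal secondary startup path. */
}

When the kicked CPU was never soft-unplugged (first boot), paca[nr].cpu_start is still zero and smp_generic_kick_cpu() takes the original release path; the CONFIG_HOTPLUG_CPU branch added below only runs for CPUs that have already been started once.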
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--	arch/powerpc/kernel/smp.c	30
1 file changed, 25 insertions, 5 deletions
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 7bf2187dfd9..af7e7722eca 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -70,6 +70,10 @@
 static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
 #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
 #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
+
+/* State of each CPU during hotplug phases */
+static DEFINE_PER_CPU(int, cpu_state) = { 0 };
+
 #else
 static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
 #define get_idle_for_cpu(x) (idle_thread_array[(x)])
@@ -104,12 +108,25 @@ int __devinit smp_generic_kick_cpu(int nr)
 	 * cpu_start field to become non-zero After we set cpu_start,
 	 * the processor will continue on to secondary_start
 	 */
-	paca[nr].cpu_start = 1;
-	smp_mb();
+	if (!paca[nr].cpu_start) {
+		paca[nr].cpu_start = 1;
+		smp_mb();
+		return 0;
+	}
+
+#ifdef CONFIG_HOTPLUG_CPU
+	/*
+	 * Ok it's not there, so it might be soft-unplugged, let's
+	 * try to bring it back
+	 */
+	per_cpu(cpu_state, nr) = CPU_UP_PREPARE;
+	smp_wmb();
+	smp_send_reschedule(nr);
+#endif /* CONFIG_HOTPLUG_CPU */
 
 	return 0;
 }
-#endif
+#endif /* CONFIG_PPC64 */
 
 static irqreturn_t call_function_action(int irq, void *data)
 {
@@ -357,8 +374,6 @@ void __devinit smp_prepare_boot_cpu(void)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-/* State of each CPU during hotplug phases */
-static DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
 int generic_cpu_disable(void)
 {
@@ -406,6 +421,11 @@ void generic_set_cpu_dead(unsigned int cpu)
 {
 	per_cpu(cpu_state, cpu) = CPU_DEAD;
 }
+
+int generic_check_cpu_restart(unsigned int cpu)
+{
+	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
+}
 #endif
 
 struct create_idle {
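For readability, the PPC64 smp_generic_kick_cpu() reassembled from the hunks above (the elided preamble is untouched by this patch, and only the comment text visible in the diff context is reproduced) now reads roughly:

int __devinit smp_generic_kick_cpu(int nr)
{
	/* ... unchanged preamble elided ... */

	/*
	 * ... cpu_start field to become non-zero After we set cpu_start,
	 * the processor will continue on to secondary_start
	 */
	if (!paca[nr].cpu_start) {
		/* Normal first boot: release the spinning secondary. */
		paca[nr].cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Ok it's not there, so it might be soft-unplugged, let's
	 * try to bring it back
	 */
	per_cpu(cpu_state, nr) = CPU_UP_PREPARE;
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}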