about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorOlof Johansson <olof@lixom.net>2007-12-27 23:11:09 -0500
committerPaul Mackerras <paulus@samba.org>2008-01-25 06:52:50 -0500
commite057d985fd8aad83d07376c5c36f2c8a6c5411be (patch)
tree60506ba8d0fe04b53e7fe182e91633660a2ee710 /arch
parentb616de5ef928ac1914348ff6a42521ca6b83112e (diff)
[POWERPC] Make smp_send_stop() handle panic and xmon reboot
smp_send_stop() will send an IPI to all other cpus to shut them down. However, for the case of xmon-based reboots (as well as potentially some panics), the other cpus are (or might be) spinning with interrupts off, and won't take the IPI. Current code will drop us into the debugger when the IPI fails, which means we're in an infinite loop that we can't get out of without an external reset of some sort. Instead, make the smp_send_stop() IPI call path just print the warning about being unable to send IPIs, but make it return so the rest of the shutdown sequence can continue. It's not perfect, but the lesser of two evils. Also move the call_lock handling outside of smp_call_function_map so we can avoid deadlocks in smp_send_stop(). Signed-off-by: Olof Johansson <olof@lixom.net> Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/kernel/smp.c  37
1 file changed, 29 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index cefeee81c52e..be35ffae10f0 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -76,6 +76,8 @@ void smp_call_function_interrupt(void);
76 76
77int smt_enabled_at_boot = 1; 77int smt_enabled_at_boot = 1;
78 78
79static int ipi_fail_ok;
80
79static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL; 81static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
80 82
81#ifdef CONFIG_PPC64 83#ifdef CONFIG_PPC64
@@ -204,8 +206,6 @@ static int __smp_call_function_map(void (*func) (void *info), void *info,
204 if (wait) 206 if (wait)
205 atomic_set(&data.finished, 0); 207 atomic_set(&data.finished, 0);
206 208
207 spin_lock(&call_lock);
208
209 /* remove 'self' from the map */ 209 /* remove 'self' from the map */
210 if (cpu_isset(smp_processor_id(), map)) 210 if (cpu_isset(smp_processor_id(), map))
211 cpu_clear(smp_processor_id(), map); 211 cpu_clear(smp_processor_id(), map);
@@ -232,7 +232,8 @@ static int __smp_call_function_map(void (*func) (void *info), void *info,
232 printk("smp_call_function on cpu %d: other cpus not " 232 printk("smp_call_function on cpu %d: other cpus not "
233 "responding (%d)\n", smp_processor_id(), 233 "responding (%d)\n", smp_processor_id(),
234 atomic_read(&data.started)); 234 atomic_read(&data.started));
235 debugger(NULL); 235 if (!ipi_fail_ok)
236 debugger(NULL);
236 goto out; 237 goto out;
237 } 238 }
238 } 239 }
@@ -259,15 +260,18 @@ static int __smp_call_function_map(void (*func) (void *info), void *info,
259 out: 260 out:
260 call_data = NULL; 261 call_data = NULL;
261 HMT_medium(); 262 HMT_medium();
262 spin_unlock(&call_lock);
263 return ret; 263 return ret;
264} 264}
265 265
266static int __smp_call_function(void (*func)(void *info), void *info, 266static int __smp_call_function(void (*func)(void *info), void *info,
267 int nonatomic, int wait) 267 int nonatomic, int wait)
268{ 268{
269 return __smp_call_function_map(func, info, nonatomic, wait, 269 int ret;
270 spin_lock(&call_lock);
271 ret =__smp_call_function_map(func, info, nonatomic, wait,
270 cpu_online_map); 272 cpu_online_map);
273 spin_unlock(&call_lock);
274 return ret;
271} 275}
272 276
273int smp_call_function(void (*func) (void *info), void *info, int nonatomic, 277int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
@@ -293,9 +297,11 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
293 return -EINVAL; 297 return -EINVAL;
294 298
295 cpu_set(cpu, map); 299 cpu_set(cpu, map);
296 if (cpu != get_cpu()) 300 if (cpu != get_cpu()) {
301 spin_lock(&call_lock);
297 ret = __smp_call_function_map(func, info, nonatomic, wait, map); 302 ret = __smp_call_function_map(func, info, nonatomic, wait, map);
298 else { 303 spin_unlock(&call_lock);
304 } else {
299 local_irq_disable(); 305 local_irq_disable();
300 func(info); 306 func(info);
301 local_irq_enable(); 307 local_irq_enable();
@@ -307,7 +313,22 @@ EXPORT_SYMBOL(smp_call_function_single);
307 313
308void smp_send_stop(void) 314void smp_send_stop(void)
309{ 315{
310 __smp_call_function(stop_this_cpu, NULL, 1, 0); 316 int nolock;
317
318 /* It's OK to fail sending the IPI, since the alternative is to
319 * be stuck forever waiting on the other CPU to take the interrupt.
320 *
321 * It's better to at least continue and go through reboot, since this
322 * function is usually called at panic or reboot time in the first
323 * place.
324 */
325 ipi_fail_ok = 1;
326
327 /* Don't deadlock in case we got called through panic */
328 nolock = !spin_trylock(&call_lock);
329 __smp_call_function_map(stop_this_cpu, NULL, 1, 0, cpu_online_map);
330 if (!nolock)
331 spin_unlock(&call_lock);
311} 332}
312 333
313void smp_call_function_interrupt(void) 334void smp_call_function_interrupt(void)