| author | Sebastien Dugue <sebastien.dugue@bull.net> | 2008-12-01 08:09:07 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-01-30 16:35:29 -0500 |
| commit | 94df7de0289bc2df3d6e85cd2ece52bf42682f45 (patch) | |
| tree | b3f614b015adfb9574959687bb9b7ac4c884e23a /kernel/time/tick-common.c | |
| parent | 7f22391cbe82a80a9f891d8bd10fc28ff248d1e2 (diff) | |
hrtimers: allow the hot-unplugging of all cpus
Impact: fix CPU hotplug hang on Power6 testbox
On architectures that support offlining all cpus (at least powerpc/pseries),
hot-unplugging the tick_do_timer_cpu can result in a system hang.

This comes from the fact that if the cpu going down happens to be the
cpu doing the tick, then, as the tick_do_timer_cpu handover only happens after
the cpu is dead (via the CPU_DEAD notification), we're left without ticks:
jiffies are frozen and any task relying on timers (msleep, ...) is stuck.
That's particularly the case for the cpu looping in __cpu_die() waiting
for the dying cpu to be dead.
This patch addresses this by having the tick_do_timer_cpu handover happen
earlier, during the CPU_DYING notification. For this, a new clockevent
notification type (CLOCK_EVT_NOTIFY_CPU_DYING) is introduced; it is triggered
from hrtimer_cpu_notify().
Signed-off-by: Sebastien Dugue <sebastien.dugue@bull.net>
Cc: <stable@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
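
For context: the CLOCK_EVT_NOTIFY_CPU_DYING notification is raised from hrtimer_cpu_notify() in kernel/hrtimer.c, which is outside the file covered by the diffstat below. The following is only a sketch of what that hook plausibly looks like after this patch, reconstructed from the description above rather than taken from this page; the CPU_DYING_FROZEN case and the exact shape of the existing CPU_DEAD handling are assumptions.

```c
/*
 * Sketch only -- kernel/hrtimer.c is not part of the diff shown on
 * this page.  CPU hotplug notifier forwarding hotplug events to the
 * clockevents layer.
 */
static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
                                        unsigned long action, void *hcpu)
{
        int scpu = (long)hcpu;

        switch (action) {
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DYING:
        case CPU_DYING_FROZEN:
                /*
                 * New with this patch: hand the do_timer duty over while
                 * the outgoing cpu is still running, instead of waiting
                 * for CPU_DEAD.  This reaches tick_handover_do_timer()
                 * via tick_notify() (see the diff below).
                 */
                clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                /* Existing CPU_DEAD handling (hrtimer migration etc.). */
                clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
                break;
#endif
        default:
                break;
        }

        return NOTIFY_OK;
}
```

Because CPU_DYING is delivered while the outgoing cpu is still alive, and before __cpu_die() starts waiting on it, the do_timer duty is already on an online cpu by the time the old CPU_DEAD-time handover would have run, which is what removes the hang.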
Diffstat (limited to 'kernel/time/tick-common.c')
-rw-r--r--  kernel/time/tick-common.c | 26
1 file changed, 19 insertions(+), 7 deletions(-)
```diff
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 63e05d423a09..21a5ca849514 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -274,6 +274,21 @@ out_bc:
 }
 
 /*
+ * Transfer the do_timer job away from a dying cpu.
+ *
+ * Called with interrupts disabled.
+ */
+static void tick_handover_do_timer(int *cpup)
+{
+        if (*cpup == tick_do_timer_cpu) {
+                int cpu = cpumask_first(cpu_online_mask);
+
+                tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
+                        TICK_DO_TIMER_NONE;
+        }
+}
+
+/*
  * Shutdown an event device on a given cpu:
  *
  * This is called on a life CPU, when a CPU is dead. So we cannot
@@ -297,13 +312,6 @@ static void tick_shutdown(unsigned int *cpup)
                 clockevents_exchange_device(dev, NULL);
                 td->evtdev = NULL;
         }
-        /* Transfer the do_timer job away from this cpu */
-        if (*cpup == tick_do_timer_cpu) {
-                int cpu = cpumask_first(cpu_online_mask);
-
-                tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
-                        TICK_DO_TIMER_NONE;
-        }
         spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
@@ -357,6 +365,10 @@ static int tick_notify(struct notifier_block *nb, unsigned long reason,
                 tick_broadcast_oneshot_control(reason);
                 break;
 
+        case CLOCK_EVT_NOTIFY_CPU_DYING:
+                tick_handover_do_timer(dev);
+                break;
+
         case CLOCK_EVT_NOTIFY_CPU_DEAD:
                 tick_shutdown_broadcast_oneshot(dev);
                 tick_shutdown_broadcast(dev);
```
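
The notification type itself is declared in include/linux/clockchips.h, which this diffstat also does not cover. A minimal sketch of that companion change, assuming the new value is simply added next to CLOCK_EVT_NOTIFY_CPU_DEAD (the surrounding entries are abbreviated and may differ):

```c
/* Sketch of the include/linux/clockchips.h side -- not shown in this diffstat. */
enum clock_event_nofitiers {            /* sic: this is the spelling used in clockchips.h */
        CLOCK_EVT_NOTIFY_ADD,
        /* ... broadcast / suspend / resume notifications ... */
        CLOCK_EVT_NOTIFY_CPU_DYING,     /* new: raised from hrtimer_cpu_notify() */
        CLOCK_EVT_NOTIFY_CPU_DEAD,
};
```

On the receiving side, tick_handover_do_timer() simply picks the first cpu in cpu_online_mask; if none is left (all cpus offlined, as pseries allows), tick_do_timer_cpu falls back to TICK_DO_TIMER_NONE, leaving it to a later online cpu to pick the duty back up.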