author | Thomas Gleixner <tglx@linutronix.de> | 2013-06-26 06:17:32 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2013-07-02 08:26:44 -0400 |
commit | c9b5a266b103af873abb9ac03bc3d067702c8f4b (patch) | |
tree | 91e5ec118a7e96b998e18a8601d8f14c76d94823 /kernel/time/tick-broadcast.c | |
parent | 47433b8c9d7480a3eebd99df38e857ce85a37cee (diff) |
tick: Make oneshot broadcast robust vs. CPU offlining
In periodic mode we remove offline cpus from the broadcast propagation
mask. In oneshot mode we fail to do so. This was not a problem so far,
but the recent changes to the broadcast propagation introduced a
constellation which can result in a NULL pointer dereference.
What happens is:
CPU0                        CPU1
                            idle()
                              arch_idle()
                                tick_broadcast_oneshot_control(OFF);
                                  set cpu1 in tick_broadcast_force_mask
                              if (cpu_offline())
                                 arch_cpu_dead()

cpu_dead_cleanup(cpu1)
  cpu1 tickdevice pointer = NULL

broadcast interrupt
  dereference cpu1 tickdevice pointer -> OOPS
We dereference the pointer because cpu1 is still set in
tick_broadcast_force_mask and tick_do_broadcast() expects a valid
cpumask and therefore lacks any further checks.
Remove the cpu from the tick_broadcast_force_mask before we set the
tick device pointer to NULL. Also add a sanity check to the oneshot
broadcast function, so we can detect such issues w/o crashing the
machine.
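
For readers unfamiliar with the tick broadcast code, the sketch below is a minimal user-space analogue of the pattern described above; the names and types (tick_dev, percpu_dev, do_broadcast, cpu_offline) are hypothetical and are not the kernel's. It illustrates why a handler that trusts its mask needs the offline path to scrub the dead cpu's bits before the per-cpu device pointer is cleared, plus a defensive mask-out of offline cpus in the spirit of the WARN_ON_ONCE()/cpumask_and() check added by this patch.

```c
#include <stdio.h>

#define NR_CPUS 4

/* Hypothetical stand-in for the per-cpu tick device. */
struct tick_dev { long next_event; };

static struct tick_dev devs[NR_CPUS];
static struct tick_dev *percpu_dev[NR_CPUS];

static unsigned long online_mask;
static unsigned long force_mask;

static void do_broadcast(unsigned long *mask, long now)
{
	/*
	 * Analogue of the sanity check added by this patch: never walk
	 * mask bits that belong to offline cpus.
	 */
	if (*mask & ~online_mask) {
		fprintf(stderr, "warning: broadcast mask contained an offline cpu\n");
		*mask &= online_mask;
	}

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!(*mask & (1UL << cpu)))
			continue;
		/*
		 * Without the cleanup in cpu_offline() below, percpu_dev[cpu]
		 * could be NULL here -- the oops described in the changelog.
		 */
		if (percpu_dev[cpu]->next_event <= now)
			printf("wake cpu%d\n", cpu);
	}
}

static void cpu_offline(int cpu)
{
	/*
	 * Analogue of the fix in tick_shutdown_broadcast_oneshot(): scrub
	 * the dead cpu from the broadcast masks before its device pointer
	 * goes away.
	 */
	force_mask &= ~(1UL << cpu);
	online_mask &= ~(1UL << cpu);

	/* Analogue of the per-cpu tick device teardown on cpu death. */
	percpu_dev[cpu] = NULL;
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		devs[cpu].next_event = 100 * (cpu + 1);
		percpu_dev[cpu] = &devs[cpu];
		online_mask |= 1UL << cpu;
	}

	/* cpu0 and cpu1 armed the broadcast device, then cpu1 went offline. */
	force_mask |= (1UL << 0) | (1UL << 1);
	cpu_offline(1);

	/* Broadcast "interrupt": must not touch cpu1's (now NULL) device. */
	do_broadcast(&force_mask, 250);
	return 0;
}
```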
Reported-by: Prarit Bhargava <prarit@redhat.com>
Cc: athorlton@sgi.com
Cc: CAI Qian <caiqian@redhat.com>
Link: http://lkml.kernel.org/r/alpine.DEB.2.02.1306261303260.4013@ionos.tec.linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/time/tick-broadcast.c')
-rw-r--r-- | kernel/time/tick-broadcast.c | 13 |
1 files changed, 11 insertions, 2 deletions
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index d067c01586f5..4790037163f6 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -533,6 +533,13 @@ again:
 	cpumask_clear(tick_broadcast_force_mask);
 
 	/*
+	 * Sanity check. Catch the case where we try to broadcast to
+	 * offline cpus.
+	 */
+	if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
+		cpumask_and(tmpmask, tmpmask, cpu_online_mask);
+
+	/*
 	 * Wakeup the cpus which have an expired event.
 	 */
 	tick_do_broadcast(tmpmask);
@@ -773,10 +780,12 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
 
 	/*
-	 * Clear the broadcast mask flag for the dead cpu, but do not
-	 * stop the broadcast device!
+	 * Clear the broadcast masks for the dead cpu, but do not stop
+	 * the broadcast device!
 	 */
 	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
+	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
+	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);
 
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }