| field | value | date |
|---|---|---|
| author | Thomas Gleixner <tglx@linutronix.de> | 2009-12-10 09:35:10 -0500 |
| committer | Thomas Gleixner <tglx@linutronix.de> | 2009-12-11 04:28:08 -0500 |
| commit | bb6eddf7676e1c1f3e637aa93c5224488d99036f | |
| tree | 3256cb7ea6954f28b0bb3ccee2db570e3d0ff762 /kernel/time | |
| parent | e9c0748b687aa70179a9e6d8ffc24b2874fe350b | |
clockevents: Prevent clockevent_devices list corruption on cpu hotplug
Xiaotian Feng triggered a list corruption in the clock events list on
CPU hotplug and debugged the root cause.
If a CPU registers more than one per cpu clock event device, then only
the active clock event device is removed on CPU_DEAD. The unused
devices are kept in the clock events device list.
On CPU up the clock event devices are registered again, which means
that we list_add an already enqueued list_head. That results in list
corruption.
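For illustration, here is a minimal standalone userspace sketch (not the kernel code itself) of why calling list_add() on a list_head that is already enqueued corrupts the list; the helper below mirrors the insertion semantics of include/linux/list.h:

```c
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* Same insertion logic as the kernel's list_add()/__list_add() */
static void list_add(struct list_head *new, struct list_head *head)
{
	head->next->prev = new;
	new->next = head->next;
	new->prev = head;
	head->next = new;
}

int main(void)
{
	struct list_head devices = LIST_HEAD_INIT(devices);
	struct list_head dev = { NULL, NULL };   /* a per cpu device's list_head */

	list_add(&dev, &devices);  /* first registration: list is sane          */
	list_add(&dev, &devices);  /* re-registration while still enqueued:     */
	                           /* dev now points at itself, so iteration    */
	                           /* from 'devices' never reaches the head     */

	printf("dev.next == &dev? %s\n", dev.next == &dev ? "yes" : "no");
	return 0;
}
```

After the second list_add() the re-added node points back at itself, so forward traversal from the list head loops on it forever and any entries behind it become unreachable.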
Resolve this by removing all devices associated with the dead
CPU on CPU_DEAD.
Reported-by: Xiaotian Feng <dfeng@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Xiaotian Feng <dfeng@redhat.com>
Cc: stable@kernel.org
Diffstat (limited to 'kernel/time')
| -rw-r--r-- | kernel/time/clockevents.c | 18 |
1 file changed, 15 insertions(+), 3 deletions(-)
```diff
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 20a8920029ee..91db2e33d86a 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -238,8 +238,9 @@ void clockevents_exchange_device(struct clock_event_device *old,
  */
 void clockevents_notify(unsigned long reason, void *arg)
 {
-	struct list_head *node, *tmp;
+	struct clock_event_device *dev, *tmp;
 	unsigned long flags;
+	int cpu;
 
 	spin_lock_irqsave(&clockevents_lock, flags);
 	clockevents_do_notify(reason, arg);
@@ -250,8 +251,19 @@ void clockevents_notify(unsigned long reason, void *arg)
 		 * Unregister the clock event devices which were
 		 * released from the users in the notify chain.
 		 */
-		list_for_each_safe(node, tmp, &clockevents_released)
-			list_del(node);
+		list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
+			list_del(&dev->list);
+		/*
+		 * Now check whether the CPU has left unused per cpu devices
+		 */
+		cpu = *((int *)arg);
+		list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
+			if (cpumask_test_cpu(cpu, dev->cpumask) &&
+			    cpumask_weight(dev->cpumask) == 1) {
+				BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
+				list_del(&dev->list);
+			}
+		}
 		break;
 	default:
 		break;
```
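The patch reads the dead CPU's number out of `arg`, so the CPU_DEAD notification has to be issued with a pointer to that number. A hypothetical caller sketch (the real call site lives in the tick/CPU-hotplug teardown path and is not part of this diff):

```c
/*
 * Hypothetical sketch only: the argument convention (a pointer to the
 * dead CPU's id) is taken from the hunk above, not from the actual
 * hotplug call site.
 */
static void example_cpu_dead(int cpu)
{
	clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
}
```

Note that the cleanup loop only removes devices whose cpumask contains exactly the dead CPU (cpumask_weight() == 1), so devices spanning several CPUs, such as broadcast devices, stay registered.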
