diff options
Diffstat (limited to 'kernel/time')
 -rw-r--r--  kernel/time/clockevents.c |  3 ++-
 -rw-r--r--  kernel/time/clocksource.c | 18 +++++++++++++++---
 2 files changed, 17 insertions(+), 4 deletions(-)
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 6f740d9f0948..d7395fdfb9f3 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -259,7 +259,8 @@ void clockevents_notify(unsigned long reason, void *arg)
 		cpu = *((int *)arg);
 		list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
 			if (cpumask_test_cpu(cpu, dev->cpumask) &&
-			    cpumask_weight(dev->cpumask) == 1) {
+			    cpumask_weight(dev->cpumask) == 1 &&
+			    !tick_is_broadcast_device(dev)) {
 				BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
 				list_del(&dev->list);
 			}
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index e85c23404d34..13700833c181 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -343,7 +343,19 @@ static void clocksource_resume_watchdog(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&watchdog_lock, flags);
+	/*
+	 * We use trylock here to avoid a potential dead lock when
+	 * kgdb calls this code after the kernel has been stopped with
+	 * watchdog_lock held. When watchdog_lock is held we just
+	 * return and accept, that the watchdog might trigger and mark
+	 * the monitored clock source (usually TSC) unstable.
+	 *
+	 * This does not affect the other caller clocksource_resume()
+	 * because at this point the kernel is UP, interrupts are
+	 * disabled and nothing can hold watchdog_lock.
+	 */
+	if (!spin_trylock_irqsave(&watchdog_lock, flags))
+		return;
 	clocksource_reset_watchdog();
 	spin_unlock_irqrestore(&watchdog_lock, flags);
 }
@@ -458,8 +470,8 @@ void clocksource_resume(void)
  * clocksource_touch_watchdog - Update watchdog
  *
  * Update the watchdog after exception contexts such as kgdb so as not
- * to incorrectly trip the watchdog.
- *
+ * to incorrectly trip the watchdog. This might fail when the kernel
+ * was stopped in code which holds watchdog_lock.
  */
 void clocksource_touch_watchdog(void)
 {
