Diffstat (limited to 'kernel/time/clocksource.c')
 kernel/time/clocksource.c | 34 +++++++++++++++++++++++++++++-----
 1 file changed, 29 insertions(+), 5 deletions(-)
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index d422c7b2236b..1f663d23e85e 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -343,7 +343,19 @@ static void clocksource_resume_watchdog(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&watchdog_lock, flags);
+	/*
+	 * We use trylock here to avoid a potential dead lock when
+	 * kgdb calls this code after the kernel has been stopped with
+	 * watchdog_lock held. When watchdog_lock is held we just
+	 * return and accept, that the watchdog might trigger and mark
+	 * the monitored clock source (usually TSC) unstable.
+	 *
+	 * This does not affect the other caller clocksource_resume()
+	 * because at this point the kernel is UP, interrupts are
+	 * disabled and nothing can hold watchdog_lock.
+	 */
+	if (!spin_trylock_irqsave(&watchdog_lock, flags))
+		return;
 	clocksource_reset_watchdog();
 	spin_unlock_irqrestore(&watchdog_lock, flags);
 }
@@ -441,6 +453,18 @@ static inline int clocksource_watchdog_kthread(void *data) { return 0; }
 #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
 
 /**
+ * clocksource_suspend - suspend the clocksource(s)
+ */
+void clocksource_suspend(void)
+{
+	struct clocksource *cs;
+
+	list_for_each_entry_reverse(cs, &clocksource_list, list)
+		if (cs->suspend)
+			cs->suspend(cs);
+}
+
+/**
  * clocksource_resume - resume the clocksource(s)
  */
 void clocksource_resume(void)
@@ -449,7 +473,7 @@ void clocksource_resume(void)
 
 	list_for_each_entry(cs, &clocksource_list, list)
 		if (cs->resume)
-			cs->resume();
+			cs->resume(cs);
 
 	clocksource_resume_watchdog();
 }
@@ -458,8 +482,8 @@ void clocksource_resume(void)
  * clocksource_touch_watchdog - Update watchdog
  *
  * Update the watchdog after exception contexts such as kgdb so as not
- * to incorrectly trip the watchdog.
- *
+ * to incorrectly trip the watchdog. This might fail when the kernel
+ * was stopped in code which holds watchdog_lock.
  */
 void clocksource_touch_watchdog(void)
 {
@@ -677,7 +701,7 @@ sysfs_show_current_clocksources(struct sys_device *dev,
  * @count: length of buffer
  *
  * Takes input from sysfs interface for manually overriding the default
- * clocksource selction.
+ * clocksource selection.
 */
 static ssize_t sysfs_override_clocksource(struct sys_device *dev,
 					  struct sysdev_attribute *attr,
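
The trylock comment in the first hunk carries the core reasoning of the patch: never take watchdog_lock unconditionally from a context that may have interrupted the lock holder. A minimal standalone sketch of that idiom follows; the lock name and the function are hypothetical and only illustrate the pattern, they are not part of the patch.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);		/* hypothetical lock */

/*
 * Sketch of the trylock idiom used in clocksource_resume_watchdog():
 * if the caller (e.g. kgdb) may run while the interrupted code already
 * holds the lock, an unconditional spin_lock_irqsave() would deadlock.
 * Try to acquire the lock and simply skip the work when that fails.
 */
static void example_touch(void)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(&example_lock, flags))
		return;		/* lock held elsewhere: skip rather than deadlock */

	/* ... protected work goes here ... */

	spin_unlock_irqrestore(&example_lock, flags);
}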
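
The remaining hunks change the clocksource callback interface: ->resume() now receives the struct clocksource pointer, and a matching ->suspend() callback is introduced, invoked in reverse list order by the new clocksource_suspend(). Below is a sketch of how a driver-side clocksource might fill in these callbacks; the device name, rating, and read routine are hypothetical, assuming the clocksource API of this kernel generation (cycle_t read callback).

#include <linux/clocksource.h>

/* Hypothetical counter read; a real driver would read its hardware. */
static cycle_t example_cs_read(struct clocksource *cs)
{
	return 0;
}

/* Called from clocksource_suspend(), which walks the list in reverse. */
static void example_cs_suspend(struct clocksource *cs)
{
	/* quiesce the hypothetical counter */
}

/* Called from clocksource_resume(); since this patch it receives @cs. */
static void example_cs_resume(struct clocksource *cs)
{
	/* restart the hypothetical counter */
}

static struct clocksource example_cs = {
	.name		= "example",
	.rating		= 200,
	.read		= example_cs_read,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.suspend	= example_cs_suspend,
	.resume		= example_cs_resume,
};

Registration would then happen as usual for the driver; the new callbacks are only invoked if they are set, since both clocksource_suspend() and clocksource_resume() check the pointer before calling it.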
