aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorVitaly Kuznetsov <vkuznets@redhat.com>2016-01-22 12:31:53 -0500
committerThomas Gleixner <tglx@linutronix.de>2016-01-27 06:38:05 -0500
commitbbf66d897adf2bb0c310db96c97e8db6369f39e1 (patch)
tree7528b65e67f4eaafffc1a87b3a0b9c5b38281ad5
parenta6e707ddbdf150bd1c2a5c0eccc55abdc62a0039 (diff)
clocksource: Allow unregistering the watchdog
Hyper-V vmbus module registers TSC page clocksource when loaded. This is the
clocksource with the highest rating and thus it becomes the watchdog making
unloading of the vmbus module impossible.

Separate clocksource_select_watchdog() from clocksource_enqueue_watchdog() and
use it on clocksource register/rating change/unregister.

After all, lobotomized monkeys may need some love too.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Dexuan Cui <decui@microsoft.com>
Cc: K. Y. Srinivasan <kys@microsoft.com>
Link: http://lkml.kernel.org/r/1453483913-25672-1-git-send-email-vkuznets@redhat.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--kernel/time/clocksource.c52
1 file changed, 42 insertions, 10 deletions
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 664de539299b..56ece145a814 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -323,13 +323,42 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
 	/* cs is a watchdog. */
 	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
 		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
+	}
+	spin_unlock_irqrestore(&watchdog_lock, flags);
+}
+
+static void clocksource_select_watchdog(bool fallback)
+{
+	struct clocksource *cs, *old_wd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&watchdog_lock, flags);
+	/* save current watchdog */
+	old_wd = watchdog;
+	if (fallback)
+		watchdog = NULL;
+
+	list_for_each_entry(cs, &clocksource_list, list) {
+		/* cs is a clocksource to be watched. */
+		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
+			continue;
+
+		/* Skip current if we were requested for a fallback. */
+		if (fallback && cs == old_wd)
+			continue;
+
 	/* Pick the best watchdog. */
-	if (!watchdog || cs->rating > watchdog->rating) {
+	if (!watchdog || cs->rating > watchdog->rating)
 		watchdog = cs;
-		/* Reset watchdog cycles */
-		clocksource_reset_watchdog();
-	}
 	}
+	/* If we failed to find a fallback restore the old one. */
+	if (!watchdog)
+		watchdog = old_wd;
+
+	/* If we changed the watchdog we need to reset cycles. */
+	if (watchdog != old_wd)
+		clocksource_reset_watchdog();
+
 	/* Check if the watchdog timer needs to be started. */
 	clocksource_start_watchdog();
 	spin_unlock_irqrestore(&watchdog_lock, flags);
@@ -404,6 +433,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
 	cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
 }
 
+static void clocksource_select_watchdog(bool fallback) { }
 static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
 static inline void clocksource_resume_watchdog(void) { }
 static inline int __clocksource_watchdog_kthread(void) { return 0; }
@@ -736,6 +766,7 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
 	clocksource_enqueue(cs);
 	clocksource_enqueue_watchdog(cs);
 	clocksource_select();
+	clocksource_select_watchdog(false);
 	mutex_unlock(&clocksource_mutex);
 	return 0;
 }
@@ -758,6 +789,7 @@ void clocksource_change_rating(struct clocksource *cs, int rating)
 	mutex_lock(&clocksource_mutex);
 	__clocksource_change_rating(cs, rating);
 	clocksource_select();
+	clocksource_select_watchdog(false);
 	mutex_unlock(&clocksource_mutex);
 }
 EXPORT_SYMBOL(clocksource_change_rating);
@@ -767,12 +799,12 @@ EXPORT_SYMBOL(clocksource_change_rating);
  */
 static int clocksource_unbind(struct clocksource *cs)
 {
-	/*
-	 * I really can't convince myself to support this on hardware
-	 * designed by lobotomized monkeys.
-	 */
-	if (clocksource_is_watchdog(cs))
-		return -EBUSY;
+	if (clocksource_is_watchdog(cs)) {
+		/* Select and try to install a replacement watchdog. */
+		clocksource_select_watchdog(true);
+		if (clocksource_is_watchdog(cs))
+			return -EBUSY;
+	}
 
 	if (cs == curr_clocksource) {
 		/* Select and try to install a replacement clock source */