author		Peter Zijlstra <peterz@infradead.org>	2018-04-23 11:28:55 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2018-05-02 10:10:40 -0400
commit		2aae7bcfa4104b770e6f612356adb8d66c6144d6 (patch)
tree		086cf4527750ef42097af921a4ae4b5ccbff2393
parent		e9088adda13cd23249d4b0abb97ff8a81bf5573a (diff)
clocksource: Allow clocksource_mark_unstable() on unregistered clocksources
Because of how the code flips between tsc-early and tsc clocksources it might need to mark one or both unstable. The current code in mark_tsc_unstable() only worked because previously it registered the tsc clocksource once and then never touched it.

Since it now unregisters the tsc-early clocksource, it needs to know if a clocksource got unregistered and the current cs->mult test doesn't work for that. Instead use list_empty(&cs->list) to test for registration.

Furthermore, since clocksource_mark_unstable() needs to place the cs on the wd_list, it links the cs->list and cs->wd_list serialization. It must not see a clocksource registered (!empty cs->list) but already past dequeue_watchdog(). So place {en,de}queue{,_watchdog}() under the same lock.

Provided cs->list is initialized to empty, this then allows us to unconditionally use clocksource_mark_unstable(), regardless of the registration state.

Fixes: aa83c45762a2 ("x86/tsc: Introduce early tsc clocksource")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Diego Viola <diego.viola@gmail.com>
Cc: len.brown@intel.com
Cc: rjw@rjwysocki.net
Cc: diego.viola@gmail.com
Cc: rui.zhang@intel.com
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20180502135312.GS12217@hirez.programming.kicks-ass.net
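A caller-side illustration of that last point: the sketch below is an assumption modelled on the x86 TSC driver, not part of this patch (clocksource_tsc_early, clocksource_tsc, mark_tsc_unstable() and the LIST_HEAD_INIT initializers are illustrative). It only shows why initializing ->list to an empty list head lets clocksource_mark_unstable() be called unconditionally on a clocksource that may or may not be registered.

/*
 * Hypothetical sketch, not from this patch. With ->list initialized via
 * LIST_HEAD_INIT, list_empty(&cs->list) is true both before registration
 * and after unregistration, which is exactly what __clocksource_unstable()
 * now checks. Fields such as .read and .mask are omitted for brevity.
 */
static struct clocksource clocksource_tsc_early = {
	.name	= "tsc-early",
	.rating	= 299,
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_MUST_VERIFY,
	.list	= LIST_HEAD_INIT(clocksource_tsc_early.list),
};

static struct clocksource clocksource_tsc = {
	.name	= "tsc",
	.rating	= 300,
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_MUST_VERIFY,
	.list	= LIST_HEAD_INIT(clocksource_tsc.list),
};

void mark_tsc_unstable(char *reason)
{
	pr_info("Marking TSC unstable due to %s\n", reason);

	/*
	 * Safe regardless of which of the two clocksources is currently
	 * registered: for an unregistered one, __clocksource_unstable()
	 * sets CLOCK_SOURCE_UNSTABLE and returns before touching the
	 * watchdog list or the rating.
	 */
	clocksource_mark_unstable(&clocksource_tsc_early);
	clocksource_mark_unstable(&clocksource_tsc);
}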
-rw-r--r--	kernel/time/clocksource.c	50
1 file changed, 34 insertions(+), 16 deletions(-)
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 0e974cface0b..c3d2b94723dc 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -119,6 +119,16 @@ static DEFINE_SPINLOCK(watchdog_lock);
 static int watchdog_running;
 static atomic_t watchdog_reset_pending;
 
+static void inline clocksource_watchdog_lock(unsigned long *flags)
+{
+	spin_lock_irqsave(&watchdog_lock, *flags);
+}
+
+static void inline clocksource_watchdog_unlock(unsigned long *flags)
+{
+	spin_unlock_irqrestore(&watchdog_lock, *flags);
+}
+
 static int clocksource_watchdog_kthread(void *data);
 static void __clocksource_change_rating(struct clocksource *cs, int rating);
 
@@ -142,6 +152,9 @@ static void __clocksource_unstable(struct clocksource *cs)
 	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
 	cs->flags |= CLOCK_SOURCE_UNSTABLE;
 
+	if (list_empty(&cs->list))
+		return;
+
 	if (cs->mark_unstable)
 		cs->mark_unstable(cs);
 
@@ -164,7 +177,7 @@ void clocksource_mark_unstable(struct clocksource *cs)
 
 	spin_lock_irqsave(&watchdog_lock, flags);
 	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
-		if (list_empty(&cs->wd_list))
+		if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
 			list_add(&cs->wd_list, &watchdog_list);
 		__clocksource_unstable(cs);
 	}
@@ -319,9 +332,6 @@ static void clocksource_resume_watchdog(void)
 
 static void clocksource_enqueue_watchdog(struct clocksource *cs)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&watchdog_lock, flags);
 	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
 		/* cs is a clocksource to be watched. */
 		list_add(&cs->wd_list, &watchdog_list);
@@ -331,7 +341,6 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
 		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
 			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
 	}
-	spin_unlock_irqrestore(&watchdog_lock, flags);
 }
 
 static void clocksource_select_watchdog(bool fallback)
@@ -373,9 +382,6 @@ static void clocksource_select_watchdog(bool fallback)
 
 static void clocksource_dequeue_watchdog(struct clocksource *cs)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&watchdog_lock, flags);
 	if (cs != watchdog) {
 		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
 			/* cs is a watched clocksource. */
@@ -384,21 +390,19 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
 			clocksource_stop_watchdog();
 		}
 	}
-	spin_unlock_irqrestore(&watchdog_lock, flags);
 }
 
 static int __clocksource_watchdog_kthread(void)
 {
 	struct clocksource *cs, *tmp;
 	unsigned long flags;
-	LIST_HEAD(unstable);
 	int select = 0;
 
 	spin_lock_irqsave(&watchdog_lock, flags);
 	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
 		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
 			list_del_init(&cs->wd_list);
-			list_add(&cs->wd_list, &unstable);
+			__clocksource_change_rating(cs, 0);
 			select = 1;
 		}
 		if (cs->flags & CLOCK_SOURCE_RESELECT) {
@@ -410,11 +414,6 @@ static int __clocksource_watchdog_kthread(void)
 	clocksource_stop_watchdog();
 	spin_unlock_irqrestore(&watchdog_lock, flags);
 
-	/* Needs to be done outside of watchdog lock */
-	list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
-		list_del_init(&cs->wd_list);
-		__clocksource_change_rating(cs, 0);
-	}
 	return select;
 }
 
@@ -447,6 +446,9 @@ static inline int __clocksource_watchdog_kthread(void) { return 0; }
 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
 void clocksource_mark_unstable(struct clocksource *cs) { }
 
+static void inline clocksource_watchdog_lock(unsigned long *flags) { }
+static void inline clocksource_watchdog_unlock(unsigned long *flags) { }
+
 #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
 
 /**
@@ -779,14 +781,19 @@ EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
  */
 int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
 {
+	unsigned long flags;
 
 	/* Initialize mult/shift and max_idle_ns */
 	__clocksource_update_freq_scale(cs, scale, freq);
 
 	/* Add clocksource to the clocksource list */
 	mutex_lock(&clocksource_mutex);
+
+	clocksource_watchdog_lock(&flags);
 	clocksource_enqueue(cs);
 	clocksource_enqueue_watchdog(cs);
+	clocksource_watchdog_unlock(&flags);
+
 	clocksource_select();
 	clocksource_select_watchdog(false);
 	mutex_unlock(&clocksource_mutex);
@@ -808,8 +815,13 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating)
  */
 void clocksource_change_rating(struct clocksource *cs, int rating)
 {
+	unsigned long flags;
+
 	mutex_lock(&clocksource_mutex);
+	clocksource_watchdog_lock(&flags);
 	__clocksource_change_rating(cs, rating);
+	clocksource_watchdog_unlock(&flags);
+
 	clocksource_select();
 	clocksource_select_watchdog(false);
 	mutex_unlock(&clocksource_mutex);
@@ -821,6 +833,8 @@ EXPORT_SYMBOL(clocksource_change_rating);
  */
 static int clocksource_unbind(struct clocksource *cs)
 {
+	unsigned long flags;
+
 	if (clocksource_is_watchdog(cs)) {
 		/* Select and try to install a replacement watchdog. */
 		clocksource_select_watchdog(true);
@@ -834,8 +848,12 @@ static int clocksource_unbind(struct clocksource *cs)
 		if (curr_clocksource == cs)
 			return -EBUSY;
 	}
+
+	clocksource_watchdog_lock(&flags);
 	clocksource_dequeue_watchdog(cs);
 	list_del_init(&cs->list);
+	clocksource_watchdog_unlock(&flags);
+
 	return 0;
 }
 