Diffstat (limited to 'kernel/time/clocksource.c')
 kernel/time/clocksource.c | 40 ++++++++++++++++++++++++++++++----------
 1 file changed, 30 insertions(+), 10 deletions(-)
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index f74fb00d8064..0e6e97a01942 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -133,19 +133,40 @@ static void inline clocksource_watchdog_unlock(unsigned long *flags)
 	spin_unlock_irqrestore(&watchdog_lock, *flags);
 }
 
+static int clocksource_watchdog_kthread(void *data);
+static void __clocksource_change_rating(struct clocksource *cs, int rating);
+
 /*
  * Interval: 0.5sec Threshold: 0.0625s
  */
 #define WATCHDOG_INTERVAL (HZ >> 1)
 #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
 
+static void clocksource_watchdog_work(struct work_struct *work)
+{
+	/*
+	 * We cannot directly run clocksource_watchdog_kthread() here, because
+	 * clocksource_select() calls timekeeping_notify() which uses
+	 * stop_machine(). One cannot use stop_machine() from a workqueue() due to
+	 * lock inversions wrt CPU hotplug.
+	 *
+	 * Also, we only ever run this work once or twice during the lifetime
+	 * of the kernel, so there is no point in creating a more permanent
+	 * kthread for this.
+	 *
+	 * If kthread_run() fails, the next watchdog scan over the
+	 * watchdog_list will find the unstable clock again.
+	 */
+	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
+}
+
 static void __clocksource_unstable(struct clocksource *cs)
 {
 	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
 	cs->flags |= CLOCK_SOURCE_UNSTABLE;
 
 	/*
-	 * If the clocksource is registered clocksource_watchdog_work() will
+	 * If the clocksource is registered clocksource_watchdog_kthread() will
 	 * re-rate and re-select.
 	 */
 	if (list_empty(&cs->list)) {
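
As an aside for readers unfamiliar with the pattern in the hunk above: the work item bounces to a one-shot kthread because the eventual clocksource_select() path calls stop_machine(), which can deadlock against CPU hotplug when invoked from a workqueue worker. A minimal standalone sketch of that bounce follows; the names (payload_thread, bounce_work) are hypothetical and not part of this patch.

/*
 * Hypothetical sketch of the workqueue -> one-shot kthread bounce.
 */
#include <linux/workqueue.h>
#include <linux/kthread.h>

static int payload_thread(void *data)
{
	/*
	 * Runs in its own kernel thread, so calling stop_machine()
	 * (e.g. via timekeeping_notify()) is safe here, unlike on a
	 * workqueue worker.
	 */
	return 0;
}

static void bounce_work(struct work_struct *work)
{
	/*
	 * Fire and forget. If kthread_run() fails, the caller's next
	 * periodic scan will notice the condition and schedule this
	 * work again, so the error can be ignored.
	 */
	kthread_run(payload_thread, NULL, "payload");
}

static DECLARE_WORK(bounce, bounce_work);

/* From atomic context (e.g. a timer callback): schedule_work(&bounce); */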
@@ -156,7 +177,7 @@ static void __clocksource_unstable(struct clocksource *cs)
 	if (cs->mark_unstable)
 		cs->mark_unstable(cs);
 
-	/* kick clocksource_watchdog_work() */
+	/* kick clocksource_watchdog_kthread() */
 	if (finished_booting)
 		schedule_work(&watchdog_work);
 }
@@ -166,7 +187,7 @@ static void __clocksource_unstable(struct clocksource *cs)
  * @cs: clocksource to be marked unstable
  *
  * This function is called by the x86 TSC code to mark clocksources as unstable;
- * it defers demotion and re-selection to a work.
+ * it defers demotion and re-selection to a kthread.
  */
 void clocksource_mark_unstable(struct clocksource *cs)
 {
@@ -391,9 +412,7 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
 	}
 }
 
-static void __clocksource_change_rating(struct clocksource *cs, int rating);
-
-static int __clocksource_watchdog_work(void)
+static int __clocksource_watchdog_kthread(void)
 {
 	struct clocksource *cs, *tmp;
 	unsigned long flags;
@@ -418,12 +437,13 @@ static int __clocksource_watchdog_work(void)
 	return select;
 }
 
-static void clocksource_watchdog_work(struct work_struct *work)
+static int clocksource_watchdog_kthread(void *data)
 {
 	mutex_lock(&clocksource_mutex);
-	if (__clocksource_watchdog_work())
+	if (__clocksource_watchdog_kthread())
 		clocksource_select();
 	mutex_unlock(&clocksource_mutex);
+	return 0;
 }
 
 static bool clocksource_is_watchdog(struct clocksource *cs)
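
The added "return 0;" in the hunk above is forced by the handler's new type: a work handler has type void (*)(struct work_struct *), while a kthread entry point has type int (*)(void *), and its return value becomes the thread's exit code. A hypothetical sketch of that one-shot kthread shape, not taken from the patch:

/*
 * One-shot kthread: returns when done; a thread started with
 * kthread_run() that simply returns needs no matching kthread_stop().
 */
#include <linux/kthread.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_mutex);

static int demo_kthread(void *data)
{
	mutex_lock(&demo_mutex);	/* sleeping locks are fine in a kthread */
	/* ... demote unstable clocksources, pick a replacement ... */
	mutex_unlock(&demo_mutex);
	return 0;			/* thread exits and is cleaned up */
}

/* fire and forget: kthread_run(demo_kthread, NULL, "demo"); */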
@@ -442,7 +462,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
 static void clocksource_select_watchdog(bool fallback) { }
 static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
 static inline void clocksource_resume_watchdog(void) { }
-static inline int __clocksource_watchdog_work(void) { return 0; }
+static inline int __clocksource_watchdog_kthread(void) { return 0; }
 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
 void clocksource_mark_unstable(struct clocksource *cs) { }
 
@@ -810,7 +830,7 @@ static int __init clocksource_done_booting(void)
 	/*
 	 * Run the watchdog first to eliminate unstable clock sources
 	 */
-	__clocksource_watchdog_work();
+	__clocksource_watchdog_kthread();
 	clocksource_select();
 	mutex_unlock(&clocksource_mutex);
 	return 0;