author	Thomas Gleixner <tglx@linutronix.de>	2011-09-12 07:32:23 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2011-09-13 03:58:29 -0400
commit	9fb60336253edf73dedc527b2aa2bf32eae0d6da (patch)
tree	926cb7ca98eaacea06c8b951ee8b3a1330522c82 /kernel/time
parent	e8abccb719377af63cb0f1fed289db405e3def16 (diff)
clocksource: Make watchdog reset lockless
KGDB needs to trylock watchdog_lock when trying to reset the
clocksource watchdog after the system has been stopped to avoid a
potential deadlock. When the trylock fails TSC usually becomes
unstable.

We can be more clever by using an atomic counter and checking it in
the clocksource_watchdog callback. We restart the watchdog whenever
the counter is > 0 and only decrement the counter when we ran through
a full update cycle.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: John Stultz <johnstul@us.ibm.com>
Acked-by: Jason Wessel <jason.wessel@windriver.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/alpine.LFD.2.02.1109121326280.2723@ionos
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
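[Editor's note] The scheme needs no locking on the reset side: the reset
path only increments a pending counter, and the periodic watchdog
callback re-seeds every clocksource while the counter is non-zero,
decrementing it only after a complete pass. Below is a minimal
compile-only sketch of that pattern, written with portable C11 atomics
instead of the kernel's atomic_t API; the function names and bodies are
illustrative stand-ins, not the patched kernel code.

	#include <stdatomic.h>

	static atomic_int watchdog_reset_pending;

	/* Reset request: safe from any context, even while another
	 * CPU holds the watchdog lock (the kernel uses atomic_inc()). */
	static void resume_watchdog_sketch(void)
	{
		atomic_fetch_add(&watchdog_reset_pending, 1);
	}

	/* Periodic watchdog callback (the kernel runs this under
	 * watchdog_lock). */
	static void watchdog_cycle_sketch(void)
	{
		/* Sample once, so a request arriving mid-cycle keeps
		 * the counter non-zero and forces one more full pass. */
		int reset_pending = atomic_load(&watchdog_reset_pending);

		/* Per clocksource: while a reset is pending, re-seed
		 * the last-read samples and skip the deviation check
		 * instead of comparing against stale values. */

		/* Consume the request only after a full cycle over all
		 * clocksources. */
		if (reset_pending)
			atomic_fetch_sub(&watchdog_reset_pending, 1);
	}

Decrementing only after a full cycle guarantees every monitored
clocksource is re-initialized before deviation checks resume, which the
trylock variant could not guarantee when the lock was contended.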
Diffstat (limited to 'kernel/time')
-rw-r--r--	kernel/time/clocksource.c	38
1 file changed, 18 insertions(+), 20 deletions(-)
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index e0980f0d9a0..cf52fda2e09 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -186,6 +186,7 @@ static struct timer_list watchdog_timer;
 static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
 static DEFINE_SPINLOCK(watchdog_lock);
 static int watchdog_running;
+static atomic_t watchdog_reset_pending;
 
 static int clocksource_watchdog_kthread(void *data);
 static void __clocksource_change_rating(struct clocksource *cs, int rating);
@@ -247,12 +248,14 @@ static void clocksource_watchdog(unsigned long data)
 	struct clocksource *cs;
 	cycle_t csnow, wdnow;
 	int64_t wd_nsec, cs_nsec;
-	int next_cpu;
+	int next_cpu, reset_pending;
 
 	spin_lock(&watchdog_lock);
 	if (!watchdog_running)
 		goto out;
 
+	reset_pending = atomic_read(&watchdog_reset_pending);
+
 	list_for_each_entry(cs, &watchdog_list, wd_list) {
 
 		/* Clocksource already marked unstable? */
@@ -268,7 +271,8 @@ static void clocksource_watchdog(unsigned long data)
 		local_irq_enable();
 
 		/* Clocksource initialized ? */
-		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
+		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
+		    atomic_read(&watchdog_reset_pending)) {
 			cs->flags |= CLOCK_SOURCE_WATCHDOG;
 			cs->wd_last = wdnow;
 			cs->cs_last = csnow;
@@ -283,8 +287,11 @@ static void clocksource_watchdog(unsigned long data)
 		cs->cs_last = csnow;
 		cs->wd_last = wdnow;
 
+		if (atomic_read(&watchdog_reset_pending))
+			continue;
+
 		/* Check the deviation from the watchdog clocksource. */
-		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
+		if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) {
 			clocksource_unstable(cs, cs_nsec - wd_nsec);
 			continue;
 		}
@@ -303,6 +310,13 @@ static void clocksource_watchdog(unsigned long data)
 	}
 
 	/*
+	 * We only clear the watchdog_reset_pending, when we did a
+	 * full cycle through all clocksources.
+	 */
+	if (reset_pending)
+		atomic_dec(&watchdog_reset_pending);
+
+	/*
 	 * Cycle through CPUs to check if the CPUs stay synchronized
 	 * to each other.
 	 */
@@ -344,23 +358,7 @@ static inline void clocksource_reset_watchdog(void)
 
 static void clocksource_resume_watchdog(void)
 {
-	unsigned long flags;
-
-	/*
-	 * We use trylock here to avoid a potential dead lock when
-	 * kgdb calls this code after the kernel has been stopped with
-	 * watchdog_lock held. When watchdog_lock is held we just
-	 * return and accept, that the watchdog might trigger and mark
-	 * the monitored clock source (usually TSC) unstable.
-	 *
-	 * This does not affect the other caller clocksource_resume()
-	 * because at this point the kernel is UP, interrupts are
-	 * disabled and nothing can hold watchdog_lock.
-	 */
-	if (!spin_trylock_irqsave(&watchdog_lock, flags))
-		return;
-	clocksource_reset_watchdog();
-	spin_unlock_irqrestore(&watchdog_lock, flags);
+	atomic_inc(&watchdog_reset_pending);
 }
 
 static void clocksource_enqueue_watchdog(struct clocksource *cs)