path: root/kernel/time
author	Martin Schwidefsky <schwidefsky@de.ibm.com>	2009-08-14 09:47:24 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2009-08-15 04:55:46 -0400
commit	fb63a0ebe615fba9de8c75ea44ded999d1e24c65 (patch)
tree	7544b72d9de56f68047579d39da4927f839197d3 /kernel/time
parent	0f8e8ef7c204988246da5a42d576b7fa5277a8e4 (diff)
clocksource: Refactor clocksource watchdog
Refactor clocksource watchdog code to make it more readable. Add
clocksource_dequeue_watchdog to remove a clocksource from the watchdog
list when it is unregistered.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Acked-by: John Stultz <johnstul@us.ibm.com>
Cc: Daniel Walker <dwalker@fifo99.com>
LKML-Reference: <20090814134809.110881699@de.ibm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
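For orientation, the plain C sketch below (not part of the patch) shows how a driver would now exercise the refactored code paths: registering a clocksource reaches clocksource_enqueue_watchdog(), which may start the watchdog timer via clocksource_start_watchdog(), and unregistering reaches the new clocksource_dequeue_watchdog(), which may stop it via clocksource_stop_watchdog(). The driver-side names (my_counter_read, read_hw_counter, my_clocksource) are hypothetical, and mult/shift calibration is omitted.

#include <linux/clocksource.h>
#include <linux/init.h>

/* Hypothetical hardware counter read; read_hw_counter() is assumed to exist. */
static cycle_t my_counter_read(struct clocksource *cs)
{
	return (cycle_t)read_hw_counter();
}

static struct clocksource my_clocksource = {
	.name	= "my_counter",
	.rating	= 300,
	.read	= my_counter_read,
	.mask	= CLOCKSOURCE_MASK(64),
	/* CLOCK_SOURCE_MUST_VERIFY puts this clocksource on the watchdog list. */
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_MUST_VERIFY,
};

static int __init my_clocksource_init(void)
{
	/*
	 * clocksource_register() calls clocksource_enqueue_watchdog(), which
	 * adds the entry to watchdog_list and lets clocksource_start_watchdog()
	 * arm the watchdog timer if it is not already running.
	 */
	return clocksource_register(&my_clocksource);
}

static void __exit my_clocksource_exit(void)
{
	/*
	 * clocksource_unregister() now calls clocksource_dequeue_watchdog(),
	 * which removes the entry and lets clocksource_stop_watchdog() delete
	 * the timer once nothing is left to watch.
	 */
	clocksource_unregister(&my_clocksource);
}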
Diffstat (limited to 'kernel/time')
-rw-r--r--	kernel/time/clocksource.c	97
1 file changed, 69 insertions(+), 28 deletions(-)
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 89a7b91bfbdd..56aaa749645d 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -145,6 +145,7 @@ static struct clocksource *watchdog;
 static struct timer_list watchdog_timer;
 static DEFINE_SPINLOCK(watchdog_lock);
 static cycle_t watchdog_last;
+static int watchdog_running;
 
 /*
  * Interval: 0.5sec Threshold: 0.0625s
@@ -168,6 +169,8 @@ static void clocksource_watchdog(unsigned long data)
 	int64_t wd_nsec, cs_nsec;
 
 	spin_lock(&watchdog_lock);
+	if (!watchdog_running)
+		goto out;
 
 	wdnow = watchdog->read(watchdog);
 	wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
@@ -217,9 +220,30 @@ static void clocksource_watchdog(unsigned long data)
 		watchdog_timer.expires += WATCHDOG_INTERVAL;
 		add_timer_on(&watchdog_timer, next_cpu);
 	}
+out:
 	spin_unlock(&watchdog_lock);
 }
 
+static inline void clocksource_start_watchdog(void)
+{
+	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
+		return;
+	init_timer(&watchdog_timer);
+	watchdog_timer.function = clocksource_watchdog;
+	watchdog_last = watchdog->read(watchdog);
+	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
+	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
+	watchdog_running = 1;
+}
+
+static inline void clocksource_stop_watchdog(void)
+{
+	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
+		return;
+	del_timer(&watchdog_timer);
+	watchdog_running = 0;
+}
+
 static inline void clocksource_reset_watchdog(void)
 {
 	struct clocksource *cs;
@@ -237,55 +261,70 @@ static void clocksource_resume_watchdog(void)
 	spin_unlock_irqrestore(&watchdog_lock, flags);
 }
 
-static void clocksource_check_watchdog(struct clocksource *cs)
+static void clocksource_enqueue_watchdog(struct clocksource *cs)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&watchdog_lock, flags);
 	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
-		int started = !list_empty(&watchdog_list);
-
+		/* cs is a clocksource to be watched. */
 		list_add(&cs->wd_list, &watchdog_list);
-		if (!started && watchdog) {
-			watchdog_last = watchdog->read(watchdog);
-			watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
-			add_timer_on(&watchdog_timer,
-				     cpumask_first(cpu_online_mask));
-		}
+		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
 	} else {
+		/* cs is a watchdog. */
 		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
 			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
-
+		/* Pick the best watchdog. */
 		if (!watchdog || cs->rating > watchdog->rating) {
-			if (watchdog)
-				del_timer(&watchdog_timer);
 			watchdog = cs;
-			init_timer(&watchdog_timer);
-			watchdog_timer.function = clocksource_watchdog;
-
 			/* Reset watchdog cycles */
 			clocksource_reset_watchdog();
-			/* Start if list is not empty */
-			if (!list_empty(&watchdog_list)) {
-				watchdog_last = watchdog->read(watchdog);
-				watchdog_timer.expires =
-					jiffies + WATCHDOG_INTERVAL;
-				add_timer_on(&watchdog_timer,
-					     cpumask_first(cpu_online_mask));
-			}
 		}
 	}
+	/* Check if the watchdog timer needs to be started. */
+	clocksource_start_watchdog();
 	spin_unlock_irqrestore(&watchdog_lock, flags);
 }
-#else
-static void clocksource_check_watchdog(struct clocksource *cs)
+
+static void clocksource_dequeue_watchdog(struct clocksource *cs)
+{
+	struct clocksource *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&watchdog_lock, flags);
+	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
+		/* cs is a watched clocksource. */
+		list_del_init(&cs->wd_list);
+	} else if (cs == watchdog) {
+		/* Reset watchdog cycles */
+		clocksource_reset_watchdog();
+		/* Current watchdog is removed. Find an alternative. */
+		watchdog = NULL;
+		list_for_each_entry(tmp, &clocksource_list, list) {
+			if (tmp == cs || tmp->flags & CLOCK_SOURCE_MUST_VERIFY)
+				continue;
+			if (!watchdog || tmp->rating > watchdog->rating)
+				watchdog = tmp;
+		}
+	}
+	cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
+	/* Check if the watchdog timer needs to be stopped. */
+	clocksource_stop_watchdog();
+	spin_unlock_irqrestore(&watchdog_lock, flags);
+}
+
+#else /* CONFIG_CLOCKSOURCE_WATCHDOG */
+
+static void clocksource_enqueue_watchdog(struct clocksource *cs)
 {
 	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
 		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
 }
 
+static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
 static inline void clocksource_resume_watchdog(void) { }
-#endif
+
+#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
 
 /**
  * clocksource_resume - resume the clocksource(s)
@@ -414,14 +453,13 @@ int clocksource_register(struct clocksource *cs)
 	clocksource_enqueue(cs);
 	clocksource_select();
 	spin_unlock_irqrestore(&clocksource_lock, flags);
-	clocksource_check_watchdog(cs);
+	clocksource_enqueue_watchdog(cs);
 	return 0;
 }
 EXPORT_SYMBOL(clocksource_register);
 
 /**
  * clocksource_change_rating - Change the rating of a registered clocksource
- *
  */
 void clocksource_change_rating(struct clocksource *cs, int rating)
 {
@@ -434,6 +472,7 @@ void clocksource_change_rating(struct clocksource *cs, int rating)
 	clocksource_select();
 	spin_unlock_irqrestore(&clocksource_lock, flags);
 }
+EXPORT_SYMBOL(clocksource_change_rating);
 
 /**
  * clocksource_unregister - remove a registered clocksource
@@ -442,11 +481,13 @@ void clocksource_unregister(struct clocksource *cs)
 {
 	unsigned long flags;
 
+	clocksource_dequeue_watchdog(cs);
 	spin_lock_irqsave(&clocksource_lock, flags);
 	list_del(&cs->list);
 	clocksource_select();
 	spin_unlock_irqrestore(&clocksource_lock, flags);
 }
+EXPORT_SYMBOL(clocksource_unregister);
 
 #ifdef CONFIG_SYSFS
 /**