author	Martin Schwidefsky <schwidefsky@de.ibm.com>	2009-08-14 09:47:30 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2009-08-15 04:55:46 -0400
commit	75c5158f70c065b9704b924503d96e8297838f79 (patch)
tree	74b02ba1f13aaf8292bd472a8a197ac900ff20e7
parent	2ba2a3054fdffc8e6452f4ee120760322a6fbd43 (diff)
timekeeping: Update clocksource with stop_machine
update_wall_time calls change_clocksource HZ times per second to check
if a new clock source is available. In close to 100% of all calls there
is no new clock. Replace the tick based check by an update done with
stop_machine.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Acked-by: John Stultz <johnstul@us.ibm.com>
Cc: Daniel Walker <dwalker@fifo99.com>
LKML-Reference: <20090814134810.711836357@de.ibm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--	include/linux/clocksource.h	  2
-rw-r--r--	kernel/time/clocksource.c	112
-rw-r--r--	kernel/time/timekeeping.c	 41
3 files changed, 72 insertions, 83 deletions
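For context, here is a minimal user-space sketch of the flow this patch introduces: the selection path pushes the newly chosen clocksource through a notify call that installs it while time accounting is quiesced, instead of update_wall_time() polling every tick for a pending clocksource. A plain pthread mutex stands in for stop_machine(); the function and variable names mirror the kernel code touched below, but the program itself (main, printf output, the mutex) is only an illustrative analogy, not kernel code.

#include <pthread.h>
#include <stdio.h>

struct clocksource {
	const char *name;
};

static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;
static struct clocksource *curr_clocksource;

/* Runs while timekeeping updates are quiesced (the stop_machine() stand-in). */
static int change_clocksource(void *data)
{
	curr_clocksource = data;
	return 0;
}

/* Called once when a better clocksource is selected, not once per tick. */
static void timekeeping_notify(struct clocksource *clock)
{
	if (curr_clocksource == clock)
		return;
	pthread_mutex_lock(&update_lock);
	change_clocksource(clock);
	pthread_mutex_unlock(&update_lock);
}

/* Hot path: accumulate time only; no "is there a new clocksource?" check. */
static void update_wall_time(void)
{
	pthread_mutex_lock(&update_lock);
	printf("tick accounted with %s\n", curr_clocksource->name);
	pthread_mutex_unlock(&update_lock);
}

int main(void)
{
	struct clocksource jiffies = { "jiffies" };
	struct clocksource tsc = { "tsc" };

	curr_clocksource = &jiffies;
	update_wall_time();
	timekeeping_notify(&tsc);	/* a better clocksource was registered */
	update_wall_time();
	return 0;
}

The point of the design change is visible in the hot path: the per-tick function no longer checks for a pending clocksource at all; the rare switch is driven from the registration/selection side.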
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index e34015effeb6..9ea40ff26f0e 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -291,4 +291,6 @@ static inline void update_vsyscall_tz(void)
 }
 #endif
 
+extern void timekeeping_notify(struct clocksource *clock);
+
 #endif /* _LINUX_CLOCKSOURCE_H */
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index f18c9a6bdcf4..a1657b5fdeb9 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -109,35 +109,17 @@ EXPORT_SYMBOL(timecounter_cyc2time);
 /*[Clocksource internal variables]---------
  * curr_clocksource:
  *	currently selected clocksource.
- * next_clocksource:
- *	pending next selected clocksource.
  * clocksource_list:
  *	linked list with the registered clocksources
- * clocksource_lock:
- *	protects manipulations to curr_clocksource and next_clocksource
- *	and the clocksource_list
+ * clocksource_mutex:
+ *	protects manipulations to curr_clocksource and the clocksource_list
  * override_name:
  *	Name of the user-specified clocksource.
  */
 static struct clocksource *curr_clocksource;
-static struct clocksource *next_clocksource;
 static LIST_HEAD(clocksource_list);
-static DEFINE_SPINLOCK(clocksource_lock);
+static DEFINE_MUTEX(clocksource_mutex);
 static char override_name[32];
-static int finished_booting;
-
-/* clocksource_done_booting - Called near the end of core bootup
- *
- * Hack to avoid lots of clocksource churn at boot time.
- * We use fs_initcall because we want this to start before
- * device_initcall but after subsys_initcall.
- */
-static int __init clocksource_done_booting(void)
-{
-	finished_booting = 1;
-	return 0;
-}
-fs_initcall(clocksource_done_booting);
 
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
 static LIST_HEAD(watchdog_list);
@@ -356,18 +338,16 @@ static inline void clocksource_resume_watchdog(void) { }
 void clocksource_resume(void)
 {
 	struct clocksource *cs;
-	unsigned long flags;
 
-	spin_lock_irqsave(&clocksource_lock, flags);
+	mutex_lock(&clocksource_mutex);
 
-	list_for_each_entry(cs, &clocksource_list, list) {
+	list_for_each_entry(cs, &clocksource_list, list)
 		if (cs->resume)
 			cs->resume();
-	}
 
 	clocksource_resume_watchdog();
 
-	spin_unlock_irqrestore(&clocksource_lock, flags);
+	mutex_unlock(&clocksource_mutex);
 }
 
 /**
@@ -383,28 +363,13 @@ void clocksource_touch_watchdog(void)
 }
 
 #ifdef CONFIG_GENERIC_TIME
-/**
- * clocksource_get_next - Returns the selected clocksource
- *
- */
-struct clocksource *clocksource_get_next(void)
-{
-	unsigned long flags;
 
-	spin_lock_irqsave(&clocksource_lock, flags);
-	if (next_clocksource && finished_booting) {
-		curr_clocksource = next_clocksource;
-		next_clocksource = NULL;
-	}
-	spin_unlock_irqrestore(&clocksource_lock, flags);
-
-	return curr_clocksource;
-}
+static int finished_booting;
 
 /**
  * clocksource_select - Select the best clocksource available
  *
- * Private function. Must hold clocksource_lock when called.
+ * Private function. Must hold clocksource_mutex when called.
  *
  * Select the clocksource with the best rating, or the clocksource,
  * which is selected by userspace override.
@@ -413,7 +378,7 @@ static void clocksource_select(void)
 {
 	struct clocksource *best, *cs;
 
-	if (list_empty(&clocksource_list))
+	if (!finished_booting || list_empty(&clocksource_list))
 		return;
 	/* First clocksource on the list has the best rating. */
 	best = list_first_entry(&clocksource_list, struct clocksource, list);
@@ -438,13 +403,31 @@ static void clocksource_select(void)
 			best = cs;
 			break;
 		}
-	if (curr_clocksource != best)
-		next_clocksource = best;
+	if (curr_clocksource != best) {
+		printk(KERN_INFO "Switching to clocksource %s\n", best->name);
+		curr_clocksource = best;
+		timekeeping_notify(curr_clocksource);
+	}
 }
 
+/*
+ * clocksource_done_booting - Called near the end of core bootup
+ *
+ * Hack to avoid lots of clocksource churn at boot time.
+ * We use fs_initcall because we want this to start before
+ * device_initcall but after subsys_initcall.
+ */
+static int __init clocksource_done_booting(void)
+{
+	finished_booting = 1;
+	clocksource_select();
+	return 0;
+}
+fs_initcall(clocksource_done_booting);
+
 #else /* CONFIG_GENERIC_TIME */
 
-static void clocksource_select(void) { }
+static inline void clocksource_select(void) { }
 
 #endif
 
@@ -471,13 +454,11 @@ static void clocksource_enqueue(struct clocksource *cs)
  */
 int clocksource_register(struct clocksource *cs)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&clocksource_lock, flags);
+	mutex_lock(&clocksource_mutex);
 	clocksource_enqueue(cs);
 	clocksource_select();
-	spin_unlock_irqrestore(&clocksource_lock, flags);
 	clocksource_enqueue_watchdog(cs);
+	mutex_unlock(&clocksource_mutex);
 	return 0;
 }
 EXPORT_SYMBOL(clocksource_register);
@@ -487,14 +468,12 @@ EXPORT_SYMBOL(clocksource_register);
  */
 void clocksource_change_rating(struct clocksource *cs, int rating)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&clocksource_lock, flags);
+	mutex_lock(&clocksource_mutex);
 	list_del(&cs->list);
 	cs->rating = rating;
 	clocksource_enqueue(cs);
 	clocksource_select();
-	spin_unlock_irqrestore(&clocksource_lock, flags);
+	mutex_unlock(&clocksource_mutex);
 }
 EXPORT_SYMBOL(clocksource_change_rating);
 
@@ -503,13 +482,11 @@ EXPORT_SYMBOL(clocksource_change_rating);
  */
 void clocksource_unregister(struct clocksource *cs)
 {
-	unsigned long flags;
-
+	mutex_lock(&clocksource_mutex);
 	clocksource_dequeue_watchdog(cs);
-	spin_lock_irqsave(&clocksource_lock, flags);
 	list_del(&cs->list);
 	clocksource_select();
-	spin_unlock_irqrestore(&clocksource_lock, flags);
+	mutex_unlock(&clocksource_mutex);
 }
 EXPORT_SYMBOL(clocksource_unregister);
 
@@ -527,9 +504,9 @@ sysfs_show_current_clocksources(struct sys_device *dev,
 {
 	ssize_t count = 0;
 
-	spin_lock_irq(&clocksource_lock);
+	mutex_lock(&clocksource_mutex);
 	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
-	spin_unlock_irq(&clocksource_lock);
+	mutex_unlock(&clocksource_mutex);
 
 	return count;
 }
@@ -557,14 +534,14 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
 	if (buf[count-1] == '\n')
 		count--;
 
-	spin_lock_irq(&clocksource_lock);
+	mutex_lock(&clocksource_mutex);
 
 	if (count > 0)
 		memcpy(override_name, buf, count);
 	override_name[count] = 0;
 	clocksource_select();
 
-	spin_unlock_irq(&clocksource_lock);
+	mutex_unlock(&clocksource_mutex);
 
 	return ret;
 }
@@ -584,7 +561,7 @@ sysfs_show_available_clocksources(struct sys_device *dev,
 	struct clocksource *src;
 	ssize_t count = 0;
 
-	spin_lock_irq(&clocksource_lock);
+	mutex_lock(&clocksource_mutex);
 	list_for_each_entry(src, &clocksource_list, list) {
 		/*
 		 * Don't show non-HRES clocksource if the tick code is
@@ -596,7 +573,7 @@ sysfs_show_available_clocksources(struct sys_device *dev,
 				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
 				  "%s ", src->name);
 	}
-	spin_unlock_irq(&clocksource_lock);
+	mutex_unlock(&clocksource_mutex);
 
 	count += snprintf(buf + count,
 			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");
@@ -651,11 +628,10 @@ device_initcall(init_clocksource_sysfs);
  */
 static int __init boot_override_clocksource(char* str)
 {
-	unsigned long flags;
-	spin_lock_irqsave(&clocksource_lock, flags);
+	mutex_lock(&clocksource_mutex);
 	if (str)
 		strlcpy(override_name, str, sizeof(override_name));
-	spin_unlock_irqrestore(&clocksource_lock, flags);
+	mutex_unlock(&clocksource_mutex);
 	return 1;
 }
 
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 27ae01b596b7..41579e7fcf9d 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -18,6 +18,7 @@
 #include <linux/jiffies.h>
 #include <linux/time.h>
 #include <linux/tick.h>
+#include <linux/stop_machine.h>
 
 /* Structure holding internal timekeeping values. */
 struct timekeeper {
@@ -179,6 +180,7 @@ void timekeeping_leap_insert(int leapsecond)
 }
 
 #ifdef CONFIG_GENERIC_TIME
+
 /**
  * timekeeping_forward_now - update clock to the current time
  *
@@ -351,31 +353,40 @@ EXPORT_SYMBOL(do_settimeofday);
  *
  * Accumulates current time interval and initializes new clocksource
  */
-static void change_clocksource(void)
+static int change_clocksource(void *data)
 {
 	struct clocksource *new, *old;
 
-	new = clocksource_get_next();
-
-	if (!new || timekeeper.clock == new)
-		return;
+	new = (struct clocksource *) data;
 
 	timekeeping_forward_now();
+	if (!new->enable || new->enable(new) == 0) {
+		old = timekeeper.clock;
+		timekeeper_setup_internals(new);
+		if (old->disable)
+			old->disable(old);
+	}
+	return 0;
+}
 
-	if (new->enable && !new->enable(new))
+/**
+ * timekeeping_notify - Install a new clock source
+ * @clock: pointer to the clock source
+ *
+ * This function is called from clocksource.c after a new, better clock
+ * source has been registered. The caller holds the clocksource_mutex.
+ */
+void timekeeping_notify(struct clocksource *clock)
+{
+	if (timekeeper.clock == clock)
 		return;
-
-	old = timekeeper.clock;
-	timekeeper_setup_internals(new);
-
-	if (old->disable)
-		old->disable(old);
-
+	stop_machine(change_clocksource, clock, NULL);
 	tick_clock_notify();
 }
+
 #else /* GENERIC_TIME */
+
 static inline void timekeeping_forward_now(void) { }
-static inline void change_clocksource(void) { }
 
 /**
  * ktime_get - get the monotonic time in ktime_t format
@@ -416,6 +427,7 @@ void ktime_get_ts(struct timespec *ts)
 				ts->tv_nsec + tomono.tv_nsec);
 }
 EXPORT_SYMBOL_GPL(ktime_get_ts);
+
 #endif /* !GENERIC_TIME */
 
 /**
@@ -773,7 +785,6 @@ void update_wall_time(void)
 	update_xtime_cache(nsecs);
 
 	/* check to see if there is a new clocksource to use */
-	change_clocksource();
 	update_vsyscall(&xtime, timekeeper.clock);
 }
 