author		Thomas Gleixner <tglx@linutronix.de>	2011-05-18 17:33:40 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2011-05-19 08:24:15 -0400
commit		724ed53e8ac2c5278af8955673049714c1073464 (patch)
tree		51762a734e564fb973758014997ee99406693541 /kernel/time
parent		369db4c9524b7487faf1ff89646eee396c1363e1 (diff)
clocksource: Get rid of the hardcoded 5 seconds sleep time limit
Slow clocksources can have a way longer sleep time than 5 seconds and
even fast ones can easily cope with 600 seconds and still maintain
proper accuracy.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: John Stultz <john.stultz@linaro.org>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
Link: http://lkml.kernel.org/r/%3C20110518210136.109811585%40linutronix.de%3E
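For illustration, the wrap-time calculation this patch introduces can be sketched in plain userspace C. This is a sketch only: ordinary 64-bit division stands in for the kernel's do_div(), the helper name max_seconds() is made up for the example, and the mask/frequency values below are hypothetical.

/* Userspace sketch of the calculation added by this patch. */
#include <stdint.h>
#include <stdio.h>

static uint64_t max_seconds(uint64_t mask, uint32_t freq, uint32_t scale)
{
	/* Leave headroom below the wrap point (mask >> 5), then
	 * convert the remaining counter range to seconds.
	 */
	uint64_t sec = mask - (mask >> 5);

	sec /= freq;
	sec /= scale;

	if (!sec)
		sec = 1;	/* very fast and/or narrow counter */
	else if (sec > 600 && mask > UINT32_MAX)
		sec = 600;	/* cap wide counters at 10 minutes */
	return sec;
}

int main(void)
{
	/* 32-bit counter at 1 MHz: the cap does not apply (mask is not
	 * wider than 32 bit), so the full wrap-limited value is used.
	 */
	printf("%llu\n", (unsigned long long)max_seconds(0xffffffffULL, 1000000, 1));
	/* 64-bit counter at 4 GHz: raw limit is billions of seconds,
	 * capped to 600 for conversion precision.
	 */
	printf("%llu\n", (unsigned long long)max_seconds(~0ULL, 4000000000U, 1));
	return 0;
}

The first case prints 4160, the second 600: slow or narrow clocksources get the longest sleep time their counter allows, while wide, fast ones are bounded at 10 minutes as the new comment describes.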
Diffstat (limited to 'kernel/time')
-rw-r--r--	kernel/time/clocksource.c	38
1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 6519cf62d9cd..6dbbbb1ae6ba 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -626,19 +626,6 @@ static void clocksource_enqueue(struct clocksource *cs)
 	list_add(&cs->list, entry);
 }
 
-
-/*
- * Maximum time we expect to go between ticks. This includes idle
- * tickless time. It provides the trade off between selecting a
- * mult/shift pair that is very precise but can only handle a short
- * period of time, vs. a mult/shift pair that can handle long periods
- * of time but isn't as precise.
- *
- * This is a subsystem constant, and actual hardware limitations
- * may override it (ie: clocksources that wrap every 3 seconds).
- */
-#define MAX_UPDATE_LENGTH 5 /* Seconds */
-
 /**
  * __clocksource_updatefreq_scale - Used update clocksource with new freq
  * @t:		clocksource to be registered
@@ -652,15 +639,28 @@ static void clocksource_enqueue(struct clocksource *cs)
  */
 void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
 {
+	unsigned long sec;
+
 	/*
-	 * Ideally we want to use some of the limits used in
-	 * clocksource_max_deferment, to provide a more informed
-	 * MAX_UPDATE_LENGTH. But for now this just gets the
-	 * register interface working properly.
+	 * Calc the maximum number of seconds which we can run before
+	 * wrapping around. For clocksources which have a mask > 32bit
+	 * we need to limit the max sleep time to have a good
+	 * conversion precision. 10 minutes is still a reasonable
+	 * amount. That results in a shift value of 24 for a
+	 * clocksource with mask >= 40bit and f >= 4GHz. That maps to
+	 * ~ 0.06ppm granularity for NTP. We apply the same 12.5%
+	 * margin as we do in clocksource_max_deferment()
 	 */
+	sec = (cs->mask - (cs->mask >> 5));
+	do_div(sec, freq);
+	do_div(sec, scale);
+	if (!sec)
+		sec = 1;
+	else if (sec > 600 && cs->mask > UINT_MAX)
+		sec = 600;
+
 	clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
-			       NSEC_PER_SEC/scale,
-			       MAX_UPDATE_LENGTH*scale);
+			       NSEC_PER_SEC / scale, sec * scale);
 	cs->max_idle_ns = clocksource_max_deferment(cs);
 }
 EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
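For context, the mult/shift pair computed above feeds the standard clocksource conversion ns = (cycles * mult) >> shift. Below is a sketch under that assumption, using the shift value of 24 the new comment cites; the 4 GHz counter and the derived mult are hypothetical example values, not taken from real hardware.

/* Sketch of the cycles-to-nanoseconds conversion the mult/shift pair
 * is computed for: ns = (cycles * mult) >> shift.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;
}

int main(void)
{
	/* Hypothetical 4 GHz counter with shift = 24:
	 * mult = (10^9 << 24) / (4 * 10^9) = 2^22 = 4194304.
	 */
	uint32_t mult = 4194304, shift = 24;

	/* One second's worth of cycles should convert to 10^9 ns. */
	printf("%llu ns\n", (unsigned long long)cyc2ns(4000000000ULL, mult, shift));
	return 0;
}

With mult = 2^22, the 64-bit intermediate product cycles * mult wraps after 2^42 cycles, roughly 1100 seconds at 4 GHz, so the 600 second cap leaves headroom; a larger maxsec would force a smaller shift and hence a coarser conversion granularity, which is the precision trade-off the new comment describes.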