path: root/kernel/time/clocksource.c
Diffstat (limited to 'kernel/time/clocksource.c')
-rw-r--r--	kernel/time/clocksource.c	77
1 file changed, 40 insertions(+), 37 deletions(-)
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index c18d7efa1b4b..e0980f0d9a0a 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -113,7 +113,7 @@ EXPORT_SYMBOL_GPL(timecounter_cyc2time);
  * @shift:	pointer to shift variable
  * @from:	frequency to convert from
  * @to:		frequency to convert to
- * @minsec:	guaranteed runtime conversion range in seconds
+ * @maxsec:	guaranteed runtime conversion range in seconds
  *
  * The function evaluates the shift/mult pair for the scaled math
  * operations of clocksources and clockevents.
@@ -122,7 +122,7 @@ EXPORT_SYMBOL_GPL(timecounter_cyc2time);
  * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
  * event @to is the counter frequency and @from is NSEC_PER_SEC.
  *
- * The @minsec conversion range argument controls the time frame in
+ * The @maxsec conversion range argument controls the time frame in
  * seconds which must be covered by the runtime conversion with the
  * calculated mult and shift factors. This guarantees that no 64bit
  * overflow happens when the input value of the conversion is
@@ -131,7 +131,7 @@ EXPORT_SYMBOL_GPL(timecounter_cyc2time);
  * factors.
  */
 void
-clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec)
+clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
 {
 	u64 tmp;
 	u32 sft, sftacc= 32;
@@ -140,7 +140,7 @@ clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec)
 	 * Calculate the shift factor which is limiting the conversion
 	 * range:
 	 */
-	tmp = ((u64)minsec * from) >> 32;
+	tmp = ((u64)maxsec * from) >> 32;
 	while (tmp) {
 		tmp >>=1;
 		sftacc--;
@@ -152,6 +152,7 @@ clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec)
 	 */
 	for (sft = 32; sft > 0; sft--) {
 		tmp = (u64) to << sft;
+		tmp += from / 2;
 		do_div(tmp, from);
 		if ((tmp >> sftacc) == 0)
 			break;
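For illustration, here is a minimal userspace sketch of the selection logic above, including the rounding step (tmp += from / 2) this hunk adds. The 24 MHz counter frequency and 600 second range are made-up example values, calc_mult_shift() is a local re-implementation rather than the kernel symbol, and the kernel's do_div() is replaced by a plain 64-bit division:

/* Userspace sketch of the mult/shift selection; all values are illustrative. */
#include <stdint.h>
#include <stdio.h>

static void calc_mult_shift(uint32_t *mult, uint32_t *shift,
			    uint32_t from, uint32_t to, uint32_t maxsec)
{
	uint64_t tmp;
	uint32_t sft, sftacc = 32;

	/* Shrink the accumulation width so maxsec * from * mult stays within 64 bits. */
	tmp = ((uint64_t)maxsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/* Pick the largest shift whose rounded mult still fits into sftacc bits. */
	for (sft = 32; sft > 0; sft--) {
		tmp = (uint64_t)to << sft;
		tmp += from / 2;	/* round to nearest, as the hunk adds */
		tmp /= from;		/* stands in for do_div(tmp, from) */
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}

int main(void)
{
	uint32_t mult, shift;

	/* Hypothetical 24 MHz counter converted to nanoseconds, 600 s guaranteed range. */
	calc_mult_shift(&mult, &shift, 24000000, 1000000000, 600);
	printf("mult=%u shift=%u\n", mult, shift);

	/* One second worth of cycles should convert to ~1e9 ns. */
	printf("1 s -> %llu ns\n",
	       (unsigned long long)(((uint64_t)24000000 * mult) >> shift));
	return 0;
}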
@@ -184,7 +185,6 @@ static struct clocksource *watchdog;
 static struct timer_list watchdog_timer;
 static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
 static DEFINE_SPINLOCK(watchdog_lock);
-static cycle_t watchdog_last;
 static int watchdog_running;
 
 static int clocksource_watchdog_kthread(void *data);
@@ -253,11 +253,6 @@ static void clocksource_watchdog(unsigned long data)
 	if (!watchdog_running)
 		goto out;
 
-	wdnow = watchdog->read(watchdog);
-	wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask,
-				     watchdog->mult, watchdog->shift);
-	watchdog_last = wdnow;
-
 	list_for_each_entry(cs, &watchdog_list, wd_list) {
 
 		/* Clocksource already marked unstable? */
@@ -267,19 +262,28 @@ static void clocksource_watchdog(unsigned long data)
 			continue;
 		}
 
+		local_irq_disable();
 		csnow = cs->read(cs);
+		wdnow = watchdog->read(watchdog);
+		local_irq_enable();
 
 		/* Clocksource initialized ? */
 		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
 			cs->flags |= CLOCK_SOURCE_WATCHDOG;
-			cs->wd_last = csnow;
+			cs->wd_last = wdnow;
+			cs->cs_last = csnow;
 			continue;
 		}
 
-		/* Check the deviation from the watchdog clocksource. */
-		cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) &
+		wd_nsec = clocksource_cyc2ns((wdnow - cs->wd_last) & watchdog->mask,
+					     watchdog->mult, watchdog->shift);
+
+		cs_nsec = clocksource_cyc2ns((csnow - cs->cs_last) &
 					     cs->mask, cs->mult, cs->shift);
-		cs->wd_last = csnow;
+		cs->cs_last = csnow;
+		cs->wd_last = wdnow;
+
+		/* Check the deviation from the watchdog clocksource. */
 		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
 			clocksource_unstable(cs, cs_nsec - wd_nsec);
 			continue;
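For reference, a rough userspace sketch of the per-clocksource bookkeeping the hunk above introduces: every clocksource now keeps its own wd_last/cs_last pair, and both masked cycle deltas are turned into nanoseconds before being compared. All masks, mult/shift pairs and counter readouts below are hypothetical, chosen only to show how masking copes with a wrap of the narrower watchdog counter:

/* Userspace sketch of the per-clocksource comparison; values are illustrative. */
#include <stdint.h>
#include <stdio.h>

/* Same scaled conversion that clocksource_cyc2ns() performs. */
static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;
}

int main(void)
{
	/* Hypothetical watchdog: 32-bit counter at 24 MHz (a 24 MHz-to-ns mult/shift pair). */
	uint64_t wd_mask = 0xffffffffULL;
	uint32_t wd_mult = 699050667, wd_shift = 24;

	/* Hypothetical clocksource under test: 56-bit counter at 100 MHz (10 ns per cycle). */
	uint64_t cs_mask = (1ULL << 56) - 1;
	uint32_t cs_mult = 167772160, cs_shift = 24;

	/* Readouts roughly 192 us apart; the 32-bit watchdog counter wrapped in between. */
	uint64_t wd_last = 0xfffff000, wdnow = 0x00000200;
	uint64_t cs_last = 100, csnow = 100 + 19203;

	/* Masking the difference makes the wrap harmless. */
	uint64_t wd_nsec = cyc2ns((wdnow - wd_last) & wd_mask, wd_mult, wd_shift);
	uint64_t cs_nsec = cyc2ns((csnow - cs_last) & cs_mask, cs_mult, cs_shift);

	/* The kernel compares this deviation against WATCHDOG_THRESHOLD. */
	printf("wd=%llu ns, cs=%llu ns, deviation=%lld ns\n",
	       (unsigned long long)wd_nsec, (unsigned long long)cs_nsec,
	       (long long)(cs_nsec - wd_nsec));
	return 0;
}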
@@ -317,7 +321,6 @@ static inline void clocksource_start_watchdog(void)
 		return;
 	init_timer(&watchdog_timer);
 	watchdog_timer.function = clocksource_watchdog;
-	watchdog_last = watchdog->read(watchdog);
 	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
 	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
 	watchdog_running = 1;
@@ -625,19 +628,6 @@ static void clocksource_enqueue(struct clocksource *cs)
 	list_add(&cs->list, entry);
 }
 
-
-/*
- * Maximum time we expect to go between ticks. This includes idle
- * tickless time. It provides the trade off between selecting a
- * mult/shift pair that is very precise but can only handle a short
- * period of time, vs. a mult/shift pair that can handle long periods
- * of time but isn't as precise.
- *
- * This is a subsystem constant, and actual hardware limitations
- * may override it (ie: clocksources that wrap every 3 seconds).
- */
-#define MAX_UPDATE_LENGTH 5 /* Seconds */
-
 /**
  * __clocksource_updatefreq_scale - Used update clocksource with new freq
  * @t:		clocksource to be registered
@@ -651,15 +641,28 @@ static void clocksource_enqueue(struct clocksource *cs)
  */
 void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
 {
+	u64 sec;
+
 	/*
-	 * Ideally we want to use some of the limits used in
-	 * clocksource_max_deferment, to provide a more informed
-	 * MAX_UPDATE_LENGTH. But for now this just gets the
-	 * register interface working properly.
+	 * Calc the maximum number of seconds which we can run before
+	 * wrapping around. For clocksources which have a mask > 32bit
+	 * we need to limit the max sleep time to have a good
+	 * conversion precision. 10 minutes is still a reasonable
+	 * amount. That results in a shift value of 24 for a
+	 * clocksource with mask >= 40bit and f >= 4GHz. That maps to
+	 * ~ 0.06ppm granularity for NTP. We apply the same 12.5%
+	 * margin as we do in clocksource_max_deferment()
 	 */
+	sec = (cs->mask - (cs->mask >> 5));
+	do_div(sec, freq);
+	do_div(sec, scale);
+	if (!sec)
+		sec = 1;
+	else if (sec > 600 && cs->mask > UINT_MAX)
+		sec = 600;
+
 	clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
-			       NSEC_PER_SEC/scale,
-			       MAX_UPDATE_LENGTH*scale);
+			       NSEC_PER_SEC / scale, sec * scale);
 	cs->max_idle_ns = clocksource_max_deferment(cs);
 }
 EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
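As a rough illustration of the new clamp, the sketch below reproduces the max-seconds computation in userspace for two hypothetical counters (a 32-bit one at 24 MHz and a 56-bit one at 4 GHz, neither taken from this patch); max_seconds() is a made-up helper name and do_div() is again replaced by plain division:

/* Userspace sketch of the max-seconds clamp; hardware values are illustrative. */
#include <stdint.h>
#include <stdio.h>

static uint64_t max_seconds(uint64_t mask, uint32_t freq, uint32_t scale)
{
	uint64_t sec = mask - (mask >> 5);	/* same safety margin as the patch */

	sec /= freq;				/* counter range in seconds at 'freq' Hz */
	sec /= scale;
	if (!sec)
		sec = 1;			/* very fast/narrow counters: at least 1 s */
	else if (sec > 600 && mask > UINT32_MAX)
		sec = 600;			/* wide counters: cap to keep precision */
	return sec;
}

int main(void)
{
	/* Hypothetical 32-bit counter at 24 MHz: limited by its wrap time (~173 s). */
	printf("32 bit @ 24 MHz -> %llu s\n",
	       (unsigned long long)max_seconds(0xffffffffULL, 24000000, 1));

	/* Hypothetical 56-bit counter at 4 GHz: clamped to 600 s. */
	printf("56 bit @ 4 GHz  -> %llu s\n",
	       (unsigned long long)max_seconds((1ULL << 56) - 1, 4000000000u, 1));
	return 0;
}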
@@ -678,14 +681,14 @@ EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
 int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
 {
 
-	/* Intialize mult/shift and max_idle_ns */
+	/* Initialize mult/shift and max_idle_ns */
 	__clocksource_updatefreq_scale(cs, scale, freq);
 
 	/* Add clocksource to the clcoksource list */
 	mutex_lock(&clocksource_mutex);
 	clocksource_enqueue(cs);
-	clocksource_select();
 	clocksource_enqueue_watchdog(cs);
+	clocksource_select();
 	mutex_unlock(&clocksource_mutex);
 	return 0;
 }
@@ -705,8 +708,8 @@ int clocksource_register(struct clocksource *cs)
 
 	mutex_lock(&clocksource_mutex);
 	clocksource_enqueue(cs);
-	clocksource_select();
 	clocksource_enqueue_watchdog(cs);
+	clocksource_select();
 	mutex_unlock(&clocksource_mutex);
 	return 0;
 }