Diffstat (limited to 'kernel/time/clocksource.c')
-rw-r--r--    kernel/time/clocksource.c    123
1 file changed, 116 insertions(+), 7 deletions(-)
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 5e18c6ab2c6a..13700833c181 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -39,7 +39,7 @@ void timecounter_init(struct timecounter *tc,
 	tc->cycle_last = cc->read(cc);
 	tc->nsec = start_tstamp;
 }
-EXPORT_SYMBOL(timecounter_init);
+EXPORT_SYMBOL_GPL(timecounter_init);
 
 /**
  * timecounter_read_delta - get nanoseconds since last call of this function
@@ -83,7 +83,7 @@ u64 timecounter_read(struct timecounter *tc)
 
 	return nsec;
 }
-EXPORT_SYMBOL(timecounter_read);
+EXPORT_SYMBOL_GPL(timecounter_read);
 
 u64 timecounter_cyc2time(struct timecounter *tc,
 			 cycle_t cycle_tstamp)
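The timecounter exports above are tightened from EXPORT_SYMBOL to EXPORT_SYMBOL_GPL, so only modules that declare a GPL-compatible license can still resolve them. A minimal sketch of such a consumer, assuming a hypothetical module with a jiffies-backed stand-in cyclecounter (all demo_* names are illustrative, not part of this patch):

#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/module.h>

/* Stand-in counter: jiffies with a 1:1 cycle-to-ns mapping, purely illustrative */
static cycle_t demo_read(const struct cyclecounter *cc)
{
	return (cycle_t)jiffies;
}

static struct cyclecounter demo_cc = {
	.read	= demo_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.mult	= 1,
	.shift	= 0,
};

static struct timecounter demo_tc;

static int __init demo_init(void)
{
	timecounter_init(&demo_tc, &demo_cc, 0);
	pr_info("timecounter starts at %llu ns\n",
		(unsigned long long)timecounter_read(&demo_tc));
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
}
module_exit(demo_exit);

/* Without a GPL-compatible license this module would fail to load */
MODULE_LICENSE("GPL");

A module lacking a GPL-compatible MODULE_LICENSE would now fail at load time with unresolved timecounter_* symbols.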
@@ -105,7 +105,60 @@ u64 timecounter_cyc2time(struct timecounter *tc,
 
 	return nsec;
 }
-EXPORT_SYMBOL(timecounter_cyc2time);
+EXPORT_SYMBOL_GPL(timecounter_cyc2time);
+
+/**
+ * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
+ * @mult:	pointer to mult variable
+ * @shift:	pointer to shift variable
+ * @from:	frequency to convert from
+ * @to:		frequency to convert to
+ * @minsec:	guaranteed runtime conversion range in seconds
+ *
+ * The function evaluates the shift/mult pair for the scaled math
+ * operations of clocksources and clockevents.
+ *
+ * @to and @from are frequency values in HZ. For clock sources @to is
+ * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
+ * events @to is the counter frequency and @from is NSEC_PER_SEC.
+ *
+ * The @minsec conversion range argument controls the time frame in
+ * seconds which must be covered by the runtime conversion with the
+ * calculated mult and shift factors. This guarantees that no 64bit
+ * overflow happens when the input value of the conversion is
+ * multiplied with the calculated mult factor. Larger ranges may
+ * reduce the conversion accuracy by choosing smaller mult and shift
+ * factors.
+ */
+void
+clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec)
+{
+	u64 tmp;
+	u32 sft, sftacc = 32;
+
+	/*
+	 * Calculate the shift factor which is limiting the conversion
+	 * range:
+	 */
+	tmp = ((u64)minsec * from) >> 32;
+	while (tmp) {
+		tmp >>= 1;
+		sftacc--;
+	}
+
+	/*
+	 * Find the conversion shift/mult pair which has the best
+	 * accuracy and fits the @minsec conversion range:
+	 */
+	for (sft = 32; sft > 0; sft--) {
+		tmp = (u64)to << sft;
+		do_div(tmp, from);
+		if ((tmp >> sftacc) == 0)
+			break;
+	}
+	*mult = tmp;
+	*shift = sft;
+}
 
 /*[Clocksource internal variables]---------
  * curr_clocksource:
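To see what clocks_calc_mult_shift actually produces, here is a standalone userspace re-sketch of the same search, with do_div replaced by plain 64-bit division; the 19.2 MHz counter frequency and 600 second range are made-up example inputs, not values from this patch:

#include <stdio.h>
#include <stdint.h>

static void calc_mult_shift(uint32_t *mult, uint32_t *shift,
			    uint32_t from, uint32_t to, uint32_t minsec)
{
	uint64_t tmp;
	uint32_t sft, sftacc = 32;

	/* Limit the result so minsec worth of cycles times mult fits in 64 bits */
	tmp = ((uint64_t)minsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/* Largest shift (i.e. best accuracy) whose mult stays within range */
	for (sft = 32; sft > 0; sft--) {
		tmp = ((uint64_t)to << sft) / from;
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}

int main(void)
{
	uint32_t mult, shift;

	/* 19.2 MHz counter -> nanoseconds, valid for at least 600 seconds */
	calc_mult_shift(&mult, &shift, 19200000, 1000000000, 600);

	/* Runtime conversion is then just: ns = (cycles * mult) >> shift */
	printf("mult=%u shift=%u, 19200 cycles = %llu ns\n", mult, shift,
	       (unsigned long long)(((uint64_t)19200 * mult) >> shift));
	return 0;
}

For these inputs the search settles on shift=24 and mult=873813333, and 19200 cycles (one millisecond at 19.2 MHz) converts to 999999 ns, an error of about one part per million.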
@@ -290,7 +343,19 @@ static void clocksource_resume_watchdog(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&watchdog_lock, flags);
+	/*
+	 * We use trylock here to avoid a potential deadlock when kgdb
+	 * calls this code after the kernel has been stopped with
+	 * watchdog_lock held. When watchdog_lock is held we just
+	 * return and accept that the watchdog might trigger and mark
+	 * the monitored clock source (usually the TSC) unstable.
+	 *
+	 * This does not affect the other caller, clocksource_resume(),
+	 * because at that point the kernel is UP, interrupts are
+	 * disabled and nothing can hold watchdog_lock.
+	 */
+	if (!spin_trylock_irqsave(&watchdog_lock, flags))
+		return;
 	clocksource_reset_watchdog();
 	spin_unlock_irqrestore(&watchdog_lock, flags);
 }
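The same bail-out-rather-than-block pattern, re-sketched with pthreads for illustration only (kgdb stopping all CPUs with the lock held has no exact userspace analogue):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t watchdog_lock = PTHREAD_MUTEX_INITIALIZER;

static void resume_watchdog(void)
{
	/*
	 * If the lock is already held, e.g. because we interrupted the
	 * holder, blocking here would deadlock. Give up instead and
	 * accept the lesser evil of a possible spurious watchdog trip,
	 * exactly as the kernel code above does.
	 */
	if (pthread_mutex_trylock(&watchdog_lock) != 0) {
		fprintf(stderr, "watchdog_lock busy, skipping reset\n");
		return;
	}

	/* ... reset the watchdog state here ... */

	pthread_mutex_unlock(&watchdog_lock);
}

int main(void)
{
	resume_watchdog();	/* uncontended path: lock taken and released */
	return 0;
}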
@@ -405,14 +470,55 @@ void clocksource_resume(void)
  * clocksource_touch_watchdog - Update watchdog
  *
  * Update the watchdog after exception contexts such as kgdb so as not
- * to incorrectly trip the watchdog.
- *
+ * to incorrectly trip the watchdog. This might fail when the kernel
+ * was stopped in code which holds watchdog_lock.
  */
 void clocksource_touch_watchdog(void)
 {
 	clocksource_resume_watchdog();
 }
 
+/**
+ * clocksource_max_deferment - Returns max time the clocksource can be deferred
+ * @cs:		pointer to clocksource
+ */
+static u64 clocksource_max_deferment(struct clocksource *cs)
+{
+	u64 max_nsecs, max_cycles;
+
+	/*
+	 * Calculate the maximum number of cycles that we can pass to the
+	 * cyc2ns function without overflowing a 64-bit signed result. The
+	 * maximum number of cycles is equal to ULLONG_MAX/cs->mult, which
+	 * is equivalent to the below.
+	 * max_cycles < (2^63)/cs->mult
+	 * max_cycles < 2^(log2((2^63)/cs->mult))
+	 * max_cycles < 2^(log2(2^63) - log2(cs->mult))
+	 * max_cycles < 2^(63 - log2(cs->mult))
+	 * max_cycles < 1 << (63 - log2(cs->mult))
+	 * Please note that we add 1 to the result of the log2 to account
+	 * for any rounding errors, ensure the above inequality is satisfied
+	 * and no overflow will occur.
+	 */
+	max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1));
+
+	/*
+	 * The actual maximum number of cycles we can defer the clocksource
+	 * is determined by the minimum of max_cycles and cs->mask.
+	 */
+	max_cycles = min_t(u64, max_cycles, (u64)cs->mask);
+	max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift);
+
+	/*
+	 * To ensure that the clocksource does not wrap whilst we are idle,
+	 * limit the time the clocksource can be deferred by 12.5%. Please
+	 * note a margin of 12.5% is used because this can be computed with
+	 * a shift, versus say 10% which would require division.
+	 */
+	return max_nsecs - (max_nsecs >> 3);
+}
+
 #ifdef CONFIG_GENERIC_TIME
 
 /**
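Plugging concrete numbers into the deferment math makes the margin tangible. A userspace re-computation, reusing the hypothetical mult=873813333/shift=24 pair from the earlier sketch together with a 32-bit counter mask (again made-up values, not from this patch):

#include <stdio.h>
#include <stdint.h>

/* Same scaled conversion the kernel uses: ns = (cycles * mult) >> shift */
static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;
}

static int ilog2_u32(uint32_t v)
{
	int log = -1;

	while (v) {
		v >>= 1;
		log++;
	}
	return log;
}

int main(void)
{
	uint32_t mult = 873813333, shift = 24;
	uint64_t mask = 0xffffffffULL;	/* 32-bit counter */
	uint64_t max_cycles, max_nsecs;

	/* 1 << (63 - (ilog2(mult) + 1)) cycles cannot overflow cycles * mult */
	max_cycles = 1ULL << (63 - (ilog2_u32(mult) + 1));
	if (max_cycles > mask)
		max_cycles = mask;	/* the counter wraps before the math does */
	max_nsecs = cyc2ns(max_cycles, mult, shift);

	/* 12.5% safety margin, computable with a single shift */
	printf("max idle: %llu ns\n",
	       (unsigned long long)(max_nsecs - (max_nsecs >> 3)));
	return 0;
}

For these values the 32-bit mask, not the multiplication, is the binding limit, and the clocksource may be deferred for roughly 196 seconds.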
@@ -511,6 +617,9 @@ static void clocksource_enqueue(struct clocksource *cs)
  */
 int clocksource_register(struct clocksource *cs)
 {
+	/* calculate max idle time permitted for this clocksource */
+	cs->max_idle_ns = clocksource_max_deferment(cs);
+
 	mutex_lock(&clocksource_mutex);
 	clocksource_enqueue(cs);
 	clocksource_select();
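Computing cs->max_idle_ns once here is a deliberate placement: the value depends only on mult, shift and mask, all fixed at registration time, so precomputing it keeps the ilog2 and scaled multiplication out of the far hotter idle-entry path that merely needs to clamp its next event distance to cs->max_idle_ns.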
@@ -580,7 +689,7 @@ sysfs_show_current_clocksources(struct sys_device *dev,
  * @count:	length of buffer
  *
  * Takes input from sysfs interface for manually overriding the default
- * clocksource selction.
+ * clocksource selection.
  */
 static ssize_t sysfs_override_clocksource(struct sys_device *dev,
 					  struct sysdev_attribute *attr,