path: kernel/time/clocksource.c
Diffstat (limited to 'kernel/time/clocksource.c')
 kernel/time/clocksource.c | 141 ++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 133 insertions(+), 8 deletions(-)
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 5e18c6ab2c6a..1f5dde637457 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -39,7 +39,7 @@ void timecounter_init(struct timecounter *tc,
 	tc->cycle_last = cc->read(cc);
 	tc->nsec = start_tstamp;
 }
-EXPORT_SYMBOL(timecounter_init);
+EXPORT_SYMBOL_GPL(timecounter_init);
 
 /**
  * timecounter_read_delta - get nanoseconds since last call of this function
@@ -83,7 +83,7 @@ u64 timecounter_read(struct timecounter *tc)
 
 	return nsec;
 }
-EXPORT_SYMBOL(timecounter_read);
+EXPORT_SYMBOL_GPL(timecounter_read);
 
 u64 timecounter_cyc2time(struct timecounter *tc,
 			 cycle_t cycle_tstamp)
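Reviewer note on the export changes in this and the next hunk: together they cover the whole timecounter API, so non-GPL modules lose access to it as a set. A minimal usage sketch for a hypothetical GPL driver follows; the my_* names, the 48-bit mask and the placeholder scaling are illustrative assumptions, not part of this patch.

#include <linux/clocksource.h>

/* hypothetical free-running hardware counter behind the timecounter */
static cycle_t my_hw_read(const struct cyclecounter *cc)
{
	return 0;	/* read the hardware counter register here */
}

static struct cyclecounter my_cc = {
	.read	= my_hw_read,
	.mask	= CLOCKSOURCE_MASK(48),	/* assumed 48-bit counter */
	.mult	= 1 << 20,		/* placeholder cycle-to-ns scaling */
	.shift	= 20,
};

static struct timecounter my_tc;

static void my_start(u64 start_ns)
{
	/* begin nanosecond bookkeeping from a known timestamp */
	timecounter_init(&my_tc, &my_cc, start_ns);
}

static u64 my_now_ns(void)
{
	/* nanoseconds since my_start(), handling at most one counter wrap */
	return timecounter_read(&my_tc);
}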
@@ -105,7 +105,60 @@ u64 timecounter_cyc2time(struct timecounter *tc,
 
 	return nsec;
 }
-EXPORT_SYMBOL(timecounter_cyc2time);
+EXPORT_SYMBOL_GPL(timecounter_cyc2time);
+
+/**
+ * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
+ * @mult:	pointer to mult variable
+ * @shift:	pointer to shift variable
+ * @from:	frequency to convert from
+ * @to:		frequency to convert to
+ * @minsec:	guaranteed runtime conversion range in seconds
+ *
+ * The function evaluates the shift/mult pair for the scaled math
+ * operations of clocksources and clockevents.
+ *
+ * @to and @from are frequency values in HZ. For clock sources @to is
+ * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
+ * events @to is the counter frequency and @from is NSEC_PER_SEC.
+ *
+ * The @minsec conversion range argument controls the time frame in
+ * seconds which must be covered by the runtime conversion with the
+ * calculated mult and shift factors. This guarantees that no 64bit
+ * overflow happens when the input value of the conversion is
+ * multiplied with the calculated mult factor. Larger ranges may
+ * reduce the conversion accuracy by choosing smaller mult and shift
+ * factors.
+ */
+void
+clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec)
+{
+	u64 tmp;
+	u32 sft, sftacc = 32;
+
+	/*
+	 * Calculate the shift factor which is limiting the conversion
+	 * range:
+	 */
+	tmp = ((u64)minsec * from) >> 32;
+	while (tmp) {
+		tmp >>= 1;
+		sftacc--;
+	}
+
+	/*
+	 * Find the conversion shift/mult pair which has the best
+	 * accuracy and fits the @minsec conversion range:
+	 */
+	for (sft = 32; sft > 0; sft--) {
+		tmp = (u64) to << sft;
+		do_div(tmp, from);
+		if ((tmp >> sftacc) == 0)
+			break;
+	}
+	*mult = tmp;
+	*shift = sft;
+}
 
 /*[Clocksource internal variables]---------
  * curr_clocksource:
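To see what the routine actually produces, here is a standalone userspace rendering with assumed inputs: a 10 MHz counter converted to nanoseconds (@from = 10000000, @to = NSEC_PER_SEC) with a guaranteed range of 600 seconds. The plain division stands in for do_div(); none of this is part of the patch.

#include <stdio.h>
#include <stdint.h>

static void calc_mult_shift(uint32_t *mult, uint32_t *shift,
			    uint32_t from, uint32_t to, uint32_t minsec)
{
	uint64_t tmp;
	uint32_t sft, sftacc = 32;

	/* shift factor limiting the conversion range */
	tmp = ((uint64_t)minsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/* most accurate pair that still fits the range */
	for (sft = 32; sft > 0; sft--) {
		tmp = ((uint64_t)to << sft) / from;
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}

int main(void)
{
	uint32_t mult, shift;

	calc_mult_shift(&mult, &shift, 10000000, 1000000000, 600);
	/* 12345 ticks at 10 MHz are 1234500 ns */
	printf("mult=%u shift=%u -> %llu ns\n", mult, shift,
	       (unsigned long long)((12345ULL * mult) >> shift));
	return 0;
}

With these inputs the loop settles on mult = 1677721600 (100 << 24) and shift = 24, and the runtime conversion (cycles * mult) >> shift maps 12345 ticks to exactly 1234500 ns.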
@@ -290,7 +343,19 @@ static void clocksource_resume_watchdog(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&watchdog_lock, flags);
+	/*
+	 * We use trylock here to avoid a potential deadlock when
+	 * kgdb calls this code after the kernel has been stopped with
+	 * watchdog_lock held. When watchdog_lock is held we just
+	 * return and accept that the watchdog might trigger and mark
+	 * the monitored clock source (usually the TSC) unstable.
+	 *
+	 * This does not affect the other caller, clocksource_resume(),
+	 * because at that point the kernel is UP, interrupts are
+	 * disabled and nothing can hold watchdog_lock.
+	 */
+	if (!spin_trylock_irqsave(&watchdog_lock, flags))
+		return;
 	clocksource_reset_watchdog();
 	spin_unlock_irqrestore(&watchdog_lock, flags);
 }
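The deadlock the comment guards against is easiest to see as an interleaving; the sketch below is an illustrative reconstruction of the call chain, not text from the patch.

/*
 * CPU0 (holds the lock)                 CPU1 (runs kgdb)
 * spin_lock_irqsave(&watchdog_lock)     stops all other CPUs
 * <stopped by kgdb here>                ...debug session ends...
 *                                       clocksource_touch_watchdog()
 *                                         clocksource_resume_watchdog()
 *                                           an unconditional lock would
 *                                           spin forever on CPU0's lock;
 *                                           trylock fails and returns
 */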
@@ -388,6 +453,18 @@ static inline int clocksource_watchdog_kthread(void *data) { return 0; }
 #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
 
 /**
+ * clocksource_suspend - suspend the clocksource(s)
+ */
+void clocksource_suspend(void)
+{
+	struct clocksource *cs;
+
+	list_for_each_entry_reverse(cs, &clocksource_list, list)
+		if (cs->suspend)
+			cs->suspend(cs);
+}
+
+/**
  * clocksource_resume - resume the clocksource(s)
  */
 void clocksource_resume(void)
@@ -396,7 +473,7 @@ void clocksource_resume(void)
 
 	list_for_each_entry(cs, &clocksource_list, list)
 		if (cs->resume)
-			cs->resume();
+			cs->resume(cs);
 
 	clocksource_resume_watchdog();
 }
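Both hooks now receive the clocksource pointer, so one handler can serve several instances. A hypothetical driver sketch follows, assuming the matching suspend/resume members were added to struct clocksource elsewhere in this series; all my_* names are illustrative.

#include <linux/clocksource.h>

static cycle_t my_cs_read(struct clocksource *cs)
{
	return 0;	/* read the hardware counter here */
}

static void my_cs_suspend(struct clocksource *cs)
{
	/* quiesce the counter hardware backing @cs */
}

static void my_cs_resume(struct clocksource *cs)
{
	/* re-enable the counter hardware backing @cs */
}

static struct clocksource my_cs = {
	.name		= "my_counter",
	.rating		= 200,
	.read		= my_cs_read,
	.suspend	= my_cs_suspend,
	.resume		= my_cs_resume,
};

Note that clocksource_suspend() walks the list in reverse, so clocksources are suspended in the opposite order of the one in which clocksource_resume() brings them back.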
@@ -405,14 +482,55 @@ void clocksource_resume(void)
  * clocksource_touch_watchdog - Update watchdog
  *
  * Update the watchdog after exception contexts such as kgdb so as not
- * to incorrectly trip the watchdog.
- *
+ * to incorrectly trip the watchdog. This might fail when the kernel
+ * was stopped in code which holds watchdog_lock.
  */
 void clocksource_touch_watchdog(void)
 {
 	clocksource_resume_watchdog();
 }
 
+/**
+ * clocksource_max_deferment - Returns max time the clocksource can be deferred
+ * @cs:		Pointer to clocksource
+ *
+ */
+static u64 clocksource_max_deferment(struct clocksource *cs)
+{
+	u64 max_nsecs, max_cycles;
+
+	/*
+	 * Calculate the maximum number of cycles that we can pass to the
+	 * cyc2ns function without overflowing a 64-bit signed result. The
+	 * maximum number of cycles is equal to 2^63/cs->mult, which is
+	 * equivalent to the below.
+	 * max_cycles < (2^63)/cs->mult
+	 * max_cycles < 2^(log2((2^63)/cs->mult))
+	 * max_cycles < 2^(log2(2^63) - log2(cs->mult))
+	 * max_cycles < 2^(63 - log2(cs->mult))
+	 * max_cycles < 1 << (63 - log2(cs->mult))
+	 * Please note that we add 1 to the result of the log2 to account for
+	 * any rounding errors, ensure the above inequality is satisfied and
+	 * no overflow will occur.
+	 */
+	max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1));
+
+	/*
+	 * The actual maximum number of cycles we can defer the clocksource is
+	 * determined by the minimum of max_cycles and cs->mask.
+	 */
+	max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
+	max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift);
+
+	/*
+	 * To ensure that the clocksource does not wrap whilst we are idle,
+	 * limit the time the clocksource can be deferred by 12.5%. Please
+	 * note a margin of 12.5% is used because this can be computed with
+	 * a shift, versus say 10% which would require division.
+	 */
+	return max_nsecs - (max_nsecs >> 3);
+}
+
 #ifdef CONFIG_GENERIC_TIME
 
 /**
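A worked example helps check the bounds. Assume a 1 GHz clocksource with shift = 24, hence mult = 2^24, and a 64-bit mask; the standalone sketch below mirrors the arithmetic (all values assumed, not from the patch).

#include <stdio.h>
#include <stdint.h>

/* minimal ilog2 stand-in for the sketch */
static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint32_t mult = 1 << 24, shift = 24;	/* assumed 1 GHz counter */
	uint64_t mask = ~0ULL;			/* assumed 64-bit counter */
	uint64_t max_cycles, max_nsecs;

	max_cycles = 1ULL << (63 - (ilog2_u32(mult) + 1));	/* 2^38 */
	if (max_cycles > mask)
		max_cycles = mask;
	max_nsecs = (max_cycles * mult) >> shift;	/* 2^38 ns, ~274.9 s */

	/* ~240.5 s once the 12.5% wrap margin is applied */
	printf("max_idle_ns = %llu\n",
	       (unsigned long long)(max_nsecs - (max_nsecs >> 3)));
	return 0;
}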
@@ -474,6 +592,10 @@ static inline void clocksource_select(void) { }
  */
 static int __init clocksource_done_booting(void)
 {
+	mutex_lock(&clocksource_mutex);
+	curr_clocksource = clocksource_default_clock();
+	mutex_unlock(&clocksource_mutex);
+
 	finished_booting = 1;
 
 	/*
@@ -511,6 +633,9 @@ static void clocksource_enqueue(struct clocksource *cs)
  */
 int clocksource_register(struct clocksource *cs)
 {
+	/* calculate max idle time permitted for this clocksource */
+	cs->max_idle_ns = clocksource_max_deferment(cs);
+
 	mutex_lock(&clocksource_mutex);
 	clocksource_enqueue(cs);
 	clocksource_select();
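Continuing the hypothetical driver sketch from the resume hunk above: mult, shift and mask must be valid before registration now, because clocksource_register() derives cs->max_idle_ns from them. The frequencies and the 600 s range below are assumptions for illustration.

static int __init my_cs_driver_init(void)
{
	my_cs.mask = CLOCKSOURCE_MASK(32);	/* assumed 32-bit counter */

	/* counter frequency -> nanoseconds, valid for at least 600 s */
	clocks_calc_mult_shift(&my_cs.mult, &my_cs.shift,
			       10000000, NSEC_PER_SEC, 600);

	/* computes my_cs.max_idle_ns, enqueues and reselects */
	return clocksource_register(&my_cs);
}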
@@ -580,7 +705,7 @@ sysfs_show_current_clocksources(struct sys_device *dev,
  * @count: length of buffer
  *
  * Takes input from sysfs interface for manually overriding the default
- * clocksource selction.
+ * clocksource selection.
  */
 static ssize_t sysfs_override_clocksource(struct sys_device *dev,
 					  struct sysdev_attribute *attr,