aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJohn Stultz <john.stultz@linaro.org>2015-03-12 00:16:30 -0400
committerIngo Molnar <mingo@kernel.org>2015-03-12 05:16:38 -0400
commit362fde0410377e468ca00ad363fdf3e3ec42eb6a (patch)
tree220b416eda9d12618ec45394efb44974ec219e8a
parent6086e346fdea1ae64d974c94c1acacc2605567ae (diff)
clocksource: Simplify the logic around clocksource wrapping safety margins
The clocksource logic has a number of places where we try to include a safety margin. Most of these are 12% safety margins, but they are inconsistently applied and sometimes are applied on top of each other. Additionally, in the previous patch, we corrected an issue where we unintentionally in effect created a 50% safety margin, which these 12.5% margins were then added to. So to simplify the logic here, this patch removes the various 12.5% margins, and consolidates adding the margin in one place: clocks_calc_max_nsecs(). Additionally, Linus prefers a 50% safety margin, as it allows bad clock values to be more easily caught. This should really have no net effect, due to the corrected issue earlier which caused greater than 50% margins to be used without issue. Signed-off-by: John Stultz <john.stultz@linaro.org> Acked-by: Stephen Boyd <sboyd@codeaurora.org> (for the sched_clock.c bit) Cc: Dave Jones <davej@codemonkey.org.uk> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Prarit Bhargava <prarit@redhat.com> Cc: Richard Cochran <richardcochran@gmail.com> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/1426133800-29329-3-git-send-email-john.stultz@linaro.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
 kernel/time/clocksource.c | 26 ++++++++++++--------------
 kernel/time/sched_clock.c |  4 ++--
 2 files changed, 14 insertions(+), 16 deletions(-)
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 2148f413256c..ace95763b3a6 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -469,6 +469,9 @@ static u32 clocksource_max_adjustment(struct clocksource *cs)
  * @shift:	cycle to nanosecond divisor (power of two)
  * @maxadj:	maximum adjustment value to mult (~11%)
  * @mask:	bitmask for two's complement subtraction of non 64 bit counters
+ *
+ * NOTE: This function includes a safety margin of 50%, so that bad clock values
+ * can be detected.
  */
 u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask)
 {
@@ -490,11 +493,14 @@ u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask)
 	max_cycles = min(max_cycles, mask);
 	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);
 
+	/* Return 50% of the actual maximum, so we can detect bad values */
+	max_nsecs >>= 1;
+
 	return max_nsecs;
 }
 
 /**
- * clocksource_max_deferment - Returns max time the clocksource can be deferred
+ * clocksource_max_deferment - Returns max time the clocksource should be deferred
  * @cs: Pointer to clocksource
  *
  */
@@ -504,13 +510,7 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
 
 	max_nsecs = clocks_calc_max_nsecs(cs->mult, cs->shift, cs->maxadj,
 					  cs->mask);
-	/*
-	 * To ensure that the clocksource does not wrap whilst we are idle,
-	 * limit the time the clocksource can be deferred by 12.5%. Please
-	 * note a margin of 12.5% is used because this can be computed with
-	 * a shift, versus say 10% which would require division.
-	 */
-	return max_nsecs - (max_nsecs >> 3);
+	return max_nsecs;
 }
 
 #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
@@ -659,10 +659,9 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
 	 * conversion precision. 10 minutes is still a reasonable
 	 * amount. That results in a shift value of 24 for a
 	 * clocksource with mask >= 40bit and f >= 4GHz. That maps to
-	 * ~ 0.06ppm granularity for NTP. We apply the same 12.5%
-	 * margin as we do in clocksource_max_deferment()
+	 * ~ 0.06ppm granularity for NTP.
 	 */
-	sec = (cs->mask - (cs->mask >> 3));
+	sec = cs->mask;
 	do_div(sec, freq);
 	do_div(sec, scale);
 	if (!sec)
@@ -674,9 +673,8 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
 				NSEC_PER_SEC / scale, sec * scale);
 
 	/*
-	 * for clocksources that have large mults, to avoid overflow.
-	 * Since mult may be adjusted by ntp, add an safety extra margin
-	 *
+	 * Ensure clocksources that have large 'mult' values don't overflow
+	 * when adjusted.
 	 */
 	cs->maxadj = clocksource_max_adjustment(cs);
 	while ((cs->mult + cs->maxadj < cs->mult)
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 01d2d15aa662..3b8ae45020c1 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -125,9 +125,9 @@ void __init sched_clock_register(u64 (*read)(void), int bits,
 
 	new_mask = CLOCKSOURCE_MASK(bits);
 
-	/* calculate how many ns until we wrap */
+	/* calculate how many nanosecs until we risk wrapping */
 	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask);
-	new_wrap_kt = ns_to_ktime(wrap - (wrap >> 3));
+	new_wrap_kt = ns_to_ktime(wrap);
 
 	/* update epoch for new counter and update epoch_ns from old counter*/
 	new_epoch = read();