diff options
Diffstat (limited to 'kernel/time/clocksource.c')
-rw-r--r-- | kernel/time/clocksource.c | 62 |
1 files changed, 50 insertions, 12 deletions
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index e0980f0d9a0..8f77da18fef 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
@@ -494,6 +494,22 @@ void clocksource_touch_watchdog(void) | |||
494 | } | 494 | } |
495 | 495 | ||
496 | /** | 496 | /** |
497 | * clocksource_max_adjustment- Returns max adjustment amount | ||
498 | * @cs: Pointer to clocksource | ||
499 | * | ||
500 | */ | ||
501 | static u32 clocksource_max_adjustment(struct clocksource *cs) | ||
502 | { | ||
503 | u64 ret; | ||
504 | /* | ||
505 | * We won't try to correct for more then 11% adjustments (110,000 ppm), | ||
506 | */ | ||
507 | ret = (u64)cs->mult * 11; | ||
508 | do_div(ret,100); | ||
509 | return (u32)ret; | ||
510 | } | ||
511 | |||
512 | /** | ||
497 | * clocksource_max_deferment - Returns max time the clocksource can be deferred | 513 | * clocksource_max_deferment - Returns max time the clocksource can be deferred |
498 | * @cs: Pointer to clocksource | 514 | * @cs: Pointer to clocksource |
499 | * | 515 | * |
@@ -505,25 +521,28 @@ static u64 clocksource_max_deferment(struct clocksource *cs) | |||
505 | /* | 521 | /* |
506 | * Calculate the maximum number of cycles that we can pass to the | 522 | * Calculate the maximum number of cycles that we can pass to the |
507 | * cyc2ns function without overflowing a 64-bit signed result. The | 523 | * cyc2ns function without overflowing a 64-bit signed result. The |
508 | * maximum number of cycles is equal to ULLONG_MAX/cs->mult which | 524 | * maximum number of cycles is equal to ULLONG_MAX/(cs->mult+cs->maxadj) |
509 | * is equivalent to the below. | 525 | * which is equivalent to the below. |
510 | * max_cycles < (2^63)/cs->mult | 526 | * max_cycles < (2^63)/(cs->mult + cs->maxadj) |
511 | * max_cycles < 2^(log2((2^63)/cs->mult)) | 527 | * max_cycles < 2^(log2((2^63)/(cs->mult + cs->maxadj))) |
512 | * max_cycles < 2^(log2(2^63) - log2(cs->mult)) | 528 | * max_cycles < 2^(log2(2^63) - log2(cs->mult + cs->maxadj)) |
513 | * max_cycles < 2^(63 - log2(cs->mult)) | 529 | * max_cycles < 2^(63 - log2(cs->mult + cs->maxadj)) |
514 | * max_cycles < 1 << (63 - log2(cs->mult)) | 530 | * max_cycles < 1 << (63 - log2(cs->mult + cs->maxadj)) |
515 | * Please note that we add 1 to the result of the log2 to account for | 531 | * Please note that we add 1 to the result of the log2 to account for |
516 | * any rounding errors, ensure the above inequality is satisfied and | 532 | * any rounding errors, ensure the above inequality is satisfied and |
517 | * no overflow will occur. | 533 | * no overflow will occur. |
518 | */ | 534 | */ |
519 | max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1)); | 535 | max_cycles = 1ULL << (63 - (ilog2(cs->mult + cs->maxadj) + 1)); |
520 | 536 | ||
521 | /* | 537 | /* |
522 | * The actual maximum number of cycles we can defer the clocksource is | 538 | * The actual maximum number of cycles we can defer the clocksource is |
523 | * determined by the minimum of max_cycles and cs->mask. | 539 | * determined by the minimum of max_cycles and cs->mask. |
540 | * Note: Here we subtract the maxadj to make sure we don't sleep for | ||
541 | * too long if there's a large negative adjustment. | ||
524 | */ | 542 | */ |
525 | max_cycles = min_t(u64, max_cycles, (u64) cs->mask); | 543 | max_cycles = min_t(u64, max_cycles, (u64) cs->mask); |
526 | max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift); | 544 | max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult - cs->maxadj, |
545 | cs->shift); | ||
527 | 546 | ||
528 | /* | 547 | /* |
529 | * To ensure that the clocksource does not wrap whilst we are idle, | 548 | * To ensure that the clocksource does not wrap whilst we are idle, |
@@ -531,7 +550,7 @@ static u64 clocksource_max_deferment(struct clocksource *cs) | |||
531 | * note a margin of 12.5% is used because this can be computed with | 550 | * note a margin of 12.5% is used because this can be computed with |
532 | * a shift, versus say 10% which would require division. | 551 | * a shift, versus say 10% which would require division. |
533 | */ | 552 | */ |
534 | return max_nsecs - (max_nsecs >> 5); | 553 | return max_nsecs - (max_nsecs >> 3); |
535 | } | 554 | } |
536 | 555 | ||
537 | #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET | 556 | #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET |
@@ -642,7 +661,6 @@ static void clocksource_enqueue(struct clocksource *cs) | |||
642 | void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq) | 661 | void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq) |
643 | { | 662 | { |
644 | u64 sec; | 663 | u64 sec; |
645 | |||
646 | /* | 664 | /* |
647 | * Calc the maximum number of seconds which we can run before | 665 | * Calc the maximum number of seconds which we can run before |
648 | * wrapping around. For clocksources which have a mask > 32bit | 666 | * wrapping around. For clocksources which have a mask > 32bit |
@@ -653,7 +671,7 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq) | |||
653 | * ~ 0.06ppm granularity for NTP. We apply the same 12.5% | 671 | * ~ 0.06ppm granularity for NTP. We apply the same 12.5% |
654 | * margin as we do in clocksource_max_deferment() | 672 | * margin as we do in clocksource_max_deferment() |
655 | */ | 673 | */ |
656 | sec = (cs->mask - (cs->mask >> 5)); | 674 | sec = (cs->mask - (cs->mask >> 3)); |
657 | do_div(sec, freq); | 675 | do_div(sec, freq); |
658 | do_div(sec, scale); | 676 | do_div(sec, scale); |
659 | if (!sec) | 677 | if (!sec) |
@@ -663,6 +681,20 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq) | |||
663 | 681 | ||
664 | clocks_calc_mult_shift(&cs->mult, &cs->shift, freq, | 682 | clocks_calc_mult_shift(&cs->mult, &cs->shift, freq, |
665 | NSEC_PER_SEC / scale, sec * scale); | 683 | NSEC_PER_SEC / scale, sec * scale); |
684 | |||
685 | /* | ||
686 | * For clocksources that have large mults, avoid overflow: | ||
687 | * since mult may be adjusted by ntp, add an extra safety margin | ||
688 | * | ||
689 | */ | ||
690 | cs->maxadj = clocksource_max_adjustment(cs); | ||
691 | while ((cs->mult + cs->maxadj < cs->mult) | ||
692 | || (cs->mult - cs->maxadj > cs->mult)) { | ||
693 | cs->mult >>= 1; | ||
694 | cs->shift--; | ||
695 | cs->maxadj = clocksource_max_adjustment(cs); | ||
696 | } | ||
697 | |||
666 | cs->max_idle_ns = clocksource_max_deferment(cs); | 698 | cs->max_idle_ns = clocksource_max_deferment(cs); |
667 | } | 699 | } |
668 | EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale); | 700 | EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale); |
@@ -703,6 +735,12 @@ EXPORT_SYMBOL_GPL(__clocksource_register_scale); | |||
703 | */ | 735 | */ |
704 | int clocksource_register(struct clocksource *cs) | 736 | int clocksource_register(struct clocksource *cs) |
705 | { | 737 | { |
738 | /* calculate max adjustment for given mult/shift */ | ||
739 | cs->maxadj = clocksource_max_adjustment(cs); | ||
740 | WARN_ONCE(cs->mult + cs->maxadj < cs->mult, | ||
741 | "Clocksource %s might overflow on 11%% adjustment\n", | ||
742 | cs->name); | ||
743 | |||
706 | /* calculate max idle time permitted for this clocksource */ | 744 | /* calculate max idle time permitted for this clocksource */ |
707 | cs->max_idle_ns = clocksource_max_deferment(cs); | 745 | cs->max_idle_ns = clocksource_max_deferment(cs); |
708 | 746 | ||