Diffstat (limited to 'kernel/time/timekeeping.c')
-rw-r--r--  kernel/time/timekeeping.c | 102
1 file changed, 74 insertions(+), 28 deletions(-)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index c3a4e2907eaa..7faaa32fbf4f 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -177,7 +177,7 @@ void timekeeping_leap_insert(int leapsecond)
 {
 	xtime.tv_sec += leapsecond;
 	wall_to_monotonic.tv_sec -= leapsecond;
-	update_vsyscall(&xtime, timekeeper.clock);
+	update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
 }
 
 #ifdef CONFIG_GENERIC_TIME
@@ -337,7 +337,7 @@ int do_settimeofday(struct timespec *tv)
 	timekeeper.ntp_error = 0;
 	ntp_clear();
 
-	update_vsyscall(&xtime, timekeeper.clock);
+	update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
 
 	write_sequnlock_irqrestore(&xtime_lock, flags);
 
@@ -488,6 +488,17 @@ int timekeeping_valid_for_hres(void)
 }
 
 /**
+ * timekeeping_max_deferment - Returns max time the clocksource can be deferred
+ *
+ * Caller must observe xtime_lock via read_seqbegin/read_seqretry to
+ * ensure that the clocksource does not change!
+ */
+u64 timekeeping_max_deferment(void)
+{
+	return timekeeper.clock->max_idle_ns;
+}
+
+/**
  * read_persistent_clock - Return time from the persistent clock.
  *
  * Weak dummy function for arches that do not yet support it.
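timekeeping_max_deferment() is aimed at the NO_HZ idle path, which must not program the next timer event further out than the current clocksource can count without wrapping (clock->max_idle_ns). A hedged sketch of the intended call pattern, following the locking rule in the kerneldoc above; the helper name is made up:

    /* Hypothetical caller; only the seqlock pattern is prescribed above. */
    static u64 bounded_sleep_ns(u64 wanted_ns)
    {
        unsigned long seq;
        u64 max_ns;

        /* Sample under xtime_lock so the clocksource can't change under us */
        do {
            seq = read_seqbegin(&xtime_lock);
            max_ns = timekeeping_max_deferment();
        } while (read_seqretry(&xtime_lock, seq));

        /* Never sleep longer than the clocksource can safely defer */
        return min(wanted_ns, max_ns);
    }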
@@ -722,6 +733,51 @@ static void timekeeping_adjust(s64 offset)
 		timekeeper.ntp_error_shift;
 }
 
+
+/**
+ * logarithmic_accumulation - shifted accumulation of cycles
+ *
+ * This function accumulates a shifted interval of cycles into
+ * a shifted interval of nanoseconds, allowing for an O(log)
+ * accumulation loop.
+ *
+ * Returns the unconsumed cycles.
+ */
+static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
+{
+	u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
+
+	/* If the offset is smaller than a shifted interval, do nothing */
+	if (offset < timekeeper.cycle_interval << shift)
+		return offset;
+
+	/* Accumulate one shifted interval */
+	offset -= timekeeper.cycle_interval << shift;
+	timekeeper.clock->cycle_last += timekeeper.cycle_interval << shift;
+
+	timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
+	while (timekeeper.xtime_nsec >= nsecps) {
+		timekeeper.xtime_nsec -= nsecps;
+		xtime.tv_sec++;
+		second_overflow();
+	}
+
+	/* Accumulate into raw time */
+	raw_time.tv_nsec += timekeeper.raw_interval << shift;
+	while (raw_time.tv_nsec >= NSEC_PER_SEC) {
+		raw_time.tv_nsec -= NSEC_PER_SEC;
+		raw_time.tv_sec++;
+	}
+
+	/* Accumulate error between NTP and clock interval */
+	timekeeper.ntp_error += tick_length << shift;
+	timekeeper.ntp_error -= timekeeper.xtime_interval <<
+				(timekeeper.ntp_error_shift + shift);
+
+	return offset;
+}
+
+
 /**
  * update_wall_time - Uses the current clocksource to increment the wall time
  *
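The chunking strategy is easier to see in isolation. Below is a toy userspace model of the same loop with made-up numbers; it is not kernel code, just the arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    /* Consume one power-of-two multiple of 'interval' from 'offset',
     * mirroring logarithmic_accumulation()'s contract. */
    static uint64_t accumulate(uint64_t offset, uint64_t interval, int shift)
    {
        if (offset < interval << shift)  /* chunk too big, skip this size */
            return offset;
        printf("consumed interval << %d\n", shift);
        return offset - (interval << shift);
    }

    int main(void)
    {
        uint64_t interval = 1000;           /* cycles per tick (made up) */
        uint64_t offset = 1337 * interval;  /* ~1337 ticks of backlog */
        int shift = 10;                     /* 2^10 = 1024 ticks fits */

        while (offset >= interval) {
            offset = accumulate(offset, interval, shift);
            if (shift > 0)
                shift--;
        }
        /* 1337 = 1024 + 256 + 32 + 16 + 8 + 1, so the backlog drains in
         * ~11 passes instead of 1337 single-tick iterations. */
        return 0;
    }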
@@ -732,6 +788,7 @@ void update_wall_time(void)
 	struct clocksource *clock;
 	cycle_t offset;
 	u64 nsecs;
+	int shift = 0, maxshift;
 
 	/* Make sure we're fully resumed: */
 	if (unlikely(timekeeping_suspended))
@@ -745,33 +802,22 @@ void update_wall_time(void)
 #endif
 	timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;
 
-	/* normally this loop will run just once, however in the
-	 * case of lost or late ticks, it will accumulate correctly.
+	/*
+	 * With NO_HZ we may have to accumulate many cycle_intervals
+	 * (think "ticks") worth of time at once. To do this efficiently,
+	 * we calculate the largest doubling multiple of cycle_intervals
+	 * that is smaller than the offset. We then accumulate that
+	 * chunk in one go, and then try to consume the next smaller
+	 * doubled multiple.
 	 */
+	shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
+	shift = max(0, shift);
+	/* Bound shift to one less than what overflows tick_length */
+	maxshift = (8*sizeof(tick_length) - (ilog2(tick_length)+1)) - 1;
+	shift = min(shift, maxshift);
 	while (offset >= timekeeper.cycle_interval) {
-		u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
-
-		/* accumulate one interval */
-		offset -= timekeeper.cycle_interval;
-		clock->cycle_last += timekeeper.cycle_interval;
-
-		timekeeper.xtime_nsec += timekeeper.xtime_interval;
-		if (timekeeper.xtime_nsec >= nsecps) {
-			timekeeper.xtime_nsec -= nsecps;
-			xtime.tv_sec++;
-			second_overflow();
-		}
-
-		raw_time.tv_nsec += timekeeper.raw_interval;
-		if (raw_time.tv_nsec >= NSEC_PER_SEC) {
-			raw_time.tv_nsec -= NSEC_PER_SEC;
-			raw_time.tv_sec++;
-		}
-
-		/* accumulate error between NTP and clock interval */
-		timekeeper.ntp_error += tick_length;
-		timekeeper.ntp_error -= timekeeper.xtime_interval <<
-					timekeeper.ntp_error_shift;
+		offset = logarithmic_accumulation(offset, shift);
+		shift--;
 	}
 
 	/* correct the clock when NTP error is too big */
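The shift seeding above can be checked with small numbers. A standalone sketch of the two bounds, with all values made up and ilog2_u64 standing in for the kernel's ilog2():

    #include <stdint.h>
    #include <stdio.h>

    static int ilog2_u64(uint64_t v)    /* floor(log2(v)), v > 0 */
    {
        int l = -1;
        while (v) {
            v >>= 1;
            l++;
        }
        return l;
    }

    int main(void)
    {
        uint64_t cycle_interval = 1000;           /* cycles per tick */
        uint64_t offset = 1337 * cycle_interval;  /* idle backlog */
        uint64_t tick_length = 1ULL << 40;        /* shifted ns per tick */

        /* Roughly the largest power-of-two multiple of cycle_interval in
         * offset; logarithmic_accumulation()'s guard absorbs any overshoot */
        int shift = ilog2_u64(offset) - ilog2_u64(cycle_interval);
        if (shift < 0)
            shift = 0;

        /* One less than the shift that pushes tick_length's top bit out */
        int maxshift = (8 * (int)sizeof(tick_length) -
                        (ilog2_u64(tick_length) + 1)) - 1;
        if (shift > maxshift)
            shift = maxshift;

        printf("shift=%d maxshift=%d\n", shift, maxshift); /* 11 and 22 */
        return 0;
    }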
@@ -811,7 +857,7 @@ void update_wall_time(void)
 	update_xtime_cache(nsecs);
 
 	/* check to see if there is a new clocksource to use */
-	update_vsyscall(&xtime, timekeeper.clock);
+	update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
 }
 
 /**