author     Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
commit     ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree       644b88f8a71896307d71438e9b3af49126ffb22b /kernel/time/timekeeping.c
parent     43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent     3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master
Diffstat (limited to 'kernel/time/timekeeping.c')
-rw-r--r--    kernel/time/timekeeping.c    106
1 file changed, 78 insertions(+), 28 deletions(-)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index c3a4e2907eaa..39f6177fafac 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -177,7 +177,7 @@ void timekeeping_leap_insert(int leapsecond)
 {
 	xtime.tv_sec += leapsecond;
 	wall_to_monotonic.tv_sec -= leapsecond;
-	update_vsyscall(&xtime, timekeeper.clock);
+	update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
 }
 
 #ifdef CONFIG_GENERIC_TIME
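The new third argument propagates the NTP-adjusted multiplier (timekeeper.mult) into the vsyscall/vDSO gettimeofday data, so userspace converts cycles to nanoseconds with the same multiplier the timekeeping core uses, rather than the clocksource's unadjusted clock->mult. A minimal sketch of that conversion; the names vgtod_data and cycles_to_ns are illustrative stand-ins for the arch-specific structures, not the actual implementation:

#include <stdint.h>

/* Hypothetical stand-in for the per-arch vsyscall data that
 * update_vsyscall() refreshes; field names are illustrative. */
struct vgtod_data {
	uint64_t cycle_last;	/* clocksource value at last update */
	uint32_t mult;		/* NTP-adjusted multiplier (the new 3rd arg) */
	uint32_t shift;		/* clocksource shift */
	uint64_t base_ns;	/* nanoseconds at last update */
};

/* The standard clocksource conversion: ns = (delta * mult) >> shift.
 * Passing timekeeper.mult keeps userspace in step with the NTP
 * frequency corrections applied by timekeeping_adjust(). */
static uint64_t cycles_to_ns(const struct vgtod_data *v, uint64_t now)
{
	return v->base_ns + (((now - v->cycle_last) * v->mult) >> v->shift);
}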
@@ -337,7 +337,7 @@ int do_settimeofday(struct timespec *tv)
 	timekeeper.ntp_error = 0;
 	ntp_clear();
 
-	update_vsyscall(&xtime, timekeeper.clock);
+	update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
 
 	write_sequnlock_irqrestore(&xtime_lock, flags);
 
@@ -488,6 +488,17 @@ int timekeeping_valid_for_hres(void)
 }
 
 /**
+ * timekeeping_max_deferment - Returns max time the clocksource can be deferred
+ *
+ * Caller must observe xtime_lock via read_seqbegin/read_seqretry to
+ * ensure that the clocksource does not change!
+ */
+u64 timekeeping_max_deferment(void)
+{
+	return timekeeper.clock->max_idle_ns;
+}
+
+/**
  * read_persistent_clock - Return time from the persistent clock.
  *
  * Weak dummy function for arches that do not yet support it.
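The locking rule in the kerneldoc above matters because timekeeper.clock can be switched at any time; the seqlock retry loop guarantees the max_idle_ns value came from a single, consistent clocksource. A sketch of a conforming caller, loosely modeled on the NO_HZ idle path but not a verbatim copy of that code:

/* Sample the max deferment under the xtime seqlock, retrying if the
 * timekeeper (and thus the clocksource) changed mid-read. */
static u64 sample_max_deferment(void)
{
	unsigned long seq;
	u64 max_ns;

	do {
		seq = read_seqbegin(&xtime_lock);
		max_ns = timekeeping_max_deferment();
	} while (read_seqretry(&xtime_lock, seq));

	return max_ns;
}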
@@ -611,6 +622,7 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
 	write_sequnlock_irqrestore(&xtime_lock, flags);
 
 	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
+	clocksource_suspend();
 
 	return 0;
 }
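clocksource_suspend() is introduced elsewhere in this merge; the idea is to give every registered clocksource a chance to quiesce on suspend, after clock events are shut down. A sketch of the pattern: the list and hook names follow kernel/time/clocksource.c of this era, but treat the body as illustrative rather than the exact upstream code:

void clocksource_suspend(void)
{
	struct clocksource *cs;

	/* Walk registered clocksources in reverse registration order
	 * and let each one save state or power down. */
	list_for_each_entry_reverse(cs, &clocksource_list, list)
		if (cs->suspend)
			cs->suspend(cs);
}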
@@ -722,6 +734,51 @@ static void timekeeping_adjust(s64 offset)
 		timekeeper.ntp_error_shift;
 }
 
+
+/**
+ * logarithmic_accumulation - shifted accumulation of cycles
+ *
+ * This function accumulates a shifted interval of cycles into a
+ * shifted interval of nanoseconds, allowing for an O(log)
+ * accumulation loop.
+ *
+ * Returns the unconsumed cycles.
+ */
+static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
+{
+	u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
+
+	/* If the offset is smaller than a shifted interval, do nothing */
+	if (offset < timekeeper.cycle_interval << shift)
+		return offset;
+
+	/* Accumulate one shifted interval */
+	offset -= timekeeper.cycle_interval << shift;
+	timekeeper.clock->cycle_last += timekeeper.cycle_interval << shift;
+
+	timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
+	while (timekeeper.xtime_nsec >= nsecps) {
+		timekeeper.xtime_nsec -= nsecps;
+		xtime.tv_sec++;
+		second_overflow();
+	}
+
+	/* Accumulate into raw time */
+	raw_time.tv_nsec += timekeeper.raw_interval << shift;
+	while (raw_time.tv_nsec >= NSEC_PER_SEC) {
+		raw_time.tv_nsec -= NSEC_PER_SEC;
+		raw_time.tv_sec++;
+	}
+
+	/* Accumulate error between NTP and clock interval */
+	timekeeper.ntp_error += tick_length << shift;
+	timekeeper.ntp_error -= timekeeper.xtime_interval <<
+				(timekeeper.ntp_error_shift + shift);
+
+	return offset;
+}
+
+
 /**
  * update_wall_time - Uses the current clocksource to increment the wall time
  *
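To see the O(log) behaviour concretely: with an offset worth 1000 cycle_intervals, the loop in update_wall_time() below starts at shift = ilog2(1000) = 9 and consumes chunks of 512, 256, 128, 64, 32, and finally 8 intervals, stepping shift down by one each pass. A standalone demo of the strategy, using plain counters instead of timekeeper state and taking cycle_interval as 1:

#include <stdio.h>

/* Mirror of logarithmic_accumulation() with cycle_interval == 1 and no
 * timekeeper state: consume one 2^shift chunk if it fits. */
static unsigned long accumulate(unsigned long offset, int shift,
				unsigned long *consumed)
{
	if (offset < (1UL << shift))
		return offset;		/* chunk too big; caller lowers shift */
	*consumed += 1UL << shift;
	return offset - (1UL << shift);
}

int main(void)
{
	unsigned long offset = 1000, consumed = 0;
	int shift = 9, passes = 0;	/* ilog2(1000) - ilog2(1) == 9 */

	while (offset >= 1) {		/* offset >= cycle_interval */
		offset = accumulate(offset, shift, &consumed);
		if (offset < (1UL << shift))
			shift--;
		passes++;
	}
	/* 512 + 256 + 128 + 64 + 32 + 8 = 1000, done in 7 passes
	 * (one pass is empty while shift steps down) instead of 1000. */
	printf("consumed %lu intervals in %d passes\n", consumed, passes);
	return 0;
}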
@@ -732,6 +789,7 @@ void update_wall_time(void)
 	struct clocksource *clock;
 	cycle_t offset;
 	u64 nsecs;
+	int shift = 0, maxshift;
 
 	/* Make sure we're fully resumed: */
 	if (unlikely(timekeeping_suspended))
@@ -745,33 +803,23 @@ void update_wall_time(void)
 #endif
 	timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;
 
-	/* normally this loop will run just once, however in the
-	 * case of lost or late ticks, it will accumulate correctly.
+	/*
+	 * With NO_HZ we may have to accumulate many cycle_intervals
+	 * (think "ticks") worth of time at once. To do this efficiently,
+	 * we calculate the largest doubling multiple of cycle_intervals
+	 * that is smaller than the offset. We then accumulate that
+	 * chunk in one go, and then try to consume the next smaller
+	 * doubled multiple.
 	 */
+	shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
+	shift = max(0, shift);
+	/* Bound shift to one less than what overflows tick_length */
+	maxshift = (8 * sizeof(tick_length) - (ilog2(tick_length) + 1)) - 1;
+	shift = min(shift, maxshift);
 	while (offset >= timekeeper.cycle_interval) {
-		u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
-
-		/* accumulate one interval */
-		offset -= timekeeper.cycle_interval;
-		clock->cycle_last += timekeeper.cycle_interval;
-
-		timekeeper.xtime_nsec += timekeeper.xtime_interval;
-		if (timekeeper.xtime_nsec >= nsecps) {
-			timekeeper.xtime_nsec -= nsecps;
-			xtime.tv_sec++;
-			second_overflow();
-		}
-
-		raw_time.tv_nsec += timekeeper.raw_interval;
-		if (raw_time.tv_nsec >= NSEC_PER_SEC) {
-			raw_time.tv_nsec -= NSEC_PER_SEC;
-			raw_time.tv_sec++;
-		}
-
-		/* accumulate error between NTP and clock interval */
-		timekeeper.ntp_error += tick_length;
-		timekeeper.ntp_error -= timekeeper.xtime_interval <<
-					timekeeper.ntp_error_shift;
+		offset = logarithmic_accumulation(offset, shift);
+		if (offset < timekeeper.cycle_interval << shift)
+			shift--;
 	}
 
 	/* correct the clock when NTP error is too big */
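The maxshift bound keeps tick_length << shift in logarithmic_accumulation() from overflowing 64 bits: 8*sizeof(tick_length) is the word width, ilog2(tick_length)+1 the number of significant bits, their difference the free headroom, and the final -1 a safety margin. A worked check with an illustrative tick_length (HZ=1000, so roughly 1,000,000 ns scaled up by the 32-bit NTP shift; the value is an assumption for the demo, not measured):

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's ilog2(): floor(log2(x)). */
static int ilog2_u64(uint64_t x)
{
	int l = -1;
	while (x) { x >>= 1; l++; }
	return l;
}

int main(void)
{
	/* Illustrative: ~1 ms tick in ns, scaled by 2^32 (~2^51.9). */
	uint64_t tick_length = 1000000ULL << 32;

	int bits = ilog2_u64(tick_length) + 1;	/* 52 significant bits */
	int maxshift = (8 * (int)sizeof(tick_length) - bits) - 1;

	/* 64 - 52 - 1 = 11: tick_length << 11 still fits in 64 bits,
	 * so up to 2^11 cycle_intervals can be accumulated per chunk. */
	printf("bits=%d maxshift=%d\n", bits, maxshift);
	return 0;
}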
@@ -811,7 +859,7 @@ void update_wall_time(void)
 	update_xtime_cache(nsecs);
 
 	/* check to see if there is a new clocksource to use */
-	update_vsyscall(&xtime, timekeeper.clock);
+	update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
 }
 
 /**
@@ -834,6 +882,7 @@ void getboottime(struct timespec *ts)
 
 	set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
 }
+EXPORT_SYMBOL_GPL(getboottime);
 
 /**
  * monotonic_to_bootbased - Convert the monotonic time to boot based.
@@ -843,6 +892,7 @@ void monotonic_to_bootbased(struct timespec *ts)
 {
 	*ts = timespec_add_safe(*ts, total_sleep_time);
 }
+EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
 
 unsigned long get_seconds(void)
 {
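The two EXPORT_SYMBOL_GPL additions make these boot-time helpers callable from modules. A hypothetical GPL module exercising them; the module itself is illustrative and not part of the commit:

#include <linux/module.h>
#include <linux/time.h>

static int __init boottime_demo_init(void)
{
	struct timespec boot, when = { .tv_sec = 0, .tv_nsec = 0 };

	getboottime(&boot);		/* wall-clock time of boot */
	monotonic_to_bootbased(&when);	/* monotonic 0 -> boot-based,
					 * i.e. total suspended time */

	pr_info("booted at %ld.%09ld, slept %ld.%09ld since\n",
		boot.tv_sec, boot.tv_nsec, when.tv_sec, when.tv_nsec);
	return 0;
}

static void __exit boottime_demo_exit(void)
{
}

module_init(boottime_demo_init);
module_exit(boottime_demo_exit);
MODULE_LICENSE("GPL");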