author     John Stultz <johnstul@us.ibm.com>      2010-04-06 17:30:51 -0400
committer  Thomas Gleixner <tglx@linutronix.de>   2010-04-13 06:43:42 -0400
commit     6a867a395558a7f882d041783e4cdea6744ca2bf (patch)
tree       dfe350df25fba5fec6f7e1088b04d6b03f0974b3 /kernel
parent     9ca7d8e6834c40a99622bbe4a88aaf64313ae43c (diff)
time: Remove xtime_cache
With the earlier logarithmic time accumulation patch, xtime will now
always be within one "tick" of the current time, instead of possibly
half a second off.
This removes the need for the xtime_cache value, which always stored the
time at the last interrupt, so this patch cleans that up by removing the
xtime_cache related code.
This patch also addresses an issue with an earlier version of this change,
where xtime_cache was normalizing xtime, which in some cases could be
invalid (i.e. tv_nsec == NSEC_PER_SEC). This is fixed by handling
the edge case in update_wall_time().
Signed-off-by: John Stultz <johnstul@us.ibm.com>
Cc: Petr Titěra <P.Titera@century.cz>
LKML-Reference: <1270589451-30773-1-git-send-email-johnstul@us.ibm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
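
To make the edge case concrete, here is a minimal userspace sketch of the
rounding step and the new overflow check. This is a sketch only: the shift
value and the accumulator contents are invented for illustration, and
second_overflow() is stubbed in place of the NTP helper in kernel/time/ntp.c.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000L

/* Stub standing in for the kernel's second_overflow() in
 * kernel/time/ntp.c, which runs once per accumulated second. */
static void second_overflow(void)
{
}

int main(void)
{
        /* Shifted-nanosecond accumulator, as kept in struct timekeeper;
         * shift = 10 is an arbitrary illustration. */
        unsigned int shift = 10;
        uint64_t xtime_nsec = (uint64_t)(NSEC_PER_SEC - 1) << shift;

        long tv_sec = 0;
        long tv_nsec = (long)(xtime_nsec >> shift) + 1;  /* round up */

        /* Without this check, tv_nsec == NSEC_PER_SEC would escape as an
         * un-normalized timespec, the bug the changelog describes. */
        if (tv_nsec >= NSEC_PER_SEC) {
                tv_nsec -= NSEC_PER_SEC;
                tv_sec++;
                second_overflow();
        }

        printf("%ld.%09ld\n", tv_sec, tv_nsec);
        return 0;
}

With NSEC_PER_SEC - 1 shifted nanoseconds accumulated, the "+ 1" round-up
lands on exactly NSEC_PER_SEC, so the check has to run after the rounding,
which is precisely where update_wall_time() now performs it.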
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/time/timekeeping.c | 35 ++++++++++++++++-------------------
1 file changed, 16 insertions(+), 19 deletions(-)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 16736379a9ca..1137f245a4ba 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -165,13 +165,6 @@ struct timespec raw_time;
 /* flag for if timekeeping is suspended */
 int __read_mostly timekeeping_suspended;
 
-static struct timespec xtime_cache __attribute__ ((aligned (16)));
-void update_xtime_cache(u64 nsec)
-{
-        xtime_cache = xtime;
-        timespec_add_ns(&xtime_cache, nsec);
-}
-
 /* must hold xtime_lock */
 void timekeeping_leap_insert(int leapsecond)
 {
@@ -332,8 +325,6 @@ int do_settimeofday(struct timespec *tv)
 
         xtime = *tv;
 
-        update_xtime_cache(0);
-
         timekeeper.ntp_error = 0;
         ntp_clear();
 
@@ -559,7 +550,6 @@ void __init timekeeping_init(void)
         }
         set_normalized_timespec(&wall_to_monotonic,
                                 -boot.tv_sec, -boot.tv_nsec);
-        update_xtime_cache(0);
         total_sleep_time.tv_sec = 0;
         total_sleep_time.tv_nsec = 0;
         write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -593,7 +583,6 @@ static int timekeeping_resume(struct sys_device *dev)
                 wall_to_monotonic = timespec_sub(wall_to_monotonic, ts);
                 total_sleep_time = timespec_add_safe(total_sleep_time, ts);
         }
-        update_xtime_cache(0);
         /* re-base the last cycle value */
         timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
         timekeeper.ntp_error = 0;
@@ -788,7 +777,6 @@ void update_wall_time(void)
 {
         struct clocksource *clock;
         cycle_t offset;
-        u64 nsecs;
         int shift = 0, maxshift;
 
         /* Make sure we're fully resumed: */
@@ -846,7 +834,9 @@ void update_wall_time(void)
                 timekeeper.ntp_error += neg << timekeeper.ntp_error_shift;
         }
 
-        /* store full nanoseconds into xtime after rounding it up and
+
+        /*
+         * Store full nanoseconds into xtime after rounding it up and
          * add the remainder to the error difference.
          */
         xtime.tv_nsec = ((s64) timekeeper.xtime_nsec >> timekeeper.shift) + 1;
@@ -854,8 +844,15 @@
         timekeeper.ntp_error += timekeeper.xtime_nsec <<
                                 timekeeper.ntp_error_shift;
 
-        nsecs = clocksource_cyc2ns(offset, timekeeper.mult, timekeeper.shift);
-        update_xtime_cache(nsecs);
+        /*
+         * Finally, make sure that after the rounding
+         * xtime.tv_nsec isn't larger then NSEC_PER_SEC
+         */
+        if (unlikely(xtime.tv_nsec >= NSEC_PER_SEC)) {
+                xtime.tv_nsec -= NSEC_PER_SEC;
+                xtime.tv_sec++;
+                second_overflow();
+        }
 
         /* check to see if there is a new clocksource to use */
         update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
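
The two deleted lines were the producer side of the cache:
clocksource_cyc2ns() converted the cycles not yet folded into xtime into
nanoseconds so update_xtime_cache() could project the time forward. The
conversion is an ordinary fixed-point multiply; a standalone sketch follows
(the mult/shift values are hypothetical, but the formula mirrors the
kernel's (cycles * mult) >> shift):

#include <stdint.h>
#include <stdio.h>

/* Fixed-point cycles-to-nanoseconds conversion; same formula as the
 * kernel's clocksource_cyc2ns(): ns = (cycles * mult) >> shift. */
static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
        return (cycles * mult) >> shift;
}

int main(void)
{
        /* Hypothetical ~1 GHz clocksource, so mult / 2^shift == 1 ns/cycle. */
        uint32_t mult = 1u << 24;
        uint32_t shift = 24;
        uint64_t offset = 1234567;      /* cycles since last accumulation */

        /* This partial-tick value is what used to be fed into
         * update_xtime_cache(); after the patch it is simply dropped,
         * since xtime itself now stays within one tick of the truth. */
        printf("%llu ns\n", (unsigned long long)cyc2ns(offset, mult, shift));
        return 0;
}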
@@ -895,13 +892,13 @@ EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
 
 unsigned long get_seconds(void)
 {
-        return xtime_cache.tv_sec;
+        return xtime.tv_sec;
 }
 EXPORT_SYMBOL(get_seconds);
 
 struct timespec __current_kernel_time(void)
 {
-        return xtime_cache;
+        return xtime;
 }
 
 struct timespec current_kernel_time(void)
@@ -912,7 +909,7 @@ struct timespec current_kernel_time(void)
         do {
                 seq = read_seqbegin(&xtime_lock);
 
-                now = xtime_cache;
+                now = xtime;
         } while (read_seqretry(&xtime_lock, seq));
 
         return now;
@@ -927,7 +924,7 @@ struct timespec get_monotonic_coarse(void)
         do {
                 seq = read_seqbegin(&xtime_lock);
 
-                now = xtime_cache;
+                now = xtime;
                 mono = wall_to_monotonic;
         } while (read_seqretry(&xtime_lock, seq));
 
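
All three readers above now take the same shape: snapshot xtime inside a
seqlock retry loop, exactly as they previously snapshotted xtime_cache. As a
rough model of that read side, here is a userspace sketch with a hand-rolled
C11 sequence counter standing in for xtime_lock and its
read_seqbegin()/read_seqretry() helpers (a toy model, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

/* Toy sequence counter: a writer would make the count odd while
 * updating and even again when done; readers retry until they see the
 * same even value before and after copying the protected data. */
static atomic_uint xtime_seq;
static struct timespec xtime;   /* stand-in for the kernel's xtime */

static struct timespec read_coarse_time(void)
{
        struct timespec now;
        unsigned int seq;

        do {
                while ((seq = atomic_load(&xtime_seq)) & 1)
                        ;       /* writer in progress, spin */
                now = xtime;    /* snapshot taken at an even sequence */
        } while (atomic_load(&xtime_seq) != seq);

        return now;
}

int main(void)
{
        struct timespec now = read_coarse_time();
        printf("%ld.%09ld\n", (long)now.tv_sec, now.tv_nsec);
        return 0;
}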