path: root/kernel/time/timekeeping.c
Diffstat (limited to 'kernel/time/timekeeping.c')
-rw-r--r--  kernel/time/timekeeping.c  126
1 file changed, 77 insertions, 49 deletions
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index fb0f46fa1ecd..af4135f05825 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -13,6 +13,7 @@
 #include <linux/percpu.h>
 #include <linux/init.h>
 #include <linux/mm.h>
+#include <linux/sched.h>
 #include <linux/sysdev.h>
 #include <linux/clocksource.h>
 #include <linux/jiffies.h>
@@ -164,19 +165,12 @@ struct timespec raw_time;
 /* flag for if timekeeping is suspended */
 int __read_mostly timekeeping_suspended;
 
-static struct timespec xtime_cache __attribute__ ((aligned (16)));
-void update_xtime_cache(u64 nsec)
-{
-	xtime_cache = xtime;
-	timespec_add_ns(&xtime_cache, nsec);
-}
-
 /* must hold xtime_lock */
 void timekeeping_leap_insert(int leapsecond)
 {
 	xtime.tv_sec += leapsecond;
 	wall_to_monotonic.tv_sec -= leapsecond;
-	update_vsyscall(&xtime, timekeeper.clock);
+	update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
 }
 
 #ifdef CONFIG_GENERIC_TIME
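The extra argument to update_vsyscall() above passes the timekeeper's current multiplier down to the architecture vsyscall code, so a userspace gettimeofday() computes time with the same NTP-adjusted mult the in-kernel timekeeper just applied. A minimal sketch of an arch hook consuming it, assuming an x86-style vsyscall_gtod_data structure (the struct and field names here are illustrative, not part of this patch):

void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
		     u32 mult)
{
	unsigned long flags;

	/* Publish the freshly adjusted mult together with the wall time,
	 * so the userspace fast path cannot mix an old multiplier with
	 * new time and observe time warping backwards. */
	write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
	vsyscall_gtod_data.clock.mult = mult;
	vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
	vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
	vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
	write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}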
@@ -331,12 +325,10 @@ int do_settimeofday(struct timespec *tv)
 
 	xtime = *tv;
 
-	update_xtime_cache(0);
-
 	timekeeper.ntp_error = 0;
 	ntp_clear();
 
-	update_vsyscall(&xtime, timekeeper.clock);
+	update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
 
 	write_sequnlock_irqrestore(&xtime_lock, flags);
 
@@ -487,6 +479,17 @@ int timekeeping_valid_for_hres(void)
 }
 
 /**
+ * timekeeping_max_deferment - Returns max time the clocksource can be deferred
+ *
+ * Caller must observe xtime_lock via read_seqbegin/read_seqretry to
+ * ensure that the clocksource does not change!
+ */
+u64 timekeeping_max_deferment(void)
+{
+	return timekeeper.clock->max_idle_ns;
+}
+
+/**
  * read_persistent_clock - Return time from the persistent clock.
  *
  * Weak dummy function for arches that do not yet support it.
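timekeeping_max_deferment() only dereferences timekeeper.clock, so the locking burden falls on the caller, as the kerneldoc insists. A sketch of the intended read pattern, modeled on the NO_HZ idle path (paraphrased here, not part of this patch):

	u64 time_delta;
	unsigned long seq;

	/* Sample the deferment limit under the seqlock so a concurrent
	 * clocksource switch cannot hand back a stale max_idle_ns. */
	do {
		seq = read_seqbegin(&xtime_lock);
		time_delta = timekeeping_max_deferment();
	} while (read_seqretry(&xtime_lock, seq));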
@@ -547,7 +550,6 @@ void __init timekeeping_init(void)
 	}
 	set_normalized_timespec(&wall_to_monotonic,
 				-boot.tv_sec, -boot.tv_nsec);
-	update_xtime_cache(0);
 	total_sleep_time.tv_sec = 0;
 	total_sleep_time.tv_nsec = 0;
 	write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -581,7 +583,6 @@ static int timekeeping_resume(struct sys_device *dev)
 		wall_to_monotonic = timespec_sub(wall_to_monotonic, ts);
 		total_sleep_time = timespec_add_safe(total_sleep_time, ts);
 	}
-	update_xtime_cache(0);
 	/* re-base the last cycle value */
 	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
 	timekeeper.ntp_error = 0;
@@ -722,6 +723,49 @@ static void timekeeping_adjust(s64 offset)
 }
 
 /**
+ * logarithmic_accumulation - shifted accumulation of cycles
+ *
+ * This function accumulates a shifted interval of cycles into
+ * a shifted interval of nanoseconds, allowing for an O(log)
+ * accumulation loop.
+ *
+ * Returns the unconsumed cycles.
+ */
+static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
+{
+	u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
+
+	/* If the offset is smaller than a shifted interval, do nothing */
+	if (offset < timekeeper.cycle_interval << shift)
+		return offset;
+
+	/* Accumulate one shifted interval */
+	offset -= timekeeper.cycle_interval << shift;
+	timekeeper.clock->cycle_last += timekeeper.cycle_interval << shift;
+
+	timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
+	while (timekeeper.xtime_nsec >= nsecps) {
+		timekeeper.xtime_nsec -= nsecps;
+		xtime.tv_sec++;
+		second_overflow();
+	}
+
+	/* Accumulate into raw time */
+	raw_time.tv_nsec += timekeeper.raw_interval << shift;
+	while (raw_time.tv_nsec >= NSEC_PER_SEC) {
+		raw_time.tv_nsec -= NSEC_PER_SEC;
+		raw_time.tv_sec++;
+	}
+
+	/* Accumulate error between NTP and clock interval */
+	timekeeper.ntp_error += tick_length << shift;
+	timekeeper.ntp_error -= timekeeper.xtime_interval <<
+				(timekeeper.ntp_error_shift + shift);
+
+	return offset;
+}
+
+/**
  * update_wall_time - Uses the current clocksource to increment the wall time
  *
  * Called from the timer interrupt, must hold a write on xtime_lock.
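To see why the shifted accumulation makes the loop O(log n), the following standalone toy (userspace C, invented numbers, no kernel state) mirrors the shape of logarithmic_accumulation() and its driving loop in update_wall_time() below: each call consumes at most one interval<<shift chunk, and shift drops by one per pass.

#include <stdio.h>
#include <stdint.h>

static int ilog2_u64(uint64_t v)	/* floor(log2(v)), v > 0 */
{
	int l = -1;
	while (v) { v >>= 1; l++; }
	return l;
}

/* Toy analog of logarithmic_accumulation(): consume one shifted
 * interval from 'offset', or nothing if the chunk does not fit. */
static uint64_t accumulate(uint64_t offset, uint64_t interval, int shift,
			   int *calls)
{
	(*calls)++;
	if (offset < (interval << shift))
		return offset;
	return offset - (interval << shift);
}

int main(void)
{
	const uint64_t interval = 1000;	/* cycles per tick (made up) */
	uint64_t offset = 1234567;	/* ~1234 pending ticks of cycles */
	int calls = 0;
	int shift = ilog2_u64(offset) - ilog2_u64(interval);

	if (shift < 0)
		shift = 0;
	while (offset >= interval) {
		offset = accumulate(offset, interval, shift, &calls);
		shift--;
	}
	/* Prints leftover=567 calls=11, versus ~1234 one-tick passes. */
	printf("leftover=%llu calls=%d\n", (unsigned long long)offset, calls);
	return 0;
}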
@@ -730,7 +774,7 @@ void update_wall_time(void)
 {
 	struct clocksource *clock;
 	cycle_t offset;
-	u64 nsecs;
+	int shift = 0, maxshift;
 
 	/* Make sure we're fully resumed: */
 	if (unlikely(timekeeping_suspended))
@@ -744,33 +788,22 @@ void update_wall_time(void)
 #endif
 	timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;
 
-	/* normally this loop will run just once, however in the
-	 * case of lost or late ticks, it will accumulate correctly.
+	/*
+	 * With NO_HZ we may have to accumulate many cycle_intervals
+	 * (think "ticks") worth of time at once. To do this efficiently,
+	 * we calculate the largest doubling multiple of cycle_intervals
+	 * that is smaller than the offset. We then accumulate that
+	 * chunk in one go, and then try to consume the next smaller
+	 * doubled multiple.
 	 */
+	shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
+	shift = max(0, shift);
+	/* Bound shift to one less than what overflows tick_length */
+	maxshift = (8*sizeof(tick_length) - (ilog2(tick_length)+1)) - 1;
+	shift = min(shift, maxshift);
 	while (offset >= timekeeper.cycle_interval) {
-		u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
-
-		/* accumulate one interval */
-		offset -= timekeeper.cycle_interval;
-		clock->cycle_last += timekeeper.cycle_interval;
-
-		timekeeper.xtime_nsec += timekeeper.xtime_interval;
-		if (timekeeper.xtime_nsec >= nsecps) {
-			timekeeper.xtime_nsec -= nsecps;
-			xtime.tv_sec++;
-			second_overflow();
-		}
-
-		raw_time.tv_nsec += timekeeper.raw_interval;
-		if (raw_time.tv_nsec >= NSEC_PER_SEC) {
-			raw_time.tv_nsec -= NSEC_PER_SEC;
-			raw_time.tv_sec++;
-		}
-
-		/* accumulate error between NTP and clock interval */
-		timekeeper.ntp_error += tick_length;
-		timekeeper.ntp_error -= timekeeper.xtime_interval <<
-					timekeeper.ntp_error_shift;
+		offset = logarithmic_accumulation(offset, shift);
+		shift--;
 	}
 
 	/* correct the clock when NTP error is too big */
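The maxshift bound is worth a worked number: tick_length is an NTP-scaled 64-bit value, and logarithmic_accumulation() shifts it left by 'shift', so shift must stay below the number of free high bits. A standalone check with assumed magnitudes (the constants are illustrative, not measured from any machine):

#include <stdio.h>
#include <stdint.h>

static int ilog2_u64(uint64_t v)	/* floor(log2(v)), v > 0 */
{
	int l = -1;
	while (v) { v >>= 1; l++; }
	return l;
}

int main(void)
{
	/* ~1 ms per tick in nanoseconds, scaled << 32 (assumption):
	 * tick_length then occupies about 52 bits. */
	uint64_t tick_length = (uint64_t)1000000 << 32;
	uint64_t cycle_interval = 1000000;	/* cycles per tick, made up */
	uint64_t offset = 3600ULL * 1000000000;	/* ~1 hour of idle cycles */

	int shift = ilog2_u64(offset) - ilog2_u64(cycle_interval);
	int maxshift = (8 * (int)sizeof(tick_length) -
			(ilog2_u64(tick_length) + 1)) - 1;

	if (shift < 0)
		shift = 0;
	if (shift > maxshift)	/* tick_length << shift must not overflow */
		shift = maxshift;
	printf("shift=%d maxshift=%d\n", shift, maxshift);	/* 11 and 11 */
	return 0;
}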
@@ -806,11 +839,8 @@ void update_wall_time(void)
 	timekeeper.ntp_error += timekeeper.xtime_nsec <<
 				timekeeper.ntp_error_shift;
 
-	nsecs = clocksource_cyc2ns(offset, timekeeper.mult, timekeeper.shift);
-	update_xtime_cache(nsecs);
-
 	/* check to see if there is a new clocksource to use */
-	update_vsyscall(&xtime, timekeeper.clock);
+	update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
 }
 
 /**
@@ -845,13 +875,13 @@ void monotonic_to_bootbased(struct timespec *ts)
 
 unsigned long get_seconds(void)
 {
-	return xtime_cache.tv_sec;
+	return xtime.tv_sec;
 }
 EXPORT_SYMBOL(get_seconds);
 
 struct timespec __current_kernel_time(void)
 {
-	return xtime_cache;
+	return xtime;
 }
 
 struct timespec current_kernel_time(void)
@@ -861,8 +891,7 @@ struct timespec current_kernel_time(void)
 
 	do {
 		seq = read_seqbegin(&xtime_lock);
-
-		now = xtime_cache;
+		now = xtime;
 	} while (read_seqretry(&xtime_lock, seq));
 
 	return now;
@@ -876,8 +905,7 @@ struct timespec get_monotonic_coarse(void)
 
 	do {
 		seq = read_seqbegin(&xtime_lock);
-
-		now = xtime_cache;
+		now = xtime;
 		mono = wall_to_monotonic;
 	} while (read_seqretry(&xtime_lock, seq));
 
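With xtime_cache gone, every coarse reader above samples xtime itself inside the same read_seqbegin()/read_seqretry() retry loop. For reference, a minimal userspace analog of that reader/writer protocol (C11 atomics, illustrative only; the plain struct copy is formally racy, and a production version would add per-field atomics or fences):

#include <stdatomic.h>
#include <stdio.h>

struct ts { long sec; long nsec; };

static _Atomic unsigned seq;	/* even: stable, odd: writer active */
static struct ts wall;

static void write_time(long s, long ns)
{
	atomic_fetch_add(&seq, 1);	/* odd: readers will retry */
	wall.sec = s;
	wall.nsec = ns;
	atomic_fetch_add(&seq, 1);	/* even again: publish */
}

static struct ts read_time(void)
{
	struct ts snap;
	unsigned start;

	do {
		while ((start = atomic_load(&seq)) & 1)
			;		/* spin while a write is in flight */
		snap = wall;		/* speculative copy */
	} while (atomic_load(&seq) != start);	/* torn read? retry */
	return snap;
}

int main(void)
{
	write_time(1259876543, 500000000);
	struct ts now = read_time();
	printf("%ld.%09ld\n", now.sec, now.nsec);
	return 0;
}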