author	Martin Schwidefsky <schwidefsky@de.ibm.com>	2009-08-14 09:47:27 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2009-08-15 04:55:46 -0400
commit	23ce72117c714baab794e66c8daf343bf6a912bf (patch)
tree	ef2cfc1ffbf07c344ab28a6f5f2357e6b2c9a99d /kernel
parent	155ec60226ae0ae2aadaa57c951a58a359331030 (diff)
timekeeping: Add xtime_shift and ntp_error_shift to struct timekeeper
The xtime_nsec value in the timekeeper structure is shifted by a few
bits to improve precision. This happens to be the same value as
clock->shift. To improve readability add xtime_shift to the timekeeper
and use it instead of clock->shift. Likewise add ntp_error_shift and
replace all (NTP_SCALE_SHIFT - clock->shift) expressions.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Acked-by: John Stultz <johnstul@us.ibm.com>
Cc: Daniel Walker <dwalker@fifo99.com>
LKML-Reference: <20090814134809.871899606@de.ibm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
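For context, the timekeeping code juggles two fixed-point scales: timekeeper.xtime_nsec holds nanoseconds left-shifted by the clocksource shift (cached in the new shift field), while ntp_error is kept in nanoseconds left-shifted by NTP_SCALE_SHIFT (32, from include/linux/timex.h). The new ntp_error_shift field caches the difference of the two, so a single extra shift moves a value from the clock scale to the NTP scale. A minimal standalone sketch of that identity, in plain C rather than kernel code and with a made-up shift value:

#include <stdio.h>
#include <stdint.h>

#define NTP_SCALE_SHIFT 32		/* as in include/linux/timex.h */

int main(void)
{
	int shift = 22;			/* hypothetical clocksource shift */
	int ntp_error_shift = NTP_SCALE_SHIFT - shift;

	/* 999999999 ns, stored clock-shifted like timekeeper.xtime_nsec */
	uint64_t xtime_nsec = (uint64_t)999999999 << shift;

	/* converting clock-shifted ns to ntp-shifted ns is one shift... */
	uint64_t ntp_scaled = xtime_nsec << ntp_error_shift;

	/* ...identical to shifting the plain value by NTP_SCALE_SHIFT */
	printf("%d\n", ntp_scaled == (uint64_t)999999999 << NTP_SCALE_SHIFT);
	return 0;
}

Caching the difference also turns every (NTP_SCALE_SHIFT - clock->shift) expression into a single field read, which is exactly what the hunks below do.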
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/time/timekeeping.c	33
1 file changed, 19 insertions(+), 14 deletions(-)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 7af45cbf6b13..dfdab1cefe1e 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -23,6 +23,8 @@
 struct timekeeper {
 	/* Current clocksource used for timekeeping. */
 	struct clocksource *clock;
+	/* The shift value of the current clocksource. */
+	int	shift;
 
 	/* Number of clock cycles in one NTP interval. */
 	cycle_t cycle_interval;
@@ -36,6 +38,9 @@ struct timekeeper {
 	/* Difference between accumulated time and NTP time in ntp
 	 * shifted nano seconds. */
 	s64	ntp_error;
+	/* Shift conversion between clock shifted nano seconds and
+	 * ntp shifted nano seconds. */
+	int	ntp_error_shift;
 };
 
 struct timekeeper timekeeper;
@@ -75,8 +80,10 @@ static void timekeeper_setup_internals(struct clocksource *clock)
 		((u64) interval * clock->mult_orig) >> clock->shift;
 
 	timekeeper.xtime_nsec = 0;
+	timekeeper.shift = clock->shift;
 
 	timekeeper.ntp_error = 0;
+	timekeeper.ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
 }
 
 /*
@@ -641,8 +648,7 @@ static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
 	 * Now calculate the error in (1 << look_ahead) ticks, but first
 	 * remove the single look ahead already included in the error.
 	 */
-	tick_error = tick_length >>
-		(NTP_SCALE_SHIFT - timekeeper.clock->shift + 1);
+	tick_error = tick_length >> (timekeeper.ntp_error_shift + 1);
 	tick_error -= timekeeper.xtime_interval >> 1;
 	error = ((error - tick_error) >> look_ahead) + tick_error;
 
@@ -673,8 +679,7 @@ static void timekeeping_adjust(s64 offset)
 	s64 error, interval = timekeeper.cycle_interval;
 	int adj;
 
-	error = timekeeper.ntp_error >>
-		(NTP_SCALE_SHIFT - timekeeper.clock->shift - 1);
+	error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
 	if (error > interval) {
 		error >>= 2;
 		if (likely(error <= interval))
@@ -696,7 +701,7 @@ static void timekeeping_adjust(s64 offset)
 	timekeeper.xtime_interval += interval;
 	timekeeper.xtime_nsec -= offset;
 	timekeeper.ntp_error -= (interval - offset) <<
-				(NTP_SCALE_SHIFT - timekeeper.clock->shift);
+				timekeeper.ntp_error_shift;
 }
 
 /**
@@ -708,7 +713,7 @@ void update_wall_time(void)
 {
 	struct clocksource *clock;
 	cycle_t offset;
-	s64 nsecs;
+	u64 nsecs;
 
 	/* Make sure we're fully resumed: */
 	if (unlikely(timekeeping_suspended))
@@ -720,13 +725,13 @@ void update_wall_time(void)
 #else
 	offset = timekeeper.cycle_interval;
 #endif
-	timekeeper.xtime_nsec = (s64)xtime.tv_nsec << clock->shift;
+	timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;
 
 	/* normally this loop will run just once, however in the
 	 * case of lost or late ticks, it will accumulate correctly.
 	 */
 	while (offset >= timekeeper.cycle_interval) {
-		u64 nsecps = (u64)NSEC_PER_SEC << clock->shift;
+		u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
 
 		/* accumulate one interval */
 		offset -= timekeeper.cycle_interval;
@@ -748,7 +753,7 @@ void update_wall_time(void)
 		/* accumulate error between NTP and clock interval */
 		timekeeper.ntp_error += tick_length;
 		timekeeper.ntp_error -= timekeeper.xtime_interval <<
-				(NTP_SCALE_SHIFT - clock->shift);
+				timekeeper.ntp_error_shift;
 	}
 
 	/* correct the clock when NTP error is too big */
@@ -773,16 +778,16 @@ void update_wall_time(void)
 	if (unlikely((s64)timekeeper.xtime_nsec < 0)) {
 		s64 neg = -(s64)timekeeper.xtime_nsec;
 		timekeeper.xtime_nsec = 0;
-		timekeeper.ntp_error += neg << (NTP_SCALE_SHIFT - clock->shift);
+		timekeeper.ntp_error += neg << timekeeper.ntp_error_shift;
 	}
 
 	/* store full nanoseconds into xtime after rounding it up and
 	 * add the remainder to the error difference.
 	 */
-	xtime.tv_nsec = ((s64)timekeeper.xtime_nsec >> clock->shift) + 1;
-	timekeeper.xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
+	xtime.tv_nsec = ((s64) timekeeper.xtime_nsec >> timekeeper.shift) + 1;
+	timekeeper.xtime_nsec -= (s64) xtime.tv_nsec << timekeeper.shift;
 	timekeeper.ntp_error += timekeeper.xtime_nsec <<
-				(NTP_SCALE_SHIFT - clock->shift);
+				timekeeper.ntp_error_shift;
 
 	nsecs = clocksource_cyc2ns(offset, clock->mult, clock->shift);
 	update_xtime_cache(nsecs);
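The rounding step in the last hunk is easy to miss: xtime.tv_nsec is rounded up to the next full nanosecond, which drives the sub-nanosecond remainder in xtime_nsec negative, and that remainder is folded into ntp_error after rescaling by ntp_error_shift, so no time is lost or invented. A small self-contained illustration with made-up numbers (plain C, not kernel code; a multiplication stands in for the kernel's left shift of a signed value, which ISO C leaves undefined for negative operands):

#include <stdio.h>
#include <stdint.h>

#define NTP_SCALE_SHIFT 32		/* as in include/linux/timex.h */

int main(void)
{
	int shift = 22;			/* hypothetical clocksource shift */
	int ntp_error_shift = NTP_SCALE_SHIFT - shift;
	int64_t ntp_error = 0;

	/* accumulated clock-shifted nanoseconds plus a sub-ns remainder */
	int64_t xtime_nsec = ((int64_t)123456789 << shift) + 12345;

	/* round up to full nanoseconds, as update_wall_time() does */
	long tv_nsec = (long)(xtime_nsec >> shift) + 1;

	/* the remainder goes negative and is carried into the NTP error,
	 * rescaled from clock-shifted to ntp-shifted units */
	xtime_nsec -= (int64_t)tv_nsec << shift;
	ntp_error += xtime_nsec * ((int64_t)1 << ntp_error_shift);

	printf("tv_nsec=%ld carried error=%lld\n",
	       tv_nsec, (long long)ntp_error);
	return 0;
}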