author	John Stultz <john.stultz@linaro.org>	2012-07-13 01:21:57 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2012-07-15 04:39:07 -0400
commit	f726a697d06102e7a1fc0a87308cb30a84580205 (patch)
tree	1c70d5187ecb27a0b5e0ae2882d978c8c943211d
parent	2a8c0883c3cfffcc148ea606e2a4e7453cd75e73 (diff)
time: Rework timekeeping functions to take timekeeper ptr as argument
As part of cleaning up the timekeeping code, this patch converts a number of internal functions to take a timekeeper ptr as an argument, so that the internal functions don't access the global timekeeper structure directly. This allows for further optimizations to reduce lock hold time later.

This patch has been updated to include more consistent usage of the timekeeper value, by making sure it is always passed as an argument to non top-level functions.

Signed-off-by: John Stultz <john.stultz@linaro.org>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Richard Cochran <richardcochran@gmail.com>
Cc: Prarit Bhargava <prarit@redhat.com>
Link: http://lkml.kernel.org/r/1342156917-25092-9-git-send-email-john.stultz@linaro.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--	kernel/time/timekeeping.c	208
1 file changed, 103 insertions, 105 deletions
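The pattern the changelog describes — internal helpers taking a struct timekeeper pointer instead of dereferencing the global instance — can be illustrated with a minimal, self-contained C sketch. The struct fields and helper names below are simplified stand-ins, not the kernel's actual definitions; this only shows the shape of the conversion.

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's struct timekeeper. */
struct timekeeper {
	uint64_t xtime_nsec;	/* shifted nanoseconds */
	uint32_t shift;
};

static struct timekeeper timekeeper;	/* the single global instance */

/* Before: the helper reaches into the global directly. */
static int64_t get_ns_old(void)
{
	return (int64_t)(timekeeper.xtime_nsec >> timekeeper.shift);
}

/* After: the helper operates on whatever timekeeper it is handed. */
static int64_t get_ns_new(struct timekeeper *tk)
{
	return (int64_t)(tk->xtime_nsec >> tk->shift);
}

int main(void)
{
	timekeeper.xtime_nsec = 123456789ULL << 10;
	timekeeper.shift = 10;

	/* Top-level callers still name the global, but now pass its
	 * address down, so helpers no longer care where the state lives. */
	printf("old: %lld\n", (long long)get_ns_old());
	printf("new: %lld\n", (long long)get_ns_new(&timekeeper));
	return 0;
}
```

Because only the top-level entry points reference the global after this change, later work can update a private copy and publish it at the end, shrinking the window in which timekeeper.lock must be held — the "further optimizations" the changelog alludes to.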
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index aeeaab8cba6e..5980e902978c 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -127,14 +127,14 @@ static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
  *
  * Unless you're the timekeeping code, you should not be using this!
  */
-static void timekeeper_setup_internals(struct clocksource *clock)
+static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 {
 	cycle_t interval;
 	u64 tmp, ntpinterval;
 	struct clocksource *old_clock;

-	old_clock = timekeeper.clock;
-	timekeeper.clock = clock;
+	old_clock = tk->clock;
+	tk->clock = clock;
 	clock->cycle_last = clock->read(clock);

 	/* Do the ns -> cycle conversion first, using original mult */
@@ -147,64 +147,64 @@ static void timekeeper_setup_internals(struct clocksource *clock)
 	tmp = 1;

 	interval = (cycle_t) tmp;
-	timekeeper.cycle_interval = interval;
+	tk->cycle_interval = interval;

 	/* Go back from cycles -> shifted ns */
-	timekeeper.xtime_interval = (u64) interval * clock->mult;
-	timekeeper.xtime_remainder = ntpinterval - timekeeper.xtime_interval;
-	timekeeper.raw_interval =
+	tk->xtime_interval = (u64) interval * clock->mult;
+	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
+	tk->raw_interval =
 		((u64) interval * clock->mult) >> clock->shift;

 	/* if changing clocks, convert xtime_nsec shift units */
 	if (old_clock) {
 		int shift_change = clock->shift - old_clock->shift;
 		if (shift_change < 0)
-			timekeeper.xtime_nsec >>= -shift_change;
+			tk->xtime_nsec >>= -shift_change;
 		else
-			timekeeper.xtime_nsec <<= shift_change;
+			tk->xtime_nsec <<= shift_change;
 	}
-	timekeeper.shift = clock->shift;
+	tk->shift = clock->shift;

-	timekeeper.ntp_error = 0;
-	timekeeper.ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
+	tk->ntp_error = 0;
+	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

 	/*
 	 * The timekeeper keeps its own mult values for the currently
 	 * active clocksource. These value will be adjusted via NTP
 	 * to counteract clock drifting.
 	 */
-	timekeeper.mult = clock->mult;
+	tk->mult = clock->mult;
 }

 /* Timekeeper helper functions. */
-static inline s64 timekeeping_get_ns(void)
+static inline s64 timekeeping_get_ns(struct timekeeper *tk)
 {
 	cycle_t cycle_now, cycle_delta;
 	struct clocksource *clock;
 	s64 nsec;

 	/* read clocksource: */
-	clock = timekeeper.clock;
+	clock = tk->clock;
 	cycle_now = clock->read(clock);

 	/* calculate the delta since the last update_wall_time: */
 	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

-	nsec = cycle_delta * timekeeper.mult + timekeeper.xtime_nsec;
-	nsec >>= timekeeper.shift;
+	nsec = cycle_delta * tk->mult + tk->xtime_nsec;
+	nsec >>= tk->shift;

 	/* If arch requires, add in gettimeoffset() */
 	return nsec + arch_gettimeoffset();
 }

-static inline s64 timekeeping_get_ns_raw(void)
+static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
 {
 	cycle_t cycle_now, cycle_delta;
 	struct clocksource *clock;
 	s64 nsec;

 	/* read clocksource: */
-	clock = timekeeper.clock;
+	clock = tk->clock;
 	cycle_now = clock->read(clock);

 	/* calculate the delta since the last update_wall_time: */
@@ -217,27 +217,26 @@ static inline s64 timekeeping_get_ns_raw(void)
 	return nsec + arch_gettimeoffset();
 }

-static void update_rt_offset(void)
+static void update_rt_offset(struct timekeeper *tk)
 {
-	struct timespec tmp, *wtm = &timekeeper.wall_to_monotonic;
+	struct timespec tmp, *wtm = &tk->wall_to_monotonic;

 	set_normalized_timespec(&tmp, -wtm->tv_sec, -wtm->tv_nsec);
-	timekeeper.offs_real = timespec_to_ktime(tmp);
+	tk->offs_real = timespec_to_ktime(tmp);
 }

 /* must hold write on timekeeper.lock */
-static void timekeeping_update(bool clearntp)
+static void timekeeping_update(struct timekeeper *tk, bool clearntp)
 {
 	struct timespec xt;

 	if (clearntp) {
-		timekeeper.ntp_error = 0;
+		tk->ntp_error = 0;
 		ntp_clear();
 	}
-	update_rt_offset();
-	xt = tk_xtime(&timekeeper);
-	update_vsyscall(&xt, &timekeeper.wall_to_monotonic,
-			timekeeper.clock, timekeeper.mult);
+	update_rt_offset(tk);
+	xt = tk_xtime(tk);
+	update_vsyscall(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult);
 }


@@ -248,26 +247,26 @@ static void timekeeping_update(bool clearntp)
  * update_wall_time(). This is useful before significant clock changes,
  * as it avoids having to deal with this time offset explicitly.
  */
-static void timekeeping_forward_now(void)
+static void timekeeping_forward_now(struct timekeeper *tk)
 {
 	cycle_t cycle_now, cycle_delta;
 	struct clocksource *clock;
 	s64 nsec;

-	clock = timekeeper.clock;
+	clock = tk->clock;
 	cycle_now = clock->read(clock);
 	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
 	clock->cycle_last = cycle_now;

-	timekeeper.xtime_nsec += cycle_delta * timekeeper.mult;
+	tk->xtime_nsec += cycle_delta * tk->mult;

 	/* If arch requires, add in gettimeoffset() */
-	timekeeper.xtime_nsec += arch_gettimeoffset() << timekeeper.shift;
+	tk->xtime_nsec += arch_gettimeoffset() << tk->shift;

-	tk_normalize_xtime(&timekeeper);
+	tk_normalize_xtime(tk);

 	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
-	timespec_add_ns(&timekeeper.raw_time, nsec);
+	timespec_add_ns(&tk->raw_time, nsec);
 }

 /**
@@ -287,7 +286,7 @@ void getnstimeofday(struct timespec *ts)
 		seq = read_seqbegin(&timekeeper.lock);

 		ts->tv_sec = timekeeper.xtime_sec;
-		ts->tv_nsec = timekeeping_get_ns();
+		ts->tv_nsec = timekeeping_get_ns(&timekeeper);

 	} while (read_seqretry(&timekeeper.lock, seq));

@@ -306,7 +305,7 @@ ktime_t ktime_get(void)
 		seq = read_seqbegin(&timekeeper.lock);
 		secs = timekeeper.xtime_sec +
 				timekeeper.wall_to_monotonic.tv_sec;
-		nsecs = timekeeping_get_ns() +
+		nsecs = timekeeping_get_ns(&timekeeper) +
 				timekeeper.wall_to_monotonic.tv_nsec;

 	} while (read_seqretry(&timekeeper.lock, seq));
@@ -336,7 +335,7 @@ void ktime_get_ts(struct timespec *ts)
 	do {
 		seq = read_seqbegin(&timekeeper.lock);
 		ts->tv_sec = timekeeper.xtime_sec;
-		ts->tv_nsec = timekeeping_get_ns();
+		ts->tv_nsec = timekeeping_get_ns(&timekeeper);
 		tomono = timekeeper.wall_to_monotonic;

 	} while (read_seqretry(&timekeeper.lock, seq));
@@ -371,8 +370,8 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
 		ts_real->tv_sec = timekeeper.xtime_sec;
 		ts_real->tv_nsec = 0;

-		nsecs_raw = timekeeping_get_ns_raw();
-		nsecs_real = timekeeping_get_ns();
+		nsecs_raw = timekeeping_get_ns_raw(&timekeeper);
+		nsecs_real = timekeeping_get_ns(&timekeeper);

 	} while (read_seqretry(&timekeeper.lock, seq));

@@ -415,7 +414,7 @@ int do_settimeofday(const struct timespec *tv)

 	write_seqlock_irqsave(&timekeeper.lock, flags);

-	timekeeping_forward_now();
+	timekeeping_forward_now(&timekeeper);

 	xt = tk_xtime(&timekeeper);
 	ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
@@ -426,7 +425,7 @@ int do_settimeofday(const struct timespec *tv)

 	tk_set_xtime(&timekeeper, tv);

-	timekeeping_update(true);
+	timekeeping_update(&timekeeper, true);

 	write_sequnlock_irqrestore(&timekeeper.lock, flags);

@@ -453,14 +452,14 @@ int timekeeping_inject_offset(struct timespec *ts)

 	write_seqlock_irqsave(&timekeeper.lock, flags);

-	timekeeping_forward_now();
+	timekeeping_forward_now(&timekeeper);


 	tk_xtime_add(&timekeeper, ts);
 	timekeeper.wall_to_monotonic =
 			timespec_sub(timekeeper.wall_to_monotonic, *ts);

-	timekeeping_update(true);
+	timekeeping_update(&timekeeper, true);

 	write_sequnlock_irqrestore(&timekeeper.lock, flags);

@@ -485,14 +484,14 @@ static int change_clocksource(void *data)

 	write_seqlock_irqsave(&timekeeper.lock, flags);

-	timekeeping_forward_now();
+	timekeeping_forward_now(&timekeeper);
 	if (!new->enable || new->enable(new) == 0) {
 		old = timekeeper.clock;
-		timekeeper_setup_internals(new);
+		tk_setup_internals(&timekeeper, new);
 		if (old->disable)
 			old->disable(old);
 	}
-	timekeeping_update(true);
+	timekeeping_update(&timekeeper, true);

 	write_sequnlock_irqrestore(&timekeeper.lock, flags);

@@ -542,7 +541,7 @@ void getrawmonotonic(struct timespec *ts)

 	do {
 		seq = read_seqbegin(&timekeeper.lock);
-		nsecs = timekeeping_get_ns_raw();
+		nsecs = timekeeping_get_ns_raw(&timekeeper);
 		*ts = timekeeper.raw_time;

 	} while (read_seqretry(&timekeeper.lock, seq));
@@ -638,7 +637,7 @@ void __init timekeeping_init(void)
 	clock = clocksource_default_clock();
 	if (clock->enable)
 		clock->enable(clock);
-	timekeeper_setup_internals(clock);
+	tk_setup_internals(&timekeeper, clock);

 	tk_set_xtime(&timekeeper, &now);
 	timekeeper.raw_time.tv_sec = 0;
@@ -648,7 +647,7 @@ void __init timekeeping_init(void)

 	set_normalized_timespec(&timekeeper.wall_to_monotonic,
 				-boot.tv_sec, -boot.tv_nsec);
-	update_rt_offset();
+	update_rt_offset(&timekeeper);
 	timekeeper.total_sleep_time.tv_sec = 0;
 	timekeeper.total_sleep_time.tv_nsec = 0;
 	write_sequnlock_irqrestore(&timekeeper.lock, flags);
@@ -670,7 +669,8 @@ static void update_sleep_time(struct timespec t)
  * Takes a timespec offset measuring a suspend interval and properly
  * adds the sleep offset to the timekeeping variables.
  */
-static void __timekeeping_inject_sleeptime(struct timespec *delta)
+static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
+						struct timespec *delta)
 {
 	if (!timespec_valid(delta)) {
 		printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
@@ -678,10 +678,9 @@ static void __timekeeping_inject_sleeptime(struct timespec *delta)
 		return;
 	}

-	tk_xtime_add(&timekeeper, delta);
-	timekeeper.wall_to_monotonic =
-			timespec_sub(timekeeper.wall_to_monotonic, *delta);
-	update_sleep_time(timespec_add(timekeeper.total_sleep_time, *delta));
+	tk_xtime_add(tk, delta);
+	tk->wall_to_monotonic = timespec_sub(tk->wall_to_monotonic, *delta);
+	update_sleep_time(timespec_add(tk->total_sleep_time, *delta));
 }


@@ -707,11 +706,11 @@ void timekeeping_inject_sleeptime(struct timespec *delta)

 	write_seqlock_irqsave(&timekeeper.lock, flags);

-	timekeeping_forward_now();
+	timekeeping_forward_now(&timekeeper);

-	__timekeeping_inject_sleeptime(delta);
+	__timekeeping_inject_sleeptime(&timekeeper, delta);

-	timekeeping_update(true);
+	timekeeping_update(&timekeeper, true);

 	write_sequnlock_irqrestore(&timekeeper.lock, flags);

@@ -740,7 +739,7 @@ static void timekeeping_resume(void)

 	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
 		ts = timespec_sub(ts, timekeeping_suspend_time);
-		__timekeeping_inject_sleeptime(&ts);
+		__timekeeping_inject_sleeptime(&timekeeper, &ts);
 	}
 	/* re-base the last cycle value */
 	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
@@ -765,7 +764,7 @@ static int timekeeping_suspend(void)
 	read_persistent_clock(&timekeeping_suspend_time);

 	write_seqlock_irqsave(&timekeeper.lock, flags);
-	timekeeping_forward_now();
+	timekeeping_forward_now(&timekeeper);
 	timekeeping_suspended = 1;

 	/*
@@ -813,7 +812,8 @@ device_initcall(timekeeping_init_ops);
  * If the error is already larger, we look ahead even further
  * to compensate for late or lost adjustments.
  */
-static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
+static __always_inline int timekeeping_bigadjust(struct timekeeper *tk,
+						 s64 error, s64 *interval,
 						 s64 *offset)
 {
 	s64 tick_error, i;
@@ -829,7 +829,7 @@ static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
 	 * here. This is tuned so that an error of about 1 msec is adjusted
 	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
 	 */
-	error2 = timekeeper.ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
+	error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
 	error2 = abs(error2);
 	for (look_ahead = 0; error2 > 0; look_ahead++)
 		error2 >>= 2;
@@ -838,8 +838,8 @@ static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
 	 * Now calculate the error in (1 << look_ahead) ticks, but first
 	 * remove the single look ahead already included in the error.
 	 */
-	tick_error = ntp_tick_length() >> (timekeeper.ntp_error_shift + 1);
-	tick_error -= timekeeper.xtime_interval >> 1;
+	tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1);
+	tick_error -= tk->xtime_interval >> 1;
 	error = ((error - tick_error) >> look_ahead) + tick_error;

 	/* Finally calculate the adjustment shift value. */
@@ -864,9 +864,9 @@ static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
  * this is optimized for the most common adjustments of -1,0,1,
  * for other values we can do a bit more work.
  */
-static void timekeeping_adjust(s64 offset)
+static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
 {
-	s64 error, interval = timekeeper.cycle_interval;
+	s64 error, interval = tk->cycle_interval;
 	int adj;

 	/*
@@ -882,7 +882,7 @@ static void timekeeping_adjust(s64 offset)
 	 *
 	 * Note: It does not "save" on aggravation when reading the code.
 	 */
-	error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
+	error = tk->ntp_error >> (tk->ntp_error_shift - 1);
 	if (error > interval) {
 		/*
 		 * We now divide error by 4(via shift), which checks if
@@ -904,7 +904,8 @@ static void timekeeping_adjust(s64 offset)
 		if (likely(error <= interval))
 			adj = 1;
 		else
-			adj = timekeeping_bigadjust(error, &interval, &offset);
+			adj = timekeeping_bigadjust(tk, error, &interval,
+						    &offset);
 	} else if (error < -interval) {
 		/* See comment above, this is just switched for the negative */
 		error >>= 2;
@@ -913,18 +914,17 @@ static void timekeeping_adjust(s64 offset)
 			interval = -interval;
 			offset = -offset;
 		} else
-			adj = timekeeping_bigadjust(error, &interval, &offset);
-	} else /* No adjustment needed */
+			adj = timekeeping_bigadjust(tk, error, &interval,
+						    &offset);
+	} else
 		return;

-	if (unlikely(timekeeper.clock->maxadj &&
-		(timekeeper.mult + adj >
-		timekeeper.clock->mult + timekeeper.clock->maxadj))) {
+	if (unlikely(tk->clock->maxadj &&
+		(tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
 		printk_once(KERN_WARNING
 			"Adjusting %s more than 11%% (%ld vs %ld)\n",
-			timekeeper.clock->name, (long)timekeeper.mult + adj,
-			(long)timekeeper.clock->mult +
-					timekeeper.clock->maxadj);
+			tk->clock->name, (long)tk->mult + adj,
+			(long)tk->clock->mult + tk->clock->maxadj);
 	}
 	/*
 	 * So the following can be confusing.
@@ -975,11 +975,10 @@ static void timekeeping_adjust(s64 offset)
 	 *
 	 * XXX - TODO: Doc ntp_error calculation.
 	 */
-	timekeeper.mult += adj;
-	timekeeper.xtime_interval += interval;
-	timekeeper.xtime_nsec -= offset;
-	timekeeper.ntp_error -= (interval - offset) <<
-				timekeeper.ntp_error_shift;
+	tk->mult += adj;
+	tk->xtime_interval += interval;
+	tk->xtime_nsec -= offset;
+	tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;

 	/*
 	 * It may be possible that when we entered this function, xtime_nsec
@@ -995,10 +994,10 @@ static void timekeeping_adjust(s64 offset)
 	 * We'll correct this error next time through this function, when
 	 * xtime_nsec is not as small.
 	 */
-	if (unlikely((s64)timekeeper.xtime_nsec < 0)) {
-		s64 neg = -(s64)timekeeper.xtime_nsec;
-		timekeeper.xtime_nsec = 0;
-		timekeeper.ntp_error += neg << timekeeper.ntp_error_shift;
+	if (unlikely((s64)tk->xtime_nsec < 0)) {
+		s64 neg = -(s64)tk->xtime_nsec;
+		tk->xtime_nsec = 0;
+		tk->ntp_error += neg << tk->ntp_error_shift;
 	}

 }
@@ -1042,37 +1041,36 @@ static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
  *
  * Returns the unconsumed cycles.
  */
-static cycle_t logarithmic_accumulation(cycle_t offset, u32 shift)
+static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
+						u32 shift)
 {
 	u64 raw_nsecs;

 	/* If the offset is smaller than a shifted interval, do nothing */
-	if (offset < timekeeper.cycle_interval<<shift)
+	if (offset < tk->cycle_interval<<shift)
 		return offset;

 	/* Accumulate one shifted interval */
-	offset -= timekeeper.cycle_interval << shift;
-	timekeeper.clock->cycle_last += timekeeper.cycle_interval << shift;
+	offset -= tk->cycle_interval << shift;
+	tk->clock->cycle_last += tk->cycle_interval << shift;

-	timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
-
-	accumulate_nsecs_to_secs(&timekeeper);
+	tk->xtime_nsec += tk->xtime_interval << shift;
+	accumulate_nsecs_to_secs(tk);

 	/* Accumulate raw time */
-	raw_nsecs = timekeeper.raw_interval << shift;
-	raw_nsecs += timekeeper.raw_time.tv_nsec;
+	raw_nsecs = tk->raw_interval << shift;
+	raw_nsecs += tk->raw_time.tv_nsec;
 	if (raw_nsecs >= NSEC_PER_SEC) {
 		u64 raw_secs = raw_nsecs;
 		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
-		timekeeper.raw_time.tv_sec += raw_secs;
+		tk->raw_time.tv_sec += raw_secs;
 	}
-	timekeeper.raw_time.tv_nsec = raw_nsecs;
+	tk->raw_time.tv_nsec = raw_nsecs;

 	/* Accumulate error between NTP and clock interval */
-	timekeeper.ntp_error += ntp_tick_length() << shift;
-	timekeeper.ntp_error -=
-		(timekeeper.xtime_interval + timekeeper.xtime_remainder) <<
-				(timekeeper.ntp_error_shift + shift);
+	tk->ntp_error += ntp_tick_length() << shift;
+	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
+					(tk->ntp_error_shift + shift);

 	return offset;
 }
@@ -1118,13 +1116,13 @@ static void update_wall_time(void)
 	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
 	shift = min(shift, maxshift);
 	while (offset >= timekeeper.cycle_interval) {
-		offset = logarithmic_accumulation(offset, shift);
+		offset = logarithmic_accumulation(&timekeeper, offset, shift);
 		if(offset < timekeeper.cycle_interval<<shift)
 			shift--;
 	}

 	/* correct the clock when NTP error is too big */
-	timekeeping_adjust(offset);
+	timekeeping_adjust(&timekeeper, offset);


 	/*
@@ -1147,7 +1145,7 @@ static void update_wall_time(void)
 	 */
 	accumulate_nsecs_to_secs(&timekeeper);

-	timekeeping_update(false);
+	timekeeping_update(&timekeeper, false);

 out:
 	write_sequnlock_irqrestore(&timekeeper.lock, flags);
@@ -1198,7 +1196,7 @@ void get_monotonic_boottime(struct timespec *ts)
 	do {
 		seq = read_seqbegin(&timekeeper.lock);
 		ts->tv_sec = timekeeper.xtime_sec;
-		ts->tv_nsec = timekeeping_get_ns();
+		ts->tv_nsec = timekeeping_get_ns(&timekeeper);
 		tomono = timekeeper.wall_to_monotonic;
 		sleep = timekeeper.total_sleep_time;

@@ -1330,7 +1328,7 @@ ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
 		seq = read_seqbegin(&timekeeper.lock);

 		secs = timekeeper.xtime_sec;
-		nsecs = timekeeping_get_ns();
+		nsecs = timekeeping_get_ns(&timekeeper);

 		*offs_real = timekeeper.offs_real;
 		*offs_boot = timekeeper.offs_boot;