Diffstat (limited to 'kernel/time/timekeeping.c')
-rw-r--r--  kernel/time/timekeeping.c | 490
1 file changed, 325 insertions(+), 165 deletions(-)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 91db94136c10..946acb72179f 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -59,17 +59,15 @@ struct tk_fast {
 };
 
 static struct tk_fast tk_fast_mono ____cacheline_aligned;
+static struct tk_fast tk_fast_raw ____cacheline_aligned;
 
 /* flag for if timekeeping is suspended */
 int __read_mostly timekeeping_suspended;
 
-/* Flag for if there is a persistent clock on this platform */
-bool __read_mostly persistent_clock_exist = false;
-
 static inline void tk_normalize_xtime(struct timekeeper *tk)
 {
-        while (tk->tkr.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr.shift)) {
-                tk->tkr.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr.shift;
+        while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
+                tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
                 tk->xtime_sec++;
         }
 }
@@ -79,20 +77,20 @@ static inline struct timespec64 tk_xtime(struct timekeeper *tk)
         struct timespec64 ts;
 
         ts.tv_sec = tk->xtime_sec;
-        ts.tv_nsec = (long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
+        ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
         return ts;
 }
 
 static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
 {
         tk->xtime_sec = ts->tv_sec;
-        tk->tkr.xtime_nsec = (u64)ts->tv_nsec << tk->tkr.shift;
+        tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
 }
 
 static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
 {
         tk->xtime_sec += ts->tv_sec;
-        tk->tkr.xtime_nsec += (u64)ts->tv_nsec << tk->tkr.shift;
+        tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
         tk_normalize_xtime(tk);
 }
 
@@ -118,6 +116,117 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
         tk->offs_boot = ktime_add(tk->offs_boot, delta);
 }
 
+#ifdef CONFIG_DEBUG_TIMEKEEPING
+#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
+/*
+ * These simple flag variables are managed
+ * without locks, which is racy, but ok since
+ * we don't really care about being super
+ * precise about how many events were seen,
+ * just that a problem was observed.
+ */
+static int timekeeping_underflow_seen;
+static int timekeeping_overflow_seen;
+
+/* last_warning is only modified under the timekeeping lock */
+static long timekeeping_last_warning;
+
+static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
+{
+
+        cycle_t max_cycles = tk->tkr_mono.clock->max_cycles;
+        const char *name = tk->tkr_mono.clock->name;
+
+        if (offset > max_cycles) {
+                printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
+                                offset, name, max_cycles);
+                printk_deferred("         timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
+        } else {
+                if (offset > (max_cycles >> 1)) {
+                        printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
+                                        offset, name, max_cycles >> 1);
+                        printk_deferred("      timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
+                }
+        }
+
+        if (timekeeping_underflow_seen) {
+                if (jiffies - timekeeping_last_warning > WARNING_FREQ) {
+                        printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
+                        printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
+                        printk_deferred("         Your kernel is probably still fine.\n");
+                        timekeeping_last_warning = jiffies;
+                }
+                timekeeping_underflow_seen = 0;
+        }
+
+        if (timekeeping_overflow_seen) {
+                if (jiffies - timekeeping_last_warning > WARNING_FREQ) {
+                        printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
+                        printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
+                        printk_deferred("         Your kernel is probably still fine.\n");
+                        timekeeping_last_warning = jiffies;
+                }
+                timekeeping_overflow_seen = 0;
+        }
+}
+
+static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
+{
+        cycle_t now, last, mask, max, delta;
+        unsigned int seq;
+
+        /*
+         * Since we're called holding a seqlock, the data may shift
+         * under us while we're doing the calculation. This can cause
+         * false positives, since we'd note a problem but throw the
+         * results away. So nest another seqlock here to atomically
+         * grab the points we are checking with.
+         */
+        do {
+                seq = read_seqcount_begin(&tk_core.seq);
+                now = tkr->read(tkr->clock);
+                last = tkr->cycle_last;
+                mask = tkr->mask;
+                max = tkr->clock->max_cycles;
+        } while (read_seqcount_retry(&tk_core.seq, seq));
+
+        delta = clocksource_delta(now, last, mask);
+
+        /*
+         * Try to catch underflows by checking if we are seeing small
+         * mask-relative negative values.
+         */
+        if (unlikely((~delta & mask) < (mask >> 3))) {
+                timekeeping_underflow_seen = 1;
+                delta = 0;
+        }
+
+        /* Cap delta value to the max_cycles values to avoid mult overflows */
+        if (unlikely(delta > max)) {
+                timekeeping_overflow_seen = 1;
+                delta = tkr->clock->max_cycles;
+        }
+
+        return delta;
+}
+#else
+static inline void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
+{
+}
+static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
+{
+        cycle_t cycle_now, delta;
+
+        /* read clocksource */
+        cycle_now = tkr->read(tkr->clock);
+
+        /* calculate the delta since the last update_wall_time */
+        delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
+
+        return delta;
+}
+#endif
+
 /**
  * tk_setup_internals - Set up internals to use clocksource clock.
  *
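The underflow check above relies on wrap-around arithmetic: a delta that is really a small negative interval wraps to just under the counter mask, so (~delta & mask) becomes tiny. A minimal user-space sketch of the heuristic, with a hypothetical 16-bit mask and invented sample readings:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t mask = 0xffff;                /* hypothetical 16-bit counter */
            uint64_t last = 0x0100, now = 0x00f0;  /* 'now' raced behind 'last' */
            uint64_t delta = (now - last) & mask;  /* what clocksource_delta() yields */

            /* -0x10 wraps to 0xfff0, so ~delta & mask is tiny; mask >> 3
             * treats the top eighth of the range as "suspiciously negative". */
            if ((~delta & mask) < (mask >> 3))
                    printf("underflow suspected: delta=%#llx\n",
                           (unsigned long long)delta);
            return 0;
    }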
@@ -135,11 +244,16 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
         u64 tmp, ntpinterval;
         struct clocksource *old_clock;
 
-        old_clock = tk->tkr.clock;
-        tk->tkr.clock = clock;
-        tk->tkr.read = clock->read;
-        tk->tkr.mask = clock->mask;
-        tk->tkr.cycle_last = tk->tkr.read(clock);
+        old_clock = tk->tkr_mono.clock;
+        tk->tkr_mono.clock = clock;
+        tk->tkr_mono.read = clock->read;
+        tk->tkr_mono.mask = clock->mask;
+        tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);
+
+        tk->tkr_raw.clock = clock;
+        tk->tkr_raw.read = clock->read;
+        tk->tkr_raw.mask = clock->mask;
+        tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
 
         /* Do the ns -> cycle conversion first, using original mult */
         tmp = NTP_INTERVAL_LENGTH;
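Both read bases inherit the clocksource's mult/shift pair; cycles are converted to nanoseconds as ns = (cycles * mult) >> shift. A stand-alone sketch with a hypothetical 10 MHz counter (100 ns per cycle), not values taken from any real clocksource:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t shift = 24;                         /* fixed-point scale */
            uint32_t mult = (uint32_t)(100ULL << shift); /* 100 ns per cycle */
            uint64_t delta = 12345;                      /* elapsed cycles */

            /* clocksource_cyc2ns(): ns = (delta * mult) >> shift */
            printf("%llu ns\n", (unsigned long long)((delta * mult) >> shift));
            return 0;
    }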
@@ -163,11 +277,14 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
         if (old_clock) {
                 int shift_change = clock->shift - old_clock->shift;
                 if (shift_change < 0)
-                        tk->tkr.xtime_nsec >>= -shift_change;
+                        tk->tkr_mono.xtime_nsec >>= -shift_change;
                 else
-                        tk->tkr.xtime_nsec <<= shift_change;
+                        tk->tkr_mono.xtime_nsec <<= shift_change;
         }
-        tk->tkr.shift = clock->shift;
+        tk->tkr_raw.xtime_nsec = 0;
+
+        tk->tkr_mono.shift = clock->shift;
+        tk->tkr_raw.shift = clock->shift;
 
         tk->ntp_error = 0;
         tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
@@ -178,7 +295,8 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
          * active clocksource. These values will be adjusted via NTP
          * to counteract clock drifting.
          */
-        tk->tkr.mult = clock->mult;
+        tk->tkr_mono.mult = clock->mult;
+        tk->tkr_raw.mult = clock->mult;
         tk->ntp_err_mult = 0;
 }
 
@@ -193,14 +311,10 @@ static inline u32 arch_gettimeoffset(void) { return 0; }
 
 static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
 {
-        cycle_t cycle_now, delta;
+        cycle_t delta;
         s64 nsec;
 
-        /* read clocksource: */
-        cycle_now = tkr->read(tkr->clock);
-
-        /* calculate the delta since the last update_wall_time: */
-        delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
+        delta = timekeeping_get_delta(tkr);
 
         nsec = delta * tkr->mult + tkr->xtime_nsec;
         nsec >>= tkr->shift;
@@ -209,25 +323,6 @@ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
         return nsec + arch_gettimeoffset();
 }
 
-static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
-{
-        struct clocksource *clock = tk->tkr.clock;
-        cycle_t cycle_now, delta;
-        s64 nsec;
-
-        /* read clocksource: */
-        cycle_now = tk->tkr.read(clock);
-
-        /* calculate the delta since the last update_wall_time: */
-        delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
-
-        /* convert delta to nanoseconds. */
-        nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
-
-        /* If arch requires, add in get_arch_timeoffset() */
-        return nsec + arch_gettimeoffset();
-}
-
 /**
  * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
  * @tkr: Timekeeping readout base from which we take the update
@@ -267,18 +362,18 @@ static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
  * slightly wrong timestamp (a few nanoseconds). See
  * @ktime_get_mono_fast_ns.
  */
-static void update_fast_timekeeper(struct tk_read_base *tkr)
+static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf)
 {
-        struct tk_read_base *base = tk_fast_mono.base;
+        struct tk_read_base *base = tkf->base;
 
         /* Force readers off to base[1] */
-        raw_write_seqcount_latch(&tk_fast_mono.seq);
+        raw_write_seqcount_latch(&tkf->seq);
 
         /* Update base[0] */
         memcpy(base, tkr, sizeof(*base));
 
         /* Force readers back to base[0] */
-        raw_write_seqcount_latch(&tk_fast_mono.seq);
+        raw_write_seqcount_latch(&tkf->seq);
 
         /* Update base[1] */
         memcpy(base + 1, base, sizeof(*base));
@@ -316,22 +411,33 @@ static void update_fast_timekeeper(struct tk_read_base *tkr)
  * of the following timestamps. Callers need to be aware of that and
  * deal with it.
  */
-u64 notrace ktime_get_mono_fast_ns(void)
+static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 {
         struct tk_read_base *tkr;
         unsigned int seq;
         u64 now;
 
         do {
-                seq = raw_read_seqcount(&tk_fast_mono.seq);
-                tkr = tk_fast_mono.base + (seq & 0x01);
-                now = ktime_to_ns(tkr->base_mono) + timekeeping_get_ns(tkr);
+                seq = raw_read_seqcount(&tkf->seq);
+                tkr = tkf->base + (seq & 0x01);
+                now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
+        } while (read_seqcount_retry(&tkf->seq, seq));
 
-        } while (read_seqcount_retry(&tk_fast_mono.seq, seq));
         return now;
 }
+
+u64 ktime_get_mono_fast_ns(void)
+{
+        return __ktime_get_fast_ns(&tk_fast_mono);
+}
 EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
 
+u64 ktime_get_raw_fast_ns(void)
+{
+        return __ktime_get_fast_ns(&tk_fast_raw);
+}
+EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
+
 /* Suspend-time cycles value for halted fast timekeeper. */
 static cycle_t cycles_at_suspend;
 
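The reader side pairs with the two-copy latch in update_fast_timekeeper(): the writer bumps the sequence count before touching base[0] and again afterwards, so a racing NMI-safe reader is always steered to whichever copy is not being written. A toy user-space model of the scheme, ignoring the memory-barrier details that the kernel's raw_write_seqcount_latch() handles:

    #include <stdatomic.h>
    #include <string.h>

    struct tkr { unsigned long long base_ns; };

    static _Atomic unsigned int seq;
    static struct tkr base[2];

    static void writer_update(const struct tkr *src)
    {
            atomic_fetch_add(&seq, 1);       /* odd: readers use base[1] */
            memcpy(&base[0], src, sizeof(base[0]));
            atomic_fetch_add(&seq, 1);       /* even: readers use base[0] */
            memcpy(&base[1], &base[0], sizeof(base[0]));
    }

    static unsigned long long reader_read(void)
    {
            unsigned int s;
            unsigned long long v;

            do {
                    s = atomic_load(&seq);
                    v = base[s & 1].base_ns; /* pick the stable copy */
            } while (s != atomic_load(&seq));
            return v;
    }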
@@ -353,12 +459,17 @@ static cycle_t dummy_clock_read(struct clocksource *cs)
 static void halt_fast_timekeeper(struct timekeeper *tk)
 {
         static struct tk_read_base tkr_dummy;
-        struct tk_read_base *tkr = &tk->tkr;
+        struct tk_read_base *tkr = &tk->tkr_mono;
 
         memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
         cycles_at_suspend = tkr->read(tkr->clock);
         tkr_dummy.read = dummy_clock_read;
-        update_fast_timekeeper(&tkr_dummy);
+        update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
+
+        tkr = &tk->tkr_raw;
+        memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
+        tkr_dummy.read = dummy_clock_read;
+        update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
 }
 
 #ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
@@ -369,8 +480,8 @@ static inline void update_vsyscall(struct timekeeper *tk)
 
         xt = timespec64_to_timespec(tk_xtime(tk));
         wm = timespec64_to_timespec(tk->wall_to_monotonic);
-        update_vsyscall_old(&xt, &wm, tk->tkr.clock, tk->tkr.mult,
-                            tk->tkr.cycle_last);
+        update_vsyscall_old(&xt, &wm, tk->tkr_mono.clock, tk->tkr_mono.mult,
+                            tk->tkr_mono.cycle_last);
 }
 
 static inline void old_vsyscall_fixup(struct timekeeper *tk)
@@ -387,11 +498,11 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
          * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
          * users are removed, this can be killed.
          */
-        remainder = tk->tkr.xtime_nsec & ((1ULL << tk->tkr.shift) - 1);
-        tk->tkr.xtime_nsec -= remainder;
-        tk->tkr.xtime_nsec += 1ULL << tk->tkr.shift;
+        remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1);
+        tk->tkr_mono.xtime_nsec -= remainder;
+        tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
         tk->ntp_error += remainder << tk->ntp_error_shift;
-        tk->ntp_error -= (1ULL << tk->tkr.shift) << tk->ntp_error_shift;
+        tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
 }
 #else
 #define old_vsyscall_fixup(tk)
@@ -456,17 +567,17 @@ static inline void tk_update_ktime_data(struct timekeeper *tk)
          */
         seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
         nsec = (u32) tk->wall_to_monotonic.tv_nsec;
-        tk->tkr.base_mono = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
+        tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
 
         /* Update the monotonic raw base */
-        tk->base_raw = timespec64_to_ktime(tk->raw_time);
+        tk->tkr_raw.base = timespec64_to_ktime(tk->raw_time);
 
         /*
          * The sum of the nanoseconds portions of xtime and
          * wall_to_monotonic can be greater/equal one second. Take
          * this into account before updating tk->ktime_sec.
          */
-        nsec += (u32)(tk->tkr.xtime_nsec >> tk->tkr.shift);
+        nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
         if (nsec >= NSEC_PER_SEC)
                 seconds++;
         tk->ktime_sec = seconds;
@@ -489,7 +600,8 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
         memcpy(&shadow_timekeeper, &tk_core.timekeeper,
                sizeof(tk_core.timekeeper));
 
-        update_fast_timekeeper(&tk->tkr);
+        update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
+        update_fast_timekeeper(&tk->tkr_raw,  &tk_fast_raw);
 }
 
 /**
@@ -501,22 +613,23 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
  */
 static void timekeeping_forward_now(struct timekeeper *tk)
 {
-        struct clocksource *clock = tk->tkr.clock;
+        struct clocksource *clock = tk->tkr_mono.clock;
         cycle_t cycle_now, delta;
         s64 nsec;
 
-        cycle_now = tk->tkr.read(clock);
-        delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
-        tk->tkr.cycle_last = cycle_now;
+        cycle_now = tk->tkr_mono.read(clock);
+        delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
+        tk->tkr_mono.cycle_last = cycle_now;
+        tk->tkr_raw.cycle_last = cycle_now;
 
-        tk->tkr.xtime_nsec += delta * tk->tkr.mult;
+        tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;
 
         /* If arch requires, add in get_arch_timeoffset() */
-        tk->tkr.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr.shift;
+        tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;
 
         tk_normalize_xtime(tk);
 
-        nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
+        nsec = clocksource_cyc2ns(delta, tk->tkr_raw.mult, tk->tkr_raw.shift);
         timespec64_add_ns(&tk->raw_time, nsec);
 }
@@ -537,7 +650,7 @@ int __getnstimeofday64(struct timespec64 *ts)
                 seq = read_seqcount_begin(&tk_core.seq);
 
                 ts->tv_sec = tk->xtime_sec;
-                nsecs = timekeeping_get_ns(&tk->tkr);
+                nsecs = timekeeping_get_ns(&tk->tkr_mono);
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -577,8 +690,8 @@ ktime_t ktime_get(void)
 
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
-                base = tk->tkr.base_mono;
-                nsecs = timekeeping_get_ns(&tk->tkr);
+                base = tk->tkr_mono.base;
+                nsecs = timekeeping_get_ns(&tk->tkr_mono);
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -603,8 +716,8 @@ ktime_t ktime_get_with_offset(enum tk_offsets offs)
 
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
-                base = ktime_add(tk->tkr.base_mono, *offset);
-                nsecs = timekeeping_get_ns(&tk->tkr);
+                base = ktime_add(tk->tkr_mono.base, *offset);
+                nsecs = timekeeping_get_ns(&tk->tkr_mono);
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -645,8 +758,8 @@ ktime_t ktime_get_raw(void)
 
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
-                base = tk->base_raw;
-                nsecs = timekeeping_get_ns_raw(tk);
+                base = tk->tkr_raw.base;
+                nsecs = timekeeping_get_ns(&tk->tkr_raw);
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -674,7 +787,7 @@ void ktime_get_ts64(struct timespec64 *ts)
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
                 ts->tv_sec = tk->xtime_sec;
-                nsec = timekeeping_get_ns(&tk->tkr);
+                nsec = timekeeping_get_ns(&tk->tkr_mono);
                 tomono = tk->wall_to_monotonic;
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
@@ -759,8 +872,8 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
                 ts_real->tv_sec = tk->xtime_sec;
                 ts_real->tv_nsec = 0;
 
-                nsecs_raw = timekeeping_get_ns_raw(tk);
-                nsecs_real = timekeeping_get_ns(&tk->tkr);
+                nsecs_raw = timekeeping_get_ns(&tk->tkr_raw);
+                nsecs_real = timekeeping_get_ns(&tk->tkr_mono);
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -943,7 +1056,7 @@ static int change_clocksource(void *data)
          */
         if (try_module_get(new->owner)) {
                 if (!new->enable || new->enable(new) == 0) {
-                        old = tk->tkr.clock;
+                        old = tk->tkr_mono.clock;
                         tk_setup_internals(tk, new);
                         if (old->disable)
                                 old->disable(old);
@@ -971,11 +1084,11 @@ int timekeeping_notify(struct clocksource *clock)
 {
         struct timekeeper *tk = &tk_core.timekeeper;
 
-        if (tk->tkr.clock == clock)
+        if (tk->tkr_mono.clock == clock)
                 return 0;
         stop_machine(change_clocksource, clock, NULL);
         tick_clock_notify();
-        return tk->tkr.clock == clock ? 0 : -1;
+        return tk->tkr_mono.clock == clock ? 0 : -1;
 }
 
 /**
@@ -993,7 +1106,7 @@ void getrawmonotonic64(struct timespec64 *ts)
 
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
-                nsecs = timekeeping_get_ns_raw(tk);
+                nsecs = timekeeping_get_ns(&tk->tkr_raw);
                 ts64 = tk->raw_time;
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
@@ -1016,7 +1129,7 @@ int timekeeping_valid_for_hres(void)
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
 
-                ret = tk->tkr.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
+                ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -1035,7 +1148,7 @@ u64 timekeeping_max_deferment(void)
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
 
-                ret = tk->tkr.clock->max_idle_ns;
+                ret = tk->tkr_mono.clock->max_idle_ns;
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -1057,6 +1170,14 @@ void __weak read_persistent_clock(struct timespec *ts)
         ts->tv_nsec = 0;
 }
 
+void __weak read_persistent_clock64(struct timespec64 *ts64)
+{
+        struct timespec ts;
+
+        read_persistent_clock(&ts);
+        *ts64 = timespec_to_timespec64(ts);
+}
+
 /**
  * read_boot_clock - Return time of the system start.
  *
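The wrapper is marked __weak, so it serves only as a default: an architecture with a native 64-bit persistent clock defines a strong symbol of the same name and the linker prefers it. A minimal sketch of the pattern with simplified stand-in types (the readout value is hypothetical):

    /* default.c */
    struct timespec64 { long long tv_sec; long tv_nsec; };

    void __attribute__((weak)) read_persistent_clock64(struct timespec64 *ts)
    {
            ts->tv_sec = 0;  /* "no persistent clock" default */
            ts->tv_nsec = 0;
    }

    /* arch.c - overrides the weak default at link time */
    void read_persistent_clock64(struct timespec64 *ts)
    {
            ts->tv_sec = 1427000000; /* hypothetical RTC readout */
            ts->tv_nsec = 0;
    }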
@@ -1072,6 +1193,20 @@ void __weak read_boot_clock(struct timespec *ts)
         ts->tv_nsec = 0;
 }
 
+void __weak read_boot_clock64(struct timespec64 *ts64)
+{
+        struct timespec ts;
+
+        read_boot_clock(&ts);
+        *ts64 = timespec_to_timespec64(ts);
+}
+
+/* Flag for if timekeeping_resume() has injected sleeptime */
+static bool sleeptime_injected;
+
+/* Flag for if there is a persistent clock on this platform */
+static bool persistent_clock_exists;
+
 /*
  * timekeeping_init - Initializes the clocksource and common timekeeping values
  */
@@ -1081,20 +1216,17 @@ void __init timekeeping_init(void)
         struct clocksource *clock;
         unsigned long flags;
         struct timespec64 now, boot, tmp;
-        struct timespec ts;
 
-        read_persistent_clock(&ts);
-        now = timespec_to_timespec64(ts);
+        read_persistent_clock64(&now);
         if (!timespec64_valid_strict(&now)) {
                 pr_warn("WARNING: Persistent clock returned invalid value!\n"
                         "         Check your CMOS/BIOS settings.\n");
                 now.tv_sec = 0;
                 now.tv_nsec = 0;
         } else if (now.tv_sec || now.tv_nsec)
-                persistent_clock_exist = true;
+                persistent_clock_exists = true;
 
-        read_boot_clock(&ts);
-        boot = timespec_to_timespec64(ts);
+        read_boot_clock64(&boot);
         if (!timespec64_valid_strict(&boot)) {
                 pr_warn("WARNING: Boot clock returned invalid value!\n"
                         "         Check your CMOS/BIOS settings.\n");
@@ -1114,7 +1246,6 @@ void __init timekeeping_init(void)
         tk_set_xtime(tk, &now);
         tk->raw_time.tv_sec = 0;
         tk->raw_time.tv_nsec = 0;
-        tk->base_raw.tv64 = 0;
         if (boot.tv_sec == 0 && boot.tv_nsec == 0)
                 boot = tk_xtime(tk);
 
@@ -1127,7 +1258,7 @@ void __init timekeeping_init(void)
         raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 }
 
-/* time in seconds when suspend began */
+/* time in seconds when suspend began for persistent clock */
 static struct timespec64 timekeeping_suspend_time;
 
 /**
@@ -1152,12 +1283,49 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
         tk_debug_account_sleep_time(delta);
 }
 
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
+/**
+ * We have three kinds of time sources to use for sleep time
+ * injection, the preference order is:
+ * 1) non-stop clocksource
+ * 2) persistent clock (ie: RTC accessible when irqs are off)
+ * 3) RTC
+ *
+ * 1) and 2) are used by timekeeping, 3) by RTC subsystem.
+ * If system has neither 1) nor 2), 3) will be used finally.
+ *
+ *
+ * If timekeeping has injected sleeptime via either 1) or 2),
+ * 3) becomes needless, so in this case we don't need to call
+ * rtc_resume(), and this is what timekeeping_rtc_skipresume()
+ * means.
+ */
+bool timekeeping_rtc_skipresume(void)
+{
+        return sleeptime_injected;
+}
+
+/**
+ * 1) can be determined whether to use or not only when doing
+ * timekeeping_resume() which is invoked after rtc_suspend(),
+ * so we can't skip rtc_suspend() surely if system has 1).
+ *
+ * But if system has 2), 2) will definitely be used, so in this
+ * case we don't need to call rtc_suspend(), and this is what
+ * timekeeping_rtc_skipsuspend() means.
+ */
+bool timekeeping_rtc_skipsuspend(void)
+{
+        return persistent_clock_exists;
+}
+
 /**
  * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
  * @delta: pointer to a timespec64 delta value
  *
- * This hook is for architectures that cannot support read_persistent_clock
+ * This hook is for architectures that cannot support read_persistent_clock64
  * because their RTC/persistent clock is only accessible when irqs are enabled.
+ * and also don't have an effective nonstop clocksource.
  *
  * This function should only be called by rtc_resume(), and allows
  * a suspend offset to be injected into the timekeeping values.
@@ -1167,13 +1335,6 @@ void timekeeping_inject_sleeptime64(struct timespec64 *delta)
         struct timekeeper *tk = &tk_core.timekeeper;
         unsigned long flags;
 
-        /*
-         * Make sure we don't set the clock twice, as timekeeping_resume()
-         * already did it
-         */
-        if (has_persistent_clock())
-                return;
-
         raw_spin_lock_irqsave(&timekeeper_lock, flags);
         write_seqcount_begin(&tk_core.seq);
 
@@ -1189,26 +1350,21 @@ void timekeeping_inject_sleeptime64(struct timespec64 *delta)
         /* signal hrtimers about time change */
         clock_was_set();
 }
+#endif
 
 /**
  * timekeeping_resume - Resumes the generic timekeeping subsystem.
- *
- * This is for the generic clocksource timekeeping.
- * xtime/wall_to_monotonic/jiffies/etc are
- * still managed by arch specific suspend/resume code.
  */
 void timekeeping_resume(void)
 {
         struct timekeeper *tk = &tk_core.timekeeper;
-        struct clocksource *clock = tk->tkr.clock;
+        struct clocksource *clock = tk->tkr_mono.clock;
         unsigned long flags;
         struct timespec64 ts_new, ts_delta;
-        struct timespec tmp;
         cycle_t cycle_now, cycle_delta;
-        bool suspendtime_found = false;
 
-        read_persistent_clock(&tmp);
-        ts_new = timespec_to_timespec64(tmp);
+        sleeptime_injected = false;
+        read_persistent_clock64(&ts_new);
 
         clockevents_resume();
         clocksource_resume();
@@ -1228,16 +1384,16 @@ void timekeeping_resume(void)
          * The less preferred source will only be tried if there is no better
          * usable source. The rtc part is handled separately in rtc core code.
          */
-        cycle_now = tk->tkr.read(clock);
+        cycle_now = tk->tkr_mono.read(clock);
         if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
-                cycle_now > tk->tkr.cycle_last) {
+                cycle_now > tk->tkr_mono.cycle_last) {
                 u64 num, max = ULLONG_MAX;
                 u32 mult = clock->mult;
                 u32 shift = clock->shift;
                 s64 nsec = 0;
 
-                cycle_delta = clocksource_delta(cycle_now, tk->tkr.cycle_last,
-                                                tk->tkr.mask);
+                cycle_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
+                                                tk->tkr_mono.mask);
 
                 /*
                  * "cycle_delta * mult" may cause 64 bits overflow, if the
@@ -1253,17 +1409,19 @@ void timekeeping_resume(void)
                 nsec += ((u64) cycle_delta * mult) >> shift;
 
                 ts_delta = ns_to_timespec64(nsec);
-                suspendtime_found = true;
+                sleeptime_injected = true;
         } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
                 ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
-                suspendtime_found = true;
+                sleeptime_injected = true;
         }
 
-        if (suspendtime_found)
+        if (sleeptime_injected)
                 __timekeeping_inject_sleeptime(tk, &ts_delta);
 
         /* Re-base the last cycle value */
-        tk->tkr.cycle_last = cycle_now;
+        tk->tkr_mono.cycle_last = cycle_now;
+        tk->tkr_raw.cycle_last = cycle_now;
+
         tk->ntp_error = 0;
         timekeeping_suspended = 0;
         timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
@@ -1272,9 +1430,7 @@ void timekeeping_resume(void)
 
         touch_softlockup_watchdog();
 
-        clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);
-
-        /* Resume hrtimers */
+        tick_resume();
         hrtimers_resume();
 }
@@ -1284,10 +1440,8 @@ int timekeeping_suspend(void)
         unsigned long flags;
         struct timespec64 delta, delta_delta;
         static struct timespec64 old_delta;
-        struct timespec tmp;
 
-        read_persistent_clock(&tmp);
-        timekeeping_suspend_time = timespec_to_timespec64(tmp);
+        read_persistent_clock64(&timekeeping_suspend_time);
 
         /*
          * On some systems the persistent_clock can not be detected at
@@ -1295,31 +1449,33 @@ int timekeeping_suspend(void)
          * value returned, update the persistent_clock_exists flag.
          */
         if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
-                persistent_clock_exist = true;
+                persistent_clock_exists = true;
 
         raw_spin_lock_irqsave(&timekeeper_lock, flags);
         write_seqcount_begin(&tk_core.seq);
         timekeeping_forward_now(tk);
         timekeeping_suspended = 1;
 
-        /*
-         * To avoid drift caused by repeated suspend/resumes,
-         * which each can add ~1 second drift error,
-         * try to compensate so the difference in system time
-         * and persistent_clock time stays close to constant.
-         */
-        delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
-        delta_delta = timespec64_sub(delta, old_delta);
-        if (abs(delta_delta.tv_sec) >= 2) {
-                /*
-                 * if delta_delta is too large, assume time correction
-                 * has occured and set old_delta to the current delta.
-                 */
-                old_delta = delta;
-        } else {
-                /* Otherwise try to adjust old_system to compensate */
-                timekeeping_suspend_time =
-                        timespec64_add(timekeeping_suspend_time, delta_delta);
+        if (persistent_clock_exists) {
+                /*
+                 * To avoid drift caused by repeated suspend/resumes,
+                 * which each can add ~1 second drift error,
+                 * try to compensate so the difference in system time
+                 * and persistent_clock time stays close to constant.
+                 */
+                delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
+                delta_delta = timespec64_sub(delta, old_delta);
+                if (abs(delta_delta.tv_sec) >= 2) {
+                        /*
+                         * if delta_delta is too large, assume time correction
+                         * has occurred and set old_delta to the current delta.
+                         */
+                        old_delta = delta;
+                } else {
+                        /* Otherwise try to adjust old_system to compensate */
+                        timekeeping_suspend_time =
+                                timespec64_add(timekeeping_suspend_time, delta_delta);
+                }
         }
 
         timekeeping_update(tk, TK_MIRROR);
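The compensation only makes sense when a persistent clock actually exists, hence the new guard. Numerically, each suspend compares system time against the persistent clock; small drifts are folded back into timekeeping_suspend_time, while a jump of two seconds or more is assumed to be a deliberate clock step. A sketch with invented readings:

    #include <stdio.h>

    int main(void)
    {
            double old_delta = 3.40; /* system - persistent, previous suspend */
            double delta = 3.65;     /* same difference, this suspend */
            double delta_delta = delta - old_delta;

            if (delta_delta >= 2.0 || delta_delta <= -2.0)
                    old_delta = delta; /* clock was stepped: just resync */
            else
                    printf("fold %.2fs back into suspend time\n", delta_delta);
            return 0;
    }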
@@ -1327,7 +1483,7 @@ int timekeeping_suspend(void)
         write_seqcount_end(&tk_core.seq);
         raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
-        clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
+        tick_suspend();
         clocksource_suspend();
         clockevents_suspend();
 
@@ -1416,15 +1572,15 @@ static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
          *
          * XXX - TODO: Doc ntp_error calculation.
          */
-        if ((mult_adj > 0) && (tk->tkr.mult + mult_adj < mult_adj)) {
+        if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
                 /* NTP adjustment caused clocksource mult overflow */
                 WARN_ON_ONCE(1);
                 return;
         }
 
-        tk->tkr.mult += mult_adj;
+        tk->tkr_mono.mult += mult_adj;
         tk->xtime_interval += interval;
-        tk->tkr.xtime_nsec -= offset;
+        tk->tkr_mono.xtime_nsec -= offset;
         tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
 }
@@ -1486,13 +1642,13 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
                 tk->ntp_err_mult = 0;
         }
 
-        if (unlikely(tk->tkr.clock->maxadj &&
-                (abs(tk->tkr.mult - tk->tkr.clock->mult)
-                        > tk->tkr.clock->maxadj))) {
+        if (unlikely(tk->tkr_mono.clock->maxadj &&
+                (abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
+                        > tk->tkr_mono.clock->maxadj))) {
                 printk_once(KERN_WARNING
                         "Adjusting %s more than 11%% (%ld vs %ld)\n",
-                        tk->tkr.clock->name, (long)tk->tkr.mult,
-                        (long)tk->tkr.clock->mult + tk->tkr.clock->maxadj);
+                        tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
+                        (long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
         }
 
         /*
@@ -1509,9 +1665,9 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
          * We'll correct this error next time through this function, when
          * xtime_nsec is not as small.
          */
-        if (unlikely((s64)tk->tkr.xtime_nsec < 0)) {
-                s64 neg = -(s64)tk->tkr.xtime_nsec;
-                tk->tkr.xtime_nsec = 0;
+        if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
+                s64 neg = -(s64)tk->tkr_mono.xtime_nsec;
+                tk->tkr_mono.xtime_nsec = 0;
                 tk->ntp_error += neg << tk->ntp_error_shift;
         }
 }
@@ -1526,13 +1682,13 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
  */
 static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
 {
-        u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr.shift;
+        u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
         unsigned int clock_set = 0;
 
-        while (tk->tkr.xtime_nsec >= nsecps) {
+        while (tk->tkr_mono.xtime_nsec >= nsecps) {
                 int leap;
 
-                tk->tkr.xtime_nsec -= nsecps;
+                tk->tkr_mono.xtime_nsec -= nsecps;
                 tk->xtime_sec++;
 
                 /* Figure out if it's a leap sec and apply if needed */
@@ -1577,9 +1733,10 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
 
         /* Accumulate one shifted interval */
         offset -= interval;
-        tk->tkr.cycle_last += interval;
+        tk->tkr_mono.cycle_last += interval;
+        tk->tkr_raw.cycle_last += interval;
 
-        tk->tkr.xtime_nsec += tk->xtime_interval << shift;
+        tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
         *clock_set |= accumulate_nsecs_to_secs(tk);
 
         /* Accumulate raw time */
@@ -1622,14 +1779,17 @@ void update_wall_time(void)
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
         offset = real_tk->cycle_interval;
 #else
-        offset = clocksource_delta(tk->tkr.read(tk->tkr.clock),
-                                   tk->tkr.cycle_last, tk->tkr.mask);
+        offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
+                                   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
 #endif
 
         /* Check if there's really nothing to do */
         if (offset < real_tk->cycle_interval)
                 goto out;
 
+        /* Do some additional sanity checking */
+        timekeeping_check_update(real_tk, offset);
+
         /*
          * With NO_HZ we may have to accumulate many cycle_intervals
          * (think "ticks") worth of time at once. To do this efficiently,
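The accumulation loop that follows consumes the offset in power-of-two multiples of the base interval, so a long NO_HZ idle period costs a logarithmic, not linear, number of iterations. A simplified sketch of that strategy (the interval and offset are arbitrary; the kernel additionally bounds the shift to avoid overflow):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t interval = 1000;  /* one tick's worth of cycles */
            uint64_t offset = 1000000; /* backlog after a long idle */
            int shift = 0;

            while ((interval << shift) <= (offset >> 1))
                    shift++;           /* start with the biggest chunk */

            while (offset >= interval) {
                    while ((interval << shift) > offset)
                            shift--;   /* halve the chunk as we catch up */
                    offset -= interval << shift;
                    printf("accumulated %llu cycles\n",
                           (unsigned long long)(interval << shift));
            }
            return 0;
    }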
@@ -1784,8 +1944,8 @@ ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real, ktime_t *offs_boot,
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
 
-                base = tk->tkr.base_mono;
-                nsecs = tk->tkr.xtime_nsec >> tk->tkr.shift;
+                base = tk->tkr_mono.base;
+                nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
 
                 *offs_real = tk->offs_real;
                 *offs_boot = tk->offs_boot;
@@ -1816,8 +1976,8 @@ ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot,
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
 
-                base = tk->tkr.base_mono;
-                nsecs = timekeeping_get_ns(&tk->tkr);
+                base = tk->tkr_mono.base;
+                nsecs = timekeeping_get_ns(&tk->tkr_mono);
 
                 *offs_real = tk->offs_real;
                 *offs_boot = tk->offs_boot;