author     Thomas Gleixner <tglx@linutronix.de>  2014-07-16 17:05:16 -0400
committer  John Stultz <john.stultz@linaro.org>  2014-07-23 18:01:53 -0400
commit     d28ede83791defee9a81e558540699dc46dbbe13 (patch)
tree       40df9738db287097241fe25493df982ee33b046f /kernel
parent     6d3aadf3e180e09dbefab16478c6876b584ce16e (diff)
timekeeping: Create struct tk_read_base and use it in struct timekeeper
The members of the new struct are the required ones for the new NMI safe accessor to clock monotonic. In order to reuse the existing timekeeping code and to make the update of the fast NMI safe timekeepers a simple memcpy, use the struct for the timekeeper as well and convert all users.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/time/timekeeping.c | 132
1 file changed, 66 insertions, 66 deletions
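The struct itself is added in include/linux/timekeeper_internal.h, which is outside this kernel/-limited diffstat. For orientation, a sketch of its contents, reconstructed from the tk->tkr.* accesses and the types visible in the diff below (field order and comments are inferred here, not quoted from the header):

struct tk_read_base {
        struct clocksource      *clock;         /* current clocksource */
        cycle_t                 (*read)(struct clocksource *cs); /* cached clock->read */
        cycle_t                 mask;           /* bitmask for delta calculation */
        cycle_t                 cycle_last;     /* clock value at last update */
        u32                     mult;           /* NTP-adjusted cycle -> ns multiplier */
        u32                     shift;          /* cycle -> ns shift (was tk->shift) */
        u64                     xtime_nsec;     /* shifted nanoseconds left over from last update */
        ktime_t                 base_mono;      /* base ktime for CLOCK_MONOTONIC */
};

Keeping everything a monotonic reader needs in one flat struct is what later lets the whole read state be handed to an NMI safe fast timekeeper with a single memcpy; see the sketch after the diff.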
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 14b7367e6b94..ccb69980ef7e 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -52,8 +52,8 @@ bool __read_mostly persistent_clock_exist = false;
 
 static inline void tk_normalize_xtime(struct timekeeper *tk)
 {
-        while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
-                tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift;
+        while (tk->tkr.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr.shift)) {
+                tk->tkr.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr.shift;
                 tk->xtime_sec++;
         }
 }
@@ -63,20 +63,20 @@ static inline struct timespec64 tk_xtime(struct timekeeper *tk)
         struct timespec64 ts;
 
         ts.tv_sec = tk->xtime_sec;
-        ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift);
+        ts.tv_nsec = (long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
         return ts;
 }
 
 static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
 {
         tk->xtime_sec = ts->tv_sec;
-        tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift;
+        tk->tkr.xtime_nsec = (u64)ts->tv_nsec << tk->tkr.shift;
 }
 
 static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
 {
         tk->xtime_sec += ts->tv_sec;
-        tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
+        tk->tkr.xtime_nsec += (u64)ts->tv_nsec << tk->tkr.shift;
         tk_normalize_xtime(tk);
 }
 
@@ -119,11 +119,11 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
         u64 tmp, ntpinterval;
         struct clocksource *old_clock;
 
-        old_clock = tk->clock;
-        tk->clock = clock;
-        tk->read = clock->read;
-        tk->mask = clock->mask;
-        tk->cycle_last = tk->read(clock);
+        old_clock = tk->tkr.clock;
+        tk->tkr.clock = clock;
+        tk->tkr.read = clock->read;
+        tk->tkr.mask = clock->mask;
+        tk->tkr.cycle_last = tk->tkr.read(clock);
 
         /* Do the ns -> cycle conversion first, using original mult */
         tmp = NTP_INTERVAL_LENGTH;
@@ -147,11 +147,11 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
         if (old_clock) {
                 int shift_change = clock->shift - old_clock->shift;
                 if (shift_change < 0)
-                        tk->xtime_nsec >>= -shift_change;
+                        tk->tkr.xtime_nsec >>= -shift_change;
                 else
-                        tk->xtime_nsec <<= shift_change;
+                        tk->tkr.xtime_nsec <<= shift_change;
         }
-        tk->shift = clock->shift;
+        tk->tkr.shift = clock->shift;
 
         tk->ntp_error = 0;
         tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
@@ -161,7 +161,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
          * active clocksource. These value will be adjusted via NTP
          * to counteract clock drifting.
          */
-        tk->mult = clock->mult;
+        tk->tkr.mult = clock->mult;
 }
 
 /* Timekeeper helper functions. */
@@ -179,13 +179,13 @@ static inline s64 timekeeping_get_ns(struct timekeeper *tk)
         s64 nsec;
 
         /* read clocksource: */
-        cycle_now = tk->read(tk->clock);
+        cycle_now = tk->tkr.read(tk->tkr.clock);
 
         /* calculate the delta since the last update_wall_time: */
-        delta = clocksource_delta(cycle_now, tk->cycle_last, tk->mask);
+        delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
 
-        nsec = delta * tk->mult + tk->xtime_nsec;
-        nsec >>= tk->shift;
+        nsec = delta * tk->tkr.mult + tk->tkr.xtime_nsec;
+        nsec >>= tk->tkr.shift;
 
         /* If arch requires, add in get_arch_timeoffset() */
         return nsec + arch_gettimeoffset();
@@ -193,15 +193,15 @@ static inline s64 timekeeping_get_ns(struct timekeeper *tk)
 
 static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
 {
-        struct clocksource *clock = tk->clock;
+        struct clocksource *clock = tk->tkr.clock;
         cycle_t cycle_now, delta;
         s64 nsec;
 
         /* read clocksource: */
-        cycle_now = tk->read(clock);
+        cycle_now = tk->tkr.read(clock);
 
         /* calculate the delta since the last update_wall_time: */
-        delta = clocksource_delta(cycle_now, tk->cycle_last, tk->mask);
+        delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
 
         /* convert delta to nanoseconds. */
         nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
@@ -217,8 +217,8 @@ static inline void update_vsyscall(struct timekeeper *tk)
         struct timespec xt;
 
         xt = tk_xtime(tk);
-        update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult,
-                            tk->cycle_last);
+        update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->tkr.clock, tk->tkr.mult,
+                            tk->tkr.cycle_last);
 }
 
 static inline void old_vsyscall_fixup(struct timekeeper *tk)
@@ -235,11 +235,11 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
          * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
          * users are removed, this can be killed.
          */
-        remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
-        tk->xtime_nsec -= remainder;
-        tk->xtime_nsec += 1ULL << tk->shift;
+        remainder = tk->tkr.xtime_nsec & ((1ULL << tk->tkr.shift) - 1);
+        tk->tkr.xtime_nsec -= remainder;
+        tk->tkr.xtime_nsec += 1ULL << tk->tkr.shift;
         tk->ntp_error += remainder << tk->ntp_error_shift;
-        tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift;
+        tk->ntp_error -= (1ULL << tk->tkr.shift) << tk->ntp_error_shift;
 }
 #else
 #define old_vsyscall_fixup(tk)
@@ -304,7 +304,7 @@ static inline void tk_update_ktime_data(struct timekeeper *tk)
         nsec = (s64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
         nsec *= NSEC_PER_SEC;
         nsec += tk->wall_to_monotonic.tv_nsec;
-        tk->base_mono = ns_to_ktime(nsec);
+        tk->tkr.base_mono = ns_to_ktime(nsec);
 
         /* Update the monotonic raw base */
         tk->base_raw = timespec64_to_ktime(tk->raw_time);
@@ -336,18 +336,18 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
  */
 static void timekeeping_forward_now(struct timekeeper *tk)
 {
-        struct clocksource *clock = tk->clock;
+        struct clocksource *clock = tk->tkr.clock;
         cycle_t cycle_now, delta;
         s64 nsec;
 
-        cycle_now = tk->read(clock);
-        delta = clocksource_delta(cycle_now, tk->cycle_last, tk->mask);
-        tk->cycle_last = cycle_now;
+        cycle_now = tk->tkr.read(clock);
+        delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
+        tk->tkr.cycle_last = cycle_now;
 
-        tk->xtime_nsec += delta * tk->mult;
+        tk->tkr.xtime_nsec += delta * tk->tkr.mult;
 
         /* If arch requires, add in get_arch_timeoffset() */
-        tk->xtime_nsec += (u64)arch_gettimeoffset() << tk->shift;
+        tk->tkr.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr.shift;
 
         tk_normalize_xtime(tk);
 
@@ -412,7 +412,7 @@ ktime_t ktime_get(void)
 
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
-                base = tk->base_mono;
+                base = tk->tkr.base_mono;
                 nsecs = timekeeping_get_ns(tk);
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
@@ -438,7 +438,7 @@ ktime_t ktime_get_with_offset(enum tk_offsets offs)
 
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
-                base = ktime_add(tk->base_mono, *offset);
+                base = ktime_add(tk->tkr.base_mono, *offset);
                 nsecs = timekeeping_get_ns(tk);
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
@@ -731,7 +731,7 @@ static int change_clocksource(void *data)
          */
         if (try_module_get(new->owner)) {
                 if (!new->enable || new->enable(new) == 0) {
-                        old = tk->clock;
+                        old = tk->tkr.clock;
                         tk_setup_internals(tk, new);
                         if (old->disable)
                                 old->disable(old);
@@ -759,11 +759,11 @@ int timekeeping_notify(struct clocksource *clock)
 {
         struct timekeeper *tk = &tk_core.timekeeper;
 
-        if (tk->clock == clock)
+        if (tk->tkr.clock == clock)
                 return 0;
         stop_machine(change_clocksource, clock, NULL);
         tick_clock_notify();
-        return tk->clock == clock ? 0 : -1;
+        return tk->tkr.clock == clock ? 0 : -1;
 }
 
 /**
@@ -803,7 +803,7 @@ int timekeeping_valid_for_hres(void)
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
 
-                ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
+                ret = tk->tkr.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -822,7 +822,7 @@ u64 timekeeping_max_deferment(void)
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
 
-                ret = tk->clock->max_idle_ns;
+                ret = tk->tkr.clock->max_idle_ns;
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -989,7 +989,7 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
 static void timekeeping_resume(void)
 {
         struct timekeeper *tk = &tk_core.timekeeper;
-        struct clocksource *clock = tk->clock;
+        struct clocksource *clock = tk->tkr.clock;
         unsigned long flags;
         struct timespec64 ts_new, ts_delta;
         struct timespec tmp;
@@ -1017,16 +1017,16 @@ static void timekeeping_resume(void)
          * The less preferred source will only be tried if there is no better
          * usable source. The rtc part is handled separately in rtc core code.
          */
-        cycle_now = tk->read(clock);
+        cycle_now = tk->tkr.read(clock);
         if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
-                cycle_now > tk->cycle_last) {
+                cycle_now > tk->tkr.cycle_last) {
                 u64 num, max = ULLONG_MAX;
                 u32 mult = clock->mult;
                 u32 shift = clock->shift;
                 s64 nsec = 0;
 
-                cycle_delta = clocksource_delta(cycle_now, tk->cycle_last,
-                                                tk->mask);
+                cycle_delta = clocksource_delta(cycle_now, tk->tkr.cycle_last,
+                                                tk->tkr.mask);
 
                 /*
                  * "cycle_delta * mutl" may cause 64 bits overflow, if the
@@ -1052,7 +1052,7 @@ static void timekeeping_resume(void)
                 __timekeeping_inject_sleeptime(tk, &ts_delta);
 
         /* Re-base the last cycle value */
-        tk->cycle_last = cycle_now;
+        tk->tkr.cycle_last = cycle_now;
         tk->ntp_error = 0;
         timekeeping_suspended = 0;
         timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
@@ -1239,12 +1239,12 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
                 }
         }
 
-        if (unlikely(tk->clock->maxadj &&
-                (tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
+        if (unlikely(tk->tkr.clock->maxadj &&
+                (tk->tkr.mult + adj > tk->tkr.clock->mult + tk->tkr.clock->maxadj))) {
                 printk_deferred_once(KERN_WARNING
                         "Adjusting %s more than 11%% (%ld vs %ld)\n",
-                        tk->clock->name, (long)tk->mult + adj,
-                        (long)tk->clock->mult + tk->clock->maxadj);
+                        tk->tkr.clock->name, (long)tk->tkr.mult + adj,
+                        (long)tk->tkr.clock->mult + tk->tkr.clock->maxadj);
         }
         /*
          * So the following can be confusing.
@@ -1295,9 +1295,9 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
          *
          * XXX - TODO: Doc ntp_error calculation.
          */
-        tk->mult += adj;
+        tk->tkr.mult += adj;
         tk->xtime_interval += interval;
-        tk->xtime_nsec -= offset;
+        tk->tkr.xtime_nsec -= offset;
         tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
 
 out_adjust:
@@ -1315,9 +1315,9 @@ out_adjust:
          * We'll correct this error next time through this function, when
          * xtime_nsec is not as small.
          */
-        if (unlikely((s64)tk->xtime_nsec < 0)) {
-                s64 neg = -(s64)tk->xtime_nsec;
-                tk->xtime_nsec = 0;
+        if (unlikely((s64)tk->tkr.xtime_nsec < 0)) {
+                s64 neg = -(s64)tk->tkr.xtime_nsec;
+                tk->tkr.xtime_nsec = 0;
                 tk->ntp_error += neg << tk->ntp_error_shift;
         }
 
@@ -1333,13 +1333,13 @@ out_adjust:
  */
 static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
 {
-        u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;
+        u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr.shift;
         unsigned int clock_set = 0;
 
-        while (tk->xtime_nsec >= nsecps) {
+        while (tk->tkr.xtime_nsec >= nsecps) {
                 int leap;
 
-                tk->xtime_nsec -= nsecps;
+                tk->tkr.xtime_nsec -= nsecps;
                 tk->xtime_sec++;
 
                 /* Figure out if its a leap sec and apply if needed */
@@ -1384,9 +1384,9 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
 
         /* Accumulate one shifted interval */
         offset -= interval;
-        tk->cycle_last += interval;
+        tk->tkr.cycle_last += interval;
 
-        tk->xtime_nsec += tk->xtime_interval << shift;
+        tk->tkr.xtime_nsec += tk->xtime_interval << shift;
         *clock_set |= accumulate_nsecs_to_secs(tk);
 
         /* Accumulate raw time */
@@ -1429,8 +1429,8 @@ void update_wall_time(void)
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
         offset = real_tk->cycle_interval;
 #else
-        offset = clocksource_delta(tk->read(tk->clock), tk->cycle_last,
-                                   tk->mask);
+        offset = clocksource_delta(tk->tkr.read(tk->tkr.clock),
+                                   tk->tkr.cycle_last, tk->tkr.mask);
 #endif
 
         /* Check if there's really nothing to do */
@@ -1591,8 +1591,8 @@ ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real, ktime_t *offs_boot,
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
 
-                base = tk->base_mono;
-                nsecs = tk->xtime_nsec >> tk->shift;
+                base = tk->tkr.base_mono;
+                nsecs = tk->tkr.xtime_nsec >> tk->tkr.shift;
 
                 *offs_real = tk->offs_real;
                 *offs_boot = tk->offs_boot;
@@ -1623,7 +1623,7 @@ ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot,
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
 
-                base = tk->base_mono;
+                base = tk->tkr.base_mono;
                 nsecs = timekeeping_get_ns(tk);
 
                 *offs_real = tk->offs_real;
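The changelog's memcpy remark is the point of the exercise: once all read state lives in tk->tkr, publishing it to an NMI safe fast timekeeper reduces to copying one struct. A minimal sketch of such an update path, assuming a hypothetical tk_fast container with two latched copies (none of these names are introduced by this patch):

/*
 * Hypothetical follow-up: keep two copies of the read base and flip
 * readers between them with a latched seqcount, so an NMI that lands
 * in the middle of an update still sees a consistent copy.
 */
struct tk_fast {
        seqcount_t              seq;
        struct tk_read_base     base[2];
};

static struct tk_fast tk_fast_mono;

static void update_fast_timekeeper(struct timekeeper *tk)
{
        struct tk_read_base *base = tk_fast_mono.base;

        /* Odd seq: steer NMI readers to base[1] while base[0] is rewritten */
        raw_write_seqcount_latch(&tk_fast_mono.seq);
        memcpy(base, &tk->tkr, sizeof(*base));

        /* Even seq: steer readers back to base[0] while base[1] is rewritten */
        raw_write_seqcount_latch(&tk_fast_mono.seq);
        memcpy(base + 1, &tk->tkr, sizeof(*base));
}

This only works because struct tk_read_base is self-contained; with the fields still scattered through struct timekeeper, each would have to be copied out individually.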