author:    Peter Zijlstra <peterz@infradead.org>, 2015-03-19 05:09:06 -0400
committer: Ingo Molnar <mingo@kernel.org>, 2015-03-27 04:45:06 -0400
commit:    876e78818def2983be55878b21f7152fbaebbd36
tree:      7ca968f093715153c0773716c0f04d3707688b9f (kernel/time)
parent:    32fea568aec5b73ae27253125522b5c2a970a1f0
time: Rename timekeeper::tkr to timekeeper::tkr_mono
In preparation for adding another tkr field, rename this one to
tkr_mono. Also rename tk_read_base::base_mono to tk_read_base::base,
since the structure is not specific to CLOCK_MONOTONIC and the 'mono'
qualifier now lives on the timekeeper's tk_read_base instance instead.
Lots of trivial churn.
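
For orientation, here is a minimal sketch of the two structures involved, abbreviated to the fields this diff actually touches (the full definitions live in include/linux/timekeeper_internal.h; field comments are descriptive, not quoted from the header):

```c
/*
 * Sketch only: tk_read_base carries everything a clock read needs.
 * After this patch the ktime_t base loses its "_mono" suffix, and the
 * suffix moves to the embedded instance in struct timekeeper.
 */
struct tk_read_base {
	struct clocksource	*clock;		/* current clocksource */
	cycle_t			(*read)(struct clocksource *cs);
	cycle_t			mask;		/* bitmask for delta computation */
	cycle_t			cycle_last;	/* clock value at last update */
	u32			mult;		/* NTP-adjusted cycle -> ns multiplier */
	u32			shift;		/* cycle -> ns shift */
	u64			xtime_nsec;	/* shifted ns left over from last update */
	ktime_t			base;		/* was base_mono */
};

struct timekeeper {
	struct tk_read_base	tkr_mono;	/* was tkr: CLOCK_MONOTONIC read base */
	u64			xtime_sec;
	/* ... remaining fields unchanged ... */
};
```

Since the rename moves names but no data, every `tk->tkr.*` access below becomes `tk->tkr_mono.*` and `tkr->base_mono` becomes `tkr->base`; there is no semantic change.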
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: John Stultz <john.stultz@linaro.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20150319093400.344679419@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/time')
 kernel/time/timekeeping.c | 150 ++++++++++++++++++++----------------
 1 file changed, 75 insertions(+), 75 deletions(-)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 892f6cbf1e67..1405091f3acb 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -68,8 +68,8 @@ bool __read_mostly persistent_clock_exist = false;
 
 static inline void tk_normalize_xtime(struct timekeeper *tk)
 {
-	while (tk->tkr.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr.shift)) {
-		tk->tkr.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr.shift;
+	while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
+		tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
 		tk->xtime_sec++;
 	}
 }
@@ -79,20 +79,20 @@ static inline struct timespec64 tk_xtime(struct timekeeper *tk)
 	struct timespec64 ts;
 
 	ts.tv_sec = tk->xtime_sec;
-	ts.tv_nsec = (long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
+	ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
 	return ts;
 }
 
 static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
 {
 	tk->xtime_sec = ts->tv_sec;
-	tk->tkr.xtime_nsec = (u64)ts->tv_nsec << tk->tkr.shift;
+	tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
 }
 
 static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
 {
 	tk->xtime_sec += ts->tv_sec;
-	tk->tkr.xtime_nsec += (u64)ts->tv_nsec << tk->tkr.shift;
+	tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
 	tk_normalize_xtime(tk);
 }
 
@@ -136,8 +136,8 @@ static long timekeeping_last_warning;
 static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
 {
 
-	cycle_t max_cycles = tk->tkr.clock->max_cycles;
-	const char *name = tk->tkr.clock->name;
+	cycle_t max_cycles = tk->tkr_mono.clock->max_cycles;
+	const char *name = tk->tkr_mono.clock->name;
 
 	if (offset > max_cycles) {
 		printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
@@ -246,11 +246,11 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 	u64 tmp, ntpinterval;
 	struct clocksource *old_clock;
 
-	old_clock = tk->tkr.clock;
-	tk->tkr.clock = clock;
-	tk->tkr.read = clock->read;
-	tk->tkr.mask = clock->mask;
-	tk->tkr.cycle_last = tk->tkr.read(clock);
+	old_clock = tk->tkr_mono.clock;
+	tk->tkr_mono.clock = clock;
+	tk->tkr_mono.read = clock->read;
+	tk->tkr_mono.mask = clock->mask;
+	tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);
 
 	/* Do the ns -> cycle conversion first, using original mult */
 	tmp = NTP_INTERVAL_LENGTH;
@@ -274,11 +274,11 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 	if (old_clock) {
 		int shift_change = clock->shift - old_clock->shift;
 		if (shift_change < 0)
-			tk->tkr.xtime_nsec >>= -shift_change;
+			tk->tkr_mono.xtime_nsec >>= -shift_change;
 		else
-			tk->tkr.xtime_nsec <<= shift_change;
+			tk->tkr_mono.xtime_nsec <<= shift_change;
 	}
-	tk->tkr.shift = clock->shift;
+	tk->tkr_mono.shift = clock->shift;
 
 	tk->ntp_error = 0;
 	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
@@ -289,7 +289,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 	 * active clocksource. These value will be adjusted via NTP
 	 * to counteract clock drifting.
 	 */
-	tk->tkr.mult = clock->mult;
+	tk->tkr_mono.mult = clock->mult;
 	tk->ntp_err_mult = 0;
 }
 
@@ -318,11 +318,11 @@ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
 
 static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
 {
-	struct clocksource *clock = tk->tkr.clock;
+	struct clocksource *clock = tk->tkr_mono.clock;
 	cycle_t delta;
 	s64 nsec;
 
-	delta = timekeeping_get_delta(&tk->tkr);
+	delta = timekeeping_get_delta(&tk->tkr_mono);
 
 	/* convert delta to nanoseconds. */
 	nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
@@ -428,7 +428,7 @@ u64 notrace ktime_get_mono_fast_ns(void)
 	do {
 		seq = raw_read_seqcount(&tk_fast_mono.seq);
 		tkr = tk_fast_mono.base + (seq & 0x01);
-		now = ktime_to_ns(tkr->base_mono) + timekeeping_get_ns(tkr);
+		now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
 
 	} while (read_seqcount_retry(&tk_fast_mono.seq, seq));
 	return now;
@@ -456,7 +456,7 @@ static cycle_t dummy_clock_read(struct clocksource *cs)
 static void halt_fast_timekeeper(struct timekeeper *tk)
 {
 	static struct tk_read_base tkr_dummy;
-	struct tk_read_base *tkr = &tk->tkr;
+	struct tk_read_base *tkr = &tk->tkr_mono;
 
 	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
 	cycles_at_suspend = tkr->read(tkr->clock);
@@ -472,8 +472,8 @@ static inline void update_vsyscall(struct timekeeper *tk)
 
 	xt = timespec64_to_timespec(tk_xtime(tk));
 	wm = timespec64_to_timespec(tk->wall_to_monotonic);
-	update_vsyscall_old(&xt, &wm, tk->tkr.clock, tk->tkr.mult,
-			    tk->tkr.cycle_last);
+	update_vsyscall_old(&xt, &wm, tk->tkr_mono.clock, tk->tkr_mono.mult,
+			    tk->tkr_mono.cycle_last);
 }
 
 static inline void old_vsyscall_fixup(struct timekeeper *tk)
@@ -490,11 +490,11 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
 	 * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
 	 * users are removed, this can be killed.
 	 */
-	remainder = tk->tkr.xtime_nsec & ((1ULL << tk->tkr.shift) - 1);
-	tk->tkr.xtime_nsec -= remainder;
-	tk->tkr.xtime_nsec += 1ULL << tk->tkr.shift;
+	remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1);
+	tk->tkr_mono.xtime_nsec -= remainder;
+	tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
 	tk->ntp_error += remainder << tk->ntp_error_shift;
-	tk->ntp_error -= (1ULL << tk->tkr.shift) << tk->ntp_error_shift;
+	tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
 }
 #else
 #define old_vsyscall_fixup(tk)
@@ -559,7 +559,7 @@ static inline void tk_update_ktime_data(struct timekeeper *tk)
 	 */
 	seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
 	nsec = (u32) tk->wall_to_monotonic.tv_nsec;
-	tk->tkr.base_mono = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
+	tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
 
 	/* Update the monotonic raw base */
 	tk->base_raw = timespec64_to_ktime(tk->raw_time);
@@ -569,7 +569,7 @@ static inline void tk_update_ktime_data(struct timekeeper *tk)
 	 * wall_to_monotonic can be greater/equal one second. Take
 	 * this into account before updating tk->ktime_sec.
 	 */
-	nsec += (u32)(tk->tkr.xtime_nsec >> tk->tkr.shift);
+	nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
 	if (nsec >= NSEC_PER_SEC)
 		seconds++;
 	tk->ktime_sec = seconds;
@@ -592,7 +592,7 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
 		memcpy(&shadow_timekeeper, &tk_core.timekeeper,
 		       sizeof(tk_core.timekeeper));
 
-	update_fast_timekeeper(&tk->tkr);
+	update_fast_timekeeper(&tk->tkr_mono);
 }
 
 /**
@@ -604,18 +604,18 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
  */
 static void timekeeping_forward_now(struct timekeeper *tk)
 {
-	struct clocksource *clock = tk->tkr.clock;
+	struct clocksource *clock = tk->tkr_mono.clock;
 	cycle_t cycle_now, delta;
 	s64 nsec;
 
-	cycle_now = tk->tkr.read(clock);
-	delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
-	tk->tkr.cycle_last = cycle_now;
+	cycle_now = tk->tkr_mono.read(clock);
+	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
+	tk->tkr_mono.cycle_last = cycle_now;
 
-	tk->tkr.xtime_nsec += delta * tk->tkr.mult;
+	tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;
 
 	/* If arch requires, add in get_arch_timeoffset() */
-	tk->tkr.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr.shift;
+	tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;
 
 	tk_normalize_xtime(tk);
 
@@ -640,7 +640,7 @@ int __getnstimeofday64(struct timespec64 *ts)
 		seq = read_seqcount_begin(&tk_core.seq);
 
 		ts->tv_sec = tk->xtime_sec;
-		nsecs = timekeeping_get_ns(&tk->tkr);
+		nsecs = timekeeping_get_ns(&tk->tkr_mono);
 
 	} while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -680,8 +680,8 @@ ktime_t ktime_get(void)
 
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
-		base = tk->tkr.base_mono;
-		nsecs = timekeeping_get_ns(&tk->tkr);
+		base = tk->tkr_mono.base;
+		nsecs = timekeeping_get_ns(&tk->tkr_mono);
 
 	} while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -706,8 +706,8 @@ ktime_t ktime_get_with_offset(enum tk_offsets offs)
 
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
-		base = ktime_add(tk->tkr.base_mono, *offset);
-		nsecs = timekeeping_get_ns(&tk->tkr);
+		base = ktime_add(tk->tkr_mono.base, *offset);
+		nsecs = timekeeping_get_ns(&tk->tkr_mono);
 
 	} while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -777,7 +777,7 @@ void ktime_get_ts64(struct timespec64 *ts)
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
 		ts->tv_sec = tk->xtime_sec;
-		nsec = timekeeping_get_ns(&tk->tkr);
+		nsec = timekeeping_get_ns(&tk->tkr_mono);
 		tomono = tk->wall_to_monotonic;
 
 	} while (read_seqcount_retry(&tk_core.seq, seq));
@@ -863,7 +863,7 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
 		ts_real->tv_nsec = 0;
 
 		nsecs_raw = timekeeping_get_ns_raw(tk);
-		nsecs_real = timekeeping_get_ns(&tk->tkr);
+		nsecs_real = timekeeping_get_ns(&tk->tkr_mono);
 
 	} while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -1046,7 +1046,7 @@ static int change_clocksource(void *data)
 	 */
 	if (try_module_get(new->owner)) {
 		if (!new->enable || new->enable(new) == 0) {
-			old = tk->tkr.clock;
+			old = tk->tkr_mono.clock;
 			tk_setup_internals(tk, new);
 			if (old->disable)
 				old->disable(old);
@@ -1074,11 +1074,11 @@ int timekeeping_notify(struct clocksource *clock)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
 
-	if (tk->tkr.clock == clock)
+	if (tk->tkr_mono.clock == clock)
 		return 0;
 	stop_machine(change_clocksource, clock, NULL);
 	tick_clock_notify();
-	return tk->tkr.clock == clock ? 0 : -1;
+	return tk->tkr_mono.clock == clock ? 0 : -1;
 }
 
 /**
@@ -1119,7 +1119,7 @@ int timekeeping_valid_for_hres(void)
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
 
-		ret = tk->tkr.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
+		ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
 
 	} while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -1138,7 +1138,7 @@ u64 timekeeping_max_deferment(void)
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
 
-		ret = tk->tkr.clock->max_idle_ns;
+		ret = tk->tkr_mono.clock->max_idle_ns;
 
 	} while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -1303,7 +1303,7 @@ void timekeeping_inject_sleeptime64(struct timespec64 *delta)
 void timekeeping_resume(void)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
-	struct clocksource *clock = tk->tkr.clock;
+	struct clocksource *clock = tk->tkr_mono.clock;
 	unsigned long flags;
 	struct timespec64 ts_new, ts_delta;
 	struct timespec tmp;
@@ -1331,16 +1331,16 @@ void timekeeping_resume(void)
 	 * The less preferred source will only be tried if there is no better
 	 * usable source. The rtc part is handled separately in rtc core code.
 	 */
-	cycle_now = tk->tkr.read(clock);
+	cycle_now = tk->tkr_mono.read(clock);
 	if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
-		cycle_now > tk->tkr.cycle_last) {
+		cycle_now > tk->tkr_mono.cycle_last) {
 		u64 num, max = ULLONG_MAX;
 		u32 mult = clock->mult;
 		u32 shift = clock->shift;
 		s64 nsec = 0;
 
-		cycle_delta = clocksource_delta(cycle_now, tk->tkr.cycle_last,
-						tk->tkr.mask);
+		cycle_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
+						tk->tkr_mono.mask);
 
 		/*
 		 * "cycle_delta * mutl" may cause 64 bits overflow, if the
@@ -1366,7 +1366,7 @@ void timekeeping_resume(void)
 		__timekeeping_inject_sleeptime(tk, &ts_delta);
 
 	/* Re-base the last cycle value */
-	tk->tkr.cycle_last = cycle_now;
+	tk->tkr_mono.cycle_last = cycle_now;
 	tk->ntp_error = 0;
 	timekeeping_suspended = 0;
 	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
@@ -1519,15 +1519,15 @@ static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
 	 *
 	 * XXX - TODO: Doc ntp_error calculation.
 	 */
-	if ((mult_adj > 0) && (tk->tkr.mult + mult_adj < mult_adj)) {
+	if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
 		/* NTP adjustment caused clocksource mult overflow */
 		WARN_ON_ONCE(1);
 		return;
 	}
 
-	tk->tkr.mult += mult_adj;
+	tk->tkr_mono.mult += mult_adj;
 	tk->xtime_interval += interval;
-	tk->tkr.xtime_nsec -= offset;
+	tk->tkr_mono.xtime_nsec -= offset;
 	tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
 }
 
@@ -1589,13 +1589,13 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
 		tk->ntp_err_mult = 0;
 	}
 
-	if (unlikely(tk->tkr.clock->maxadj &&
-		(abs(tk->tkr.mult - tk->tkr.clock->mult)
-			> tk->tkr.clock->maxadj))) {
+	if (unlikely(tk->tkr_mono.clock->maxadj &&
+		(abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
+			> tk->tkr_mono.clock->maxadj))) {
 		printk_once(KERN_WARNING
 			"Adjusting %s more than 11%% (%ld vs %ld)\n",
-			tk->tkr.clock->name, (long)tk->tkr.mult,
-			(long)tk->tkr.clock->mult + tk->tkr.clock->maxadj);
+			tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
+			(long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
 	}
 
 	/*
@@ -1612,9 +1612,9 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
 	 * We'll correct this error next time through this function, when
 	 * xtime_nsec is not as small.
 	 */
-	if (unlikely((s64)tk->tkr.xtime_nsec < 0)) {
-		s64 neg = -(s64)tk->tkr.xtime_nsec;
-		tk->tkr.xtime_nsec = 0;
+	if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
+		s64 neg = -(s64)tk->tkr_mono.xtime_nsec;
+		tk->tkr_mono.xtime_nsec = 0;
 		tk->ntp_error += neg << tk->ntp_error_shift;
 	}
 }
@@ -1629,13 +1629,13 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
  */
 static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
 {
-	u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr.shift;
+	u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
 	unsigned int clock_set = 0;
 
-	while (tk->tkr.xtime_nsec >= nsecps) {
+	while (tk->tkr_mono.xtime_nsec >= nsecps) {
 		int leap;
 
-		tk->tkr.xtime_nsec -= nsecps;
+		tk->tkr_mono.xtime_nsec -= nsecps;
 		tk->xtime_sec++;
 
 		/* Figure out if its a leap sec and apply if needed */
@@ -1680,9 +1680,9 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
 
 	/* Accumulate one shifted interval */
 	offset -= interval;
-	tk->tkr.cycle_last += interval;
+	tk->tkr_mono.cycle_last += interval;
 
-	tk->tkr.xtime_nsec += tk->xtime_interval << shift;
+	tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
 	*clock_set |= accumulate_nsecs_to_secs(tk);
 
 	/* Accumulate raw time */
@@ -1725,8 +1725,8 @@ void update_wall_time(void)
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
 	offset = real_tk->cycle_interval;
 #else
-	offset = clocksource_delta(tk->tkr.read(tk->tkr.clock),
-				   tk->tkr.cycle_last, tk->tkr.mask);
+	offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
+				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
 #endif
 
 	/* Check if there's really nothing to do */
@@ -1890,8 +1890,8 @@ ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real, ktime_t *offs_boot,
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
 
-		base = tk->tkr.base_mono;
-		nsecs = tk->tkr.xtime_nsec >> tk->tkr.shift;
+		base = tk->tkr_mono.base;
+		nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
 
 		*offs_real = tk->offs_real;
 		*offs_boot = tk->offs_boot;
@@ -1922,8 +1922,8 @@ ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot,
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
 
-		base = tk->tkr.base_mono;
-		nsecs = timekeeping_get_ns(&tk->tkr);
+		base = tk->tkr_mono.base;
+		nsecs = timekeeping_get_ns(&tk->tkr_mono);
 
 		*offs_real = tk->offs_real;
 		*offs_boot = tk->offs_boot;