-rw-r--r--   arch/arm64/kernel/vdso.c              |  10
-rw-r--r--   arch/s390/kernel/time.c               |  16
-rw-r--r--   arch/tile/kernel/time.c               |  10
-rw-r--r--   arch/x86/kernel/vsyscall_gtod.c       |  23
-rw-r--r--   arch/x86/kvm/x86.c                    |  14
-rw-r--r--   include/linux/timekeeper_internal.h   | 103
-rw-r--r--   kernel/time/timekeeping.c             | 132
7 files changed, 158 insertions, 150 deletions
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 574672f001f7..8296f7f5f0ba 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -211,7 +211,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 void update_vsyscall(struct timekeeper *tk)
 {
         struct timespec xtime_coarse;
-        u32 use_syscall = strcmp(tk->clock->name, "arch_sys_counter");
+        u32 use_syscall = strcmp(tk->tkr.clock->name, "arch_sys_counter");
 
         ++vdso_data->tb_seq_count;
         smp_wmb();
@@ -224,11 +224,11 @@ void update_vsyscall(struct timekeeper *tk)
         vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
 
         if (!use_syscall) {
-                vdso_data->cs_cycle_last = tk->cycle_last;
+                vdso_data->cs_cycle_last = tk->tkr.cycle_last;
                 vdso_data->xtime_clock_sec = tk->xtime_sec;
-                vdso_data->xtime_clock_nsec = tk->xtime_nsec;
-                vdso_data->cs_mult = tk->mult;
-                vdso_data->cs_shift = tk->shift;
+                vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
+                vdso_data->cs_mult = tk->tkr.mult;
+                vdso_data->cs_shift = tk->tkr.shift;
         }
 
         smp_wmb();
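
The paired tb_seq_count increments and smp_wmb() calls above are the write side of a sequence-count protocol; the matching arm64 reader lives in the vDSO itself and is not part of this patch. A minimal standalone C sketch of the read side, with an illustrative struct standing in for vdso_data:

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative stand-in for the vdso_data fields used below. */
struct vdso_snap {
        _Atomic uint32_t tb_seq_count;
        uint64_t xtime_clock_sec;
        uint64_t xtime_clock_nsec;
};

/* Retry while an update is in flight (count is odd) or the count
 * changed across the data reads; pairs with the writer's smp_wmb(). */
static uint64_t read_xtime_sec(const struct vdso_snap *vd)
{
        uint32_t seq;
        uint64_t sec;

        do {
                seq = atomic_load_explicit(&vd->tb_seq_count,
                                           memory_order_acquire);
                sec = vd->xtime_clock_sec;
                atomic_thread_fence(memory_order_acquire);
        } while ((seq & 1) ||
                 seq != atomic_load_explicit(&vd->tb_seq_count,
                                             memory_order_relaxed));
        return sec;
}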
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 97950f392613..4cef607f3711 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -214,26 +214,26 @@ void update_vsyscall(struct timekeeper *tk)
 {
         u64 nsecps;
 
-        if (tk->clock != &clocksource_tod)
+        if (tk->tkr.clock != &clocksource_tod)
                 return;
 
         /* Make userspace gettimeofday spin until we're done. */
         ++vdso_data->tb_update_count;
         smp_wmb();
-        vdso_data->xtime_tod_stamp = tk->cycle_last;
+        vdso_data->xtime_tod_stamp = tk->tkr.cycle_last;
         vdso_data->xtime_clock_sec = tk->xtime_sec;
-        vdso_data->xtime_clock_nsec = tk->xtime_nsec;
+        vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
         vdso_data->wtom_clock_sec =
                 tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
-        vdso_data->wtom_clock_nsec = tk->xtime_nsec +
-                + ((u64) tk->wall_to_monotonic.tv_nsec << tk->shift);
-        nsecps = (u64) NSEC_PER_SEC << tk->shift;
+        vdso_data->wtom_clock_nsec = tk->tkr.xtime_nsec +
+                + ((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr.shift);
+        nsecps = (u64) NSEC_PER_SEC << tk->tkr.shift;
         while (vdso_data->wtom_clock_nsec >= nsecps) {
                 vdso_data->wtom_clock_nsec -= nsecps;
                 vdso_data->wtom_clock_sec++;
         }
-        vdso_data->tk_mult = tk->mult;
-        vdso_data->tk_shift = tk->shift;
+        vdso_data->tk_mult = tk->tkr.mult;
+        vdso_data->tk_shift = tk->tkr.shift;
         smp_wmb();
         ++vdso_data->tb_update_count;
 }
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index d22d5bfc1e4e..d8fbc289e680 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -261,7 +261,7 @@ void update_vsyscall_tz(void)
 void update_vsyscall(struct timekeeper *tk)
 {
         struct timespec *wtm = &tk->wall_to_monotonic;
-        struct clocksource *clock = tk->clock;
+        struct clocksource *clock = tk->tkr.clock;
 
         if (clock != &cycle_counter_cs)
                 return;
@@ -269,13 +269,13 @@ void update_vsyscall(struct timekeeper *tk)
         /* Userspace gettimeofday will spin while this value is odd. */
         ++vdso_data->tb_update_count;
         smp_wmb();
-        vdso_data->xtime_tod_stamp = tk->cycle_last;
+        vdso_data->xtime_tod_stamp = tk->tkr.cycle_last;
         vdso_data->xtime_clock_sec = tk->xtime_sec;
-        vdso_data->xtime_clock_nsec = tk->xtime_nsec;
+        vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
         vdso_data->wtom_clock_sec = wtm->tv_sec;
         vdso_data->wtom_clock_nsec = wtm->tv_nsec;
-        vdso_data->mult = tk->mult;
-        vdso_data->shift = tk->shift;
+        vdso_data->mult = tk->tkr.mult;
+        vdso_data->shift = tk->tkr.shift;
         smp_wmb();
         ++vdso_data->tb_update_count;
 }
diff --git a/arch/x86/kernel/vsyscall_gtod.c b/arch/x86/kernel/vsyscall_gtod.c
index c3cb3c144591..c7d791f32b98 100644
--- a/arch/x86/kernel/vsyscall_gtod.c
+++ b/arch/x86/kernel/vsyscall_gtod.c
@@ -31,29 +31,30 @@ void update_vsyscall(struct timekeeper *tk)
         gtod_write_begin(vdata);
 
         /* copy vsyscall data */
-        vdata->vclock_mode = tk->clock->archdata.vclock_mode;
-        vdata->cycle_last = tk->cycle_last;
-        vdata->mask = tk->clock->mask;
-        vdata->mult = tk->mult;
-        vdata->shift = tk->shift;
+        vdata->vclock_mode = tk->tkr.clock->archdata.vclock_mode;
+        vdata->cycle_last = tk->tkr.cycle_last;
+        vdata->mask = tk->tkr.mask;
+        vdata->mult = tk->tkr.mult;
+        vdata->shift = tk->tkr.shift;
 
         vdata->wall_time_sec = tk->xtime_sec;
-        vdata->wall_time_snsec = tk->xtime_nsec;
+        vdata->wall_time_snsec = tk->tkr.xtime_nsec;
 
         vdata->monotonic_time_sec = tk->xtime_sec
                                         + tk->wall_to_monotonic.tv_sec;
-        vdata->monotonic_time_snsec = tk->xtime_nsec
+        vdata->monotonic_time_snsec = tk->tkr.xtime_nsec
                                         + ((u64)tk->wall_to_monotonic.tv_nsec
-                                                << tk->shift);
+                                                << tk->tkr.shift);
         while (vdata->monotonic_time_snsec >=
-                                        (((u64)NSEC_PER_SEC) << tk->shift)) {
+                                        (((u64)NSEC_PER_SEC) << tk->tkr.shift)) {
                 vdata->monotonic_time_snsec -=
-                                        ((u64)NSEC_PER_SEC) << tk->shift;
+                                        ((u64)NSEC_PER_SEC) << tk->tkr.shift;
                 vdata->monotonic_time_sec++;
         }
 
         vdata->wall_time_coarse_sec = tk->xtime_sec;
-        vdata->wall_time_coarse_nsec = (long)(tk->xtime_nsec >> tk->shift);
+        vdata->wall_time_coarse_nsec = (long)(tk->tkr.xtime_nsec >>
+                                                tk->tkr.shift);
 
         vdata->monotonic_time_coarse_sec =
                 vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7b25125f3f42..b7e57946d1c1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -995,19 +995,19 @@ static void update_pvclock_gtod(struct timekeeper *tk)
         struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
         u64 boot_ns;
 
-        boot_ns = ktime_to_ns(ktime_add(tk->base_mono, tk->offs_boot));
+        boot_ns = ktime_to_ns(ktime_add(tk->tkr.base_mono, tk->offs_boot));
 
         write_seqcount_begin(&vdata->seq);
 
         /* copy pvclock gtod data */
-        vdata->clock.vclock_mode = tk->clock->archdata.vclock_mode;
-        vdata->clock.cycle_last = tk->cycle_last;
-        vdata->clock.mask = tk->clock->mask;
-        vdata->clock.mult = tk->mult;
-        vdata->clock.shift = tk->shift;
+        vdata->clock.vclock_mode = tk->tkr.clock->archdata.vclock_mode;
+        vdata->clock.cycle_last = tk->tkr.cycle_last;
+        vdata->clock.mask = tk->tkr.mask;
+        vdata->clock.mult = tk->tkr.mult;
+        vdata->clock.shift = tk->tkr.shift;
 
         vdata->boot_ns = boot_ns;
-        vdata->nsec_base = tk->xtime_nsec;
+        vdata->nsec_base = tk->tkr.xtime_nsec;
 
         write_seqcount_end(&vdata->seq);
 }
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 75bb8add78f5..97381997625b 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -10,80 +10,87 @@
 #include <linux/jiffies.h>
 #include <linux/time.h>
 
-/*
- * Structure holding internal timekeeping values.
- *
- * Note: wall_to_monotonic is what we need to add to xtime (or xtime
- * corrected for sub jiffie times) to get to monotonic time.
- * Monotonic is pegged at zero at system boot time, so
- * wall_to_monotonic will be negative, however, we will ALWAYS keep
- * the tv_nsec part positive so we can use the usual normalization.
+/**
+ * struct tk_read_base - base structure for timekeeping readout
+ * @clock:      Current clocksource used for timekeeping.
+ * @read:       Read function of @clock
+ * @mask:       Bitmask for two's complement subtraction of non 64bit clocks
+ * @cycle_last: @clock cycle value at last update
+ * @mult:       NTP adjusted multiplier for scaled math conversion
+ * @shift:      Shift value for scaled math conversion
+ * @xtime_nsec: Shifted (fractional) nano seconds offset for readout
+ * @base_mono:  ktime_t (nanoseconds) base time for readout
  *
- * wall_to_monotonic is moved after resume from suspend for the
- * monotonic time not to jump. To calculate the real boot time offset
- * we need to do offs_real - offs_boot.
+ * This struct has size 56 byte on 64 bit. Together with a seqcount it
+ * occupies a single 64byte cache line.
  *
- * - wall_to_monotonic is no longer the boot time, getboottime must be
- *   used instead.
+ * The struct is separate from struct timekeeper as it is also used
+ * for a fast NMI safe accessor to clock monotonic.
  */
-struct timekeeper {
-        /* Current clocksource used for timekeeping. */
+struct tk_read_base {
         struct clocksource      *clock;
-        /* Read function of @clock */
         cycle_t                 (*read)(struct clocksource *cs);
-        /* Bitmask for two's complement subtraction of non 64bit counters */
         cycle_t                 mask;
-        /* Last cycle value */
         cycle_t                 cycle_last;
-        /* NTP adjusted clock multiplier */
         u32                     mult;
-        /* The shift value of the current clocksource. */
         u32                     shift;
-        /* Clock shifted nano seconds */
         u64                     xtime_nsec;
-
-        /* Monotonic base time */
         ktime_t                 base_mono;
+};
 
-        /* Current CLOCK_REALTIME time in seconds */
+/**
+ * struct timekeeper - Structure holding internal timekeeping values.
+ * @tkr:                The readout base structure
+ * @xtime_sec:          Current CLOCK_REALTIME time in seconds
+ * @wall_to_monotonic:  CLOCK_REALTIME to CLOCK_MONOTONIC offset
+ * @offs_real:          Offset clock monotonic -> clock realtime
+ * @offs_boot:          Offset clock monotonic -> clock boottime
+ * @offs_tai:           Offset clock monotonic -> clock tai
+ * @tai_offset:         The current UTC to TAI offset in seconds
+ * @base_raw:           Monotonic raw base time in ktime_t format
+ * @raw_time:           Monotonic raw base time in timespec64 format
+ * @cycle_interval:     Number of clock cycles in one NTP interval
+ * @xtime_interval:     Number of clock shifted nano seconds in one NTP
+ *                      interval.
+ * @xtime_remainder:    Shifted nano seconds left over when rounding
+ *                      @cycle_interval
+ * @raw_interval:       Raw nano seconds accumulated per NTP interval.
+ * @ntp_error:          Difference between accumulated time and NTP time in ntp
+ *                      shifted nano seconds.
+ * @ntp_error_shift:    Shift conversion between clock shifted nano seconds and
+ *                      ntp shifted nano seconds.
+ *
+ * Note: For timespec(64) based interfaces wall_to_monotonic is what
+ * we need to add to xtime (or xtime corrected for sub jiffie times)
+ * to get to monotonic time. Monotonic is pegged at zero at system
+ * boot time, so wall_to_monotonic will be negative, however, we will
+ * ALWAYS keep the tv_nsec part positive so we can use the usual
+ * normalization.
+ *
+ * wall_to_monotonic is moved after resume from suspend for the
+ * monotonic time not to jump. We need to add total_sleep_time to
+ * wall_to_monotonic to get the real boot based time offset.
+ *
+ * wall_to_monotonic is no longer the boot time, getboottime must be
+ * used instead.
+ */
+struct timekeeper {
+        struct tk_read_base     tkr;
         u64                     xtime_sec;
-        /* CLOCK_REALTIME to CLOCK_MONOTONIC offset */
         struct timespec64       wall_to_monotonic;
-
-        /* Offset clock monotonic -> clock realtime */
         ktime_t                 offs_real;
-        /* Offset clock monotonic -> clock boottime */
         ktime_t                 offs_boot;
-        /* Offset clock monotonic -> clock tai */
         ktime_t                 offs_tai;
-
-        /* The current UTC to TAI offset in seconds */
         s32                     tai_offset;
-
-        /* Monotonic raw base time */
         ktime_t                 base_raw;
-
-        /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
         struct timespec64       raw_time;
 
-        /* Number of clock cycles in one NTP interval. */
+        /* The following members are for timekeeping internal use */
         cycle_t                 cycle_interval;
-        /* Number of clock shifted nano seconds in one NTP interval. */
         u64                     xtime_interval;
-        /* shifted nano seconds left over when rounding cycle_interval */
         s64                     xtime_remainder;
-        /* Raw nano seconds accumulated per NTP interval. */
         u32                     raw_interval;
-
-        /*
-         * Difference between accumulated time and NTP time in ntp
-         * shifted nano seconds.
-         */
         s64                     ntp_error;
-        /*
-         * Shift conversion between clock shifted nano seconds and
-         * ntp shifted nano seconds.
-         */
         u32                     ntp_error_shift;
 };
 
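The "56 byte on 64 bit ... single 64byte cache line" note above is the design point of the split: a reader needs only tk_read_base plus a sequence counter, and both fit in one cache line. A sketch of such a pairing in kernel style; this struct is illustrative and not introduced by this patch:

/* 4-byte seqcount (plus padding) and the 56-byte tk_read_base share
 * one 64-byte cache line, so a clock-monotonic reader -- including a
 * future NMI safe one -- touches a single line per snapshot. */
struct tk_fast_example {
        seqcount_t              seq;
        struct tk_read_base     base;
} ____cacheline_aligned;
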
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 14b7367e6b94..ccb69980ef7e 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -52,8 +52,8 @@ bool __read_mostly persistent_clock_exist = false;
 
 static inline void tk_normalize_xtime(struct timekeeper *tk)
 {
-        while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
-                tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift;
+        while (tk->tkr.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr.shift)) {
+                tk->tkr.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr.shift;
                 tk->xtime_sec++;
         }
 }
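
xtime_nsec holds nanoseconds pre-scaled by the clocksource shift, so "one second" in that unit is NSEC_PER_SEC << shift. A standalone worked example of the normalization above, assuming an illustrative shift of 8:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
        uint32_t shift = 8;                        /* illustrative value */
        uint64_t one_sec = NSEC_PER_SEC << shift;  /* 256e9 shifted ns */
        uint64_t xtime_nsec = 300000000000ULL;     /* ~1.17 s, shifted */
        uint64_t xtime_sec = 0;

        while (xtime_nsec >= one_sec) {            /* tk_normalize_xtime() */
                xtime_nsec -= one_sec;
                xtime_sec++;
        }
        /* prints "sec=1 nsec=171875000": 300e9 / 2^8 ns = 1.171875 s */
        printf("sec=%llu nsec=%llu\n",
               (unsigned long long)xtime_sec,
               (unsigned long long)(xtime_nsec >> shift));
        return 0;
}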
@@ -63,20 +63,20 @@ static inline struct timespec64 tk_xtime(struct timekeeper *tk)
         struct timespec64 ts;
 
         ts.tv_sec = tk->xtime_sec;
-        ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift);
+        ts.tv_nsec = (long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
         return ts;
 }
 
 static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
 {
         tk->xtime_sec = ts->tv_sec;
-        tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift;
+        tk->tkr.xtime_nsec = (u64)ts->tv_nsec << tk->tkr.shift;
 }
 
 static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
 {
         tk->xtime_sec += ts->tv_sec;
-        tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
+        tk->tkr.xtime_nsec += (u64)ts->tv_nsec << tk->tkr.shift;
         tk_normalize_xtime(tk);
 }
 
@@ -119,11 +119,11 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
         u64 tmp, ntpinterval;
         struct clocksource *old_clock;
 
-        old_clock = tk->clock;
-        tk->clock = clock;
-        tk->read = clock->read;
-        tk->mask = clock->mask;
-        tk->cycle_last = tk->read(clock);
+        old_clock = tk->tkr.clock;
+        tk->tkr.clock = clock;
+        tk->tkr.read = clock->read;
+        tk->tkr.mask = clock->mask;
+        tk->tkr.cycle_last = tk->tkr.read(clock);
 
         /* Do the ns -> cycle conversion first, using original mult */
         tmp = NTP_INTERVAL_LENGTH;
@@ -147,11 +147,11 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
         if (old_clock) {
                 int shift_change = clock->shift - old_clock->shift;
                 if (shift_change < 0)
-                        tk->xtime_nsec >>= -shift_change;
+                        tk->tkr.xtime_nsec >>= -shift_change;
                 else
-                        tk->xtime_nsec <<= shift_change;
+                        tk->tkr.xtime_nsec <<= shift_change;
         }
-        tk->shift = clock->shift;
+        tk->tkr.shift = clock->shift;
 
         tk->ntp_error = 0;
         tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
@@ -161,7 +161,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
          * active clocksource. These value will be adjusted via NTP
          * to counteract clock drifting.
          */
-        tk->mult = clock->mult;
+        tk->tkr.mult = clock->mult;
 }
 
 /* Timekeeper helper functions. */
@@ -179,13 +179,13 @@ static inline s64 timekeeping_get_ns(struct timekeeper *tk)
         s64 nsec;
 
         /* read clocksource: */
-        cycle_now = tk->read(tk->clock);
+        cycle_now = tk->tkr.read(tk->tkr.clock);
 
         /* calculate the delta since the last update_wall_time: */
-        delta = clocksource_delta(cycle_now, tk->cycle_last, tk->mask);
+        delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
 
-        nsec = delta * tk->mult + tk->xtime_nsec;
-        nsec >>= tk->shift;
+        nsec = delta * tk->tkr.mult + tk->tkr.xtime_nsec;
+        nsec >>= tk->tkr.shift;
 
         /* If arch requires, add in get_arch_timeoffset() */
         return nsec + arch_gettimeoffset();
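
The two lines at the heart of timekeeping_get_ns() are plain scaled math: ns = (delta * mult + xtime_nsec) >> shift. A small standalone example with made-up clocksource numbers, showing both the conversion and why NTP steering can work by nudging mult:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        /* Made-up clocksource: a 1 MHz counter, i.e. 1000 ns per cycle.
         * With shift = 20, mult = 1000 << 20 encodes that rate. */
        uint32_t shift = 20;
        uint32_t mult = 1000u << 20;
        uint64_t delta = 5;             /* cycles since cycle_last */

        assert(((delta * mult) >> shift) == 5000);      /* 5 us */

        /* NTP steering tweaks mult in small steps; +1 here changes the
         * effective rate by well under one part per million. */
        assert(((delta * (mult + 1)) >> shift) == 5000);
        return 0;
}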
@@ -193,15 +193,15 @@ static inline s64 timekeeping_get_ns(struct timekeeper *tk)
 
 static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
 {
-        struct clocksource *clock = tk->clock;
+        struct clocksource *clock = tk->tkr.clock;
         cycle_t cycle_now, delta;
         s64 nsec;
 
         /* read clocksource: */
-        cycle_now = tk->read(clock);
+        cycle_now = tk->tkr.read(clock);
 
         /* calculate the delta since the last update_wall_time: */
-        delta = clocksource_delta(cycle_now, tk->cycle_last, tk->mask);
+        delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
 
         /* convert delta to nanoseconds. */
         nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
@@ -217,8 +217,8 @@ static inline void update_vsyscall(struct timekeeper *tk)
         struct timespec xt;
 
         xt = tk_xtime(tk);
-        update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult,
-                            tk->cycle_last);
+        update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->tkr.clock,
+                            tk->tkr.mult, tk->tkr.cycle_last);
 }
 
 static inline void old_vsyscall_fixup(struct timekeeper *tk)
@@ -235,11 +235,11 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
          * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
          * users are removed, this can be killed.
          */
-        remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
-        tk->xtime_nsec -= remainder;
-        tk->xtime_nsec += 1ULL << tk->shift;
+        remainder = tk->tkr.xtime_nsec & ((1ULL << tk->tkr.shift) - 1);
+        tk->tkr.xtime_nsec -= remainder;
+        tk->tkr.xtime_nsec += 1ULL << tk->tkr.shift;
         tk->ntp_error += remainder << tk->ntp_error_shift;
-        tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift;
+        tk->ntp_error -= (1ULL << tk->tkr.shift) << tk->ntp_error_shift;
 }
 #else
 #define old_vsyscall_fixup(tk)
@@ -304,7 +304,7 @@ static inline void tk_update_ktime_data(struct timekeeper *tk)
         nsec = (s64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
         nsec *= NSEC_PER_SEC;
         nsec += tk->wall_to_monotonic.tv_nsec;
-        tk->base_mono = ns_to_ktime(nsec);
+        tk->tkr.base_mono = ns_to_ktime(nsec);
 
         /* Update the monotonic raw base */
         tk->base_raw = timespec64_to_ktime(tk->raw_time);
@@ -336,18 +336,18 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
  */
 static void timekeeping_forward_now(struct timekeeper *tk)
 {
-        struct clocksource *clock = tk->clock;
+        struct clocksource *clock = tk->tkr.clock;
         cycle_t cycle_now, delta;
         s64 nsec;
 
-        cycle_now = tk->read(clock);
-        delta = clocksource_delta(cycle_now, tk->cycle_last, tk->mask);
-        tk->cycle_last = cycle_now;
+        cycle_now = tk->tkr.read(clock);
+        delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
+        tk->tkr.cycle_last = cycle_now;
 
-        tk->xtime_nsec += delta * tk->mult;
+        tk->tkr.xtime_nsec += delta * tk->tkr.mult;
 
         /* If arch requires, add in get_arch_timeoffset() */
-        tk->xtime_nsec += (u64)arch_gettimeoffset() << tk->shift;
+        tk->tkr.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr.shift;
 
         tk_normalize_xtime(tk);
 
@@ -412,7 +412,7 @@ ktime_t ktime_get(void)
 
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
-                base = tk->base_mono;
+                base = tk->tkr.base_mono;
                 nsecs = timekeeping_get_ns(tk);
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
@@ -438,7 +438,7 @@ ktime_t ktime_get_with_offset(enum tk_offsets offs)
 
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
-                base = ktime_add(tk->base_mono, *offset);
+                base = ktime_add(tk->tkr.base_mono, *offset);
                 nsecs = timekeeping_get_ns(tk);
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
@@ -731,7 +731,7 @@ static int change_clocksource(void *data)
          */
         if (try_module_get(new->owner)) {
                 if (!new->enable || new->enable(new) == 0) {
-                        old = tk->clock;
+                        old = tk->tkr.clock;
                         tk_setup_internals(tk, new);
                         if (old->disable)
                                 old->disable(old);
@@ -759,11 +759,11 @@ int timekeeping_notify(struct clocksource *clock)
 {
         struct timekeeper *tk = &tk_core.timekeeper;
 
-        if (tk->clock == clock)
+        if (tk->tkr.clock == clock)
                 return 0;
         stop_machine(change_clocksource, clock, NULL);
         tick_clock_notify();
-        return tk->clock == clock ? 0 : -1;
+        return tk->tkr.clock == clock ? 0 : -1;
 }
 
 /**
@@ -803,7 +803,7 @@ int timekeeping_valid_for_hres(void)
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
 
-                ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
+                ret = tk->tkr.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -822,7 +822,7 @@ u64 timekeeping_max_deferment(void)
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
 
-                ret = tk->clock->max_idle_ns;
+                ret = tk->tkr.clock->max_idle_ns;
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -989,7 +989,7 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
 static void timekeeping_resume(void)
 {
         struct timekeeper *tk = &tk_core.timekeeper;
-        struct clocksource *clock = tk->clock;
+        struct clocksource *clock = tk->tkr.clock;
         unsigned long flags;
         struct timespec64 ts_new, ts_delta;
         struct timespec tmp;
@@ -1017,16 +1017,16 @@ static void timekeeping_resume(void)
          * The less preferred source will only be tried if there is no better
          * usable source. The rtc part is handled separately in rtc core code.
          */
-        cycle_now = tk->read(clock);
+        cycle_now = tk->tkr.read(clock);
         if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
-                cycle_now > tk->cycle_last) {
+                cycle_now > tk->tkr.cycle_last) {
                 u64 num, max = ULLONG_MAX;
                 u32 mult = clock->mult;
                 u32 shift = clock->shift;
                 s64 nsec = 0;
 
-                cycle_delta = clocksource_delta(cycle_now, tk->cycle_last,
-                                                tk->mask);
+                cycle_delta = clocksource_delta(cycle_now, tk->tkr.cycle_last,
+                                                tk->tkr.mask);
 
                 /*
                  * "cycle_delta * mutl" may cause 64 bits overflow, if the
@@ -1052,7 +1052,7 @@ static void timekeeping_resume(void)
         __timekeeping_inject_sleeptime(tk, &ts_delta);
 
         /* Re-base the last cycle value */
-        tk->cycle_last = cycle_now;
+        tk->tkr.cycle_last = cycle_now;
         tk->ntp_error = 0;
         timekeeping_suspended = 0;
         timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
@@ -1239,12 +1239,12 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
                 }
         }
 
-        if (unlikely(tk->clock->maxadj &&
-                (tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
+        if (unlikely(tk->tkr.clock->maxadj &&
+                (tk->tkr.mult + adj > tk->tkr.clock->mult + tk->tkr.clock->maxadj))) {
                 printk_deferred_once(KERN_WARNING
                         "Adjusting %s more than 11%% (%ld vs %ld)\n",
-                        tk->clock->name, (long)tk->mult + adj,
-                        (long)tk->clock->mult + tk->clock->maxadj);
+                        tk->tkr.clock->name, (long)tk->tkr.mult + adj,
+                        (long)tk->tkr.clock->mult + tk->tkr.clock->maxadj);
         }
         /*
          * So the following can be confusing.
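
The check above bounds NTP steering: the warning fires when an adjustment would push mult more than clock->maxadj past the clocksource's nominal mult; maxadj is derived from mult by the clocksource core (about 11%, which is where the message's percentage comes from). A standalone restatement of the guard, with hypothetical names:

#include <stdbool.h>
#include <stdint.h>

/* Warn-worthy when cur_mult + adj exceeds nominal_mult + maxadj;
 * a zero maxadj disables the check, as in the kernel code above. */
static bool adj_over_limit(uint32_t nominal_mult, uint32_t maxadj,
                           uint32_t cur_mult, int32_t adj)
{
        return maxadj && ((int64_t)cur_mult + adj >
                          (int64_t)nominal_mult + maxadj);
}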
@@ -1295,9 +1295,9 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
          *
          * XXX - TODO: Doc ntp_error calculation.
          */
-        tk->mult += adj;
+        tk->tkr.mult += adj;
         tk->xtime_interval += interval;
-        tk->xtime_nsec -= offset;
+        tk->tkr.xtime_nsec -= offset;
         tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
 
 out_adjust:
@@ -1315,9 +1315,9 @@ out_adjust:
          * We'll correct this error next time through this function, when
          * xtime_nsec is not as small.
          */
-        if (unlikely((s64)tk->xtime_nsec < 0)) {
-                s64 neg = -(s64)tk->xtime_nsec;
-                tk->xtime_nsec = 0;
+        if (unlikely((s64)tk->tkr.xtime_nsec < 0)) {
+                s64 neg = -(s64)tk->tkr.xtime_nsec;
+                tk->tkr.xtime_nsec = 0;
                 tk->ntp_error += neg << tk->ntp_error_shift;
         }
 
@@ -1333,13 +1333,13 @@ out_adjust:
  */
 static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
 {
-        u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;
+        u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr.shift;
         unsigned int clock_set = 0;
 
-        while (tk->xtime_nsec >= nsecps) {
+        while (tk->tkr.xtime_nsec >= nsecps) {
                 int leap;
 
-                tk->xtime_nsec -= nsecps;
+                tk->tkr.xtime_nsec -= nsecps;
                 tk->xtime_sec++;
 
                 /* Figure out if its a leap sec and apply if needed */
@@ -1384,9 +1384,9 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
 
         /* Accumulate one shifted interval */
         offset -= interval;
-        tk->cycle_last += interval;
+        tk->tkr.cycle_last += interval;
 
-        tk->xtime_nsec += tk->xtime_interval << shift;
+        tk->tkr.xtime_nsec += tk->xtime_interval << shift;
         *clock_set |= accumulate_nsecs_to_secs(tk);
 
         /* Accumulate raw time */
@@ -1429,8 +1429,8 @@ void update_wall_time(void)
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
         offset = real_tk->cycle_interval;
 #else
-        offset = clocksource_delta(tk->read(tk->clock), tk->cycle_last,
-                                   tk->mask);
+        offset = clocksource_delta(tk->tkr.read(tk->tkr.clock),
+                                   tk->tkr.cycle_last, tk->tkr.mask);
 #endif
 
         /* Check if there's really nothing to do */
@@ -1591,8 +1591,8 @@ ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real, ktime_t *offs_boot,
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
 
-                base = tk->base_mono;
-                nsecs = tk->xtime_nsec >> tk->shift;
+                base = tk->tkr.base_mono;
+                nsecs = tk->tkr.xtime_nsec >> tk->tkr.shift;
 
                 *offs_real = tk->offs_real;
                 *offs_boot = tk->offs_boot;
@@ -1623,7 +1623,7 @@ ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot,
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
 
-                base = tk->base_mono;
+                base = tk->tkr.base_mono;
                 nsecs = timekeeping_get_ns(tk);
 
                 *offs_real = tk->offs_real;