-rw-r--r--   arch/arm64/kernel/vdso.c             |  10
-rw-r--r--   arch/s390/kernel/time.c              |  18
-rw-r--r--   arch/tile/kernel/time.c              |  24
-rw-r--r--   arch/x86/kernel/vsyscall_gtod.c      |  24
-rw-r--r--   arch/x86/kvm/x86.c                   |  14
-rw-r--r--   include/linux/timekeeper_internal.h  |  12
-rw-r--r--   kernel/time/timekeeping.c            | 150
7 files changed, 126 insertions, 126 deletions
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 32aeea083d93..ec37ab3f524f 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -200,7 +200,7 @@ up_fail:
 void update_vsyscall(struct timekeeper *tk)
 {
         struct timespec xtime_coarse;
-        u32 use_syscall = strcmp(tk->tkr.clock->name, "arch_sys_counter");
+        u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter");
 
         ++vdso_data->tb_seq_count;
         smp_wmb();
@@ -213,11 +213,11 @@ void update_vsyscall(struct timekeeper *tk)
         vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
 
         if (!use_syscall) {
-                vdso_data->cs_cycle_last = tk->tkr.cycle_last;
+                vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
                 vdso_data->xtime_clock_sec = tk->xtime_sec;
-                vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
-                vdso_data->cs_mult = tk->tkr.mult;
-                vdso_data->cs_shift = tk->tkr.shift;
+                vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
+                vdso_data->cs_mult = tk->tkr_mono.mult;
+                vdso_data->cs_shift = tk->tkr_mono.shift;
         }
 
         smp_wmb();
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 6c273cd815bb..170ddd2018b3 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -215,20 +215,20 @@ void update_vsyscall(struct timekeeper *tk)
 {
         u64 nsecps;
 
-        if (tk->tkr.clock != &clocksource_tod)
+        if (tk->tkr_mono.clock != &clocksource_tod)
                 return;
 
         /* Make userspace gettimeofday spin until we're done. */
         ++vdso_data->tb_update_count;
         smp_wmb();
-        vdso_data->xtime_tod_stamp = tk->tkr.cycle_last;
+        vdso_data->xtime_tod_stamp = tk->tkr_mono.cycle_last;
         vdso_data->xtime_clock_sec = tk->xtime_sec;
-        vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
+        vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
         vdso_data->wtom_clock_sec =
                 tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
-        vdso_data->wtom_clock_nsec = tk->tkr.xtime_nsec +
-                + ((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr.shift);
-        nsecps = (u64) NSEC_PER_SEC << tk->tkr.shift;
+        vdso_data->wtom_clock_nsec = tk->tkr_mono.xtime_nsec +
+                + ((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift);
+        nsecps = (u64) NSEC_PER_SEC << tk->tkr_mono.shift;
         while (vdso_data->wtom_clock_nsec >= nsecps) {
                 vdso_data->wtom_clock_nsec -= nsecps;
                 vdso_data->wtom_clock_sec++;
@@ -236,7 +236,7 @@ void update_vsyscall(struct timekeeper *tk)
 
         vdso_data->xtime_coarse_sec = tk->xtime_sec;
         vdso_data->xtime_coarse_nsec =
-                (long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
+                (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
         vdso_data->wtom_coarse_sec =
                 vdso_data->xtime_coarse_sec + tk->wall_to_monotonic.tv_sec;
         vdso_data->wtom_coarse_nsec =
@@ -246,8 +246,8 @@ void update_vsyscall(struct timekeeper *tk)
                 vdso_data->wtom_coarse_sec++;
         }
 
-        vdso_data->tk_mult = tk->tkr.mult;
-        vdso_data->tk_shift = tk->tkr.shift;
+        vdso_data->tk_mult = tk->tkr_mono.mult;
+        vdso_data->tk_shift = tk->tkr_mono.shift;
         smp_wmb();
         ++vdso_data->tb_update_count;
 }
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index d412b0856c0a..00178ecf9aea 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -257,34 +257,34 @@ void update_vsyscall_tz(void)
 
 void update_vsyscall(struct timekeeper *tk)
 {
-        if (tk->tkr.clock != &cycle_counter_cs)
+        if (tk->tkr_mono.clock != &cycle_counter_cs)
                 return;
 
         write_seqcount_begin(&vdso_data->tb_seq);
 
-        vdso_data->cycle_last = tk->tkr.cycle_last;
-        vdso_data->mask = tk->tkr.mask;
-        vdso_data->mult = tk->tkr.mult;
-        vdso_data->shift = tk->tkr.shift;
+        vdso_data->cycle_last = tk->tkr_mono.cycle_last;
+        vdso_data->mask = tk->tkr_mono.mask;
+        vdso_data->mult = tk->tkr_mono.mult;
+        vdso_data->shift = tk->tkr_mono.shift;
 
         vdso_data->wall_time_sec = tk->xtime_sec;
-        vdso_data->wall_time_snsec = tk->tkr.xtime_nsec;
+        vdso_data->wall_time_snsec = tk->tkr_mono.xtime_nsec;
 
         vdso_data->monotonic_time_sec = tk->xtime_sec
                                         + tk->wall_to_monotonic.tv_sec;
-        vdso_data->monotonic_time_snsec = tk->tkr.xtime_nsec
+        vdso_data->monotonic_time_snsec = tk->tkr_mono.xtime_nsec
                                         + ((u64)tk->wall_to_monotonic.tv_nsec
-                                                << tk->tkr.shift);
+                                                << tk->tkr_mono.shift);
         while (vdso_data->monotonic_time_snsec >=
-                                (((u64)NSEC_PER_SEC) << tk->tkr.shift)) {
+                                (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
                 vdso_data->monotonic_time_snsec -=
-                                ((u64)NSEC_PER_SEC) << tk->tkr.shift;
+                                ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
                 vdso_data->monotonic_time_sec++;
         }
 
         vdso_data->wall_time_coarse_sec = tk->xtime_sec;
-        vdso_data->wall_time_coarse_nsec = (long)(tk->tkr.xtime_nsec >>
-                                                  tk->tkr.shift);
+        vdso_data->wall_time_coarse_nsec = (long)(tk->tkr_mono.xtime_nsec >>
+                                                  tk->tkr_mono.shift);
 
         vdso_data->monotonic_time_coarse_sec =
                 vdso_data->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
diff --git a/arch/x86/kernel/vsyscall_gtod.c b/arch/x86/kernel/vsyscall_gtod.c
index c7d791f32b98..51e330416995 100644
--- a/arch/x86/kernel/vsyscall_gtod.c
+++ b/arch/x86/kernel/vsyscall_gtod.c
@@ -31,30 +31,30 @@ void update_vsyscall(struct timekeeper *tk)
         gtod_write_begin(vdata);
 
         /* copy vsyscall data */
-        vdata->vclock_mode = tk->tkr.clock->archdata.vclock_mode;
-        vdata->cycle_last = tk->tkr.cycle_last;
-        vdata->mask = tk->tkr.mask;
-        vdata->mult = tk->tkr.mult;
-        vdata->shift = tk->tkr.shift;
+        vdata->vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
+        vdata->cycle_last = tk->tkr_mono.cycle_last;
+        vdata->mask = tk->tkr_mono.mask;
+        vdata->mult = tk->tkr_mono.mult;
+        vdata->shift = tk->tkr_mono.shift;
 
         vdata->wall_time_sec = tk->xtime_sec;
-        vdata->wall_time_snsec = tk->tkr.xtime_nsec;
+        vdata->wall_time_snsec = tk->tkr_mono.xtime_nsec;
 
         vdata->monotonic_time_sec = tk->xtime_sec
                                 + tk->wall_to_monotonic.tv_sec;
-        vdata->monotonic_time_snsec = tk->tkr.xtime_nsec
+        vdata->monotonic_time_snsec = tk->tkr_mono.xtime_nsec
                                 + ((u64)tk->wall_to_monotonic.tv_nsec
-                                        << tk->tkr.shift);
+                                        << tk->tkr_mono.shift);
         while (vdata->monotonic_time_snsec >=
-                                (((u64)NSEC_PER_SEC) << tk->tkr.shift)) {
+                                (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
                 vdata->monotonic_time_snsec -=
-                                ((u64)NSEC_PER_SEC) << tk->tkr.shift;
+                                ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
                 vdata->monotonic_time_sec++;
         }
 
         vdata->wall_time_coarse_sec = tk->xtime_sec;
-        vdata->wall_time_coarse_nsec = (long)(tk->tkr.xtime_nsec >>
-                                              tk->tkr.shift);
+        vdata->wall_time_coarse_nsec = (long)(tk->tkr_mono.xtime_nsec >>
+                                              tk->tkr_mono.shift);
 
         vdata->monotonic_time_coarse_sec =
                 vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
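The while loops in the vsyscall copies above exist because xtime_nsec is kept left-shifted by the clocksource shift, so one whole second is NSEC_PER_SEC << shift and any carry has to be moved into the seconds field by hand. A minimal sketch of that normalization step, for illustration only (the helper name below is made up and is not part of the patch):

static void normalize_shifted_nsec(u64 *sec, u64 *snsec, u32 shift)
{
        /* One second expressed in the same shifted units as snsec. */
        u64 one_sec = (u64)NSEC_PER_SEC << shift;

        while (*snsec >= one_sec) {
                *snsec -= one_sec;
                (*sec)++;
        }
}

This is the same carry logic as tk_normalize_xtime() and accumulate_nsecs_to_secs() in kernel/time/timekeeping.c, which this patch touches below.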
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bd7a70be41b3..d7a300e0147f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1070,19 +1070,19 @@ static void update_pvclock_gtod(struct timekeeper *tk)
         struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
         u64 boot_ns;
 
-        boot_ns = ktime_to_ns(ktime_add(tk->tkr.base_mono, tk->offs_boot));
+        boot_ns = ktime_to_ns(ktime_add(tk->tkr_mono.base, tk->offs_boot));
 
         write_seqcount_begin(&vdata->seq);
 
         /* copy pvclock gtod data */
-        vdata->clock.vclock_mode = tk->tkr.clock->archdata.vclock_mode;
-        vdata->clock.cycle_last = tk->tkr.cycle_last;
-        vdata->clock.mask = tk->tkr.mask;
-        vdata->clock.mult = tk->tkr.mult;
-        vdata->clock.shift = tk->tkr.shift;
+        vdata->clock.vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
+        vdata->clock.cycle_last = tk->tkr_mono.cycle_last;
+        vdata->clock.mask = tk->tkr_mono.mask;
+        vdata->clock.mult = tk->tkr_mono.mult;
+        vdata->clock.shift = tk->tkr_mono.shift;
 
         vdata->boot_ns = boot_ns;
-        vdata->nsec_base = tk->tkr.xtime_nsec;
+        vdata->nsec_base = tk->tkr_mono.xtime_nsec;
 
         write_seqcount_end(&vdata->seq);
 }
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 05af9a334893..73df17f1535f 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -16,16 +16,16 @@
  * @read: Read function of @clock
  * @mask: Bitmask for two's complement subtraction of non 64bit clocks
  * @cycle_last: @clock cycle value at last update
- * @mult: NTP adjusted multiplier for scaled math conversion
+ * @mult: (NTP adjusted) multiplier for scaled math conversion
  * @shift: Shift value for scaled math conversion
  * @xtime_nsec: Shifted (fractional) nano seconds offset for readout
- * @base_mono: ktime_t (nanoseconds) base time for readout
+ * @base: ktime_t (nanoseconds) base time for readout
  *
  * This struct has size 56 byte on 64 bit. Together with a seqcount it
  * occupies a single 64byte cache line.
  *
  * The struct is separate from struct timekeeper as it is also used
- * for a fast NMI safe accessor to clock monotonic.
+ * for a fast NMI safe accessors.
  */
 struct tk_read_base {
         struct clocksource *clock;
@@ -35,12 +35,12 @@ struct tk_read_base {
         u32 mult;
         u32 shift;
         u64 xtime_nsec;
-        ktime_t base_mono;
+        ktime_t base;
 };
 
 /**
  * struct timekeeper - Structure holding internal timekeeping values.
- * @tkr: The readout base structure
+ * @tkr_mono: The readout base structure for CLOCK_MONOTONIC
  * @xtime_sec: Current CLOCK_REALTIME time in seconds
  * @ktime_sec: Current CLOCK_MONOTONIC time in seconds
  * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset
@@ -76,7 +76,7 @@ struct tk_read_base {
  * used instead.
  */
 struct timekeeper {
-        struct tk_read_base tkr;
+        struct tk_read_base tkr_mono;
         u64 xtime_sec;
         unsigned long ktime_sec;
         struct timespec64 wall_to_monotonic;
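As background for the kernel-doc above: a tk_read_base turns raw clocksource cycles into nanoseconds with the mult/shift scaled math, and xtime_nsec is stored left-shifted by shift. A rough sketch of that readout, for illustration only (the helper name is made up; the real code is timekeeping_get_ns() in kernel/time/timekeeping.c):

static inline u64 tkr_cycles_to_ns(const struct tk_read_base *tkr, u64 cycle_now)
{
        /* Mask the delta so non-64bit counters that wrap are handled. */
        u64 delta = (cycle_now - tkr->cycle_last) & tkr->mask;

        /* Scale by the (NTP adjusted) mult, add the shifted fractional
         * nanoseconds, then shift down once at the end. */
        return (tkr->xtime_nsec + delta * tkr->mult) >> tkr->shift;
}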
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 892f6cbf1e67..1405091f3acb 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -68,8 +68,8 @@ bool __read_mostly persistent_clock_exist = false;
 
 static inline void tk_normalize_xtime(struct timekeeper *tk)
 {
-        while (tk->tkr.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr.shift)) {
-                tk->tkr.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr.shift;
+        while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
+                tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
                 tk->xtime_sec++;
         }
 }
@@ -79,20 +79,20 @@ static inline struct timespec64 tk_xtime(struct timekeeper *tk)
         struct timespec64 ts;
 
         ts.tv_sec = tk->xtime_sec;
-        ts.tv_nsec = (long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
+        ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
         return ts;
 }
 
 static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
 {
         tk->xtime_sec = ts->tv_sec;
-        tk->tkr.xtime_nsec = (u64)ts->tv_nsec << tk->tkr.shift;
+        tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
 }
 
 static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
 {
         tk->xtime_sec += ts->tv_sec;
-        tk->tkr.xtime_nsec += (u64)ts->tv_nsec << tk->tkr.shift;
+        tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
         tk_normalize_xtime(tk);
 }
 
@@ -136,8 +136,8 @@ static long timekeeping_last_warning;
 static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
 {
 
-        cycle_t max_cycles = tk->tkr.clock->max_cycles;
-        const char *name = tk->tkr.clock->name;
+        cycle_t max_cycles = tk->tkr_mono.clock->max_cycles;
+        const char *name = tk->tkr_mono.clock->name;
 
         if (offset > max_cycles) {
                 printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
@@ -246,11 +246,11 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
         u64 tmp, ntpinterval;
         struct clocksource *old_clock;
 
-        old_clock = tk->tkr.clock;
-        tk->tkr.clock = clock;
-        tk->tkr.read = clock->read;
-        tk->tkr.mask = clock->mask;
-        tk->tkr.cycle_last = tk->tkr.read(clock);
+        old_clock = tk->tkr_mono.clock;
+        tk->tkr_mono.clock = clock;
+        tk->tkr_mono.read = clock->read;
+        tk->tkr_mono.mask = clock->mask;
+        tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);
 
         /* Do the ns -> cycle conversion first, using original mult */
         tmp = NTP_INTERVAL_LENGTH;
@@ -274,11 +274,11 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
         if (old_clock) {
                 int shift_change = clock->shift - old_clock->shift;
                 if (shift_change < 0)
-                        tk->tkr.xtime_nsec >>= -shift_change;
+                        tk->tkr_mono.xtime_nsec >>= -shift_change;
                 else
-                        tk->tkr.xtime_nsec <<= shift_change;
+                        tk->tkr_mono.xtime_nsec <<= shift_change;
         }
-        tk->tkr.shift = clock->shift;
+        tk->tkr_mono.shift = clock->shift;
 
         tk->ntp_error = 0;
         tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
@@ -289,7 +289,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
          * active clocksource. These value will be adjusted via NTP
          * to counteract clock drifting.
          */
-        tk->tkr.mult = clock->mult;
+        tk->tkr_mono.mult = clock->mult;
         tk->ntp_err_mult = 0;
 }
 
@@ -318,11 +318,11 @@ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
 
 static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
 {
-        struct clocksource *clock = tk->tkr.clock;
+        struct clocksource *clock = tk->tkr_mono.clock;
         cycle_t delta;
         s64 nsec;
 
-        delta = timekeeping_get_delta(&tk->tkr);
+        delta = timekeeping_get_delta(&tk->tkr_mono);
 
         /* convert delta to nanoseconds. */
         nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
@@ -428,7 +428,7 @@ u64 notrace ktime_get_mono_fast_ns(void)
         do {
                 seq = raw_read_seqcount(&tk_fast_mono.seq);
                 tkr = tk_fast_mono.base + (seq & 0x01);
-                now = ktime_to_ns(tkr->base_mono) + timekeeping_get_ns(tkr);
+                now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
 
         } while (read_seqcount_retry(&tk_fast_mono.seq, seq));
         return now;
@@ -456,7 +456,7 @@ static cycle_t dummy_clock_read(struct clocksource *cs)
 static void halt_fast_timekeeper(struct timekeeper *tk)
 {
         static struct tk_read_base tkr_dummy;
-        struct tk_read_base *tkr = &tk->tkr;
+        struct tk_read_base *tkr = &tk->tkr_mono;
 
         memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
         cycles_at_suspend = tkr->read(tkr->clock);
@@ -472,8 +472,8 @@ static inline void update_vsyscall(struct timekeeper *tk)
 
         xt = timespec64_to_timespec(tk_xtime(tk));
         wm = timespec64_to_timespec(tk->wall_to_monotonic);
-        update_vsyscall_old(&xt, &wm, tk->tkr.clock, tk->tkr.mult,
-                            tk->tkr.cycle_last);
+        update_vsyscall_old(&xt, &wm, tk->tkr_mono.clock, tk->tkr_mono.mult,
+                            tk->tkr_mono.cycle_last);
 }
 
 static inline void old_vsyscall_fixup(struct timekeeper *tk)
@@ -490,11 +490,11 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
          * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
          * users are removed, this can be killed.
          */
-        remainder = tk->tkr.xtime_nsec & ((1ULL << tk->tkr.shift) - 1);
-        tk->tkr.xtime_nsec -= remainder;
-        tk->tkr.xtime_nsec += 1ULL << tk->tkr.shift;
+        remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1);
+        tk->tkr_mono.xtime_nsec -= remainder;
+        tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
         tk->ntp_error += remainder << tk->ntp_error_shift;
-        tk->ntp_error -= (1ULL << tk->tkr.shift) << tk->ntp_error_shift;
+        tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
 }
 #else
 #define old_vsyscall_fixup(tk)
@@ -559,7 +559,7 @@ static inline void tk_update_ktime_data(struct timekeeper *tk)
          */
         seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
         nsec = (u32) tk->wall_to_monotonic.tv_nsec;
-        tk->tkr.base_mono = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
+        tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
 
         /* Update the monotonic raw base */
         tk->base_raw = timespec64_to_ktime(tk->raw_time);
@@ -569,7 +569,7 @@ static inline void tk_update_ktime_data(struct timekeeper *tk)
          * wall_to_monotonic can be greater/equal one second. Take
          * this into account before updating tk->ktime_sec.
          */
-        nsec += (u32)(tk->tkr.xtime_nsec >> tk->tkr.shift);
+        nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
         if (nsec >= NSEC_PER_SEC)
                 seconds++;
         tk->ktime_sec = seconds;
@@ -592,7 +592,7 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
                 memcpy(&shadow_timekeeper, &tk_core.timekeeper,
                        sizeof(tk_core.timekeeper));
 
-        update_fast_timekeeper(&tk->tkr);
+        update_fast_timekeeper(&tk->tkr_mono);
 }
 
 /**
@@ -604,18 +604,18 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
  */
 static void timekeeping_forward_now(struct timekeeper *tk)
 {
-        struct clocksource *clock = tk->tkr.clock;
+        struct clocksource *clock = tk->tkr_mono.clock;
         cycle_t cycle_now, delta;
         s64 nsec;
 
-        cycle_now = tk->tkr.read(clock);
-        delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
-        tk->tkr.cycle_last = cycle_now;
+        cycle_now = tk->tkr_mono.read(clock);
+        delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
+        tk->tkr_mono.cycle_last = cycle_now;
 
-        tk->tkr.xtime_nsec += delta * tk->tkr.mult;
+        tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;
 
         /* If arch requires, add in get_arch_timeoffset() */
-        tk->tkr.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr.shift;
+        tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;
 
         tk_normalize_xtime(tk);
 
@@ -640,7 +640,7 @@ int __getnstimeofday64(struct timespec64 *ts)
                 seq = read_seqcount_begin(&tk_core.seq);
 
                 ts->tv_sec = tk->xtime_sec;
-                nsecs = timekeeping_get_ns(&tk->tkr);
+                nsecs = timekeeping_get_ns(&tk->tkr_mono);
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -680,8 +680,8 @@ ktime_t ktime_get(void)
 
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
-                base = tk->tkr.base_mono;
-                nsecs = timekeeping_get_ns(&tk->tkr);
+                base = tk->tkr_mono.base;
+                nsecs = timekeeping_get_ns(&tk->tkr_mono);
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -706,8 +706,8 @@ ktime_t ktime_get_with_offset(enum tk_offsets offs)
 
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
-                base = ktime_add(tk->tkr.base_mono, *offset);
-                nsecs = timekeeping_get_ns(&tk->tkr);
+                base = ktime_add(tk->tkr_mono.base, *offset);
+                nsecs = timekeeping_get_ns(&tk->tkr_mono);
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -777,7 +777,7 @@ void ktime_get_ts64(struct timespec64 *ts)
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
                 ts->tv_sec = tk->xtime_sec;
-                nsec = timekeeping_get_ns(&tk->tkr);
+                nsec = timekeeping_get_ns(&tk->tkr_mono);
                 tomono = tk->wall_to_monotonic;
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
@@ -863,7 +863,7 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
                 ts_real->tv_nsec = 0;
 
                 nsecs_raw = timekeeping_get_ns_raw(tk);
-                nsecs_real = timekeeping_get_ns(&tk->tkr);
+                nsecs_real = timekeeping_get_ns(&tk->tkr_mono);
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -1046,7 +1046,7 @@ static int change_clocksource(void *data)
          */
         if (try_module_get(new->owner)) {
                 if (!new->enable || new->enable(new) == 0) {
-                        old = tk->tkr.clock;
+                        old = tk->tkr_mono.clock;
                         tk_setup_internals(tk, new);
                         if (old->disable)
                                 old->disable(old);
@@ -1074,11 +1074,11 @@ int timekeeping_notify(struct clocksource *clock)
 {
         struct timekeeper *tk = &tk_core.timekeeper;
 
-        if (tk->tkr.clock == clock)
+        if (tk->tkr_mono.clock == clock)
                 return 0;
         stop_machine(change_clocksource, clock, NULL);
         tick_clock_notify();
-        return tk->tkr.clock == clock ? 0 : -1;
+        return tk->tkr_mono.clock == clock ? 0 : -1;
 }
 
 /**
@@ -1119,7 +1119,7 @@ int timekeeping_valid_for_hres(void)
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
 
-                ret = tk->tkr.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
+                ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -1138,7 +1138,7 @@ u64 timekeeping_max_deferment(void)
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
 
-                ret = tk->tkr.clock->max_idle_ns;
+                ret = tk->tkr_mono.clock->max_idle_ns;
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -1303,7 +1303,7 @@ void timekeeping_inject_sleeptime64(struct timespec64 *delta)
 void timekeeping_resume(void)
 {
         struct timekeeper *tk = &tk_core.timekeeper;
-        struct clocksource *clock = tk->tkr.clock;
+        struct clocksource *clock = tk->tkr_mono.clock;
         unsigned long flags;
         struct timespec64 ts_new, ts_delta;
         struct timespec tmp;
@@ -1331,16 +1331,16 @@ void timekeeping_resume(void)
          * The less preferred source will only be tried if there is no better
          * usable source. The rtc part is handled separately in rtc core code.
          */
-        cycle_now = tk->tkr.read(clock);
+        cycle_now = tk->tkr_mono.read(clock);
         if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
-            cycle_now > tk->tkr.cycle_last) {
+            cycle_now > tk->tkr_mono.cycle_last) {
                 u64 num, max = ULLONG_MAX;
                 u32 mult = clock->mult;
                 u32 shift = clock->shift;
                 s64 nsec = 0;
 
-                cycle_delta = clocksource_delta(cycle_now, tk->tkr.cycle_last,
-                                                tk->tkr.mask);
+                cycle_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
+                                                tk->tkr_mono.mask);
 
                 /*
                  * "cycle_delta * mutl" may cause 64 bits overflow, if the
@@ -1366,7 +1366,7 @@ void timekeeping_resume(void)
                 __timekeeping_inject_sleeptime(tk, &ts_delta);
 
         /* Re-base the last cycle value */
-        tk->tkr.cycle_last = cycle_now;
+        tk->tkr_mono.cycle_last = cycle_now;
         tk->ntp_error = 0;
         timekeeping_suspended = 0;
         timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
@@ -1519,15 +1519,15 @@ static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
          *
          * XXX - TODO: Doc ntp_error calculation.
          */
-        if ((mult_adj > 0) && (tk->tkr.mult + mult_adj < mult_adj)) {
+        if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
                 /* NTP adjustment caused clocksource mult overflow */
                 WARN_ON_ONCE(1);
                 return;
         }
 
-        tk->tkr.mult += mult_adj;
+        tk->tkr_mono.mult += mult_adj;
         tk->xtime_interval += interval;
-        tk->tkr.xtime_nsec -= offset;
+        tk->tkr_mono.xtime_nsec -= offset;
         tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
 }
 
@@ -1589,13 +1589,13 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
                 tk->ntp_err_mult = 0;
         }
 
-        if (unlikely(tk->tkr.clock->maxadj &&
-                (abs(tk->tkr.mult - tk->tkr.clock->mult)
-                        > tk->tkr.clock->maxadj))) {
+        if (unlikely(tk->tkr_mono.clock->maxadj &&
+                (abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
+                        > tk->tkr_mono.clock->maxadj))) {
                 printk_once(KERN_WARNING
                         "Adjusting %s more than 11%% (%ld vs %ld)\n",
-                        tk->tkr.clock->name, (long)tk->tkr.mult,
-                        (long)tk->tkr.clock->mult + tk->tkr.clock->maxadj);
+                        tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
+                        (long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
         }
 
         /*
@@ -1612,9 +1612,9 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
          * We'll correct this error next time through this function, when
          * xtime_nsec is not as small.
          */
-        if (unlikely((s64)tk->tkr.xtime_nsec < 0)) {
-                s64 neg = -(s64)tk->tkr.xtime_nsec;
-                tk->tkr.xtime_nsec = 0;
+        if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
+                s64 neg = -(s64)tk->tkr_mono.xtime_nsec;
+                tk->tkr_mono.xtime_nsec = 0;
                 tk->ntp_error += neg << tk->ntp_error_shift;
         }
 }
@@ -1629,13 +1629,13 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
  */
 static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
 {
-        u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr.shift;
+        u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
         unsigned int clock_set = 0;
 
-        while (tk->tkr.xtime_nsec >= nsecps) {
+        while (tk->tkr_mono.xtime_nsec >= nsecps) {
                 int leap;
 
-                tk->tkr.xtime_nsec -= nsecps;
+                tk->tkr_mono.xtime_nsec -= nsecps;
                 tk->xtime_sec++;
 
                 /* Figure out if its a leap sec and apply if needed */
@@ -1680,9 +1680,9 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
 
         /* Accumulate one shifted interval */
         offset -= interval;
-        tk->tkr.cycle_last += interval;
+        tk->tkr_mono.cycle_last += interval;
 
-        tk->tkr.xtime_nsec += tk->xtime_interval << shift;
+        tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
         *clock_set |= accumulate_nsecs_to_secs(tk);
 
         /* Accumulate raw time */
@@ -1725,8 +1725,8 @@ void update_wall_time(void)
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
         offset = real_tk->cycle_interval;
 #else
-        offset = clocksource_delta(tk->tkr.read(tk->tkr.clock),
-                                   tk->tkr.cycle_last, tk->tkr.mask);
+        offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
+                                   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
 #endif
 
         /* Check if there's really nothing to do */
@@ -1890,8 +1890,8 @@ ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real, ktime_t *offs_boot,
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
 
-                base = tk->tkr.base_mono;
-                nsecs = tk->tkr.xtime_nsec >> tk->tkr.shift;
+                base = tk->tkr_mono.base;
+                nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
 
                 *offs_real = tk->offs_real;
                 *offs_boot = tk->offs_boot;
@@ -1922,8 +1922,8 @@ ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot,
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
 
-                base = tk->tkr.base_mono;
-                nsecs = timekeeping_get_ns(&tk->tkr);
+                base = tk->tkr_mono.base;
+                nsecs = timekeeping_get_ns(&tk->tkr_mono);
 
                 *offs_real = tk->offs_real;
                 *offs_boot = tk->offs_boot;