Diffstat (limited to 'kernel/time/timekeeping.c')
 kernel/time/timekeeping.c | 511 +++++++++++++++++++++++++--------------
 1 file changed, 303 insertions(+), 208 deletions(-)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 6f46a00a1e8a..f045cc50832d 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -24,32 +24,32 @@
 /* Structure holding internal timekeeping values. */
 struct timekeeper {
 	/* Current clocksource used for timekeeping. */
 	struct clocksource *clock;
 	/* NTP adjusted clock multiplier */
 	u32	mult;
 	/* The shift value of the current clocksource. */
-	int	shift;
-
+	u32	shift;
 	/* Number of clock cycles in one NTP interval. */
 	cycle_t cycle_interval;
 	/* Number of clock shifted nano seconds in one NTP interval. */
 	u64	xtime_interval;
 	/* shifted nano seconds left over when rounding cycle_interval */
 	s64	xtime_remainder;
 	/* Raw nano seconds accumulated per NTP interval. */
 	u32	raw_interval;
+
+	/* Current CLOCK_REALTIME time in seconds */
+	u64	xtime_sec;
+	/* Clock shifted nano seconds */
+	u64	xtime_nsec;
 
-	/* Clock shifted nano seconds remainder not stored in xtime.tv_nsec. */
-	u64	xtime_nsec;
 	/* Difference between accumulated time and NTP time in ntp
 	 * shifted nano seconds. */
 	s64	ntp_error;
 	/* Shift conversion between clock shifted nano seconds and
 	 * ntp shifted nano seconds. */
-	int	ntp_error_shift;
+	u32	ntp_error_shift;
 
-	/* The current time */
-	struct timespec xtime;
 	/*
	 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
	 * for sub jiffie times) to get to monotonic time. Monotonic is pegged
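This struct change is the core of the rework: the single struct timespec xtime is split into xtime_sec plus xtime_nsec, where xtime_nsec holds the sub-second part in clocksource-shifted nanoseconds (nanoseconds scaled by 2^shift), so sub-nanosecond precision survives between updates. A minimal standalone sketch of that representation, with made-up values (shift = 8 is an arbitrary example, not a kernel constant):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t shift = 8;		/* example clocksource shift */
		uint64_t xtime_sec = 1000;
		uint64_t xtime_nsec = 500000000ULL << shift; /* 0.5 s as shifted ns */

		/* recover a plain sec/nsec pair, as tk_xtime() does below */
		printf("%llu.%09llu\n", (unsigned long long)xtime_sec,
		       (unsigned long long)(xtime_nsec >> shift));
		return 0;
	}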
@@ -64,14 +64,17 @@ struct timekeeper {
	 * - wall_to_monotonic is no longer the boot time, getboottime must be
	 * used instead.
	 */
 	struct timespec wall_to_monotonic;
 	/* time spent in suspend */
 	struct timespec total_sleep_time;
 	/* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
 	struct timespec raw_time;
-
+	/* Offset clock monotonic -> clock realtime */
+	ktime_t offs_real;
+	/* Offset clock monotonic -> clock boottime */
+	ktime_t offs_boot;
 	/* Seqlock for all timekeeper values */
 	seqlock_t lock;
 };
 
 static struct timekeeper timekeeper;
@@ -82,11 +85,37 @@ static struct timekeeper timekeeper;
  */
 __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
 
-
 /* flag for if timekeeping is suspended */
 int __read_mostly timekeeping_suspended;
 
+static inline void tk_normalize_xtime(struct timekeeper *tk)
+{
+	while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
+		tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift;
+		tk->xtime_sec++;
+	}
+}
+
+static struct timespec tk_xtime(struct timekeeper *tk)
+{
+	struct timespec ts;
+
+	ts.tv_sec = tk->xtime_sec;
+	ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift);
+	return ts;
+}
+
+static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts)
+{
+	tk->xtime_sec = ts->tv_sec;
+	tk->xtime_nsec = ts->tv_nsec << tk->shift;
+}
 
+static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
+{
+	tk->xtime_sec += ts->tv_sec;
+	tk->xtime_nsec += ts->tv_nsec << tk->shift;
+}
 
 /**
  * timekeeper_setup_internals - Set up internals to use clocksource clock.
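The tk_* helpers above become the only places that convert between timespec and the shifted-nanosecond representation. A userspace sketch of how set/get round-trip (the struct and function names here are stand-ins, not kernel types):

	#include <stdint.h>
	#include <stdio.h>

	struct xtimespec { long tv_sec; long tv_nsec; };	/* stand-in for timespec */
	struct tk { uint32_t shift; uint64_t xtime_sec; uint64_t xtime_nsec; };

	static void set_xtime(struct tk *tk, const struct xtimespec *ts)
	{	/* mirrors tk_set_xtime() */
		tk->xtime_sec = ts->tv_sec;
		tk->xtime_nsec = (uint64_t)ts->tv_nsec << tk->shift;
	}

	static struct xtimespec get_xtime(const struct tk *tk)
	{	/* mirrors tk_xtime() */
		struct xtimespec ts = { (long)tk->xtime_sec,
					(long)(tk->xtime_nsec >> tk->shift) };
		return ts;
	}

	int main(void)
	{
		struct tk tk = { 10, 0, 0 };		/* shift = 10, arbitrary */
		struct xtimespec in = { 1234, 567 }, out;

		set_xtime(&tk, &in);
		out = get_xtime(&tk);
		printf("%ld.%09ld\n", out.tv_sec, out.tv_nsec); /* 1234.000000567 */
		return 0;
	}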
@@ -98,12 +127,14 @@ int __read_mostly timekeeping_suspended;
  *
  * Unless you're the timekeeping code, you should not be using this!
  */
-static void timekeeper_setup_internals(struct clocksource *clock)
+static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 {
 	cycle_t interval;
 	u64 tmp, ntpinterval;
+	struct clocksource *old_clock;
 
-	timekeeper.clock = clock;
+	old_clock = tk->clock;
+	tk->clock = clock;
 	clock->cycle_last = clock->read(clock);
 
 	/* Do the ns -> cycle conversion first, using original mult */
@@ -116,71 +147,96 @@ static void timekeeper_setup_internals(struct clocksource *clock)
 		tmp = 1;
 
 	interval = (cycle_t) tmp;
-	timekeeper.cycle_interval = interval;
+	tk->cycle_interval = interval;
 
 	/* Go back from cycles -> shifted ns */
-	timekeeper.xtime_interval = (u64) interval * clock->mult;
-	timekeeper.xtime_remainder = ntpinterval - timekeeper.xtime_interval;
-	timekeeper.raw_interval =
+	tk->xtime_interval = (u64) interval * clock->mult;
+	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
+	tk->raw_interval =
 		((u64) interval * clock->mult) >> clock->shift;
 
-	timekeeper.xtime_nsec = 0;
-	timekeeper.shift = clock->shift;
+	/* if changing clocks, convert xtime_nsec shift units */
+	if (old_clock) {
+		int shift_change = clock->shift - old_clock->shift;
+		if (shift_change < 0)
+			tk->xtime_nsec >>= -shift_change;
+		else
+			tk->xtime_nsec <<= shift_change;
+	}
+	tk->shift = clock->shift;
 
-	timekeeper.ntp_error = 0;
-	timekeeper.ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
+	tk->ntp_error = 0;
+	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
 
 	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These value will be adjusted via NTP
	 * to counteract clock drifting.
	 */
-	timekeeper.mult = clock->mult;
+	tk->mult = clock->mult;
 }
 
 /* Timekeeper helper functions. */
-static inline s64 timekeeping_get_ns(void)
+static inline s64 timekeeping_get_ns(struct timekeeper *tk)
 {
 	cycle_t cycle_now, cycle_delta;
 	struct clocksource *clock;
+	s64 nsec;
 
 	/* read clocksource: */
-	clock = timekeeper.clock;
+	clock = tk->clock;
 	cycle_now = clock->read(clock);
 
 	/* calculate the delta since the last update_wall_time: */
 	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
 
-	/* return delta convert to nanoseconds using ntp adjusted mult. */
-	return clocksource_cyc2ns(cycle_delta, timekeeper.mult,
-				  timekeeper.shift);
+	nsec = cycle_delta * tk->mult + tk->xtime_nsec;
+	nsec >>= tk->shift;
+
+	/* If arch requires, add in gettimeoffset() */
+	return nsec + arch_gettimeoffset();
 }
 
-static inline s64 timekeeping_get_ns_raw(void)
+static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
 {
 	cycle_t cycle_now, cycle_delta;
 	struct clocksource *clock;
+	s64 nsec;
 
 	/* read clocksource: */
-	clock = timekeeper.clock;
+	clock = tk->clock;
 	cycle_now = clock->read(clock);
 
 	/* calculate the delta since the last update_wall_time: */
 	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
 
-	/* return delta convert to nanoseconds. */
-	return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
+	/* convert delta to nanoseconds. */
+	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
+
+	/* If arch requires, add in gettimeoffset() */
+	return nsec + arch_gettimeoffset();
+}
+
+static void update_rt_offset(struct timekeeper *tk)
+{
+	struct timespec tmp, *wtm = &tk->wall_to_monotonic;
+
+	set_normalized_timespec(&tmp, -wtm->tv_sec, -wtm->tv_nsec);
+	tk->offs_real = timespec_to_ktime(tmp);
 }
 
 /* must hold write on timekeeper.lock */
-static void timekeeping_update(bool clearntp)
+static void timekeeping_update(struct timekeeper *tk, bool clearntp)
 {
+	struct timespec xt;
+
 	if (clearntp) {
-		timekeeper.ntp_error = 0;
+		tk->ntp_error = 0;
 		ntp_clear();
 	}
-	update_vsyscall(&timekeeper.xtime, &timekeeper.wall_to_monotonic,
-			 timekeeper.clock, timekeeper.mult);
+	update_rt_offset(tk);
+	xt = tk_xtime(tk);
+	update_vsyscall(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult);
 }
 
 
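Note the reworked readout: instead of converting only the cycle delta with clocksource_cyc2ns(), timekeeping_get_ns() now folds the stored shifted-ns remainder into the sum before a single right shift, so the remainder is no longer discarded. Illustrative arithmetic, with invented numbers and the arch_gettimeoffset() term omitted:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* all values are illustrative, not from a real clocksource */
		uint64_t mult = 1000, cycle_delta = 2048;
		uint32_t shift = 10;
		uint64_t xtime_nsec = 3 << shift;	/* stored shifted-ns remainder */

		/* same arithmetic as the new timekeeping_get_ns() */
		uint64_t nsec = (cycle_delta * mult + xtime_nsec) >> shift;

		printf("%llu ns\n", (unsigned long long)nsec);	/* 2003 ns */
		return 0;
	}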
@@ -191,27 +247,26 @@ static void timekeeping_update(bool clearntp)
  * update_wall_time(). This is useful before significant clock changes,
  * as it avoids having to deal with this time offset explicitly.
  */
-static void timekeeping_forward_now(void)
+static void timekeeping_forward_now(struct timekeeper *tk)
 {
 	cycle_t cycle_now, cycle_delta;
 	struct clocksource *clock;
 	s64 nsec;
 
-	clock = timekeeper.clock;
+	clock = tk->clock;
 	cycle_now = clock->read(clock);
 	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
 	clock->cycle_last = cycle_now;
 
-	nsec = clocksource_cyc2ns(cycle_delta, timekeeper.mult,
-				  timekeeper.shift);
+	tk->xtime_nsec += cycle_delta * tk->mult;
 
 	/* If arch requires, add in gettimeoffset() */
-	nsec += arch_gettimeoffset();
+	tk->xtime_nsec += arch_gettimeoffset() << tk->shift;
 
-	timespec_add_ns(&timekeeper.xtime, nsec);
+	tk_normalize_xtime(tk);
 
 	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
-	timespec_add_ns(&timekeeper.raw_time, nsec);
+	timespec_add_ns(&tk->raw_time, nsec);
 }
 
 /**
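One unit-conversion detail in the new timekeeping_forward_now(): arch_gettimeoffset() returns plain nanoseconds, so the value has to be scaled up by the clocksource shift before it can be added to the shifted-ns accumulator. A tiny sketch of that conversion (values arbitrary):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t shift = 10;		/* example value */
		uint64_t xtime_nsec = 0;
		uint64_t arch_ns = 5;		/* plain nanoseconds */

		/* scale plain ns up before mixing with shifted ns */
		xtime_nsec += arch_ns << shift;

		printf("%llu ns\n", (unsigned long long)(xtime_nsec >> shift)); /* 5 ns */
		return 0;
	}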
@@ -223,18 +278,15 @@ static void timekeeping_forward_now(void)
 void getnstimeofday(struct timespec *ts)
 {
 	unsigned long seq;
-	s64 nsecs;
+	s64 nsecs = 0;
 
 	WARN_ON(timekeeping_suspended);
 
 	do {
 		seq = read_seqbegin(&timekeeper.lock);
 
-		*ts = timekeeper.xtime;
-		nsecs = timekeeping_get_ns();
-
-		/* If arch requires, add in gettimeoffset() */
-		nsecs += arch_gettimeoffset();
+		ts->tv_sec = timekeeper.xtime_sec;
+		ts->tv_nsec = timekeeping_get_ns(&timekeeper);
 
 	} while (read_seqretry(&timekeeper.lock, seq));
 
@@ -251,13 +303,10 @@ ktime_t ktime_get(void)
 
 	do {
 		seq = read_seqbegin(&timekeeper.lock);
-		secs = timekeeper.xtime.tv_sec +
+		secs = timekeeper.xtime_sec +
 				timekeeper.wall_to_monotonic.tv_sec;
-		nsecs = timekeeper.xtime.tv_nsec +
+		nsecs = timekeeping_get_ns(&timekeeper) +
 				timekeeper.wall_to_monotonic.tv_nsec;
-		nsecs += timekeeping_get_ns();
-		/* If arch requires, add in gettimeoffset() */
-		nsecs += arch_gettimeoffset();
 
 	} while (read_seqretry(&timekeeper.lock, seq));
 	/*
@@ -280,22 +329,19 @@ void ktime_get_ts(struct timespec *ts)
 {
 	struct timespec tomono;
 	unsigned int seq;
-	s64 nsecs;
 
 	WARN_ON(timekeeping_suspended);
 
 	do {
 		seq = read_seqbegin(&timekeeper.lock);
-		*ts = timekeeper.xtime;
+		ts->tv_sec = timekeeper.xtime_sec;
+		ts->tv_nsec = timekeeping_get_ns(&timekeeper);
 		tomono = timekeeper.wall_to_monotonic;
-		nsecs = timekeeping_get_ns();
-		/* If arch requires, add in gettimeoffset() */
-		nsecs += arch_gettimeoffset();
 
 	} while (read_seqretry(&timekeeper.lock, seq));
 
 	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
-				ts->tv_nsec + tomono.tv_nsec + nsecs);
+				ts->tv_nsec + tomono.tv_nsec);
 }
 EXPORT_SYMBOL_GPL(ktime_get_ts);
 
@@ -318,20 +364,14 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
 	WARN_ON_ONCE(timekeeping_suspended);
 
 	do {
-		u32 arch_offset;
-
 		seq = read_seqbegin(&timekeeper.lock);
 
 		*ts_raw = timekeeper.raw_time;
-		*ts_real = timekeeper.xtime;
-
-		nsecs_raw = timekeeping_get_ns_raw();
-		nsecs_real = timekeeping_get_ns();
+		ts_real->tv_sec = timekeeper.xtime_sec;
+		ts_real->tv_nsec = 0;
 
-		/* If arch requires, add in gettimeoffset() */
-		arch_offset = arch_gettimeoffset();
-		nsecs_raw += arch_offset;
-		nsecs_real += arch_offset;
+		nsecs_raw = timekeeping_get_ns_raw(&timekeeper);
+		nsecs_real = timekeeping_get_ns(&timekeeper);
 
 	} while (read_seqretry(&timekeeper.lock, seq));
 
@@ -366,7 +406,7 @@ EXPORT_SYMBOL(do_gettimeofday);
  */
 int do_settimeofday(const struct timespec *tv)
 {
-	struct timespec ts_delta;
+	struct timespec ts_delta, xt;
 	unsigned long flags;
 
 	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
@@ -374,15 +414,18 @@ int do_settimeofday(const struct timespec *tv)
 
 	write_seqlock_irqsave(&timekeeper.lock, flags);
 
-	timekeeping_forward_now();
+	timekeeping_forward_now(&timekeeper);
+
+	xt = tk_xtime(&timekeeper);
+	ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
+	ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;
 
-	ts_delta.tv_sec = tv->tv_sec - timekeeper.xtime.tv_sec;
-	ts_delta.tv_nsec = tv->tv_nsec - timekeeper.xtime.tv_nsec;
 	timekeeper.wall_to_monotonic =
 			timespec_sub(timekeeper.wall_to_monotonic, ts_delta);
 
-	timekeeper.xtime = *tv;
-	timekeeping_update(true);
+	tk_set_xtime(&timekeeper, tv);
+
+	timekeeping_update(&timekeeper, true);
 
 	write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
@@ -409,13 +452,14 @@ int timekeeping_inject_offset(struct timespec *ts)
 
 	write_seqlock_irqsave(&timekeeper.lock, flags);
 
-	timekeeping_forward_now();
+	timekeeping_forward_now(&timekeeper);
 
-	timekeeper.xtime = timespec_add(timekeeper.xtime, *ts);
+
+	tk_xtime_add(&timekeeper, ts);
 	timekeeper.wall_to_monotonic =
 				timespec_sub(timekeeper.wall_to_monotonic, *ts);
 
-	timekeeping_update(true);
+	timekeeping_update(&timekeeper, true);
 
 	write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
@@ -440,14 +484,14 @@ static int change_clocksource(void *data)
 
 	write_seqlock_irqsave(&timekeeper.lock, flags);
 
-	timekeeping_forward_now();
+	timekeeping_forward_now(&timekeeper);
 	if (!new->enable || new->enable(new) == 0) {
 		old = timekeeper.clock;
-		timekeeper_setup_internals(new);
+		tk_setup_internals(&timekeeper, new);
 		if (old->disable)
 			old->disable(old);
 	}
-	timekeeping_update(true);
+	timekeeping_update(&timekeeper, true);
 
 	write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
@@ -497,7 +541,7 @@ void getrawmonotonic(struct timespec *ts)
 
 	do {
 		seq = read_seqbegin(&timekeeper.lock);
-		nsecs = timekeeping_get_ns_raw();
+		nsecs = timekeeping_get_ns_raw(&timekeeper);
 		*ts = timekeeper.raw_time;
 
 	} while (read_seqretry(&timekeeper.lock, seq));
@@ -532,6 +576,7 @@ u64 timekeeping_max_deferment(void)
 {
 	unsigned long seq;
 	u64 ret;
+
 	do {
 		seq = read_seqbegin(&timekeeper.lock);
 
@@ -592,18 +637,17 @@ void __init timekeeping_init(void)
 	clock = clocksource_default_clock();
 	if (clock->enable)
 		clock->enable(clock);
-	timekeeper_setup_internals(clock);
+	tk_setup_internals(&timekeeper, clock);
 
-	timekeeper.xtime.tv_sec = now.tv_sec;
-	timekeeper.xtime.tv_nsec = now.tv_nsec;
+	tk_set_xtime(&timekeeper, &now);
 	timekeeper.raw_time.tv_sec = 0;
 	timekeeper.raw_time.tv_nsec = 0;
-	if (boot.tv_sec == 0 && boot.tv_nsec == 0) {
-		boot.tv_sec = timekeeper.xtime.tv_sec;
-		boot.tv_nsec = timekeeper.xtime.tv_nsec;
-	}
+	if (boot.tv_sec == 0 && boot.tv_nsec == 0)
+		boot = tk_xtime(&timekeeper);
+
 	set_normalized_timespec(&timekeeper.wall_to_monotonic,
 				-boot.tv_sec, -boot.tv_nsec);
+	update_rt_offset(&timekeeper);
 	timekeeper.total_sleep_time.tv_sec = 0;
 	timekeeper.total_sleep_time.tv_nsec = 0;
 	write_sequnlock_irqrestore(&timekeeper.lock, flags);
@@ -612,6 +656,12 @@ void __init timekeeping_init(void)
 /* time in seconds when suspend began */
 static struct timespec timekeeping_suspend_time;
 
+static void update_sleep_time(struct timespec t)
+{
+	timekeeper.total_sleep_time = t;
+	timekeeper.offs_boot = timespec_to_ktime(t);
+}
+
 /**
  * __timekeeping_inject_sleeptime - Internal function to add sleep interval
  * @delta: pointer to a timespec delta value
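update_sleep_time() mirrors total_sleep_time into offs_boot as a ktime_t so that hrtimer code can consume the monotonic-to-boottime offset without converting a timespec on every interrupt. Assuming the usual 64-bit representation of ktime_t as a signed nanosecond count, the conversion amounts to the following sketch (names and values are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	struct xtimespec { int64_t tv_sec; long tv_nsec; };

	/* what timespec_to_ktime() boils down to on 64-bit: one signed ns count */
	static int64_t to_ktime_ns(struct xtimespec t)
	{
		return t.tv_sec * 1000000000LL + t.tv_nsec;
	}

	int main(void)
	{
		struct xtimespec sleep = { 42, 500000000 }; /* 42.5 s asleep, made up */

		printf("%lld ns\n", (long long)to_ktime_ns(sleep));
		return 0;
	}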
@@ -619,7 +669,8 @@ static struct timespec timekeeping_suspend_time;
  * Takes a timespec offset measuring a suspend interval and properly
  * adds the sleep offset to the timekeeping variables.
  */
-static void __timekeeping_inject_sleeptime(struct timespec *delta)
+static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
+						struct timespec *delta)
 {
 	if (!timespec_valid(delta)) {
 		printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
@@ -627,11 +678,9 @@ static void __timekeeping_inject_sleeptime(struct timespec *delta)
 		return;
 	}
 
-	timekeeper.xtime = timespec_add(timekeeper.xtime, *delta);
-	timekeeper.wall_to_monotonic =
-			timespec_sub(timekeeper.wall_to_monotonic, *delta);
-	timekeeper.total_sleep_time = timespec_add(
-			timekeeper.total_sleep_time, *delta);
+	tk_xtime_add(tk, delta);
+	tk->wall_to_monotonic = timespec_sub(tk->wall_to_monotonic, *delta);
+	update_sleep_time(timespec_add(tk->total_sleep_time, *delta));
 }
 
 
@@ -657,11 +706,11 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
 
 	write_seqlock_irqsave(&timekeeper.lock, flags);
 
-	timekeeping_forward_now();
+	timekeeping_forward_now(&timekeeper);
 
-	__timekeeping_inject_sleeptime(delta);
+	__timekeeping_inject_sleeptime(&timekeeper, delta);
 
-	timekeeping_update(true);
+	timekeeping_update(&timekeeper, true);
 
 	write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
@@ -690,12 +739,13 @@ static void timekeeping_resume(void)
 
 	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
 		ts = timespec_sub(ts, timekeeping_suspend_time);
-		__timekeeping_inject_sleeptime(&ts);
+		__timekeeping_inject_sleeptime(&timekeeper, &ts);
 	}
 	/* re-base the last cycle value */
 	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
 	timekeeper.ntp_error = 0;
 	timekeeping_suspended = 0;
+	timekeeping_update(&timekeeper, false);
 	write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
 	touch_softlockup_watchdog();
@@ -715,7 +765,7 @@ static int timekeeping_suspend(void)
 	read_persistent_clock(&timekeeping_suspend_time);
 
 	write_seqlock_irqsave(&timekeeper.lock, flags);
-	timekeeping_forward_now();
+	timekeeping_forward_now(&timekeeper);
 	timekeeping_suspended = 1;
 
 	/*
@@ -724,7 +774,7 @@ static int timekeeping_suspend(void)
	 * try to compensate so the difference in system time
	 * and persistent_clock time stays close to constant.
	 */
-	delta = timespec_sub(timekeeper.xtime, timekeeping_suspend_time);
+	delta = timespec_sub(tk_xtime(&timekeeper), timekeeping_suspend_time);
 	delta_delta = timespec_sub(delta, old_delta);
 	if (abs(delta_delta.tv_sec) >= 2) {
 		/*
@@ -763,7 +813,8 @@ device_initcall(timekeeping_init_ops);
  * If the error is already larger, we look ahead even further
  * to compensate for late or lost adjustments.
  */
-static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
+static __always_inline int timekeeping_bigadjust(struct timekeeper *tk,
+						 s64 error, s64 *interval,
 						 s64 *offset)
 {
 	s64 tick_error, i;
@@ -779,7 +830,7 @@ static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
	 * here. This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
-	error2 = timekeeper.ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
+	error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
 	error2 = abs(error2);
 	for (look_ahead = 0; error2 > 0; look_ahead++)
 		error2 >>= 2;
@@ -788,8 +839,8 @@ static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
-	tick_error = ntp_tick_length() >> (timekeeper.ntp_error_shift + 1);
-	tick_error -= timekeeper.xtime_interval >> 1;
+	tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1);
+	tick_error -= tk->xtime_interval >> 1;
 	error = ((error - tick_error) >> look_ahead) + tick_error;
 
 	/* Finally calculate the adjustment shift value. */
@@ -814,9 +865,9 @@ static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
  * this is optimized for the most common adjustments of -1,0,1,
  * for other values we can do a bit more work.
  */
-static void timekeeping_adjust(s64 offset)
+static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
 {
-	s64 error, interval = timekeeper.cycle_interval;
+	s64 error, interval = tk->cycle_interval;
 	int adj;
 
 	/*
@@ -832,7 +883,7 @@ static void timekeeping_adjust(s64 offset)
	 *
	 * Note: It does not "save" on aggravation when reading the code.
	 */
-	error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
+	error = tk->ntp_error >> (tk->ntp_error_shift - 1);
 	if (error > interval) {
 		/*
		 * We now divide error by 4(via shift), which checks if
@@ -854,7 +905,8 @@ static void timekeeping_adjust(s64 offset)
 		if (likely(error <= interval))
 			adj = 1;
 		else
-			adj = timekeeping_bigadjust(error, &interval, &offset);
+			adj = timekeeping_bigadjust(tk, error, &interval,
+							&offset);
 	} else if (error < -interval) {
 		/* See comment above, this is just switched for the negative */
 		error >>= 2;
@@ -863,18 +915,17 @@ static void timekeeping_adjust(s64 offset)
 			interval = -interval;
 			offset = -offset;
 		} else
-			adj = timekeeping_bigadjust(error, &interval, &offset);
-	} else /* No adjustment needed */
+			adj = timekeeping_bigadjust(tk, error, &interval,
+							&offset);
+	} else
 		return;
 
-	if (unlikely(timekeeper.clock->maxadj &&
-			(timekeeper.mult + adj >
-			timekeeper.clock->mult + timekeeper.clock->maxadj))) {
+	if (unlikely(tk->clock->maxadj &&
+		(tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
 		printk_once(KERN_WARNING
 			"Adjusting %s more than 11%% (%ld vs %ld)\n",
-			timekeeper.clock->name, (long)timekeeper.mult + adj,
-			(long)timekeeper.clock->mult +
-				timekeeper.clock->maxadj);
+			tk->clock->name, (long)tk->mult + adj,
+			(long)tk->clock->mult + tk->clock->maxadj);
 	}
 	/*
	 * So the following can be confusing.
@@ -925,11 +976,60 @@ static void timekeeping_adjust(s64 offset)
	 *
	 * XXX - TODO: Doc ntp_error calculation.
	 */
-	timekeeper.mult += adj;
-	timekeeper.xtime_interval += interval;
-	timekeeper.xtime_nsec -= offset;
-	timekeeper.ntp_error -= (interval - offset) <<
-				timekeeper.ntp_error_shift;
+	tk->mult += adj;
+	tk->xtime_interval += interval;
+	tk->xtime_nsec -= offset;
+	tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
+
+	/*
+	 * It may be possible that when we entered this function, xtime_nsec
+	 * was very small. Further, if we're slightly speeding the clocksource
+	 * in the code above, its possible the required corrective factor to
+	 * xtime_nsec could cause it to underflow.
+	 *
+	 * Now, since we already accumulated the second, we cannot simply roll
+	 * the accumulated second back, since the NTP subsystem has been
+	 * notified via second_overflow. So instead we push xtime_nsec forward
+	 * by the amount we underflowed, and add that amount into the error.
+	 *
+	 * We'll correct this error next time through this function, when
+	 * xtime_nsec is not as small.
+	 */
+	if (unlikely((s64)tk->xtime_nsec < 0)) {
+		s64 neg = -(s64)tk->xtime_nsec;
+		tk->xtime_nsec = 0;
+		tk->ntp_error += neg << tk->ntp_error_shift;
+	}
+
+}
+
+
+/**
+ * accumulate_nsecs_to_secs - Accumulates nsecs into secs
+ *
+ * Helper function that accumulates the nsecs greater than a second
+ * from the xtime_nsec field to the xtime_sec field.
+ * It also calls into the NTP code to handle leapsecond processing.
+ *
+ */
+static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
+{
+	u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;
+
+	while (tk->xtime_nsec >= nsecps) {
+		int leap;
+
+		tk->xtime_nsec -= nsecps;
+		tk->xtime_sec++;
+
+		/* Figure out if its a leap sec and apply if needed */
+		leap = second_overflow(tk->xtime_sec);
+		tk->xtime_sec += leap;
+		tk->wall_to_monotonic.tv_sec -= leap;
+		if (leap)
+			clock_was_set_delayed();
+
+	}
 }
 
@@ -942,44 +1042,36 @@ static void timekeeping_adjust(s64 offset)
  *
  * Returns the unconsumed cycles.
  */
-static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
+static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
+						u32 shift)
 {
-	u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
 	u64 raw_nsecs;
 
 	/* If the offset is smaller than a shifted interval, do nothing */
-	if (offset < timekeeper.cycle_interval<<shift)
+	if (offset < tk->cycle_interval<<shift)
 		return offset;
 
 	/* Accumulate one shifted interval */
-	offset -= timekeeper.cycle_interval << shift;
-	timekeeper.clock->cycle_last += timekeeper.cycle_interval << shift;
+	offset -= tk->cycle_interval << shift;
+	tk->clock->cycle_last += tk->cycle_interval << shift;
 
-	timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
-	while (timekeeper.xtime_nsec >= nsecps) {
-		int leap;
-		timekeeper.xtime_nsec -= nsecps;
-		timekeeper.xtime.tv_sec++;
-		leap = second_overflow(timekeeper.xtime.tv_sec);
-		timekeeper.xtime.tv_sec += leap;
-		timekeeper.wall_to_monotonic.tv_sec -= leap;
-	}
+	tk->xtime_nsec += tk->xtime_interval << shift;
+	accumulate_nsecs_to_secs(tk);
 
 	/* Accumulate raw time */
-	raw_nsecs = timekeeper.raw_interval << shift;
-	raw_nsecs += timekeeper.raw_time.tv_nsec;
+	raw_nsecs = tk->raw_interval << shift;
+	raw_nsecs += tk->raw_time.tv_nsec;
 	if (raw_nsecs >= NSEC_PER_SEC) {
 		u64 raw_secs = raw_nsecs;
 		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
-		timekeeper.raw_time.tv_sec += raw_secs;
+		tk->raw_time.tv_sec += raw_secs;
 	}
-	timekeeper.raw_time.tv_nsec = raw_nsecs;
+	tk->raw_time.tv_nsec = raw_nsecs;
 
 	/* Accumulate error between NTP and clock interval */
-	timekeeper.ntp_error += ntp_tick_length() << shift;
-	timekeeper.ntp_error -=
-		(timekeeper.xtime_interval + timekeeper.xtime_remainder) <<
-				(timekeeper.ntp_error_shift + shift);
+	tk->ntp_error += ntp_tick_length() << shift;
+	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
+						(tk->ntp_error_shift + shift);
 
 	return offset;
 }
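logarithmic_accumulation() is driven by the NO_HZ batching loop in update_wall_time(): cycles are consumed in power-of-two multiples of cycle_interval, and the chunk size is halved as the remaining offset shrinks. A toy model of that loop, with made-up numbers:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* illustrative numbers only */
		uint64_t cycle_interval = 100;	/* cycles per NTP tick */
		uint64_t offset = 1700;		/* unaccumulated cycles */
		int shift = 4;			/* start with 16 intervals at a time */

		while (offset >= cycle_interval) {
			/* same pattern as update_wall_time(): consume one
			 * (cycle_interval << shift) chunk if it fits */
			if (offset >= cycle_interval << shift)
				offset -= cycle_interval << shift;
			if (offset < cycle_interval << shift)
				shift--;
		}
		printf("leftover cycles: %llu\n", (unsigned long long)offset);
		return 0;
	}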
@@ -995,6 +1087,7 @@ static void update_wall_time(void)
 	cycle_t offset;
 	int shift = 0, maxshift;
 	unsigned long flags;
+	s64 remainder;
 
 	write_seqlock_irqsave(&timekeeper.lock, flags);
 
@@ -1009,8 +1102,6 @@ static void update_wall_time(void)
 #else
 	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
 #endif
-	timekeeper.xtime_nsec = (s64)timekeeper.xtime.tv_nsec <<
-						timekeeper.shift;
 
 	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
@@ -1026,62 +1117,36 @@ static void update_wall_time(void)
 	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
 	shift = min(shift, maxshift);
 	while (offset >= timekeeper.cycle_interval) {
-		offset = logarithmic_accumulation(offset, shift);
+		offset = logarithmic_accumulation(&timekeeper, offset, shift);
 		if(offset < timekeeper.cycle_interval<<shift)
 			shift--;
 	}
 
 	/* correct the clock when NTP error is too big */
-	timekeeping_adjust(offset);
-
-	/*
-	 * Since in the loop above, we accumulate any amount of time
-	 * in xtime_nsec over a second into xtime.tv_sec, its possible for
-	 * xtime_nsec to be fairly small after the loop. Further, if we're
-	 * slightly speeding the clocksource up in timekeeping_adjust(),
-	 * its possible the required corrective factor to xtime_nsec could
-	 * cause it to underflow.
-	 *
-	 * Now, we cannot simply roll the accumulated second back, since
-	 * the NTP subsystem has been notified via second_overflow. So
-	 * instead we push xtime_nsec forward by the amount we underflowed,
-	 * and add that amount into the error.
-	 *
-	 * We'll correct this error next time through this function, when
-	 * xtime_nsec is not as small.
-	 */
-	if (unlikely((s64)timekeeper.xtime_nsec < 0)) {
-		s64 neg = -(s64)timekeeper.xtime_nsec;
-		timekeeper.xtime_nsec = 0;
-		timekeeper.ntp_error += neg << timekeeper.ntp_error_shift;
-	}
-
+	timekeeping_adjust(&timekeeper, offset);
 
 
 	/*
-	 * Store full nanoseconds into xtime after rounding it up and
-	 * add the remainder to the error difference.
+	 * Store only full nanoseconds into xtime_nsec after rounding
+	 * it up and add the remainder to the error difference.
+	 * XXX - This is necessary to avoid small 1ns inconsistencies caused
+	 * by truncating the remainder in vsyscalls. However, it causes
+	 * additional work to be done in timekeeping_adjust(). Once
+	 * the vsyscall implementations are converted to use xtime_nsec
+	 * (shifted nanoseconds), this can be killed.
	 */
-	timekeeper.xtime.tv_nsec = ((s64)timekeeper.xtime_nsec >>
-						timekeeper.shift) + 1;
-	timekeeper.xtime_nsec -= (s64)timekeeper.xtime.tv_nsec <<
-						timekeeper.shift;
-	timekeeper.ntp_error += timekeeper.xtime_nsec <<
-				timekeeper.ntp_error_shift;
+	remainder = timekeeper.xtime_nsec & ((1 << timekeeper.shift) - 1);
+	timekeeper.xtime_nsec -= remainder;
+	timekeeper.xtime_nsec += 1 << timekeeper.shift;
+	timekeeper.ntp_error += remainder << timekeeper.ntp_error_shift;
 
 	/*
	 * Finally, make sure that after the rounding
-	 * xtime.tv_nsec isn't larger than NSEC_PER_SEC
+	 * xtime_nsec isn't larger than NSEC_PER_SEC
	 */
-	if (unlikely(timekeeper.xtime.tv_nsec >= NSEC_PER_SEC)) {
-		int leap;
-		timekeeper.xtime.tv_nsec -= NSEC_PER_SEC;
-		timekeeper.xtime.tv_sec++;
-		leap = second_overflow(timekeeper.xtime.tv_sec);
-		timekeeper.xtime.tv_sec += leap;
-		timekeeper.wall_to_monotonic.tv_sec -= leap;
-	}
+	accumulate_nsecs_to_secs(&timekeeper);
 
-	timekeeping_update(false);
+	timekeeping_update(&timekeeper, false);
 
 out:
 	write_sequnlock_irqrestore(&timekeeper.lock, flags);
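The new rounding keeps only whole nanoseconds in xtime_nsec: the sub-ns bits are masked off, one full (shifted) nanosecond is added to round up, and the masked-off remainder is pushed into ntp_error, exactly as the XXX comment describes. A worked example with arbitrary values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t shift = 10;
		uint64_t xtime_nsec = (999ULL << shift) + 123;	/* 123 = sub-ns bits */
		int64_t remainder;

		/* mirrors the rounding above */
		remainder = xtime_nsec & ((1 << shift) - 1);
		xtime_nsec -= remainder;
		xtime_nsec += 1 << shift;

		printf("ns=%llu remainder=%lld\n",
		       (unsigned long long)(xtime_nsec >> shift),
		       (long long)remainder);	/* ns=1000 remainder=123 */
		return 0;
	}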
@@ -1126,21 +1191,20 @@ void get_monotonic_boottime(struct timespec *ts)
 {
 	struct timespec tomono, sleep;
 	unsigned int seq;
-	s64 nsecs;
 
 	WARN_ON(timekeeping_suspended);
 
 	do {
 		seq = read_seqbegin(&timekeeper.lock);
-		*ts = timekeeper.xtime;
+		ts->tv_sec = timekeeper.xtime_sec;
+		ts->tv_nsec = timekeeping_get_ns(&timekeeper);
 		tomono = timekeeper.wall_to_monotonic;
 		sleep = timekeeper.total_sleep_time;
-		nsecs = timekeeping_get_ns();
 
 	} while (read_seqretry(&timekeeper.lock, seq));
 
 	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
-			ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs);
+			ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec);
 }
 EXPORT_SYMBOL_GPL(get_monotonic_boottime);
 
@@ -1173,13 +1237,13 @@ EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
 
 unsigned long get_seconds(void)
 {
-	return timekeeper.xtime.tv_sec;
+	return timekeeper.xtime_sec;
 }
 EXPORT_SYMBOL(get_seconds);
 
 struct timespec __current_kernel_time(void)
 {
-	return timekeeper.xtime;
+	return tk_xtime(&timekeeper);
 }
 
 struct timespec current_kernel_time(void)
@@ -1190,7 +1254,7 @@ struct timespec current_kernel_time(void)
 	do {
 		seq = read_seqbegin(&timekeeper.lock);
 
-		now = timekeeper.xtime;
+		now = tk_xtime(&timekeeper);
 	} while (read_seqretry(&timekeeper.lock, seq));
 
 	return now;
@@ -1205,7 +1269,7 @@ struct timespec get_monotonic_coarse(void)
 	do {
 		seq = read_seqbegin(&timekeeper.lock);
 
-		now = timekeeper.xtime;
+		now = tk_xtime(&timekeeper);
 		mono = timekeeper.wall_to_monotonic;
 	} while (read_seqretry(&timekeeper.lock, seq));
 
@@ -1240,12 +1304,43 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
 
 	do {
 		seq = read_seqbegin(&timekeeper.lock);
-		*xtim = timekeeper.xtime;
+		*xtim = tk_xtime(&timekeeper);
 		*wtom = timekeeper.wall_to_monotonic;
 		*sleep = timekeeper.total_sleep_time;
 	} while (read_seqretry(&timekeeper.lock, seq));
 }
 
+#ifdef CONFIG_HIGH_RES_TIMERS
+/**
+ * ktime_get_update_offsets - hrtimer helper
+ * @offs_real:	pointer to storage for monotonic -> realtime offset
+ * @offs_boot:	pointer to storage for monotonic -> boottime offset
+ *
+ * Returns current monotonic time and updates the offsets.
+ * Called from hrtimer_interrupt() or retrigger_next_event().
+ */
+ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
+{
+	ktime_t now;
+	unsigned int seq;
+	u64 secs, nsecs;
+
+	do {
+		seq = read_seqbegin(&timekeeper.lock);
+
+		secs = timekeeper.xtime_sec;
+		nsecs = timekeeping_get_ns(&timekeeper);
+
+		*offs_real = timekeeper.offs_real;
+		*offs_boot = timekeeper.offs_boot;
+	} while (read_seqretry(&timekeeper.lock, seq));
+
+	now = ktime_add_ns(ktime_set(secs, 0), nsecs);
+	now = ktime_sub(now, *offs_real);
+	return now;
+}
+#endif
+
 /**
  * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
  */
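ktime_get_update_offsets() hands the hrtimer code a consistent triple under a single seqlock read: the current monotonic time plus the two offsets, from which the other clock bases follow by addition. An illustrative sketch of such a caller (ktime_t modelled as signed nanoseconds; all values invented):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int64_t mono_now = 5000000000LL;		/* CLOCK_MONOTONIC */
		int64_t offs_real = 1600000000000000000LL;	/* mono -> realtime */
		int64_t offs_boot = 12000000000LL;		/* mono -> boottime */

		/* an hrtimer-style caller derives the other clock bases by
		 * simple addition, with no extra seqlock round trips */
		printf("realtime=%lld boottime=%lld\n",
		       (long long)(mono_now + offs_real),
		       (long long)(mono_now + offs_boot));
		return 0;
	}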