-rw-r--r--  arch/s390/kernel/time.c     |   1
-rw-r--r--  include/linux/clocksource.h |  54
-rw-r--r--  kernel/time/clocksource.c   |   6
-rw-r--r--  kernel/time/timekeeping.c   | 235
4 files changed, 164 insertions(+), 132 deletions(-)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index afefe514df0f..e76c2e7a8b9a 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -280,7 +280,6 @@ void __init time_init(void)
 	now = get_clock();
 	tod_to_timeval(now - TOD_UNIX_EPOCH, &xtime);
 	clocksource_tod.cycle_last = now;
-	clocksource_tod.raw_time = xtime;
 	tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, &ts);
 	set_normalized_timespec(&wall_to_monotonic, -ts.tv_sec, -ts.tv_nsec);
 	write_sequnlock_irqrestore(&xtime_lock, flags);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 19ad43af62d0..e12e3095e2fb 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -155,8 +155,6 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
  * @flags:		flags describing special properties
  * @vread:		vsyscall based read
  * @resume:		resume function for the clocksource, if necessary
- * @cycle_interval:	Used internally by timekeeping core, please ignore.
- * @xtime_interval:	Used internally by timekeeping core, please ignore.
  */
 struct clocksource {
 	/*
@@ -182,19 +180,12 @@ struct clocksource {
 #define CLKSRC_FSYS_MMIO_SET(mmio, addr)	do { } while (0)
 #endif
 
-	/* timekeeping specific data, ignore */
-	cycle_t cycle_interval;
-	u64	xtime_interval;
-	u32	raw_interval;
 	/*
 	 * Second part is written at each timer interrupt
 	 * Keep it in a different cache line to dirty no
 	 * more than one cache line.
 	 */
 	cycle_t cycle_last ____cacheline_aligned_in_smp;
-	u64 xtime_nsec;
-	s64 error;
-	struct timespec raw_time;
 
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
 	/* Watchdog related data, used by the framework */
@@ -203,8 +194,6 @@ struct clocksource {
 #endif
 };
 
-extern struct clocksource *clock;	/* current clocksource */
-
 /*
  * Clock source flags bits::
  */
@@ -270,50 +259,15 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
 }
 
 /**
- * cyc2ns - converts clocksource cycles to nanoseconds
- * @cs:		Pointer to clocksource
- * @cycles:	Cycles
+ * clocksource_cyc2ns - converts clocksource cycles to nanoseconds
  *
- * Uses the clocksource and ntp ajdustment to convert cycle_ts to nanoseconds.
+ * Converts cycles to nanoseconds, using the given mult and shift.
  *
  * XXX - This could use some mult_lxl_ll() asm optimization
  */
-static inline s64 cyc2ns(struct clocksource *cs, cycle_t cycles)
+static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
 {
-	u64 ret = (u64)cycles;
-	ret = (ret * cs->mult) >> cs->shift;
-	return ret;
-}
-
-/**
- * clocksource_calculate_interval - Calculates a clocksource interval struct
- *
- * @c:		Pointer to clocksource.
- * @length_nsec: Desired interval length in nanoseconds.
- *
- * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
- * pair and interval request.
- *
- * Unless you're the timekeeping code, you should not be using this!
- */
-static inline void clocksource_calculate_interval(struct clocksource *c,
-						  unsigned long length_nsec)
-{
-	u64 tmp;
-
-	/* Do the ns -> cycle conversion first, using original mult */
-	tmp = length_nsec;
-	tmp <<= c->shift;
-	tmp += c->mult_orig/2;
-	do_div(tmp, c->mult_orig);
-
-	c->cycle_interval = (cycle_t)tmp;
-	if (c->cycle_interval == 0)
-		c->cycle_interval = 1;
-
-	/* Go back from cycles -> shifted ns, this time use ntp adjused mult */
-	c->xtime_interval = (u64)c->cycle_interval * c->mult;
-	c->raw_interval = ((u64)c->cycle_interval * c->mult_orig) >> c->shift;
+	return ((u64) cycles * mult) >> shift;
 }
 
 
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index f1508019bfb4..f18c9a6bdcf4 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -177,7 +177,8 @@ static void clocksource_watchdog(unsigned long data)
 		goto out;
 
 	wdnow = watchdog->read(watchdog);
-	wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
+	wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask,
+				     watchdog->mult, watchdog->shift);
 	watchdog_last = wdnow;
 
 	list_for_each_entry(cs, &watchdog_list, wd_list) {
@@ -196,7 +197,8 @@ static void clocksource_watchdog(unsigned long data)
 	}
 
 	/* Check the deviation from the watchdog clocksource. */
-	cs_nsec = cyc2ns(cs, (csnow - cs->wd_last) & cs->mask);
+	cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) &
+				     cs->mask, cs->mult, cs->shift);
 	cs->wd_last = csnow;
 	if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
 		clocksource_unstable(cs, cs_nsec - wd_nsec);
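Both call sites hand clocksource_cyc2ns() a delta computed as (now - last) & mask. That idiom yields the elapsed cycles even if the counter wrapped between the two reads, provided less than one full counter period has passed. A standalone sketch, assuming a hypothetical 32-bit counter:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask = 0xffffffffULL;	/* counter width: 32 bits */
	uint64_t last = 0xfffffff0ULL;	/* read shortly before the wrap */
	uint64_t now  = 0x00000010ULL;	/* read shortly after the wrap */

	/* Unsigned subtraction wraps modulo 2^64; masking truncates the
	 * result back to the counter width, recovering the true delta. */
	uint64_t delta = (now - last) & mask;

	printf("delta = %llu cycles\n", (unsigned long long)delta); /* 32 */
	return 0;
}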
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 325a9b63265a..7af45cbf6b13 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -19,6 +19,65 @@
 #include <linux/time.h>
 #include <linux/tick.h>
 
+/* Structure holding internal timekeeping values. */
+struct timekeeper {
+	/* Current clocksource used for timekeeping. */
+	struct clocksource *clock;
+
+	/* Number of clock cycles in one NTP interval. */
+	cycle_t cycle_interval;
+	/* Number of clock shifted nano seconds in one NTP interval. */
+	u64	xtime_interval;
+	/* Raw nano seconds accumulated per NTP interval. */
+	u32	raw_interval;
+
+	/* Clock shifted nano seconds remainder not stored in xtime.tv_nsec. */
+	u64	xtime_nsec;
+	/* Difference between accumulated time and NTP time in ntp
+	 * shifted nano seconds. */
+	s64	ntp_error;
+};
+
+struct timekeeper timekeeper;
+
+/**
+ * timekeeper_setup_internals - Set up internals to use clocksource clock.
+ *
+ * @clock:		Pointer to clocksource.
+ *
+ * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
+ * pair and interval request.
+ *
+ * Unless you're the timekeeping code, you should not be using this!
+ */
+static void timekeeper_setup_internals(struct clocksource *clock)
+{
+	cycle_t interval;
+	u64 tmp;
+
+	timekeeper.clock = clock;
+	clock->cycle_last = clock->read(clock);
+
+	/* Do the ns -> cycle conversion first, using original mult */
+	tmp = NTP_INTERVAL_LENGTH;
+	tmp <<= clock->shift;
+	tmp += clock->mult_orig/2;
+	do_div(tmp, clock->mult_orig);
+	if (tmp == 0)
+		tmp = 1;
+
+	interval = (cycle_t) tmp;
+	timekeeper.cycle_interval = interval;
+
+	/* Go back from cycles -> shifted ns */
+	timekeeper.xtime_interval = (u64) interval * clock->mult;
+	timekeeper.raw_interval =
+		((u64) interval * clock->mult_orig) >> clock->shift;
+
+	timekeeper.xtime_nsec = 0;
+
+	timekeeper.ntp_error = 0;
+}
 
 /*
  * This read-write spinlock protects us from races in SMP while
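timekeeper_setup_internals() first rounds the NTP interval length to the nearest whole cycle using the unadjusted mult_orig, then converts that cycle count back to shifted nanoseconds with the NTP-adjusted mult. A userspace sketch of that round trip; the clock parameters are invented for illustration (1 GHz, shift 24, a 10 ms NTP interval as with HZ=100), and the variable names only mirror the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ntp_interval_length = 10000000ULL;	/* 10 ms in ns */
	uint32_t shift = 24;
	uint32_t mult_orig = 1u << 24;	/* hypothetical 1 GHz clock */
	uint32_t mult = mult_orig;	/* no NTP adjustment applied yet */

	/* ns -> cycles, rounded to the nearest cycle (do_div() in-kernel) */
	uint64_t tmp = (ntp_interval_length << shift) + mult_orig / 2;
	tmp /= mult_orig;
	if (tmp == 0)
		tmp = 1;	/* never accumulate in steps of zero cycles */
	uint64_t cycle_interval = tmp;

	/* cycles -> shifted ns; the shift keeps sub-nanosecond precision */
	uint64_t xtime_interval = cycle_interval * mult;

	printf("cycle_interval = %llu cycles\n",
	       (unsigned long long)cycle_interval);	/* 10000000 */
	printf("xtime_interval = %llu shifted ns\n",
	       (unsigned long long)xtime_interval);
	return 0;
}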
@@ -46,6 +105,11 @@ struct timespec xtime __attribute__ ((aligned (16)));
 struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
 static unsigned long total_sleep_time;		/* seconds */
 
+/*
+ * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
+ */
+struct timespec raw_time;
+
 /* flag for if timekeeping is suspended */
 int __read_mostly timekeeping_suspended;
 
@@ -56,42 +120,42 @@ void update_xtime_cache(u64 nsec)
 	timespec_add_ns(&xtime_cache, nsec);
 }
 
-struct clocksource *clock;
-
 /* must hold xtime_lock */
 void timekeeping_leap_insert(int leapsecond)
 {
 	xtime.tv_sec += leapsecond;
 	wall_to_monotonic.tv_sec -= leapsecond;
-	update_vsyscall(&xtime, clock);
+	update_vsyscall(&xtime, timekeeper.clock);
 }
 
 #ifdef CONFIG_GENERIC_TIME
 /**
- * clocksource_forward_now - update clock to the current time
+ * timekeeping_forward_now - update clock to the current time
  *
  * Forward the current clock to update its state since the last call to
  * update_wall_time(). This is useful before significant clock changes,
  * as it avoids having to deal with this time offset explicitly.
  */
-static void clocksource_forward_now(void)
+static void timekeeping_forward_now(void)
 {
 	cycle_t cycle_now, cycle_delta;
+	struct clocksource *clock;
 	s64 nsec;
 
+	clock = timekeeper.clock;
 	cycle_now = clock->read(clock);
 	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
 	clock->cycle_last = cycle_now;
 
-	nsec = cyc2ns(clock, cycle_delta);
+	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
 
 	/* If arch requires, add in gettimeoffset() */
 	nsec += arch_gettimeoffset();
 
 	timespec_add_ns(&xtime, nsec);
 
-	nsec = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
-	clock->raw_time.tv_nsec += nsec;
+	nsec = clocksource_cyc2ns(cycle_delta, clock->mult_orig, clock->shift);
+	timespec_add_ns(&raw_time, nsec);
 }
 
 /**
@@ -103,6 +167,7 @@ static void clocksource_forward_now(void)
 void getnstimeofday(struct timespec *ts)
 {
 	cycle_t cycle_now, cycle_delta;
+	struct clocksource *clock;
 	unsigned long seq;
 	s64 nsecs;
 
@@ -114,13 +179,15 @@ void getnstimeofday(struct timespec *ts)
 		*ts = xtime;
 
 		/* read clocksource: */
+		clock = timekeeper.clock;
 		cycle_now = clock->read(clock);
 
 		/* calculate the delta since the last update_wall_time: */
 		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
 
 		/* convert to nanoseconds: */
-		nsecs = cyc2ns(clock, cycle_delta);
+		nsecs = clocksource_cyc2ns(cycle_delta, clock->mult,
+					   clock->shift);
 
 		/* If arch requires, add in gettimeoffset() */
 		nsecs += arch_gettimeoffset();
@@ -135,6 +202,7 @@ EXPORT_SYMBOL(getnstimeofday);
 ktime_t ktime_get(void)
 {
 	cycle_t cycle_now, cycle_delta;
+	struct clocksource *clock;
 	unsigned int seq;
 	s64 secs, nsecs;
 
@@ -146,13 +214,15 @@ ktime_t ktime_get(void)
 		nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
 
 		/* read clocksource: */
+		clock = timekeeper.clock;
 		cycle_now = clock->read(clock);
 
 		/* calculate the delta since the last update_wall_time: */
 		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
 
 		/* convert to nanoseconds: */
-		nsecs += cyc2ns(clock, cycle_delta);
+		nsecs += clocksource_cyc2ns(cycle_delta, clock->mult,
+					    clock->shift);
 
 	} while (read_seqretry(&xtime_lock, seq));
 	/*
@@ -174,6 +244,7 @@ EXPORT_SYMBOL_GPL(ktime_get);
 void ktime_get_ts(struct timespec *ts)
 {
 	cycle_t cycle_now, cycle_delta;
+	struct clocksource *clock;
 	struct timespec tomono;
 	unsigned int seq;
 	s64 nsecs;
@@ -186,13 +257,15 @@ void ktime_get_ts(struct timespec *ts)
 		tomono = wall_to_monotonic;
 
 		/* read clocksource: */
+		clock = timekeeper.clock;
 		cycle_now = clock->read(clock);
 
 		/* calculate the delta since the last update_wall_time: */
 		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
 
 		/* convert to nanoseconds: */
-		nsecs = cyc2ns(clock, cycle_delta);
+		nsecs = clocksource_cyc2ns(cycle_delta, clock->mult,
+					   clock->shift);
 
 	} while (read_seqretry(&xtime_lock, seq));
 
@@ -233,7 +306,7 @@ int do_settimeofday(struct timespec *tv)
 
 	write_seqlock_irqsave(&xtime_lock, flags);
 
-	clocksource_forward_now();
+	timekeeping_forward_now();
 
 	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
 	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
@@ -243,10 +316,10 @@ int do_settimeofday(struct timespec *tv)
 
 	update_xtime_cache(0);
 
-	clock->error = 0;
+	timekeeper.ntp_error = 0;
 	ntp_clear();
 
-	update_vsyscall(&xtime, clock);
+	update_vsyscall(&xtime, timekeeper.clock);
 
 	write_sequnlock_irqrestore(&xtime_lock, flags);
 
@@ -269,10 +342,10 @@ static void change_clocksource(void)
 
 	new = clocksource_get_next();
 
-	if (!new || clock == new)
+	if (!new || timekeeper.clock == new)
 		return;
 
-	clocksource_forward_now();
+	timekeeping_forward_now();
 
 	if (new->enable && !new->enable(new))
 		return;
@@ -284,9 +357,9 @@ static void change_clocksource(void)
 	 */
 	new->mult_orig = new->mult;
 
-	new->raw_time = clock->raw_time;
-	old = clock;
-	clock = new;
+	old = timekeeper.clock;
+	timekeeper_setup_internals(new);
+
 	/*
 	 * Save mult_orig in mult so that the value can be restored
 	 * regardless if ->enable() updates the value of mult or not.
@@ -295,22 +368,10 @@ static void change_clocksource(void)
 	if (old->disable)
 		old->disable(old);
 
-	clock->cycle_last = clock->read(clock);
-	clock->error = 0;
-	clock->xtime_nsec = 0;
-	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
-
 	tick_clock_notify();
-
-	/*
-	 * We're holding xtime lock and waking up klogd would deadlock
-	 * us on enqueue. So no printing!
-	printk(KERN_INFO "Time: %s clocksource has been installed.\n",
-	       clock->name);
-	 */
 }
 #else /* GENERIC_TIME */
-static inline void clocksource_forward_now(void) { }
+static inline void timekeeping_forward_now(void) { }
 static inline void change_clocksource(void) { }
 
 /**
@@ -380,20 +441,23 @@ void getrawmonotonic(struct timespec *ts)
 	unsigned long seq;
 	s64 nsecs;
 	cycle_t cycle_now, cycle_delta;
+	struct clocksource *clock;
 
 	do {
 		seq = read_seqbegin(&xtime_lock);
 
 		/* read clocksource: */
+		clock = timekeeper.clock;
 		cycle_now = clock->read(clock);
 
 		/* calculate the delta since the last update_wall_time: */
 		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
 
 		/* convert to nanoseconds: */
-		nsecs = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
+		nsecs = clocksource_cyc2ns(cycle_delta, clock->mult_orig,
+					   clock->shift);
 
-		*ts = clock->raw_time;
+		*ts = raw_time;
 
 	} while (read_seqretry(&xtime_lock, seq));
 
@@ -413,7 +477,7 @@ int timekeeping_valid_for_hres(void)
 	do {
 		seq = read_seqbegin(&xtime_lock);
 
-		ret = clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
+		ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
 
 	} while (read_seqretry(&xtime_lock, seq));
 
@@ -439,6 +503,7 @@ unsigned long __attribute__((weak)) read_persistent_clock(void)
  */
 void __init timekeeping_init(void)
 {
+	struct clocksource *clock;
 	unsigned long flags;
 	unsigned long sec = read_persistent_clock();
 
@@ -451,11 +516,13 @@ void __init timekeeping_init(void)
 		clock->enable(clock);
 	/* set mult_orig on enable */
 	clock->mult_orig = clock->mult;
-	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
-	clock->cycle_last = clock->read(clock);
+
+	timekeeper_setup_internals(clock);
 
 	xtime.tv_sec = sec;
 	xtime.tv_nsec = 0;
+	raw_time.tv_sec = 0;
+	raw_time.tv_nsec = 0;
 	set_normalized_timespec(&wall_to_monotonic,
 				-xtime.tv_sec, -xtime.tv_nsec);
 	update_xtime_cache(0);
@@ -492,8 +559,8 @@ static int timekeeping_resume(struct sys_device *dev)
 	}
 	update_xtime_cache(0);
 	/* re-base the last cycle value */
-	clock->cycle_last = clock->read(clock);
-	clock->error = 0;
+	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
+	timekeeper.ntp_error = 0;
 	timekeeping_suspended = 0;
 	write_sequnlock_irqrestore(&xtime_lock, flags);
 
@@ -514,7 +581,7 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
 	timekeeping_suspend_time = read_persistent_clock();
 
 	write_seqlock_irqsave(&xtime_lock, flags);
-	clocksource_forward_now();
+	timekeeping_forward_now();
 	timekeeping_suspended = 1;
 	write_sequnlock_irqrestore(&xtime_lock, flags);
 
@@ -549,7 +616,7 @@ device_initcall(timekeeping_init_device);
  * If the error is already larger, we look ahead even further
  * to compensate for late or lost adjustments.
  */
-static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
+static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
 						 s64 *offset)
 {
 	s64 tick_error, i;
@@ -565,7 +632,7 @@ static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
 	 * here. This is tuned so that an error of about 1 msec is adjusted
 	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
 	 */
-	error2 = clock->error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
+	error2 = timekeeper.ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
 	error2 = abs(error2);
 	for (look_ahead = 0; error2 > 0; look_ahead++)
 		error2 >>= 2;
@@ -574,8 +641,9 @@ static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
 	 * Now calculate the error in (1 << look_ahead) ticks, but first
 	 * remove the single look ahead already included in the error.
 	 */
-	tick_error = tick_length >> (NTP_SCALE_SHIFT - clock->shift + 1);
-	tick_error -= clock->xtime_interval >> 1;
+	tick_error = tick_length >>
+		(NTP_SCALE_SHIFT - timekeeper.clock->shift + 1);
+	tick_error -= timekeeper.xtime_interval >> 1;
 	error = ((error - tick_error) >> look_ahead) + tick_error;
 
 	/* Finally calculate the adjustment shift value. */
@@ -600,18 +668,19 @@ static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
  * this is optimized for the most common adjustments of -1,0,1,
  * for other values we can do a bit more work.
  */
-static void clocksource_adjust(s64 offset)
+static void timekeeping_adjust(s64 offset)
 {
-	s64 error, interval = clock->cycle_interval;
+	s64 error, interval = timekeeper.cycle_interval;
 	int adj;
 
-	error = clock->error >> (NTP_SCALE_SHIFT - clock->shift - 1);
+	error = timekeeper.ntp_error >>
+		(NTP_SCALE_SHIFT - timekeeper.clock->shift - 1);
 	if (error > interval) {
 		error >>= 2;
 		if (likely(error <= interval))
 			adj = 1;
 		else
-			adj = clocksource_bigadjust(error, &interval, &offset);
+			adj = timekeeping_bigadjust(error, &interval, &offset);
 	} else if (error < -interval) {
 		error >>= 2;
 		if (likely(error >= -interval)) {
@@ -619,15 +688,15 @@ static void clocksource_adjust(s64 offset)
 			interval = -interval;
 			offset = -offset;
 		} else
-			adj = clocksource_bigadjust(error, &interval, &offset);
+			adj = timekeeping_bigadjust(error, &interval, &offset);
 	} else
 		return;
 
-	clock->mult += adj;
-	clock->xtime_interval += interval;
-	clock->xtime_nsec -= offset;
-	clock->error -= (interval - offset) <<
-			(NTP_SCALE_SHIFT - clock->shift);
+	timekeeper.clock->mult += adj;
+	timekeeper.xtime_interval += interval;
+	timekeeper.xtime_nsec -= offset;
+	timekeeper.ntp_error -= (interval - offset) <<
+				(NTP_SCALE_SHIFT - timekeeper.clock->shift);
 }
 
 /**
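For a sense of the granularity timekeeping_adjust() works with: bumping mult by one changes the accumulated time by roughly cycle_interval / 2^shift nanoseconds per interval. A back-of-the-envelope sketch, reusing the invented parameters from the earlier example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cycle_interval = 10000000ULL;	/* cycles per NTP interval */
	uint32_t shift = 24;

	/* (cycles * (mult + 1) >> shift) - (cycles * mult >> shift)
	 * is about cycles / 2^shift nanoseconds per interval. */
	printf("one mult step ~ %.3f ns per interval\n",
	       (double)cycle_interval / (double)(1u << shift));
	return 0;
}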
@@ -637,53 +706,59 @@ static void clocksource_adjust(s64 offset)
  */
 void update_wall_time(void)
 {
+	struct clocksource *clock;
 	cycle_t offset;
+	s64 nsecs;
 
 	/* Make sure we're fully resumed: */
 	if (unlikely(timekeeping_suspended))
 		return;
 
+	clock = timekeeper.clock;
 #ifdef CONFIG_GENERIC_TIME
 	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
 #else
-	offset = clock->cycle_interval;
+	offset = timekeeper.cycle_interval;
 #endif
-	clock->xtime_nsec = (s64)xtime.tv_nsec << clock->shift;
+	timekeeper.xtime_nsec = (s64)xtime.tv_nsec << clock->shift;
 
 	/* normally this loop will run just once, however in the
 	 * case of lost or late ticks, it will accumulate correctly.
 	 */
-	while (offset >= clock->cycle_interval) {
+	while (offset >= timekeeper.cycle_interval) {
+		u64 nsecps = (u64)NSEC_PER_SEC << clock->shift;
+
 		/* accumulate one interval */
-		offset -= clock->cycle_interval;
-		clock->cycle_last += clock->cycle_interval;
+		offset -= timekeeper.cycle_interval;
+		clock->cycle_last += timekeeper.cycle_interval;
 
-		clock->xtime_nsec += clock->xtime_interval;
-		if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
-			clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
+		timekeeper.xtime_nsec += timekeeper.xtime_interval;
+		if (timekeeper.xtime_nsec >= nsecps) {
+			timekeeper.xtime_nsec -= nsecps;
 			xtime.tv_sec++;
 			second_overflow();
 		}
 
-		clock->raw_time.tv_nsec += clock->raw_interval;
-		if (clock->raw_time.tv_nsec >= NSEC_PER_SEC) {
-			clock->raw_time.tv_nsec -= NSEC_PER_SEC;
-			clock->raw_time.tv_sec++;
+		raw_time.tv_nsec += timekeeper.raw_interval;
+		if (raw_time.tv_nsec >= NSEC_PER_SEC) {
+			raw_time.tv_nsec -= NSEC_PER_SEC;
+			raw_time.tv_sec++;
 		}
 
 		/* accumulate error between NTP and clock interval */
-		clock->error += tick_length;
-		clock->error -= clock->xtime_interval << (NTP_SCALE_SHIFT - clock->shift);
+		timekeeper.ntp_error += tick_length;
+		timekeeper.ntp_error -= timekeeper.xtime_interval <<
+					(NTP_SCALE_SHIFT - clock->shift);
 	}
 
 	/* correct the clock when NTP error is too big */
-	clocksource_adjust(offset);
+	timekeeping_adjust(offset);
 
 	/*
 	 * Since in the loop above, we accumulate any amount of time
 	 * in xtime_nsec over a second into xtime.tv_sec, its possible for
 	 * xtime_nsec to be fairly small after the loop. Further, if we're
-	 * slightly speeding the clocksource up in clocksource_adjust(),
+	 * slightly speeding the clocksource up in timekeeping_adjust(),
 	 * its possible the required corrective factor to xtime_nsec could
 	 * cause it to underflow.
 	 *
@@ -695,24 +770,26 @@ void update_wall_time(void)
 	 * We'll correct this error next time through this function, when
 	 * xtime_nsec is not as small.
 	 */
-	if (unlikely((s64)clock->xtime_nsec < 0)) {
-		s64 neg = -(s64)clock->xtime_nsec;
-		clock->xtime_nsec = 0;
-		clock->error += neg << (NTP_SCALE_SHIFT - clock->shift);
+	if (unlikely((s64)timekeeper.xtime_nsec < 0)) {
+		s64 neg = -(s64)timekeeper.xtime_nsec;
+		timekeeper.xtime_nsec = 0;
+		timekeeper.ntp_error += neg << (NTP_SCALE_SHIFT - clock->shift);
 	}
 
 	/* store full nanoseconds into xtime after rounding it up and
 	 * add the remainder to the error difference.
 	 */
-	xtime.tv_nsec = ((s64)clock->xtime_nsec >> clock->shift) + 1;
-	clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
-	clock->error += clock->xtime_nsec << (NTP_SCALE_SHIFT - clock->shift);
+	xtime.tv_nsec = ((s64)timekeeper.xtime_nsec >> clock->shift) + 1;
+	timekeeper.xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
+	timekeeper.ntp_error += timekeeper.xtime_nsec <<
+				(NTP_SCALE_SHIFT - clock->shift);
 
-	update_xtime_cache(cyc2ns(clock, offset));
+	nsecs = clocksource_cyc2ns(offset, clock->mult, clock->shift);
+	update_xtime_cache(nsecs);
 
 	/* check to see if there is a new clocksource to use */
 	change_clocksource();
-	update_vsyscall(&xtime, clock);
+	update_vsyscall(&xtime, timekeeper.clock);
 }
 
 /**
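The closing hunk is easiest to read with the units in mind: xtime_nsec carries nanoseconds left-shifted by clock->shift, while ntp_error is kept at NTP_SCALE_SHIFT. A userspace sketch of the final rounding step only, with arbitrary values; it illustrates the unit handling, not the real update path:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t shift = 24;
	/* Pretend 1.25 ms plus a fractional nanosecond were accumulated. */
	uint64_t xtime_nsec = (1250000ULL << shift) + 12345;

	/* Round up into whole nanoseconds for xtime.tv_nsec... */
	int64_t tv_nsec = (int64_t)(xtime_nsec >> shift) + 1;

	/* ...leaving a slightly negative remainder in shifted form, which
	 * the patch folds into ntp_error so no time is ever dropped. */
	int64_t remainder = (int64_t)xtime_nsec - (tv_nsec << shift);

	printf("tv_nsec = %lld, remainder = %lld (shifted)\n",
	       (long long)tv_nsec, (long long)remainder);
	return 0;
}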