diff options
author | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2009-08-14 09:47:19 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2009-08-15 04:55:45 -0400 |
commit | a0f7d48bfb95a4c5172a2756dbc4b82afc8e9ae4 (patch) | |
tree | b82580eb76f99ee1352f399975050630ca21936a /kernel | |
parent | 31089c13bcb18d2cd2a3ddfbe3a28666346f237e (diff) |
timekeeping: Remove clocksource inline functions
The three inline functions clocksource_read, clocksource_enable and
clocksource_disable are simple wrappers of an indirect call plus the
copy from and to the mult_orig value. The functions are exclusively
used by the timekeeping code which has intimate knowledge of the
clocksource anyway. Therefore remove the inline functions. No
functional change.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Acked-by: John Stultz <johnstul@us.ibm.com>
Cc: Daniel Walker <dwalker@fifo99.com>
LKML-Reference: <20090814134807.903108946@de.ibm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/time/timekeeping.c | 41 |
1 file changed, 28 insertions(+), 13 deletions(-)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index b8b70fb545fc..016a2591d719 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -79,7 +79,7 @@ static void clocksource_forward_now(void) | |||
79 | cycle_t cycle_now, cycle_delta; | 79 | cycle_t cycle_now, cycle_delta; |
80 | s64 nsec; | 80 | s64 nsec; |
81 | 81 | ||
82 | cycle_now = clocksource_read(clock); | 82 | cycle_now = clock->read(clock); |
83 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; | 83 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; |
84 | clock->cycle_last = cycle_now; | 84 | clock->cycle_last = cycle_now; |
85 | 85 | ||
@@ -114,7 +114,7 @@ void getnstimeofday(struct timespec *ts) | |||
114 | *ts = xtime; | 114 | *ts = xtime; |
115 | 115 | ||
116 | /* read clocksource: */ | 116 | /* read clocksource: */ |
117 | cycle_now = clocksource_read(clock); | 117 | cycle_now = clock->read(clock); |
118 | 118 | ||
119 | /* calculate the delta since the last update_wall_time: */ | 119 | /* calculate the delta since the last update_wall_time: */ |
120 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; | 120 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; |
@@ -146,7 +146,7 @@ ktime_t ktime_get(void) | |||
146 | nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec; | 146 | nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec; |
147 | 147 | ||
148 | /* read clocksource: */ | 148 | /* read clocksource: */ |
149 | cycle_now = clocksource_read(clock); | 149 | cycle_now = clock->read(clock); |
150 | 150 | ||
151 | /* calculate the delta since the last update_wall_time: */ | 151 | /* calculate the delta since the last update_wall_time: */ |
152 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; | 152 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; |
@@ -186,7 +186,7 @@ void ktime_get_ts(struct timespec *ts) | |||
186 | tomono = wall_to_monotonic; | 186 | tomono = wall_to_monotonic; |
187 | 187 | ||
188 | /* read clocksource: */ | 188 | /* read clocksource: */ |
189 | cycle_now = clocksource_read(clock); | 189 | cycle_now = clock->read(clock); |
190 | 190 | ||
191 | /* calculate the delta since the last update_wall_time: */ | 191 | /* calculate the delta since the last update_wall_time: */ |
192 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; | 192 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; |
@@ -274,16 +274,29 @@ static void change_clocksource(void) | |||
274 | 274 | ||
275 | clocksource_forward_now(); | 275 | clocksource_forward_now(); |
276 | 276 | ||
277 | if (clocksource_enable(new)) | 277 | if (new->enable && !new->enable(new)) |
278 | return; | 278 | return; |
279 | /* | ||
280 | * The frequency may have changed while the clocksource | ||
281 | * was disabled. If so the code in ->enable() must update | ||
282 | * the mult value to reflect the new frequency. Make sure | ||
283 | * mult_orig follows this change. | ||
284 | */ | ||
285 | new->mult_orig = new->mult; | ||
279 | 286 | ||
280 | new->raw_time = clock->raw_time; | 287 | new->raw_time = clock->raw_time; |
281 | old = clock; | 288 | old = clock; |
282 | clock = new; | 289 | clock = new; |
283 | clocksource_disable(old); | 290 | /* |
291 | * Save mult_orig in mult so that the value can be restored | ||
292 | * regardless if ->enable() updates the value of mult or not. | ||
293 | */ | ||
294 | old->mult = old->mult_orig; | ||
295 | if (old->disable) | ||
296 | old->disable(old); | ||
284 | 297 | ||
285 | clock->cycle_last = 0; | 298 | clock->cycle_last = 0; |
286 | clock->cycle_last = clocksource_read(clock); | 299 | clock->cycle_last = clock->read(clock); |
287 | clock->error = 0; | 300 | clock->error = 0; |
288 | clock->xtime_nsec = 0; | 301 | clock->xtime_nsec = 0; |
289 | clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); | 302 | clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); |
@@ -373,7 +386,7 @@ void getrawmonotonic(struct timespec *ts) | |||
373 | seq = read_seqbegin(&xtime_lock); | 386 | seq = read_seqbegin(&xtime_lock); |
374 | 387 | ||
375 | /* read clocksource: */ | 388 | /* read clocksource: */ |
376 | cycle_now = clocksource_read(clock); | 389 | cycle_now = clock->read(clock); |
377 | 390 | ||
378 | /* calculate the delta since the last update_wall_time: */ | 391 | /* calculate the delta since the last update_wall_time: */ |
379 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; | 392 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; |
@@ -435,9 +448,12 @@ void __init timekeeping_init(void) | |||
435 | ntp_init(); | 448 | ntp_init(); |
436 | 449 | ||
437 | clock = clocksource_get_next(); | 450 | clock = clocksource_get_next(); |
438 | clocksource_enable(clock); | 451 | if (clock->enable) |
452 | clock->enable(clock); | ||
453 | /* set mult_orig on enable */ | ||
454 | clock->mult_orig = clock->mult; | ||
439 | clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); | 455 | clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); |
440 | clock->cycle_last = clocksource_read(clock); | 456 | clock->cycle_last = clock->read(clock); |
441 | 457 | ||
442 | xtime.tv_sec = sec; | 458 | xtime.tv_sec = sec; |
443 | xtime.tv_nsec = 0; | 459 | xtime.tv_nsec = 0; |
@@ -477,8 +493,7 @@ static int timekeeping_resume(struct sys_device *dev) | |||
477 | } | 493 | } |
478 | update_xtime_cache(0); | 494 | update_xtime_cache(0); |
479 | /* re-base the last cycle value */ | 495 | /* re-base the last cycle value */ |
480 | clock->cycle_last = 0; | 496 | clock->cycle_last = clock->read(clock); |
481 | clock->cycle_last = clocksource_read(clock); | ||
482 | clock->error = 0; | 497 | clock->error = 0; |
483 | timekeeping_suspended = 0; | 498 | timekeeping_suspended = 0; |
484 | write_sequnlock_irqrestore(&xtime_lock, flags); | 499 | write_sequnlock_irqrestore(&xtime_lock, flags); |
@@ -630,7 +645,7 @@ void update_wall_time(void) | |||
630 | return; | 645 | return; |
631 | 646 | ||
632 | #ifdef CONFIG_GENERIC_TIME | 647 | #ifdef CONFIG_GENERIC_TIME |
633 | offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask; | 648 | offset = (clock->read(clock) - clock->cycle_last) & clock->mask; |
634 | #else | 649 | #else |
635 | offset = clock->cycle_interval; | 650 | offset = clock->cycle_interval; |
636 | #endif | 651 | #endif |