diff options
| author | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2009-08-14 09:47:29 -0400 |
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2009-08-15 04:55:46 -0400 |
| commit | 2ba2a3054fdffc8e6452f4ee120760322a6fbd43 (patch) | |
| tree | 1ce2f3b323cb332d2b3d5ba4930ea10bb5d9e27e /kernel/time | |
| parent | 0a54419836254a27baecd9037103171bcbabaf67 (diff) | |
timekeeping: Add timekeeper read_clock helper functions
Add timekeeping_get_ns and timekeeping_get_ns_raw and use
them for getnstimeofday, ktime_get, ktime_get_ts and getrawmonotonic.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Acked-by: John Stultz <johnstul@us.ibm.com>
Cc: Daniel Walker <dwalker@fifo99.com>
LKML-Reference: <20090814134810.435105711@de.ibm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/time')
| -rw-r--r-- | kernel/time/timekeeping.c | 91 |
1 file changed, 38 insertions, 53 deletions
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index f4056f6c2632..27ae01b596b7 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
| @@ -95,6 +95,40 @@ static void timekeeper_setup_internals(struct clocksource *clock) | |||
| 95 | timekeeper.mult = clock->mult; | 95 | timekeeper.mult = clock->mult; |
| 96 | } | 96 | } |
| 97 | 97 | ||
| 98 | /* Timekeeper helper functions. */ | ||
| 99 | static inline s64 timekeeping_get_ns(void) | ||
| 100 | { | ||
| 101 | cycle_t cycle_now, cycle_delta; | ||
| 102 | struct clocksource *clock; | ||
| 103 | |||
| 104 | /* read clocksource: */ | ||
| 105 | clock = timekeeper.clock; | ||
| 106 | cycle_now = clock->read(clock); | ||
| 107 | |||
| 108 | /* calculate the delta since the last update_wall_time: */ | ||
| 109 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; | ||
| 110 | |||
| 111 | /* return delta converted to nanoseconds using the NTP-adjusted mult. */ | ||
| 112 | return clocksource_cyc2ns(cycle_delta, timekeeper.mult, | ||
| 113 | timekeeper.shift); | ||
| 114 | } | ||
| 115 | |||
| 116 | static inline s64 timekeeping_get_ns_raw(void) | ||
| 117 | { | ||
| 118 | cycle_t cycle_now, cycle_delta; | ||
| 119 | struct clocksource *clock; | ||
| 120 | |||
| 121 | /* read clocksource: */ | ||
| 122 | clock = timekeeper.clock; | ||
| 123 | cycle_now = clock->read(clock); | ||
| 124 | |||
| 125 | /* calculate the delta since the last update_wall_time: */ | ||
| 126 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; | ||
| 127 | |||
| 128 | /* return delta converted to nanoseconds using the raw (non-NTP-adjusted) mult. */ | ||
| 129 | return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift); | ||
| 130 | } | ||
| 131 | |||
| 98 | /* | 132 | /* |
| 99 | * This read-write spinlock protects us from races in SMP while | 133 | * This read-write spinlock protects us from races in SMP while |
| 100 | * playing with xtime. | 134 | * playing with xtime. |
| @@ -183,8 +217,6 @@ static void timekeeping_forward_now(void) | |||
| 183 | */ | 217 | */ |
| 184 | void getnstimeofday(struct timespec *ts) | 218 | void getnstimeofday(struct timespec *ts) |
| 185 | { | 219 | { |
| 186 | cycle_t cycle_now, cycle_delta; | ||
| 187 | struct clocksource *clock; | ||
| 188 | unsigned long seq; | 220 | unsigned long seq; |
| 189 | s64 nsecs; | 221 | s64 nsecs; |
| 190 | 222 | ||
| @@ -194,17 +226,7 @@ void getnstimeofday(struct timespec *ts) | |||
| 194 | seq = read_seqbegin(&xtime_lock); | 226 | seq = read_seqbegin(&xtime_lock); |
| 195 | 227 | ||
| 196 | *ts = xtime; | 228 | *ts = xtime; |
| 197 | 229 | nsecs = timekeeping_get_ns(); | |
| 198 | /* read clocksource: */ | ||
| 199 | clock = timekeeper.clock; | ||
| 200 | cycle_now = clock->read(clock); | ||
| 201 | |||
| 202 | /* calculate the delta since the last update_wall_time: */ | ||
| 203 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; | ||
| 204 | |||
| 205 | /* convert to nanoseconds: */ | ||
| 206 | nsecs = clocksource_cyc2ns(cycle_delta, timekeeper.mult, | ||
| 207 | timekeeper.shift); | ||
| 208 | 230 | ||
| 209 | /* If arch requires, add in gettimeoffset() */ | 231 | /* If arch requires, add in gettimeoffset() */ |
| 210 | nsecs += arch_gettimeoffset(); | 232 | nsecs += arch_gettimeoffset(); |
| @@ -218,8 +240,6 @@ EXPORT_SYMBOL(getnstimeofday); | |||
| 218 | 240 | ||
| 219 | ktime_t ktime_get(void) | 241 | ktime_t ktime_get(void) |
| 220 | { | 242 | { |
| 221 | cycle_t cycle_now, cycle_delta; | ||
| 222 | struct clocksource *clock; | ||
| 223 | unsigned int seq; | 243 | unsigned int seq; |
| 224 | s64 secs, nsecs; | 244 | s64 secs, nsecs; |
| 225 | 245 | ||
| @@ -229,17 +249,7 @@ ktime_t ktime_get(void) | |||
| 229 | seq = read_seqbegin(&xtime_lock); | 249 | seq = read_seqbegin(&xtime_lock); |
| 230 | secs = xtime.tv_sec + wall_to_monotonic.tv_sec; | 250 | secs = xtime.tv_sec + wall_to_monotonic.tv_sec; |
| 231 | nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec; | 251 | nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec; |
| 232 | 252 | nsecs += timekeeping_get_ns(); | |
| 233 | /* read clocksource: */ | ||
| 234 | clock = timekeeper.clock; | ||
| 235 | cycle_now = clock->read(clock); | ||
| 236 | |||
| 237 | /* calculate the delta since the last update_wall_time: */ | ||
| 238 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; | ||
| 239 | |||
| 240 | /* convert to nanoseconds: */ | ||
| 241 | nsecs += clocksource_cyc2ns(cycle_delta, timekeeper.mult, | ||
| 242 | timekeeper.shift); | ||
| 243 | 253 | ||
| 244 | } while (read_seqretry(&xtime_lock, seq)); | 254 | } while (read_seqretry(&xtime_lock, seq)); |
| 245 | /* | 255 | /* |
| @@ -260,8 +270,6 @@ EXPORT_SYMBOL_GPL(ktime_get); | |||
| 260 | */ | 270 | */ |
| 261 | void ktime_get_ts(struct timespec *ts) | 271 | void ktime_get_ts(struct timespec *ts) |
| 262 | { | 272 | { |
| 263 | cycle_t cycle_now, cycle_delta; | ||
| 264 | struct clocksource *clock; | ||
| 265 | struct timespec tomono; | 273 | struct timespec tomono; |
| 266 | unsigned int seq; | 274 | unsigned int seq; |
| 267 | s64 nsecs; | 275 | s64 nsecs; |
| @@ -272,17 +280,7 @@ void ktime_get_ts(struct timespec *ts) | |||
| 272 | seq = read_seqbegin(&xtime_lock); | 280 | seq = read_seqbegin(&xtime_lock); |
| 273 | *ts = xtime; | 281 | *ts = xtime; |
| 274 | tomono = wall_to_monotonic; | 282 | tomono = wall_to_monotonic; |
| 275 | 283 | nsecs = timekeeping_get_ns(); | |
| 276 | /* read clocksource: */ | ||
| 277 | clock = timekeeper.clock; | ||
| 278 | cycle_now = clock->read(clock); | ||
| 279 | |||
| 280 | /* calculate the delta since the last update_wall_time: */ | ||
| 281 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; | ||
| 282 | |||
| 283 | /* convert to nanoseconds: */ | ||
| 284 | nsecs = clocksource_cyc2ns(cycle_delta, timekeeper.mult, | ||
| 285 | timekeeper.shift); | ||
| 286 | 284 | ||
| 287 | } while (read_seqretry(&xtime_lock, seq)); | 285 | } while (read_seqretry(&xtime_lock, seq)); |
| 288 | 286 | ||
| @@ -445,23 +443,10 @@ void getrawmonotonic(struct timespec *ts) | |||
| 445 | { | 443 | { |
| 446 | unsigned long seq; | 444 | unsigned long seq; |
| 447 | s64 nsecs; | 445 | s64 nsecs; |
| 448 | cycle_t cycle_now, cycle_delta; | ||
| 449 | struct clocksource *clock; | ||
| 450 | 446 | ||
| 451 | do { | 447 | do { |
| 452 | seq = read_seqbegin(&xtime_lock); | 448 | seq = read_seqbegin(&xtime_lock); |
| 453 | 449 | nsecs = timekeeping_get_ns_raw(); | |
| 454 | /* read clocksource: */ | ||
| 455 | clock = timekeeper.clock; | ||
| 456 | cycle_now = clock->read(clock); | ||
| 457 | |||
| 458 | /* calculate the delta since the last update_wall_time: */ | ||
| 459 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; | ||
| 460 | |||
| 461 | /* convert to nanoseconds: */ | ||
| 462 | nsecs = clocksource_cyc2ns(cycle_delta, clock->mult, | ||
| 463 | clock->shift); | ||
| 464 | |||
| 465 | *ts = raw_time; | 450 | *ts = raw_time; |
| 466 | 451 | ||
| 467 | } while (read_seqretry(&xtime_lock, seq)); | 452 | } while (read_seqretry(&xtime_lock, seq)); |
