author	John Stultz <johnstul@us.ibm.com>	2008-08-20 19:37:28 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-08-21 03:50:23 -0400
commit	1aa5dfb751d275ae7117d3b73ac423b4a46f2a73 (patch)
tree	dde8d56d000c7885c43df0e61f72f4a75d5759fe /include/linux/clocksource.h
parent	ee974e01e5ef2914036f08c8e41d1a3fa8bfc9d9 (diff)
clocksource: keep track of original clocksource frequency
The clocksource frequency is represented by clocksource->mult/2^(clocksource->shift). Currently, when NTP makes adjustments to the clock frequency, they are made directly to the mult value. This has the drawback that once changed, we cannot know what the original mult value was, or how much adjustment has been applied. This property causes problems in calculating proper ntp intervals when switching back and forth between clocksources.

This patch separates the current mult value into a mult and mult_orig pair. The mult_orig value stays constant, while the ntp clocksource adjustments are done only to the mult value. This allows for correct ntp interval calculation and additionally lays the groundwork for a new notion of time, what I'm calling the monotonic-raw time, which is introduced in a following patch.

Signed-off-by: John Stultz <johnstul@us.ibm.com>
Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
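A minimal user-space sketch of the mult/shift representation described above (not from the kernel tree; the constants are made up, assuming a hypothetical 1 GHz-style counter with shift = 22 so that one cycle maps to one nanosecond). It shows why the unadjusted multiplier has to be preserved once NTP has nudged mult:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical clocksource: ns = (cycles * mult) >> shift.
	 * With mult = 1 << 22 and shift = 22, one cycle is exactly 1 ns. */
	#define EXAMPLE_SHIFT 22
	#define EXAMPLE_MULT  (1u << EXAMPLE_SHIFT)

	static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
	{
		return (cycles * mult) >> shift;
	}

	int main(void)
	{
		uint32_t mult_orig = EXAMPLE_MULT;   /* nominal, hardware frequency */
		uint32_t mult = EXAMPLE_MULT + 100;  /* after an NTP speed-up */

		/* Once NTP has adjusted mult, only mult_orig still gives the
		 * unadjusted cycle -> ns ratio of the hardware. */
		printf("10000000 cycles, nominal:      %llu ns\n",
		       (unsigned long long)cyc2ns(10000000, mult_orig, EXAMPLE_SHIFT));
		printf("10000000 cycles, NTP-adjusted: %llu ns\n",
		       (unsigned long long)cyc2ns(10000000, mult, EXAMPLE_SHIFT));
		return 0;
	}

With only a single mult that NTP keeps rewriting, the nominal conversion on the first line can no longer be reproduced, which is exactly the information the new mult_orig field retains.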
Diffstat (limited to 'include/linux/clocksource.h')
-rw-r--r--  include/linux/clocksource.h | 11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 55e434feec9..f0a7fb98441 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -45,7 +45,8 @@ struct clocksource;
  * @read:		returns a cycle value
  * @mask:		bitmask for two's complement
  *			subtraction of non 64 bit counters
- * @mult:		cycle to nanosecond multiplier
+ * @mult:		cycle to nanosecond multiplier (adjusted by NTP)
+ * @mult_orig:		cycle to nanosecond multiplier (unadjusted by NTP)
  * @shift:		cycle to nanosecond divisor (power of two)
  * @flags:		flags describing special properties
  * @vread:		vsyscall based read
@@ -63,6 +64,7 @@ struct clocksource {
 	cycle_t (*read)(void);
 	cycle_t mask;
 	u32 mult;
+	u32 mult_orig;
 	u32 shift;
 	unsigned long flags;
 	cycle_t (*vread)(void);
@@ -201,16 +203,17 @@ static inline void clocksource_calculate_interval(struct clocksource *c,
 {
 	u64 tmp;
 
-	/* XXX - All of this could use a whole lot of optimization */
+	/* Do the ns -> cycle conversion first, using original mult */
 	tmp = length_nsec;
 	tmp <<= c->shift;
-	tmp += c->mult/2;
-	do_div(tmp, c->mult);
+	tmp += c->mult_orig/2;
+	do_div(tmp, c->mult_orig);
 
 	c->cycle_interval = (cycle_t)tmp;
 	if (c->cycle_interval == 0)
 		c->cycle_interval = 1;
 
+	/* Go back from cycles -> shifted ns, this time use ntp adjused mult */
 	c->xtime_interval = (u64)c->cycle_interval * c->mult;
 }
 
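A simplified user-space restatement of the calculation in the hunk above (assumptions: do_div() replaced by plain 64-bit division, struct fields reduced to the ones used here, and made-up mult/shift values). It illustrates that cycle_interval is derived from the stable mult_orig, while xtime_interval picks up the NTP-adjusted mult:

	#include <stdint.h>
	#include <stdio.h>

	/* Cut-down stand-in for the struct clocksource fields used by
	 * clocksource_calculate_interval(). */
	struct example_cs {
		uint32_t mult;       /* NTP-adjusted multiplier */
		uint32_t mult_orig;  /* unadjusted multiplier */
		uint32_t shift;
		uint64_t cycle_interval;
		uint64_t xtime_interval;
	};

	static void calc_interval(struct example_cs *c, unsigned long length_nsec)
	{
		/* ns -> cycles with the original multiplier, so the cycle
		 * interval does not drift as NTP tweaks mult. */
		uint64_t tmp = length_nsec;
		tmp <<= c->shift;
		tmp += c->mult_orig / 2;   /* round to nearest */
		tmp /= c->mult_orig;       /* do_div() in the kernel */

		c->cycle_interval = tmp ? tmp : 1;

		/* cycles -> shifted ns with the NTP-adjusted multiplier. */
		c->xtime_interval = c->cycle_interval * c->mult;
	}

	int main(void)
	{
		/* Made-up 1 GHz-style clocksource, NTP running it slightly fast. */
		struct example_cs c = { .mult = (1u << 22) + 100,
					.mult_orig = 1u << 22, .shift = 22 };

		calc_interval(&c, 1000000);   /* 1 ms tick length */
		printf("cycle_interval=%llu xtime_interval=%llu\n",
		       (unsigned long long)c.cycle_interval,
		       (unsigned long long)c.xtime_interval);
		return 0;
	}

Before this patch both steps used the same NTP-adjusted mult, so switching clocksources after an adjustment computed the cycle interval from an already-skewed ratio; keeping mult_orig makes the first step repeatable.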