author     Roman Zippel <zippel@linux-m68k.org>    2008-08-20 19:37:28 -0400
committer  Ingo Molnar <mingo@elte.hu>             2008-08-21 03:50:24 -0400
commit     9a055117d3d9cb562f83f8d4cd88772761f4cab0
tree       a822607ebb46491e3b480d11c136c5bc8585e38c /kernel/time
parent     1aa5dfb751d275ae7117d3b73ac423b4a46f2a73
clocksource: introduce clocksource_forward_now()
To keep the raw monotonic patch simple, first introduce
clocksource_forward_now(), which takes care of the offset since the last
update_wall_time() call and adds it to the clock, so the places that make
significant changes to the clock no longer need to deal with that offset
explicitly.
This also gets rid of timekeeping_suspend_nsecs: instead of waiting until
resume, the value is accumulated during suspend. In the end there is only a
single user of __get_nsec_offset() left, so I integrated it back into
getnstimeofday().
Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/timekeeping.c | 71
1 file changed, 33 insertions(+), 38 deletions(-)
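
The new helper's logic, shown verbatim in the first hunk below, boils down to: read the clocksource, compute the masked cycle delta since cycle_last, re-base cycle_last, and fold the delta, scaled to nanoseconds, into xtime. The following is a standalone C sketch of that idea; the struct layout, the fake one-tick-per-nanosecond counter, and the simplified cyc2ns() scaling are illustrative stand-ins, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

typedef uint64_t cycle_t;

/* Simplified stand-in for the kernel's struct clocksource. */
struct clocksource {
	cycle_t (*read)(void);	/* current counter value */
	cycle_t cycle_last;	/* counter value at the last accumulation */
	cycle_t mask;		/* counter width mask (handles wrap-around) */
	uint32_t mult;		/* cycles -> nanoseconds multiplier */
	uint32_t shift;		/* cycles -> nanoseconds shift */
};

struct ts { int64_t tv_sec; long tv_nsec; };

static struct ts xtime;		/* wall time as of the last accumulation */

/* Stand-in for cyc2ns(): scale a cycle delta to nanoseconds. */
static int64_t cyc2ns(const struct clocksource *cs, cycle_t delta)
{
	return (int64_t)((delta * cs->mult) >> cs->shift);
}

/* Stand-in for timespec_add_ns(). */
static void ts_add_ns(struct ts *t, int64_t ns)
{
	t->tv_nsec += ns;
	while (t->tv_nsec >= NSEC_PER_SEC) {
		t->tv_nsec -= NSEC_PER_SEC;
		t->tv_sec++;
	}
}

/*
 * The idea of clocksource_forward_now(): fold every cycle that elapsed
 * since cycle_last into xtime and re-base cycle_last, so the clock can
 * then be set, switched or suspended without losing the pending offset.
 */
static void forward_now(struct clocksource *cs)
{
	cycle_t now = cs->read();
	cycle_t delta = (now - cs->cycle_last) & cs->mask;

	cs->cycle_last = now;
	ts_add_ns(&xtime, cyc2ns(cs, delta));
}

/* Fake free-running counter for the demo: one tick per nanosecond. */
static cycle_t fake_counter;
static cycle_t fake_read(void) { return fake_counter; }

int main(void)
{
	struct clocksource cs = {
		.read = fake_read, .cycle_last = 0,
		.mask = ~(cycle_t)0, .mult = 1, .shift = 0,
	};

	fake_counter = 1500000000ULL;	/* 1.5 s worth of ticks elapse */
	forward_now(&cs);		/* xtime catches up before a clock change */

	printf("xtime = %lld.%09ld s\n", (long long)xtime.tv_sec, xtime.tv_nsec);
	return 0;
}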
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e91c29f961c9..83d3555a6998 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -58,27 +58,23 @@ struct clocksource *clock;
 
 #ifdef CONFIG_GENERIC_TIME
 /**
- * __get_nsec_offset - Returns nanoseconds since last call to periodic_hook
+ * clocksource_forward_now - update clock to the current time
  *
- * private function, must hold xtime_lock lock when being
- * called. Returns the number of nanoseconds since the
- * last call to update_wall_time() (adjusted by NTP scaling)
+ * Forward the current clock to update its state since the last call to
+ * update_wall_time(). This is useful before significant clock changes,
+ * as it avoids having to deal with this time offset explicitly.
  */
-static inline s64 __get_nsec_offset(void)
+static void clocksource_forward_now(void)
 {
 	cycle_t cycle_now, cycle_delta;
-	s64 ns_offset;
+	s64 nsec;
 
-	/* read clocksource: */
 	cycle_now = clocksource_read(clock);
-
-	/* calculate the delta since the last update_wall_time: */
 	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+	clock->cycle_last = cycle_now;
 
-	/* convert to nanoseconds: */
-	ns_offset = cyc2ns(clock, cycle_delta);
-
-	return ns_offset;
+	nsec = cyc2ns(clock, cycle_delta);
+	timespec_add_ns(&xtime, nsec);
 }
 
 /**
@@ -89,6 +85,7 @@ static inline s64 __get_nsec_offset(void)
  */
 void getnstimeofday(struct timespec *ts)
 {
+	cycle_t cycle_now, cycle_delta;
 	unsigned long seq;
 	s64 nsecs;
 
@@ -96,7 +93,15 @@ void getnstimeofday(struct timespec *ts)
 		seq = read_seqbegin(&xtime_lock);
 
 		*ts = xtime;
-		nsecs = __get_nsec_offset();
+
+		/* read clocksource: */
+		cycle_now = clocksource_read(clock);
+
+		/* calculate the delta since the last update_wall_time: */
+		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+
+		/* convert to nanoseconds: */
+		nsecs = cyc2ns(clock, cycle_delta);
 
 	} while (read_seqretry(&xtime_lock, seq));
 
@@ -129,22 +134,22 @@ EXPORT_SYMBOL(do_gettimeofday);
  */
 int do_settimeofday(struct timespec *tv)
 {
+	struct timespec ts_delta;
 	unsigned long flags;
-	time_t wtm_sec, sec = tv->tv_sec;
-	long wtm_nsec, nsec = tv->tv_nsec;
 
 	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
 		return -EINVAL;
 
 	write_seqlock_irqsave(&xtime_lock, flags);
 
-	nsec -= __get_nsec_offset();
+	clocksource_forward_now();
+
+	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
+	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
+	wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);
 
-	wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
-	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+	xtime = *tv;
 
-	set_normalized_timespec(&xtime, sec, nsec);
-	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
 	update_xtime_cache(0);
 
 	clock->error = 0;
@@ -170,22 +175,17 @@ EXPORT_SYMBOL(do_settimeofday);
 static void change_clocksource(void)
 {
 	struct clocksource *new;
-	cycle_t now;
-	u64 nsec;
 
 	new = clocksource_get_next();
 
 	if (clock == new)
 		return;
 
-	new->cycle_last = 0;
-	now = clocksource_read(new);
-	nsec = __get_nsec_offset();
-	timespec_add_ns(&xtime, nsec);
+	clocksource_forward_now();
 
 	clock = new;
-	clock->cycle_last = now;
-
+	clock->cycle_last = 0;
+	clock->cycle_last = clocksource_read(new);
 	clock->error = 0;
 	clock->xtime_nsec = 0;
 	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
@@ -200,8 +200,8 @@ static void change_clocksource(void)
 	 */
 }
 #else
+static inline void clocksource_forward_now(void) { }
 static inline void change_clocksource(void) { }
-static inline s64 __get_nsec_offset(void) { return 0; }
 #endif
 
 /**
@@ -265,8 +265,6 @@ void __init timekeeping_init(void)
 static int timekeeping_suspended;
 /* time in seconds when suspend began */
 static unsigned long timekeeping_suspend_time;
-/* xtime offset when we went into suspend */
-static s64 timekeeping_suspend_nsecs;
 
 /**
  * timekeeping_resume - Resumes the generic timekeeping subsystem.
@@ -292,8 +290,6 @@ static int timekeeping_resume(struct sys_device *dev)
 		wall_to_monotonic.tv_sec -= sleep_length;
 		total_sleep_time += sleep_length;
 	}
-	/* Make sure that we have the correct xtime reference */
-	timespec_add_ns(&xtime, timekeeping_suspend_nsecs);
 	update_xtime_cache(0);
 	/* re-base the last cycle value */
 	clock->cycle_last = 0;
@@ -319,8 +315,7 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
 	timekeeping_suspend_time = read_persistent_clock();
 
 	write_seqlock_irqsave(&xtime_lock, flags);
-	/* Get the current xtime offset */
-	timekeeping_suspend_nsecs = __get_nsec_offset();
+	clocksource_forward_now();
 	timekeeping_suspended = 1;
 	write_sequnlock_irqrestore(&xtime_lock, flags);
 
@@ -461,10 +456,10 @@ void update_wall_time(void)
 	 */
 	while (offset >= clock->cycle_interval) {
 		/* accumulate one interval */
-		clock->xtime_nsec += clock->xtime_interval;
-		clock->cycle_last += clock->cycle_interval;
 		offset -= clock->cycle_interval;
+		clock->cycle_last += clock->cycle_interval;
 
+		clock->xtime_nsec += clock->xtime_interval;
 		if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
 			clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
 			xtime.tv_sec++;
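
The last hunk only reorders the accumulation loop in update_wall_time(): each pass now consumes one cycle_interval from the pending offset and advances cycle_last before crediting xtime_interval nanoseconds to xtime_nsec. A standalone sketch of that ordering, with stand-in local variables rather than the kernel's clock structure:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	/* Stand-in values: one cycle per nanosecond, shift of 0. */
	uint64_t offset = 2500000000ULL;		/* pending cycles since cycle_last */
	const uint64_t cycle_interval = 1000000000ULL;	/* cycles per accumulation step */
	const uint64_t xtime_interval = 1000000000ULL;	/* nanoseconds per accumulation step */
	const unsigned int shift = 0;
	uint64_t cycle_last = 0, xtime_nsec = 0;
	long xtime_sec = 0;

	while (offset >= cycle_interval) {
		/* accumulate one interval: consume the offset and advance
		 * cycle_last first, then credit the nanoseconds */
		offset -= cycle_interval;
		cycle_last += cycle_interval;

		xtime_nsec += xtime_interval;
		if (xtime_nsec >= NSEC_PER_SEC << shift) {
			xtime_nsec -= NSEC_PER_SEC << shift;
			xtime_sec++;
		}
	}

	printf("accumulated %ld s, cycle_last now %llu, leftover offset %llu\n",
	       xtime_sec, (unsigned long long)cycle_last, (unsigned long long)offset);
	return 0;
}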