diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2007-05-09 05:35:15 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-09 15:30:56 -0400 |
commit | b52f52a093bb1e841e014c2087b5bee7162da413 (patch) | |
tree | 7b7135897195fc9d14473d3ab824d59a4b65e5ad /kernel/time/clocksource.c | |
parent | 4037d452202e34214e8a939fa5621b2b3bbb45b7 (diff) |
clocksource: fix resume logic
We need to make sure that the clocksources are resumed, when timekeeping is
resumed. The current resume logic does not guarantee this.
Add a resume function pointer to the clocksource struct, so clocksource
drivers which need to reinitialize the clocksource can provide a resume
function.
Add a resume function, which calls the clocksource resume functions where
available and resets the watchdog function, so a stable TSC can be used
across suspend/resume.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/time/clocksource.c')
-rw-r--r-- | kernel/time/clocksource.c | 45 |
1 file changed, 45 insertions, 0 deletions
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index db0c725de5ea..3db5c3c460d7 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
@@ -74,6 +74,8 @@ static struct clocksource *watchdog; | |||
74 | static struct timer_list watchdog_timer; | 74 | static struct timer_list watchdog_timer; |
75 | static DEFINE_SPINLOCK(watchdog_lock); | 75 | static DEFINE_SPINLOCK(watchdog_lock); |
76 | static cycle_t watchdog_last; | 76 | static cycle_t watchdog_last; |
77 | static int watchdog_resumed; | ||
78 | |||
77 | /* | 79 | /* |
78 | * Interval: 0.5sec Threshold: 0.0625s | 80 | * Interval: 0.5sec Threshold: 0.0625s |
79 | */ | 81 | */ |
@@ -98,15 +100,26 @@ static void clocksource_watchdog(unsigned long data) | |||
98 | struct clocksource *cs, *tmp; | 100 | struct clocksource *cs, *tmp; |
99 | cycle_t csnow, wdnow; | 101 | cycle_t csnow, wdnow; |
100 | int64_t wd_nsec, cs_nsec; | 102 | int64_t wd_nsec, cs_nsec; |
103 | int resumed; | ||
101 | 104 | ||
102 | spin_lock(&watchdog_lock); | 105 | spin_lock(&watchdog_lock); |
103 | 106 | ||
107 | resumed = watchdog_resumed; | ||
108 | if (unlikely(resumed)) | ||
109 | watchdog_resumed = 0; | ||
110 | |||
104 | wdnow = watchdog->read(); | 111 | wdnow = watchdog->read(); |
105 | wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask); | 112 | wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask); |
106 | watchdog_last = wdnow; | 113 | watchdog_last = wdnow; |
107 | 114 | ||
108 | list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) { | 115 | list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) { |
109 | csnow = cs->read(); | 116 | csnow = cs->read(); |
117 | |||
118 | if (unlikely(resumed)) { | ||
119 | cs->wd_last = csnow; | ||
120 | continue; | ||
121 | } | ||
122 | |||
110 | /* Initialized ? */ | 123 | /* Initialized ? */ |
111 | if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) { | 124 | if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) { |
112 | if ((cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) && | 125 | if ((cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) && |
@@ -136,6 +149,13 @@ static void clocksource_watchdog(unsigned long data) | |||
136 | } | 149 | } |
137 | spin_unlock(&watchdog_lock); | 150 | spin_unlock(&watchdog_lock); |
138 | } | 151 | } |
152 | static void clocksource_resume_watchdog(void) | ||
153 | { | ||
154 | spin_lock(&watchdog_lock); | ||
155 | watchdog_resumed = 1; | ||
156 | spin_unlock(&watchdog_lock); | ||
157 | } | ||
158 | |||
139 | static void clocksource_check_watchdog(struct clocksource *cs) | 159 | static void clocksource_check_watchdog(struct clocksource *cs) |
140 | { | 160 | { |
141 | struct clocksource *cse; | 161 | struct clocksource *cse; |
@@ -182,9 +202,34 @@ static void clocksource_check_watchdog(struct clocksource *cs) | |||
182 | if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) | 202 | if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) |
183 | cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; | 203 | cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; |
184 | } | 204 | } |
205 | |||
206 | static inline void clocksource_resume_watchdog(void) { } | ||
185 | #endif | 207 | #endif |
186 | 208 | ||
187 | /** | 209 | /** |
210 | * clocksource_resume - resume the clocksource(s) | ||
211 | */ | ||
212 | void clocksource_resume(void) | ||
213 | { | ||
214 | struct list_head *tmp; | ||
215 | unsigned long flags; | ||
216 | |||
217 | spin_lock_irqsave(&clocksource_lock, flags); | ||
218 | |||
219 | list_for_each(tmp, &clocksource_list) { | ||
220 | struct clocksource *cs; | ||
221 | |||
222 | cs = list_entry(tmp, struct clocksource, list); | ||
223 | if (cs->resume) | ||
224 | cs->resume(); | ||
225 | } | ||
226 | |||
227 | clocksource_resume_watchdog(); | ||
228 | |||
229 | spin_unlock_irqrestore(&clocksource_lock, flags); | ||
230 | } | ||
231 | |||
232 | /** | ||
188 | * clocksource_get_next - Returns the selected clocksource | 233 | * clocksource_get_next - Returns the selected clocksource |
189 | * | 234 | * |
190 | */ | 235 | */ |