author		Thomas Gleixner <tglx@linutronix.de>	2007-05-09 05:35:15 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-09 15:30:56 -0400
commit		b52f52a093bb1e841e014c2087b5bee7162da413
tree		7b7135897195fc9d14473d3ab824d59a4b65e5ad
parent		4037d452202e34214e8a939fa5621b2b3bbb45b7
clocksource: fix resume logic
We need to make sure that the clocksources are resumed when timekeeping is
resumed. The current resume logic does not guarantee this.

Add a resume function pointer to the clocksource struct, so clocksource
drivers which need to reinitialize the clocksource can provide a resume
function.

Add a resume function, which calls the clocksource resume functions where
available and resets the watchdog, so a stable TSC can be used across
suspend/resume.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
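Note: as a driver-side illustration (not part of this patch), a clocksource whose
hardware counter loses state across suspend can populate the new ->resume hook,
which clocksource_resume() then invokes on wakeup. The example_* names and the
1 MHz counter rate below are hypothetical, chosen only to sketch the usage:

#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>

/* Hypothetical counter read-out; a real driver would read its hardware. */
static cycle_t example_read(void)
{
	return (cycle_t)0;
}

/* Reinitialize the counter hardware after suspend/resume. */
static void example_resume(void)
{
	/* reprogram the hardware here */
}

static struct clocksource clocksource_example = {
	.name	= "example",
	.rating	= 200,
	.read	= example_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.shift	= 20,
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume	= example_resume,	/* the hook added by this patch */
};

static int __init example_clocksource_init(void)
{
	/* assume a 1 MHz counter, purely for illustration */
	clocksource_example.mult =
		clocksource_hz2mult(1000000, clocksource_example.shift);
	return clocksource_register(&clocksource_example);
}
module_init(example_clocksource_init);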
-rw-r--r--	include/linux/clocksource.h	 3
-rw-r--r--	kernel/time/clocksource.c	45
-rw-r--r--	kernel/timer.c	 2
3 files changed, 50 insertions(+), 0 deletions(-)
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 2665ca04cf8f..bf297b03a4e4 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -49,6 +49,7 @@ struct clocksource;
  * @shift:		cycle to nanosecond divisor (power of two)
  * @flags:		flags describing special properties
  * @vread:		vsyscall based read
+ * @resume:		resume function for the clocksource, if necessary
  * @cycle_interval:	Used internally by timekeeping core, please ignore.
  * @xtime_interval:	Used internally by timekeeping core, please ignore.
  */
@@ -65,6 +66,7 @@ struct clocksource {
 	u32 shift;
 	unsigned long flags;
 	cycle_t (*vread)(void);
+	void (*resume)(void);
 
 	/* timekeeping specific data, ignore */
 	cycle_t cycle_interval;
@@ -209,6 +211,7 @@ static inline void clocksource_calculate_interval(struct clocksource *c,
 extern int clocksource_register(struct clocksource*);
 extern struct clocksource* clocksource_get_next(void);
 extern void clocksource_change_rating(struct clocksource *cs, int rating);
+extern void clocksource_resume(void);
 
 #ifdef CONFIG_GENERIC_TIME_VSYSCALL
 extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index db0c725de5ea..3db5c3c460d7 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -74,6 +74,8 @@ static struct clocksource *watchdog;
 static struct timer_list watchdog_timer;
 static DEFINE_SPINLOCK(watchdog_lock);
 static cycle_t watchdog_last;
+static int watchdog_resumed;
+
 /*
  * Interval: 0.5sec Threshold: 0.0625s
  */
@@ -98,15 +100,26 @@ static void clocksource_watchdog(unsigned long data)
 	struct clocksource *cs, *tmp;
 	cycle_t csnow, wdnow;
 	int64_t wd_nsec, cs_nsec;
+	int resumed;
 
 	spin_lock(&watchdog_lock);
 
+	resumed = watchdog_resumed;
+	if (unlikely(resumed))
+		watchdog_resumed = 0;
+
 	wdnow = watchdog->read();
 	wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
 	watchdog_last = wdnow;
 
 	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
 		csnow = cs->read();
+
+		if (unlikely(resumed)) {
+			cs->wd_last = csnow;
+			continue;
+		}
+
 		/* Initialized ? */
 		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
 			if ((cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
@@ -136,6 +149,13 @@ static void clocksource_watchdog(unsigned long data)
 	}
 	spin_unlock(&watchdog_lock);
 }
+static void clocksource_resume_watchdog(void)
+{
+	spin_lock(&watchdog_lock);
+	watchdog_resumed = 1;
+	spin_unlock(&watchdog_lock);
+}
+
 static void clocksource_check_watchdog(struct clocksource *cs)
 {
 	struct clocksource *cse;
@@ -182,9 +202,34 @@ static void clocksource_check_watchdog(struct clocksource *cs)
 	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
 		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
 }
+
+static inline void clocksource_resume_watchdog(void) { }
 #endif
 
 /**
+ * clocksource_resume - resume the clocksource(s)
+ */
+void clocksource_resume(void)
+{
+	struct list_head *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&clocksource_lock, flags);
+
+	list_for_each(tmp, &clocksource_list) {
+		struct clocksource *cs;
+
+		cs = list_entry(tmp, struct clocksource, list);
+		if (cs->resume)
+			cs->resume();
+	}
+
+	clocksource_resume_watchdog();
+
+	spin_unlock_irqrestore(&clocksource_lock, flags);
+}
+
+/**
  * clocksource_get_next - Returns the selected clocksource
  *
  */
diff --git a/kernel/timer.c b/kernel/timer.c
index de85f8491c1d..59a28b1752f8 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1499,6 +1499,8 @@ unregister_time_interpolator(struct time_interpolator *ti)
 		prev = &curr->next;
 	}
 
+	clocksource_resume();
+
 	write_seqlock_irqsave(&xtime_lock, flags);
 	if (ti == time_interpolator) {
 		/* we lost the best time-interpolator: */