path: root/kernel/time
author     Martin Schwidefsky <schwidefsky@de.ibm.com>  2009-08-14 09:47:25 -0400
committer  Thomas Gleixner <tglx@linutronix.de>         2009-08-15 04:55:46 -0400
commit     c55c87c892c1875deace0c8fc28787335277fdf2 (patch)
tree       8b3a4c08f0eea40683cfb94bc44ef18507ae3011 /kernel/time
parent     fb63a0ebe615fba9de8c75ea44ded999d1e24c65 (diff)
clocksource: Move watchdog downgrade to a work queue thread
Move the downgrade of an unstable clocksource from the timer interrupt
context into the process context of a work queue thread. This is needed
to be able to do the clocksource switch with stop_machine.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Acked-by: John Stultz <johnstul@us.ibm.com>
Cc: Daniel Walker <dwalker@fifo99.com>
LKML-Reference: <20090814134809.354926067@de.ibm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
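The key idea is the split between the two contexts: the watchdog timer callback runs in timer (interrupt) context, where it must not sleep, so it only marks the clocksource unstable and calls schedule_work(); the actual downgrade via clocksource_change_rating() then happens in the work queue handler, which runs in process context where a later stop_machine based switch is possible. Below is a minimal, self-contained sketch of that deferral pattern, for illustration only; the names example_work, example_timer, example_timer_fn, example_work_fn and example_setup are made up and are not part of the patch.

#include <linux/workqueue.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>

static struct work_struct example_work;		/* hypothetical, not in the patch */
static struct timer_list example_timer;

/* Runs in process context: may sleep, take mutexes, call stop_machine(). */
static void example_work_fn(struct work_struct *work)
{
	pr_info("doing the heavy downgrade work in process context\n");
}

/* Runs in timer (interrupt) context: must not sleep, so just defer. */
static void example_timer_fn(unsigned long data)
{
	schedule_work(&example_work);
	mod_timer(&example_timer, jiffies + HZ / 2);
}

static void example_setup(void)
{
	INIT_WORK(&example_work, example_work_fn);
	setup_timer(&example_timer, example_timer_fn, 0);
	mod_timer(&example_timer, jiffies + HZ / 2);
}

This mirrors what the patch does: clocksource_watchdog() sets CLOCK_SOURCE_UNSTABLE and schedules watchdog_work, and clocksource_watchdog_work() later removes the unstable clocksources from the watchdog list and lowers their rating.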
Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/clocksource.c  56
1 file changed, 39 insertions(+), 17 deletions(-)
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 56aaa749645..f1508019bfb 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -143,10 +143,13 @@ fs_initcall(clocksource_done_booting);
 static LIST_HEAD(watchdog_list);
 static struct clocksource *watchdog;
 static struct timer_list watchdog_timer;
+static struct work_struct watchdog_work;
 static DEFINE_SPINLOCK(watchdog_lock);
 static cycle_t watchdog_last;
 static int watchdog_running;
 
+static void clocksource_watchdog_work(struct work_struct *work);
+
 /*
  * Interval: 0.5sec Threshold: 0.0625s
  */
@@ -158,15 +161,16 @@ static void clocksource_unstable(struct clocksource *cs, int64_t delta)
 	printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
 	       cs->name, delta);
 	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
-	clocksource_change_rating(cs, 0);
-	list_del(&cs->wd_list);
+	cs->flags |= CLOCK_SOURCE_UNSTABLE;
+	schedule_work(&watchdog_work);
 }
 
 static void clocksource_watchdog(unsigned long data)
 {
-	struct clocksource *cs, *tmp;
+	struct clocksource *cs;
 	cycle_t csnow, wdnow;
 	int64_t wd_nsec, cs_nsec;
+	int next_cpu;
 
 	spin_lock(&watchdog_lock);
 	if (!watchdog_running)
@@ -176,7 +180,12 @@ static void clocksource_watchdog(unsigned long data)
 	wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
 	watchdog_last = wdnow;
 
-	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
+	list_for_each_entry(cs, &watchdog_list, wd_list) {
+
+		/* Clocksource already marked unstable? */
+		if (cs->flags & CLOCK_SOURCE_UNSTABLE)
+			continue;
+
 		csnow = cs->read(cs);
 
 		/* Clocksource initialized ? */
@@ -207,19 +216,15 @@ static void clocksource_watchdog(unsigned long data)
 		}
 	}
 
-	if (!list_empty(&watchdog_list)) {
-		/*
-		 * Cycle through CPUs to check if the CPUs stay
-		 * synchronized to each other.
-		 */
-		int next_cpu = cpumask_next(raw_smp_processor_id(),
-					    cpu_online_mask);
-
-		if (next_cpu >= nr_cpu_ids)
-			next_cpu = cpumask_first(cpu_online_mask);
-		watchdog_timer.expires += WATCHDOG_INTERVAL;
-		add_timer_on(&watchdog_timer, next_cpu);
-	}
+	/*
+	 * Cycle through CPUs to check if the CPUs stay synchronized
+	 * to each other.
+	 */
+	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
+	if (next_cpu >= nr_cpu_ids)
+		next_cpu = cpumask_first(cpu_online_mask);
+	watchdog_timer.expires += WATCHDOG_INTERVAL;
+	add_timer_on(&watchdog_timer, next_cpu);
 out:
 	spin_unlock(&watchdog_lock);
 }
@@ -228,6 +233,7 @@ static inline void clocksource_start_watchdog(void)
 {
 	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
 		return;
+	INIT_WORK(&watchdog_work, clocksource_watchdog_work);
 	init_timer(&watchdog_timer);
 	watchdog_timer.function = clocksource_watchdog;
 	watchdog_last = watchdog->read(watchdog);
@@ -313,6 +319,22 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
 	spin_unlock_irqrestore(&watchdog_lock, flags);
 }
 
+static void clocksource_watchdog_work(struct work_struct *work)
+{
+	struct clocksource *cs, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&watchdog_lock, flags);
+	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list)
+		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
+			list_del_init(&cs->wd_list);
+			clocksource_change_rating(cs, 0);
+		}
+	/* Check if the watchdog timer needs to be stopped. */
+	clocksource_stop_watchdog();
+	spin_unlock(&watchdog_lock);
+}
+
 #else /* CONFIG_CLOCKSOURCE_WATCHDOG */
 
 static void clocksource_enqueue_watchdog(struct clocksource *cs)