author     Martin Schwidefsky <schwidefsky@de.ibm.com>   2009-08-18 11:09:42 -0400
committer  Thomas Gleixner <tglx@linutronix.de>          2009-08-19 06:00:56 -0400
commit     01548f4d3e8e94caf323a4f664eb347fd34a34ab
tree       2717e7d4dd781be2d57737df96b074451090f3d9
parent     d0981a1b21a03866c8da7f44e35e389c2e0d6061
clocksource: Avoid clocksource watchdog circular locking dependency
stop_machine from a multithreaded workqueue is not allowed because of a
circular locking dependency between cpu_down and the workqueue execution.
Use a kernel thread to do the clocksource downgrade.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: john stultz <johnstul@us.ibm.com>
LKML-Reference: <20090818170942.3ab80c91@skybase>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
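Editorial note: the fix follows a common hand-off pattern; the work handler stays trivial and only launches a kernel thread, and the thread is then free to take mutexes or call stop_machine()-backed interfaces without deadlocking the multithreaded workqueue. Below is a minimal, self-contained sketch of that pattern as a hypothetical demo module; it is not part of the patch, and all demo_* / kheavy_demo names are invented for illustration. The real change to clocksource.c follows in the diff.

/*
 * Hypothetical demo of the "workqueue defers to kthread" pattern
 * described in the commit message above; not the kernel's actual code.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>

static DECLARE_COMPLETION(demo_heavy_done);

static int demo_heavy_kthread(void *data)
{
	/*
	 * Kernel-thread context: blocking calls that are forbidden from a
	 * multithreaded workqueue (e.g. anything that ends up in
	 * cpu_down()/stop_machine()) would go here.
	 */
	pr_info("demo: heavy work executed in kthread context\n");
	complete(&demo_heavy_done);
	return 0;
}

static void demo_work_fn(struct work_struct *work)
{
	struct task_struct *task;

	/* Keep the work handler trivial: just spawn the thread. */
	task = kthread_run(demo_heavy_kthread, NULL, "kheavy_demo");
	if (IS_ERR(task))
		complete(&demo_heavy_done);	/* don't stall module exit */
}

static DECLARE_WORK(demo_work, demo_work_fn);

static int __init demo_init(void)
{
	/* schedule_work() is safe even from atomic context, e.g. a timer. */
	schedule_work(&demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	flush_work(&demo_work);
	wait_for_completion(&demo_heavy_done);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

In the actual patch no completion is needed: as the added comment in clocksource_watchdog_work() notes, if kthread_run() fails the next watchdog scan over watchdog_list simply finds the unstable clocksource again and reschedules the work.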
Diffstat (limited to 'kernel/time/clocksource.c')
-rw-r--r--  kernel/time/clocksource.c  |  19
1 file changed, 16 insertions(+), 3 deletions(-)
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index c6bff11f7957..e0c86ad6e9fb 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
 #include <linux/tick.h>
+#include <linux/kthread.h>
 
 void timecounter_init(struct timecounter *tc,
 		      const struct cyclecounter *cc,
@@ -130,7 +131,7 @@ static DEFINE_SPINLOCK(watchdog_lock);
 static cycle_t watchdog_last;
 static int watchdog_running;
 
-static void clocksource_watchdog_work(struct work_struct *work);
+static int clocksource_watchdog_kthread(void *data);
 static void __clocksource_change_rating(struct clocksource *cs, int rating);
 
 /*
@@ -139,6 +140,15 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating);
 #define WATCHDOG_INTERVAL (HZ >> 1)
 #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
 
+static void clocksource_watchdog_work(struct work_struct *work)
+{
+	/*
+	 * If kthread_run fails the next watchdog scan over the
+	 * watchdog_list will find the unstable clock again.
+	 */
+	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
+}
+
 static void clocksource_unstable(struct clocksource *cs, int64_t delta)
 {
 	printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
@@ -167,8 +177,10 @@ static void clocksource_watchdog(unsigned long data)
 	list_for_each_entry(cs, &watchdog_list, wd_list) {
 
 		/* Clocksource already marked unstable? */
-		if (cs->flags & CLOCK_SOURCE_UNSTABLE)
+		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
+			schedule_work(&watchdog_work);
 			continue;
+		}
 
 		csnow = cs->read(cs);
 
@@ -304,7 +316,7 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
 	spin_unlock_irqrestore(&watchdog_lock, flags);
 }
 
-static void clocksource_watchdog_work(struct work_struct *work)
+static int clocksource_watchdog_kthread(void *data)
 {
 	struct clocksource *cs, *tmp;
 	unsigned long flags;
@@ -327,6 +339,7 @@ static void clocksource_watchdog_work(struct work_struct *work)
 		__clocksource_change_rating(cs, 0);
 	}
 	mutex_unlock(&clocksource_mutex);
+	return 0;
 }
 
 #else /* CONFIG_CLOCKSOURCE_WATCHDOG */