author    Andi Kleen <ak@suse.de>                   2006-05-08 09:17:31 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>    2006-05-08 12:34:56 -0400
commit    6810b548b25114607e0814612d84125abccc0a4f (patch)
tree      51dab8f9b809479b038e2d957e74fba7e1e99e49 /drivers/cpufreq/cpufreq_ondemand.c
parent    ac71d12c990526b01ef6cfe50907ef8530a30331 (diff)
[PATCH] x86_64: Move ondemand timer into own work queue
Taking the CPU hotplug semaphore from work running on the shared events
workqueue is unsafe, because other tasks can flush (and thus wait on)
that workqueue while holding the semaphore. This results in a deadlock.

To avoid this, move the DBS timer into its own workqueue, which is not
affected by flushes of other workqueues.
Has been acked by Venkatesh.
Cc: venkatesh.pallipadi@intel.com
Cc: cpufreq@lists.linux.org.uk
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
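For orientation before the diff: below is a minimal, hypothetical sketch of
both the deadlock and the pattern the patch adopts, written against the
2.6.16-era workqueue API used here (DECLARE_WORK still takes a function and
a data pointer). Every my_* identifier is invented for illustration; only
the workqueue calls themselves appear in the patch.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static DEFINE_MUTEX(my_lock);
static struct workqueue_struct *my_wq;	/* private queue, like dbs_workq */

static void my_work_fn(void *data);
static DECLARE_WORK(my_work, my_work_fn, NULL);

static void my_work_fn(void *data)
{
	/* Old scheme: this ran on keventd via schedule_delayed_work().
	 * A task that held my_lock and called flush_scheduled_work()
	 * would wait for this function, which in turn waits for
	 * my_lock -- the deadlock the commit message describes. */
	mutex_lock(&my_lock);
	/* ... periodic work ... */
	mutex_unlock(&my_lock);

	/* New scheme: re-arm on the private queue; flushing keventd
	 * elsewhere can no longer wait on this work item. */
	queue_delayed_work(my_wq, &my_work, msecs_to_jiffies(100));
}

static int __init my_init(void)
{
	my_wq = create_singlethread_workqueue("my_wq");
	if (!my_wq)
		return -ENOMEM;
	queue_delayed_work(my_wq, &my_work, msecs_to_jiffies(100));
	return 0;
}

static void __exit my_exit(void)
{
	/* Stop the self-rearming work first, then drop the queue. */
	cancel_rearming_delayed_workqueue(my_wq, &my_work);
	destroy_workqueue(my_wq);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");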
Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')
 drivers/cpufreq/cpufreq_ondemand.c | 28 ++++++++++++++++++++--------
 1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 956d121cb161..3e6ffcaa5af4 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -74,6 +74,8 @@ static unsigned int dbs_enable;	/* number of CPUs using this policy */
 static DEFINE_MUTEX	(dbs_mutex);
 static DECLARE_WORK	(dbs_work, do_dbs_timer, NULL);
 
+static struct workqueue_struct *dbs_workq;
+
 struct dbs_tuners {
 	unsigned int sampling_rate;
 	unsigned int sampling_down_factor;
@@ -364,23 +366,29 @@ static void do_dbs_timer(void *data)
 	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
-	schedule_delayed_work(&dbs_work,
-			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
+	queue_delayed_work(dbs_workq, &dbs_work,
+			   usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	mutex_unlock(&dbs_mutex);
 }
 
 static inline void dbs_timer_init(void)
 {
 	INIT_WORK(&dbs_work, do_dbs_timer, NULL);
-	schedule_delayed_work(&dbs_work,
-			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
+	if (!dbs_workq)
+		dbs_workq = create_singlethread_workqueue("ondemand");
+	if (!dbs_workq) {
+		printk(KERN_ERR "ondemand: Cannot initialize kernel thread\n");
+		return;
+	}
+	queue_delayed_work(dbs_workq, &dbs_work,
+			   usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	return;
 }
 
 static inline void dbs_timer_exit(void)
 {
-	cancel_delayed_work(&dbs_work);
-	return;
+	if (dbs_workq)
+		cancel_rearming_delayed_workqueue(dbs_workq, &dbs_work);
 }
 
 static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
@@ -489,8 +497,12 @@ static int __init cpufreq_gov_dbs_init(void)
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
-	/* Make sure that the scheduled work is indeed not running */
-	flush_scheduled_work();
+	/* Make sure that the scheduled work is indeed not running.
+	   Assumes the timer has been cancelled first. */
+	if (dbs_workq) {
+		flush_workqueue(dbs_workq);
+		destroy_workqueue(dbs_workq);
+	}
 
 	cpufreq_unregister_governor(&cpufreq_gov_dbs);
 }