path: root/drivers/cpufreq/cpufreq_ondemand.c
author		Tejun Heo <tj@kernel.org>	2011-01-26 06:12:50 -0500
committer	Tejun Heo <tj@kernel.org>	2011-01-26 06:12:50 -0500
commit		57df5573a56322e6895451f759c19e875252817d (patch)
tree		d34ec0be43b4fa29831f949f7be126c78cfba706 /drivers/cpufreq/cpufreq_ondemand.c
parent		bcb6d9161d1720cf68c7f4de0630e91cb95ee60c (diff)
cpufreq: use system_wq instead of dedicated workqueues
With cmwq, there's no reason for cpufreq drivers to use separate
workqueues.  Remove the dedicated workqueues from cpufreq_conservative
and cpufreq_ondemand and use system_wq instead.  The work items are
already sync canceled on stop, so it's already guaranteed that no work
is running on module exit.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Dave Jones <davej@redhat.com>
Cc: cpufreq@vger.kernel.org
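The shape of the change is easy to see outside the driver. The sketch below is illustrative only and assumes the standard workqueue API; the example_* identifiers and the chosen delay are made up for the example and do not appear in the commit. It contrasts the old pattern (a dedicated workqueue created at init and destroyed at exit) with the cmwq one (the same delayed work queued on the shared system_wq via schedule_delayed_work_on(), with nothing to create or destroy), plus the synchronous cancel on stop that the commit message relies on.

#include <linux/workqueue.h>

/* Old pattern: a dedicated workqueue owned by the driver (illustrative). */
static struct workqueue_struct *example_wq;

static int example_start_old(struct delayed_work *dwork, int cpu)
{
	example_wq = create_workqueue("example");
	if (!example_wq)
		return -ENOMEM;
	/* work must be queued (and requeued) on the private workqueue */
	queue_delayed_work_on(cpu, example_wq, dwork, HZ);
	return 0;
}

/* New pattern: queue the delayed work on system_wq; no setup or teardown. */
static void example_start_new(struct delayed_work *dwork, int cpu)
{
	schedule_delayed_work_on(cpu, dwork, HZ);
}

/* Either way, stop must cancel synchronously so nothing runs after exit. */
static void example_stop(struct delayed_work *dwork)
{
	cancel_delayed_work_sync(dwork);
}

With system_wq the only teardown obligation left is the sync cancel, which is why the init-time error handling and the destroy_workqueue() calls in the hunks below simply disappear.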
Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c  |  20  +++-----------------
1 file changed, 3 insertions(+), 17 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index c631f27a3dcc..58aa85ea5ec6 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -104,8 +104,6 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
  */
 static DEFINE_MUTEX(dbs_mutex);
 
-static struct workqueue_struct	*kondemand_wq;
-
 static struct dbs_tuners {
 	unsigned int sampling_rate;
 	unsigned int up_threshold;
@@ -667,7 +665,7 @@ static void do_dbs_timer(struct work_struct *work)
 		__cpufreq_driver_target(dbs_info->cur_policy,
 			dbs_info->freq_lo, CPUFREQ_RELATION_H);
 	}
-	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
+	schedule_delayed_work_on(cpu, &dbs_info->work, delay);
 	mutex_unlock(&dbs_info->timer_mutex);
 }
 
@@ -681,8 +679,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 
 	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
 	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
-	queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
-		delay);
+	schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
 }
 
 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
@@ -814,7 +811,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 static int __init cpufreq_gov_dbs_init(void)
 {
-	int err;
 	cputime64_t wall;
 	u64 idle_time;
 	int cpu = get_cpu();
@@ -838,22 +834,12 @@ static int __init cpufreq_gov_dbs_init(void)
 			MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
 	}
 
-	kondemand_wq = create_workqueue("kondemand");
-	if (!kondemand_wq) {
-		printk(KERN_ERR "Creation of kondemand failed\n");
-		return -EFAULT;
-	}
-	err = cpufreq_register_governor(&cpufreq_gov_ondemand);
-	if (err)
-		destroy_workqueue(kondemand_wq);
-
-	return err;
+	return cpufreq_register_governor(&cpufreq_gov_ondemand);
 }
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
 	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
-	destroy_workqueue(kondemand_wq);
 }
 
 
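For context, the "already sync canceled on stop" claim in the commit message refers to dbs_timer_exit(), visible as unchanged context in the third hunk above. In this version of the driver it is essentially the one-liner below (reproduced from memory of the 2.6.38-era source, so treat it as a sketch rather than a quotation):

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	cancel_delayed_work_sync(&dbs_info->work);
}

Because the governor runs this on stop, before the module can be unloaded, no work item can still be pending or running on system_wq at exit, which is what makes dropping destroy_workqueue() safe.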