author     Tejun Heo <tj@kernel.org>    2011-01-26 06:12:50 -0500
committer  Tejun Heo <tj@kernel.org>    2011-01-26 06:12:50 -0500
commit     57df5573a56322e6895451f759c19e875252817d (patch)
tree       d34ec0be43b4fa29831f949f7be126c78cfba706 /drivers
parent     bcb6d9161d1720cf68c7f4de0630e91cb95ee60c (diff)
cpufreq: use system_wq instead of dedicated workqueues
With cmwq, there's no reason for cpufreq drivers to use separate
workqueues. Remove the dedicated workqueues from cpufreq_conservative
and cpufreq_ondemand and use system_wq instead. The work items are
already sync canceled on stop, so it's already guaranteed that no work
is running on module exit.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Dave Jones <davej@redhat.com>
Cc: cpufreq@vger.kernel.org
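
For illustration only (not part of the commit), a minimal, hypothetical sketch of the pattern the patch applies: delayed work is queued on the shared system workqueue with schedule_delayed_work_on() and canceled synchronously with cancel_delayed_work_sync() before unload, so no private workqueue has to be created or destroyed. All toy_* names below are made up for the example.

/*
 * Hypothetical sketch: periodic per-CPU work on the shared system
 * workqueue instead of a dedicated one created with create_workqueue().
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static int toy_cpu;			/* CPU the work is bound to */
static struct delayed_work toy_work;	/* hypothetical work item */

static void toy_work_fn(struct work_struct *work)
{
	/* periodic sampling would go here; re-arm on the same CPU */
	schedule_delayed_work_on(toy_cpu, &toy_work, HZ);
}

static int __init toy_init(void)
{
	toy_cpu = 0;	/* a real governor would use the policy's CPU */
	/* no create_workqueue(): the work item runs off system_wq */
	INIT_DELAYED_WORK(&toy_work, toy_work_fn);
	schedule_delayed_work_on(toy_cpu, &toy_work, HZ);
	return 0;
}

static void __exit toy_exit(void)
{
	/* synchronous cancel: no work can still be running after this */
	cancel_delayed_work_sync(&toy_work);
}

module_init(toy_init);
module_exit(toy_exit);
MODULE_LICENSE("GPL");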
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/cpufreq/cpufreq_conservative.c   22
-rw-r--r--   drivers/cpufreq/cpufreq_ondemand.c        20
2 files changed, 6 insertions(+), 36 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 526bfbf69611..94284c8473b1 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -81,8 +81,6 @@ static unsigned int dbs_enable;	/* number of CPUs using this policy */
  */
 static DEFINE_MUTEX(dbs_mutex);
 
-static struct workqueue_struct	*kconservative_wq;
-
 static struct dbs_tuners {
 	unsigned int sampling_rate;
 	unsigned int sampling_down_factor;
@@ -560,7 +558,7 @@ static void do_dbs_timer(struct work_struct *work)
 
 	dbs_check_cpu(dbs_info);
 
-	queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay);
+	schedule_delayed_work_on(cpu, &dbs_info->work, delay);
 	mutex_unlock(&dbs_info->timer_mutex);
 }
 
@@ -572,8 +570,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 
 	dbs_info->enable = 1;
 	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
-	queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work,
-			      delay);
+	schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
 }
 
 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
@@ -716,25 +713,12 @@ struct cpufreq_governor cpufreq_gov_conservative = {
 
 static int __init cpufreq_gov_dbs_init(void)
 {
-	int err;
-
-	kconservative_wq = create_workqueue("kconservative");
-	if (!kconservative_wq) {
-		printk(KERN_ERR "Creation of kconservative failed\n");
-		return -EFAULT;
-	}
-
-	err = cpufreq_register_governor(&cpufreq_gov_conservative);
-	if (err)
-		destroy_workqueue(kconservative_wq);
-
-	return err;
+	return cpufreq_register_governor(&cpufreq_gov_conservative);
 }
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
 	cpufreq_unregister_governor(&cpufreq_gov_conservative);
-	destroy_workqueue(kconservative_wq);
 }
 
 
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index c631f27a3dcc..58aa85ea5ec6 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -104,8 +104,6 @@ static unsigned int dbs_enable;	/* number of CPUs using this policy */
  */
 static DEFINE_MUTEX(dbs_mutex);
 
-static struct workqueue_struct	*kondemand_wq;
-
 static struct dbs_tuners {
 	unsigned int sampling_rate;
 	unsigned int up_threshold;
@@ -667,7 +665,7 @@ static void do_dbs_timer(struct work_struct *work)
 		__cpufreq_driver_target(dbs_info->cur_policy,
 			dbs_info->freq_lo, CPUFREQ_RELATION_H);
 	}
-	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
+	schedule_delayed_work_on(cpu, &dbs_info->work, delay);
 	mutex_unlock(&dbs_info->timer_mutex);
 }
 
@@ -681,8 +679,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 
 	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
 	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
-	queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
-		delay);
+	schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
 }
 
 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
@@ -814,7 +811,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 static int __init cpufreq_gov_dbs_init(void)
 {
-	int err;
 	cputime64_t wall;
 	u64 idle_time;
 	int cpu = get_cpu();
@@ -838,22 +834,12 @@ static int __init cpufreq_gov_dbs_init(void)
 			MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
 	}
 
-	kondemand_wq = create_workqueue("kondemand");
-	if (!kondemand_wq) {
-		printk(KERN_ERR "Creation of kondemand failed\n");
-		return -EFAULT;
-	}
-	err = cpufreq_register_governor(&cpufreq_gov_ondemand);
-	if (err)
-		destroy_workqueue(kondemand_wq);
-
-	return err;
+	return cpufreq_register_governor(&cpufreq_gov_ondemand);
 }
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
 	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
-	destroy_workqueue(kondemand_wq);
 }
 
 