path: root/drivers/cpufreq/cpufreq_conservative.c
author	Viresh Kumar <viresh.kumar@linaro.org>	2013-01-31 12:28:02 -0500
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2013-02-01 19:02:44 -0500
commit	4447266b842d27f77b017a59eb9dc38ad7b299f1 (patch)
tree	f8ae835b7ecee18948afabd75633ea7676b6a4ac /drivers/cpufreq/cpufreq_conservative.c
parent	8eeed0956615294200be783bb67d851280b5b1b9 (diff)
cpufreq: governors: Remove code redundancy between governors
With the inclusion of the following patches:

9f4eb10 cpufreq: conservative: call dbs_check_cpu only when necessary
772b4b1 cpufreq: ondemand: call dbs_check_cpu only when necessary

code redundancy between the conservative and ondemand governors is
introduced again, so get rid of it.

[rjw: Changelog]
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Tested-by: Fabio Baltieri <fabio.baltieri@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Diffstat (limited to 'drivers/cpufreq/cpufreq_conservative.c')
-rw-r--r--	drivers/cpufreq/cpufreq_conservative.c	52
1 file changed, 9 insertions(+), 43 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c18a304b3a38..e8bb91571672 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -111,58 +111,24 @@ static void cs_check_cpu(int cpu, unsigned int load)
 	}
 }
 
-static void cs_timer_update(struct cs_cpu_dbs_info_s *dbs_info, bool sample,
-			    struct delayed_work *dw)
+static void cs_dbs_timer(struct work_struct *work)
 {
+	struct delayed_work *dw = to_delayed_work(work);
+	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
+			struct cs_cpu_dbs_info_s, cdbs.work.work);
 	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
+	struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
+			cpu);
 	int delay = delay_for_sampling_rate(cs_tuners.sampling_rate);
 
-	if (sample)
+	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
+	if (need_load_eval(&core_dbs_info->cdbs, cs_tuners.sampling_rate))
 		dbs_check_cpu(&cs_dbs_data, cpu);
 
 	schedule_delayed_work_on(smp_processor_id(), dw, delay);
+	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
 }
 
-static void cs_timer_coordinated(struct cs_cpu_dbs_info_s *dbs_info_local,
-				 struct delayed_work *dw)
-{
-	struct cs_cpu_dbs_info_s *dbs_info;
-	ktime_t time_now;
-	s64 delta_us;
-	bool sample = true;
-
-	/* use leader CPU's dbs_info */
-	dbs_info = &per_cpu(cs_cpu_dbs_info,
-			    dbs_info_local->cdbs.cur_policy->cpu);
-	mutex_lock(&dbs_info->cdbs.timer_mutex);
-
-	time_now = ktime_get();
-	delta_us = ktime_us_delta(time_now, dbs_info->cdbs.time_stamp);
-
-	/* Do nothing if we recently have sampled */
-	if (delta_us < (s64)(cs_tuners.sampling_rate / 2))
-		sample = false;
-	else
-		dbs_info->cdbs.time_stamp = time_now;
-
-	cs_timer_update(dbs_info, sample, dw);
-	mutex_unlock(&dbs_info->cdbs.timer_mutex);
-}
-
-static void cs_dbs_timer(struct work_struct *work)
-{
-	struct delayed_work *dw = to_delayed_work(work);
-	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
-			struct cs_cpu_dbs_info_s, cdbs.work.work);
-
-	if (policy_is_shared(dbs_info->cdbs.cur_policy)) {
-		cs_timer_coordinated(dbs_info, dw);
-	} else {
-		mutex_lock(&dbs_info->cdbs.timer_mutex);
-		cs_timer_update(dbs_info, true, dw);
-		mutex_unlock(&dbs_info->cdbs.timer_mutex);
-	}
-}
 static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 				 void *data)
 {
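
The diffstat above is limited to cpufreq_conservative.c, so the shared helper need_load_eval() called by the new cs_dbs_timer() is not visible in this view; the same commit introduces it in the common governor code (cpufreq_governor.c). A minimal sketch of that helper, reconstructed from the cs_timer_coordinated() logic deleted above — an assumption for illustration, not a verbatim quote of the commit:

/*
 * Sketch, not shown in this diff: the sampling rate-limit check hoisted
 * out of cs_timer_coordinated() into common governor code. Reconstructed
 * from the lines deleted above; struct cpu_dbs_common_info is the type
 * of the governors' ->cdbs member in cpufreq_governor.h of this era.
 */
bool need_load_eval(struct cpu_dbs_common_info *cdbs,
		    unsigned int sampling_rate)
{
	if (policy_is_shared(cdbs->cur_policy)) {
		ktime_t time_now = ktime_get();
		s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);

		/* Do nothing if we recently have sampled */
		if (delta_us < (s64)(sampling_rate / 2))
			return false;

		cdbs->time_stamp = time_now;
	}

	return true;
}

On a shared policy each CPU still queues its own delayed work, but only the policy leader's timestamp is consulted, so a worker firing within half a sampling period skips dbs_check_cpu() instead of re-evaluating load; on a non-shared policy the helper always returns true, preserving the old unconditional cs_timer_update(dbs_info, true, dw) path.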