author     Viresh Kumar <viresh.kumar@linaro.org>          2013-01-31 12:28:02 -0500
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2013-02-01 19:02:44 -0500
commit     4447266b842d27f77b017a59eb9dc38ad7b299f1 (patch)
tree       f8ae835b7ecee18948afabd75633ea7676b6a4ac /drivers/cpufreq
parent     8eeed0956615294200be783bb67d851280b5b1b9 (diff)
cpufreq: governors: Remove code redundancy between governors
With the inclusion of the following patches:

  9f4eb10 cpufreq: conservative: call dbs_check_cpu only when necessary
  772b4b1 cpufreq: ondemand: call dbs_check_cpu only when necessary

code redundancy between the conservative and ondemand governors is
introduced again, so get rid of it.

[rjw: Changelog]

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Tested-by: Fabio Baltieri <fabio.baltieri@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 52
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c     | 19
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h     |  2
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c     | 77
4 files changed, 53 insertions(+), 97 deletions(-)
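
The redundancy in question is the shared-policy sampling check: each governor carried an identical *_timer_coordinated()/*_timer_update() pair whose only real job was to skip load evaluation when another CPU of the same policy had sampled less than half a sampling period earlier. The patch moves that check into a common need_load_eval() in cpufreq_governor.c and collapses each governor's three timer functions into one. For orientation before reading the hunks, here is the conservative timer as it stands after the patch, reassembled from the diff below with two explanatory comments added; the ondemand version has the same shape plus its SUB_SAMPLE handling:

static void cs_dbs_timer(struct work_struct *work)
{
        struct delayed_work *dw = to_delayed_work(work);
        struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
                        struct cs_cpu_dbs_info_s, cdbs.work.work);
        unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
        /* always operate on the policy leader CPU's per-CPU data */
        struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
                        cpu);
        int delay = delay_for_sampling_rate(cs_tuners.sampling_rate);

        mutex_lock(&core_dbs_info->cdbs.timer_mutex);
        /* the shared-policy rate check now lives in common governor code */
        if (need_load_eval(&core_dbs_info->cdbs, cs_tuners.sampling_rate))
                dbs_check_cpu(&cs_dbs_data, cpu);

        schedule_delayed_work_on(smp_processor_id(), dw, delay);
        mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}

One quiet simplification: the old code locked timer_mutex on the policy leader's data only for shared policies (and on the local CPU's data otherwise), while the new code always locks the leader's mutex. The two are equivalent, since a single-CPU policy's leader is that CPU itself.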
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c18a304b3a38..e8bb91571672 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -111,58 +111,24 @@ static void cs_check_cpu(int cpu, unsigned int load)
 	}
 }
 
-static void cs_timer_update(struct cs_cpu_dbs_info_s *dbs_info, bool sample,
-		struct delayed_work *dw)
+static void cs_dbs_timer(struct work_struct *work)
 {
+	struct delayed_work *dw = to_delayed_work(work);
+	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
+			struct cs_cpu_dbs_info_s, cdbs.work.work);
 	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
+	struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
+			cpu);
 	int delay = delay_for_sampling_rate(cs_tuners.sampling_rate);
 
-	if (sample)
+	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
+	if (need_load_eval(&core_dbs_info->cdbs, cs_tuners.sampling_rate))
 		dbs_check_cpu(&cs_dbs_data, cpu);
 
 	schedule_delayed_work_on(smp_processor_id(), dw, delay);
+	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
 }
 
-static void cs_timer_coordinated(struct cs_cpu_dbs_info_s *dbs_info_local,
-		struct delayed_work *dw)
-{
-	struct cs_cpu_dbs_info_s *dbs_info;
-	ktime_t time_now;
-	s64 delta_us;
-	bool sample = true;
-
-	/* use leader CPU's dbs_info */
-	dbs_info = &per_cpu(cs_cpu_dbs_info,
-			dbs_info_local->cdbs.cur_policy->cpu);
-	mutex_lock(&dbs_info->cdbs.timer_mutex);
-
-	time_now = ktime_get();
-	delta_us = ktime_us_delta(time_now, dbs_info->cdbs.time_stamp);
-
-	/* Do nothing if we recently have sampled */
-	if (delta_us < (s64)(cs_tuners.sampling_rate / 2))
-		sample = false;
-	else
-		dbs_info->cdbs.time_stamp = time_now;
-
-	cs_timer_update(dbs_info, sample, dw);
-	mutex_unlock(&dbs_info->cdbs.timer_mutex);
-}
-
-static void cs_dbs_timer(struct work_struct *work)
-{
-	struct delayed_work *dw = to_delayed_work(work);
-	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
-			struct cs_cpu_dbs_info_s, cdbs.work.work);
-
-	if (policy_is_shared(dbs_info->cdbs.cur_policy)) {
-		cs_timer_coordinated(dbs_info, dw);
-	} else {
-		mutex_lock(&dbs_info->cdbs.timer_mutex);
-		cs_timer_update(dbs_info, true, dw);
-		mutex_unlock(&dbs_info->cdbs.timer_mutex);
-	}
-}
 static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 		void *data)
 {
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 29d6a59b1a15..7aaa9b151940 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -177,6 +177,25 @@ static inline void dbs_timer_exit(struct dbs_data *dbs_data, int cpu)
 	cancel_delayed_work_sync(&cdbs->work);
 }
 
+/* Will return if we need to evaluate cpu load again or not */
+bool need_load_eval(struct cpu_dbs_common_info *cdbs,
+		unsigned int sampling_rate)
+{
+	if (policy_is_shared(cdbs->cur_policy)) {
+		ktime_t time_now = ktime_get();
+		s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);
+
+		/* Do nothing if we recently have sampled */
+		if (delta_us < (s64)(sampling_rate / 2))
+			return false;
+		else
+			cdbs->time_stamp = time_now;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(need_load_eval);
+
 int cpufreq_governor_dbs(struct dbs_data *dbs_data,
 		struct cpufreq_policy *policy, unsigned int event)
 {
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index c19a16c34361..16314b65ca67 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -171,6 +171,8 @@ static inline int delay_for_sampling_rate(unsigned int sampling_rate)
 
 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall);
 void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
+bool need_load_eval(struct cpu_dbs_common_info *cdbs,
+		unsigned int sampling_rate);
 int cpufreq_governor_dbs(struct dbs_data *dbs_data,
 		struct cpufreq_policy *policy, unsigned int event);
 #endif /* _CPUFREQ_GOVERNER_H */
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 75efd5ee00f8..f38b8da60128 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -216,75 +216,44 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
 	}
 }
 
-static void od_timer_update(struct od_cpu_dbs_info_s *dbs_info, bool sample,
-		struct delayed_work *dw)
+static void od_dbs_timer(struct work_struct *work)
 {
+	struct delayed_work *dw = to_delayed_work(work);
+	struct od_cpu_dbs_info_s *dbs_info =
+		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
 	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
-	int delay, sample_type = dbs_info->sample_type;
+	struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
+			cpu);
+	int delay, sample_type = core_dbs_info->sample_type;
+	bool eval_load;
+
+	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
+	eval_load = need_load_eval(&core_dbs_info->cdbs,
+			od_tuners.sampling_rate);
 
 	/* Common NORMAL_SAMPLE setup */
-	dbs_info->sample_type = OD_NORMAL_SAMPLE;
+	core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
 	if (sample_type == OD_SUB_SAMPLE) {
-		delay = dbs_info->freq_lo_jiffies;
-		if (sample)
-			__cpufreq_driver_target(dbs_info->cdbs.cur_policy,
-						dbs_info->freq_lo,
+		delay = core_dbs_info->freq_lo_jiffies;
+		if (eval_load)
+			__cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
+						core_dbs_info->freq_lo,
 						CPUFREQ_RELATION_H);
 	} else {
-		if (sample)
+		if (eval_load)
 			dbs_check_cpu(&od_dbs_data, cpu);
-		if (dbs_info->freq_lo) {
+		if (core_dbs_info->freq_lo) {
 			/* Setup timer for SUB_SAMPLE */
-			dbs_info->sample_type = OD_SUB_SAMPLE;
-			delay = dbs_info->freq_hi_jiffies;
+			core_dbs_info->sample_type = OD_SUB_SAMPLE;
+			delay = core_dbs_info->freq_hi_jiffies;
 		} else {
 			delay = delay_for_sampling_rate(od_tuners.sampling_rate
-						* dbs_info->rate_mult);
+						* core_dbs_info->rate_mult);
 		}
 	}
 
 	schedule_delayed_work_on(smp_processor_id(), dw, delay);
-}
-
-static void od_timer_coordinated(struct od_cpu_dbs_info_s *dbs_info_local,
-		struct delayed_work *dw)
-{
-	struct od_cpu_dbs_info_s *dbs_info;
-	ktime_t time_now;
-	s64 delta_us;
-	bool sample = true;
-
-	/* use leader CPU's dbs_info */
-	dbs_info = &per_cpu(od_cpu_dbs_info,
-			dbs_info_local->cdbs.cur_policy->cpu);
-	mutex_lock(&dbs_info->cdbs.timer_mutex);
-
-	time_now = ktime_get();
-	delta_us = ktime_us_delta(time_now, dbs_info->cdbs.time_stamp);
-
-	/* Do nothing if we recently have sampled */
-	if (delta_us < (s64)(od_tuners.sampling_rate / 2))
-		sample = false;
-	else
-		dbs_info->cdbs.time_stamp = time_now;
-
-	od_timer_update(dbs_info, sample, dw);
-	mutex_unlock(&dbs_info->cdbs.timer_mutex);
-}
-
-static void od_dbs_timer(struct work_struct *work)
-{
-	struct delayed_work *dw = to_delayed_work(work);
-	struct od_cpu_dbs_info_s *dbs_info =
-		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
-
-	if (policy_is_shared(dbs_info->cdbs.cur_policy)) {
-		od_timer_coordinated(dbs_info, dw);
-	} else {
-		mutex_lock(&dbs_info->cdbs.timer_mutex);
-		od_timer_update(dbs_info, true, dw);
-		mutex_unlock(&dbs_info->cdbs.timer_mutex);
-	}
+	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
 }
 
 /************************** sysfs interface ************************/
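
One ondemand-specific detail survives the rewrite intact: the two-phase sampling driven by freq_lo (the powersave_bias case). When freq_lo is nonzero, each period runs at the higher frequency for freq_hi_jiffies, then arms an OD_SUB_SAMPLE pass that drops to freq_lo for freq_lo_jiffies. Below is a small user-space sketch of just the delay/sample_type selection from od_dbs_timer() above; it is illustrative only, omits the locking, need_load_eval() and __cpufreq_driver_target() calls, and folds rate_mult into a plain multiply instead of scaling the rate before the jiffies conversion.

#include <stdio.h>

enum { OD_NORMAL_SAMPLE, OD_SUB_SAMPLE };

struct od_model {
        int sample_type;
        unsigned int freq_lo;   /* 0 means no powersave_bias sub-sampling */
        int freq_lo_jiffies;
        int freq_hi_jiffies;
        int rate_mult;
        int sampling_delay;     /* stands in for delay_for_sampling_rate() */
};

/* mirrors the delay/sample_type selection of the patched od_dbs_timer() */
static int next_delay(struct od_model *m)
{
        int sample_type = m->sample_type;

        /* Common NORMAL_SAMPLE setup */
        m->sample_type = OD_NORMAL_SAMPLE;
        if (sample_type == OD_SUB_SAMPLE)
                return m->freq_lo_jiffies;      /* finish the low-freq phase */
        if (m->freq_lo) {
                m->sample_type = OD_SUB_SAMPLE; /* arm the low-freq phase */
                return m->freq_hi_jiffies;
        }
        return m->sampling_delay * m->rate_mult;
}

int main(void)
{
        struct od_model m = { OD_NORMAL_SAMPLE, 800000, 2, 8, 1, 10 };

        /* prints 8, 2, 8, 2: a high-frequency stretch, then a low one */
        for (int i = 0; i < 4; i++)
                printf("delay = %d jiffies\n", next_delay(&m));
        return 0;
}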