aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/cpufreq/cpufreq_ondemand.c
diff options
context:
space:
mode:
authorViresh Kumar <viresh.kumar@linaro.org>2013-02-27 01:54:03 -0500
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2013-03-31 19:11:35 -0400
commit031299b3be30f3ecab110fff8faad85af70e1797 (patch)
tree3e823df16db43dffbf14b7895811059e22427a1e /drivers/cpufreq/cpufreq_ondemand.c
parent9d44592018e617abf62a5f6a5d92a04aa07e7625 (diff)
cpufreq: governors: Avoid unnecessary per cpu timer interrupts
The following patch introduced per-CPU timers or works for the ondemand and conservative governors. commit 2abfa876f1117b0ab45f191fb1f82c41b1cbc8fe Author: Rickard Andersson <rickard.andersson@stericsson.com> Date: Thu Dec 27 14:55:38 2012 +0000 cpufreq: handle SW coordinated CPUs This causes additional unnecessary interrupts on all CPUs when the load has recently been evaluated by any other CPU. That is, when the load has recently been evaluated by CPU x, we don't really need any other CPU to evaluate this load again for the next sampling_rate interval. Some code is present to avoid that, but we are still getting timer interrupts for all CPUs. A good way of avoiding this is to modify the delays for all CPUs (policy->cpus) whenever any CPU has evaluated the load. This patch makes that change along with some related code cleanup. Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c12
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index c90d345c636a..459f9ee39c74 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -216,7 +216,6 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
216 216
217static void od_dbs_timer(struct work_struct *work) 217static void od_dbs_timer(struct work_struct *work)
218{ 218{
219 struct delayed_work *dw = to_delayed_work(work);
220 struct od_cpu_dbs_info_s *dbs_info = 219 struct od_cpu_dbs_info_s *dbs_info =
221 container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work); 220 container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
222 unsigned int cpu = dbs_info->cdbs.cur_policy->cpu; 221 unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
@@ -225,10 +224,13 @@ static void od_dbs_timer(struct work_struct *work)
225 struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data; 224 struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
226 struct od_dbs_tuners *od_tuners = dbs_data->tuners; 225 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
227 int delay = 0, sample_type = core_dbs_info->sample_type; 226 int delay = 0, sample_type = core_dbs_info->sample_type;
227 bool modify_all = true;
228 228
229 mutex_lock(&core_dbs_info->cdbs.timer_mutex); 229 mutex_lock(&core_dbs_info->cdbs.timer_mutex);
230 if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) 230 if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
231 modify_all = false;
231 goto max_delay; 232 goto max_delay;
233 }
232 234
233 /* Common NORMAL_SAMPLE setup */ 235 /* Common NORMAL_SAMPLE setup */
234 core_dbs_info->sample_type = OD_NORMAL_SAMPLE; 236 core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
@@ -250,7 +252,7 @@ max_delay:
250 delay = delay_for_sampling_rate(od_tuners->sampling_rate 252 delay = delay_for_sampling_rate(od_tuners->sampling_rate
251 * core_dbs_info->rate_mult); 253 * core_dbs_info->rate_mult);
252 254
253 schedule_delayed_work_on(smp_processor_id(), dw, delay); 255 gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
254 mutex_unlock(&core_dbs_info->cdbs.timer_mutex); 256 mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
255} 257}
256 258
@@ -310,8 +312,8 @@ static void update_sampling_rate(struct dbs_data *dbs_data,
310 cancel_delayed_work_sync(&dbs_info->cdbs.work); 312 cancel_delayed_work_sync(&dbs_info->cdbs.work);
311 mutex_lock(&dbs_info->cdbs.timer_mutex); 313 mutex_lock(&dbs_info->cdbs.timer_mutex);
312 314
313 schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, 315 gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
314 usecs_to_jiffies(new_rate)); 316 usecs_to_jiffies(new_rate), true);
315 317
316 } 318 }
317 mutex_unlock(&dbs_info->cdbs.timer_mutex); 319 mutex_unlock(&dbs_info->cdbs.timer_mutex);