-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c |  8
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c     | 39
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h     |  2
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c     | 12
4 files changed, 42 insertions(+), 19 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 98b49462f4e9..6fe6050a3889 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -107,7 +107,6 @@ static void cs_check_cpu(int cpu, unsigned int load)
 
 static void cs_dbs_timer(struct work_struct *work)
 {
-	struct delayed_work *dw = to_delayed_work(work);
 	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
 			struct cs_cpu_dbs_info_s, cdbs.work.work);
 	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
@@ -116,12 +115,15 @@ static void cs_dbs_timer(struct work_struct *work)
 	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
 	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	int delay = delay_for_sampling_rate(cs_tuners->sampling_rate);
+	bool modify_all = true;
 
 	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
-	if (need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate))
+	if (!need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate))
+		modify_all = false;
+	else
 		dbs_check_cpu(dbs_data, cpu);
 
-	schedule_delayed_work_on(smp_processor_id(), dw, delay);
+	gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
 	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
 }
 
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 26fbb729bc1c..326f0c2e2bd5 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -178,20 +178,38 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 }
 EXPORT_SYMBOL_GPL(dbs_check_cpu);
 
-static inline void dbs_timer_init(struct dbs_data *dbs_data, int cpu,
-		unsigned int sampling_rate)
+static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
+		unsigned int delay)
 {
-	int delay = delay_for_sampling_rate(sampling_rate);
 	struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
 
-	schedule_delayed_work_on(cpu, &cdbs->work, delay);
+	mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay);
 }
 
-static inline void dbs_timer_exit(struct dbs_data *dbs_data, int cpu)
+void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
+		unsigned int delay, bool all_cpus)
 {
-	struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
+	int i;
+
+	if (!all_cpus) {
+		__gov_queue_work(smp_processor_id(), dbs_data, delay);
+	} else {
+		for_each_cpu(i, policy->cpus)
+			__gov_queue_work(i, dbs_data, delay);
+	}
+}
+EXPORT_SYMBOL_GPL(gov_queue_work);
+
+static inline void gov_cancel_work(struct dbs_data *dbs_data,
+		struct cpufreq_policy *policy)
+{
+	struct cpu_dbs_common_info *cdbs;
+	int i;
 
-	cancel_delayed_work_sync(&cdbs->work);
+	for_each_cpu(i, policy->cpus) {
+		cdbs = dbs_data->cdata->get_cpu_cdbs(i);
+		cancel_delayed_work_sync(&cdbs->work);
+	}
 }
 
 /* Will return if we need to evaluate cpu load again or not */
@@ -380,16 +398,15 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		/* Initiate timer time stamp */
 		cpu_cdbs->time_stamp = ktime_get();
 
-		for_each_cpu(j, policy->cpus)
-			dbs_timer_init(dbs_data, j, sampling_rate);
+		gov_queue_work(dbs_data, policy,
+				delay_for_sampling_rate(sampling_rate), true);
 		break;
 
 	case CPUFREQ_GOV_STOP:
 		if (dbs_data->cdata->governor == GOV_CONSERVATIVE)
 			cs_dbs_info->enable = 0;
 
-		for_each_cpu(j, policy->cpus)
-			dbs_timer_exit(dbs_data, j);
+		gov_cancel_work(dbs_data, policy);
 
 		mutex_lock(&dbs_data->mutex);
 		mutex_destroy(&cpu_cdbs->timer_mutex);
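
[Note, not part of the patch] The hunk above makes gov_queue_work()/gov_cancel_work() the single place where governor timers get (re)armed or torn down. The standalone userspace sketch below models only that dispatch decision: all_cpus == false touches just the calling CPU's work item, all_cpus == true pushes out the timer of every CPU sharing the policy, and teardown walks the same set. The policy_cpus array and the arm_timer()/disarm_timer() helpers are invented for this sketch; they stand in for policy->cpus and the kernel delayed-work calls.

/* Illustration only: models gov_queue_work()'s all_cpus switch and
 * gov_cancel_work()'s per-policy teardown.  policy_cpus, arm_timer() and
 * disarm_timer() are invented stand-ins, not kernel API. */
#include <stdbool.h>
#include <stdio.h>

#define NR_POLICY_CPUS 4
static const int policy_cpus[NR_POLICY_CPUS] = { 0, 1, 2, 3 };

static void arm_timer(int cpu, unsigned int delay)
{
	printf("arm cpu %d for %u jiffies\n", cpu, delay);
}

static void disarm_timer(int cpu)
{
	printf("cancel cpu %d\n", cpu);
}

/* Models gov_queue_work(): re-arm only the calling CPU's timer, or the
 * timer of every CPU sharing the policy. */
static void model_gov_queue_work(int this_cpu, unsigned int delay, bool all_cpus)
{
	int i;

	if (!all_cpus) {
		arm_timer(this_cpu, delay);
	} else {
		for (i = 0; i < NR_POLICY_CPUS; i++)
			arm_timer(policy_cpus[i], delay);
	}
}

/* Models gov_cancel_work(): GOV_STOP cancels every policy CPU's work item. */
static void model_gov_cancel_work(void)
{
	int i;

	for (i = 0; i < NR_POLICY_CPUS; i++)
		disarm_timer(policy_cpus[i]);
}

int main(void)
{
	model_gov_queue_work(2, 10, true);	/* GOV_START path: arm all CPUs */
	model_gov_queue_work(2, 10, false);	/* skipped sample: arm CPU 2 only */
	model_gov_cancel_work();		/* GOV_STOP path */
	return 0;
}
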
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 27b588aeacc1..c9c269f2b973 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -262,4 +262,6 @@ bool need_load_eval(struct cpu_dbs_common_info *cdbs,
 		unsigned int sampling_rate);
 int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		struct common_dbs_data *cdata, unsigned int event);
+void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
+		unsigned int delay, bool all_cpus);
 #endif /* _CPUFREQ_GOVERNER_H */
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index c90d345c636a..459f9ee39c74 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -216,7 +216,6 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
 
 static void od_dbs_timer(struct work_struct *work)
 {
-	struct delayed_work *dw = to_delayed_work(work);
 	struct od_cpu_dbs_info_s *dbs_info =
 		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
 	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
@@ -225,10 +224,13 @@ static void od_dbs_timer(struct work_struct *work)
 	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
 	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	int delay = 0, sample_type = core_dbs_info->sample_type;
+	bool modify_all = true;
 
 	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
-	if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate))
+	if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
+		modify_all = false;
 		goto max_delay;
+	}
 
 	/* Common NORMAL_SAMPLE setup */
 	core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
@@ -250,7 +252,7 @@ max_delay:
 		delay = delay_for_sampling_rate(od_tuners->sampling_rate
 				* core_dbs_info->rate_mult);
 
-	schedule_delayed_work_on(smp_processor_id(), dw, delay);
+	gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
 	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
 }
 
@@ -310,8 +312,8 @@ static void update_sampling_rate(struct dbs_data *dbs_data,
 			cancel_delayed_work_sync(&dbs_info->cdbs.work);
 			mutex_lock(&dbs_info->cdbs.timer_mutex);
 
-			schedule_delayed_work_on(cpu, &dbs_info->cdbs.work,
-						 usecs_to_jiffies(new_rate));
+			gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
+				       usecs_to_jiffies(new_rate), true);
 
 		}
 		mutex_unlock(&dbs_info->cdbs.timer_mutex);
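
[Note, not part of the patch] Both timer handlers now make the same decision: if need_load_eval() reports that another CPU in the policy already handled this sampling window, they skip the load evaluation and re-arm only their own work item (modify_all = false); otherwise they evaluate load and keep every sibling timer aligned. A minimal sketch of that decision follows, with need_load_eval_stub(), requeue_local() and requeue_all() as hypothetical stand-ins for the kernel helpers.

/* Illustration only: the modify_all decision added to cs_dbs_timer() and
 * od_dbs_timer(), modeled as a standalone program. */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for need_load_eval(): in the kernel it compares the policy-wide
 * time stamp against the sampling rate; here the verdict is passed in. */
static bool need_load_eval_stub(bool sample_due)
{
	return sample_due;
}

static void requeue_local(unsigned int delay)
{
	printf("re-arm this cpu only, delay %u\n", delay);
}

static void requeue_all(unsigned int delay)
{
	printf("re-arm every policy cpu, delay %u\n", delay);
}

/* One pass of a governor timer after the patch. */
static void timer_pass(bool sample_due, unsigned int delay)
{
	bool modify_all = true;

	if (!need_load_eval_stub(sample_due))
		modify_all = false;	/* window already handled: leave siblings alone */
	else
		printf("evaluate load, pick a target frequency\n");

	if (modify_all)
		requeue_all(delay);
	else
		requeue_local(delay);
}

int main(void)
{
	timer_pass(true, 10);	/* this CPU owns the sample: re-arm everyone */
	timer_pass(false, 10);	/* not due here: re-arm only ourselves */
	return 0;
}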