aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c58
1 file changed, 57 insertions(+), 1 deletion(-)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index c3e0652520a1..836e9b062e5e 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -257,6 +257,62 @@ show_one(sampling_down_factor, sampling_down_factor);
257show_one(ignore_nice_load, ignore_nice); 257show_one(ignore_nice_load, ignore_nice);
258show_one(powersave_bias, powersave_bias); 258show_one(powersave_bias, powersave_bias);
259 259
260/**
261 * update_sampling_rate - update sampling rate effective immediately if needed.
262 * @new_rate: new sampling rate
263 *
264 * If new rate is smaller than the old, simply updaing
265 * dbs_tuners_int.sampling_rate might not be appropriate. For example,
266 * if the original sampling_rate was 1 second and the requested new sampling
267 * rate is 10 ms because the user needs immediate reaction from ondemand
268 * governor, but not sure if higher frequency will be required or not,
269 * then, the governor may change the sampling rate too late; up to 1 second
270 * later. Thus, if we are reducing the sampling rate, we need to make the
271 * new value effective immediately.
272 */
273static void update_sampling_rate(unsigned int new_rate)
274{
275 int cpu;
276
277 dbs_tuners_ins.sampling_rate = new_rate
278 = max(new_rate, min_sampling_rate);
279
280 for_each_online_cpu(cpu) {
281 struct cpufreq_policy *policy;
282 struct cpu_dbs_info_s *dbs_info;
283 unsigned long next_sampling, appointed_at;
284
285 policy = cpufreq_cpu_get(cpu);
286 if (!policy)
287 continue;
288 dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
289 cpufreq_cpu_put(policy);
290
291 mutex_lock(&dbs_info->timer_mutex);
292
293 if (!delayed_work_pending(&dbs_info->work)) {
294 mutex_unlock(&dbs_info->timer_mutex);
295 continue;
296 }
297
298 next_sampling = jiffies + usecs_to_jiffies(new_rate);
299 appointed_at = dbs_info->work.timer.expires;
300
301
302 if (time_before(next_sampling, appointed_at)) {
303
304 mutex_unlock(&dbs_info->timer_mutex);
305 cancel_delayed_work_sync(&dbs_info->work);
306 mutex_lock(&dbs_info->timer_mutex);
307
308 schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work,
309 usecs_to_jiffies(new_rate));
310
311 }
312 mutex_unlock(&dbs_info->timer_mutex);
313 }
314}
315
260static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, 316static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
261 const char *buf, size_t count) 317 const char *buf, size_t count)
262{ 318{
@@ -265,7 +321,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
265 ret = sscanf(buf, "%u", &input); 321 ret = sscanf(buf, "%u", &input);
266 if (ret != 1) 322 if (ret != 1)
267 return -EINVAL; 323 return -EINVAL;
268 dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); 324 update_sampling_rate(input);
269 return count; 325 return count;
270} 326}
271 327