 drivers/cpufreq/cpufreq_ondemand.c | 65 ++++++++++++++++++++-----------------
 1 file changed, 35 insertions(+), 30 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index f56debd9a8d7..42fcb146ba22 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -334,9 +334,7 @@ static struct attribute_group dbs_attr_group = {
 
 static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 {
-        unsigned int idle_ticks, total_ticks;
-        unsigned int load = 0;
-        cputime64_t cur_jiffies;
+        unsigned int max_load_freq;
 
         struct cpufreq_policy *policy;
         unsigned int j;
@@ -346,13 +344,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 
         this_dbs_info->freq_lo = 0;
         policy = this_dbs_info->cur_policy;
-        cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
-        total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
-                        this_dbs_info->prev_cpu_wall);
-        this_dbs_info->prev_cpu_wall = get_jiffies_64();
 
-        if (!total_ticks)
-                return;
         /*
          * Every sampling_rate, we check, if current idle time is less
          * than 20% (default), then we try to increase frequency
@@ -365,27 +357,46 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
          * 5% (default) of current frequency
          */
 
-        /* Get Idle Time */
-        idle_ticks = UINT_MAX;
+        /* Get Absolute Load - in terms of freq */
+        max_load_freq = 0;
+
         for_each_cpu_mask_nr(j, policy->cpus) {
-                cputime64_t total_idle_ticks;
-                unsigned int tmp_idle_ticks;
                 struct cpu_dbs_info_s *j_dbs_info;
+                cputime64_t cur_wall_time, cur_idle_time;
+                unsigned int idle_time, wall_time;
+                unsigned int load, load_freq;
+                int freq_avg;
 
                 j_dbs_info = &per_cpu(cpu_dbs_info, j);
-                total_idle_ticks = get_cpu_idle_time(j);
-                tmp_idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
+                cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+                wall_time = (unsigned int) cputime64_sub(cur_wall_time,
+                                j_dbs_info->prev_cpu_wall);
+                j_dbs_info->prev_cpu_wall = cur_wall_time;
+
+                cur_idle_time = get_cpu_idle_time(j);
+                idle_time = (unsigned int) cputime64_sub(cur_idle_time,
                                 j_dbs_info->prev_cpu_idle);
-                j_dbs_info->prev_cpu_idle = total_idle_ticks;
+                j_dbs_info->prev_cpu_idle = cur_idle_time;
 
-                if (tmp_idle_ticks < idle_ticks)
-                        idle_ticks = tmp_idle_ticks;
+                if (unlikely(wall_time <= idle_time ||
+                                (cputime_to_msecs(wall_time) <
+                                 dbs_tuners_ins.sampling_rate / (2 * 1000)))) {
+                        continue;
+                }
+
+                load = 100 * (wall_time - idle_time) / wall_time;
+
+                freq_avg = __cpufreq_driver_getavg(policy, j);
+                if (freq_avg <= 0)
+                        freq_avg = policy->cur;
+
+                load_freq = load * freq_avg;
+                if (load_freq > max_load_freq)
+                        max_load_freq = load_freq;
         }
-        if (likely(total_ticks > idle_ticks))
-                load = (100 * (total_ticks - idle_ticks)) / total_ticks;
 
         /* Check for frequency increase */
-        if (load > dbs_tuners_ins.up_threshold) {
+        if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
                 /* if we are already at full speed then break out early */
                 if (!dbs_tuners_ins.powersave_bias) {
                         if (policy->cur == policy->max)
@@ -412,15 +423,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
          * can support the current CPU usage without triggering the up
          * policy. To be safe, we focus 10 points under the threshold.
          */
-        if (load < (dbs_tuners_ins.up_threshold - 10)) {
-                unsigned int freq_next, freq_cur;
-
-                freq_cur = __cpufreq_driver_getavg(policy, policy->cpu);
-                if (!freq_cur)
-                        freq_cur = policy->cur;
-
-                freq_next = (freq_cur * load) /
-                        (dbs_tuners_ins.up_threshold - 10);
+        if (max_load_freq < (dbs_tuners_ins.up_threshold - 10) * policy->cur) {
+                unsigned int freq_next;
+                freq_next = max_load_freq / (dbs_tuners_ins.up_threshold - 10);
 
                 if (!dbs_tuners_ins.powersave_bias) {
                         __cpufreq_driver_target(policy, freq_next,
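As a worked illustration of the new policy (not part of the patch), the user-space C sketch below follows the same arithmetic: per-CPU load is expressed in "frequency units" (load * freq_avg), the maximum over the policy's CPUs is compared against up_threshold * policy->cur, and freq_next is derived when scaling down. The tuner value 80 matches the default up_threshold implied by the "20% idle" comment in the diff; the per-CPU loads, average frequencies, and policy limits are made-up sample numbers, and powersave_bias handling is omitted. In the kernel, these inputs come from the idle/wall jiffy deltas and __cpufreq_driver_getavg() shown above.

/*
 * Stand-alone illustration of the ondemand load calculation after this
 * patch.  All numbers are hypothetical; the governor derives load from
 * idle/wall jiffy deltas and freq_avg from __cpufreq_driver_getavg().
 */
#include <stdio.h>

#define UP_THRESHOLD    80      /* default up_threshold ("20% idle") */

struct cpu_sample {
        unsigned int load;      /* busy percentage over the sampling window */
        unsigned int freq_avg;  /* average frequency (kHz) over the window */
};

int main(void)
{
        /* Two CPUs sharing one frequency domain (hypothetical samples). */
        struct cpu_sample cpus[] = {
                { .load = 30, .freq_avg = 1600000 },
                { .load = 90, .freq_avg = 1600000 },
        };
        unsigned int cur_freq = 1600000;        /* policy->cur, kHz */
        unsigned int max_freq = 2400000;        /* policy->max, kHz */
        unsigned int max_load_freq = 0;
        unsigned int i;

        /* Absolute load in "frequency units": load% scaled by freq_avg. */
        for (i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++) {
                unsigned int load_freq = cpus[i].load * cpus[i].freq_avg;

                if (load_freq > max_load_freq)
                        max_load_freq = load_freq;
        }

        /* Up check: is the busiest CPU above up_threshold% of cur freq? */
        if (max_load_freq > UP_THRESHOLD * cur_freq) {
                printf("raise to policy->max = %u kHz\n", max_freq);
                return 0;
        }

        /* Down check: pick the lowest frequency that keeps the busiest
         * CPU about 10 points under the up threshold. */
        if (max_load_freq < (UP_THRESHOLD - 10) * cur_freq) {
                unsigned int freq_next = max_load_freq / (UP_THRESHOLD - 10);

                printf("lower towards freq_next = %u kHz\n", freq_next);
                return 0;
        }

        printf("stay at %u kHz\n", cur_freq);
        return 0;
}

With the sample numbers, the busiest CPU gives max_load_freq = 90 * 1600000, which exceeds 80 * 1600000, so the governor would jump to policy->max; with only the 30%-loaded CPU it would instead scale down towards max_load_freq / 70 (about 686 MHz).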