Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')
-rw-r--r--	drivers/cpufreq/cpufreq_ondemand.c	47
1 file changed, 46 insertions(+), 1 deletion(-)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index ed472f8dfb72..8e9dbdc6c700 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -109,6 +109,7 @@ static struct dbs_tuners {
 	unsigned int down_differential;
 	unsigned int ignore_nice;
 	unsigned int powersave_bias;
+	unsigned int io_is_busy;
 } dbs_tuners_ins = {
 	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
 	.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
@@ -260,6 +261,7 @@ static ssize_t show_##file_name \
 	return sprintf(buf, "%u\n", dbs_tuners_ins.object);	\
 }
 show_one(sampling_rate, sampling_rate);
+show_one(io_is_busy, io_is_busy);
 show_one(up_threshold, up_threshold);
 show_one(ignore_nice_load, ignore_nice);
 show_one(powersave_bias, powersave_bias);
@@ -310,6 +312,23 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
 	return count;
 }
 
+static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
+				const char *buf, size_t count)
+{
+	unsigned int input;
+	int ret;
+
+	ret = sscanf(buf, "%u", &input);
+	if (ret != 1)
+		return -EINVAL;
+
+	mutex_lock(&dbs_mutex);
+	dbs_tuners_ins.io_is_busy = !!input;
+	mutex_unlock(&dbs_mutex);
+
+	return count;
+}
+
 static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
 				  const char *buf, size_t count)
 {
@@ -392,6 +411,7 @@ static struct global_attr _name = \
 __ATTR(_name, 0644, show_##_name, store_##_name)
 
 define_one_rw(sampling_rate);
+define_one_rw(io_is_busy);
 define_one_rw(up_threshold);
 define_one_rw(ignore_nice_load);
 define_one_rw(powersave_bias);
@@ -403,6 +423,7 @@ static struct attribute *dbs_attributes[] = {
 	&up_threshold.attr,
 	&ignore_nice_load.attr,
 	&powersave_bias.attr,
+	&io_is_busy.attr,
 	NULL
 };
 
@@ -527,7 +548,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		 * from the cpu idle time.
 		 */
 
-		if (idle_time >= iowait_time)
+		if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
 			idle_time -= iowait_time;
 
 		if (unlikely(!wall_time || wall_time < idle_time))
@@ -643,6 +664,29 @@ static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
 	cancel_delayed_work_sync(&dbs_info->work);
 }
 
+/*
+ * Not all CPUs want IO time to be accounted as busy; this depends on how
+ * efficient idling at a higher frequency/voltage is.
+ * Pavel Machek says this is not so for various generations of AMD and old
+ * Intel systems.
+ * Mike Chan (androidlcom) claims this is also not true for ARM.
+ * Because of this, whitelist specific known (series) of CPUs by default, and
+ * leave all others up to the user.
+ */
+static int should_io_be_busy(void)
+{
+#if defined(CONFIG_X86)
+	/*
+	 * For Intel, Core 2 (model 15) and later have an efficient idle.
+	 */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+	    boot_cpu_data.x86 == 6 &&
+	    boot_cpu_data.x86_model >= 15)
+		return 1;
+#endif
+	return 0;
+}
+
 static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 				unsigned int event)
 {
@@ -705,6 +749,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			dbs_tuners_ins.sampling_rate =
 				max(min_sampling_rate,
 				    latency * LATENCY_MULTIPLIER);
+			dbs_tuners_ins.io_is_busy = should_io_be_busy();
 		}
 		mutex_unlock(&dbs_mutex);
 
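
The io_is_busy knob added above is exported through the governor's global sysfs group (store_io_is_busy/show_io_is_busy), so it can be flipped at runtime. The following is a minimal userspace sketch, not part of the patch, assuming the ondemand global tunables are exposed under /sys/devices/system/cpu/cpufreq/ondemand/ (the exact path may differ by kernel version and configuration):

/*
 * Illustration only: write the new io_is_busy tunable at runtime.
 * Path below is an assumption based on where the ondemand governor's
 * global attributes are typically registered.
 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	const char *path =
		"/sys/devices/system/cpu/cpufreq/ondemand/io_is_busy";
	const char *val = (argc > 1) ? argv[1] : "1";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	/* store_io_is_busy() parses "%u" and normalizes it via !!input,
	 * so any nonzero value enables iowait-as-busy accounting. */
	if (fprintf(f, "%s\n", val) < 0) {
		perror(path);
		fclose(f);
		return EXIT_FAILURE;
	}
	fclose(f);
	return EXIT_SUCCESS;
}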