Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/cpufreq.c               59
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c  56
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c     115
3 files changed, 135 insertions(+), 95 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 2d5d575e889d..063b2184caf5 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -662,32 +662,20 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
 	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
 }
 
-#define define_one_ro(_name) \
-static struct freq_attr _name = \
-__ATTR(_name, 0444, show_##_name, NULL)
-
-#define define_one_ro0400(_name) \
-static struct freq_attr _name = \
-__ATTR(_name, 0400, show_##_name, NULL)
-
-#define define_one_rw(_name) \
-static struct freq_attr _name = \
-__ATTR(_name, 0644, show_##_name, store_##_name)
-
-define_one_ro0400(cpuinfo_cur_freq);
-define_one_ro(cpuinfo_min_freq);
-define_one_ro(cpuinfo_max_freq);
-define_one_ro(cpuinfo_transition_latency);
-define_one_ro(scaling_available_governors);
-define_one_ro(scaling_driver);
-define_one_ro(scaling_cur_freq);
-define_one_ro(bios_limit);
-define_one_ro(related_cpus);
-define_one_ro(affected_cpus);
-define_one_rw(scaling_min_freq);
-define_one_rw(scaling_max_freq);
-define_one_rw(scaling_governor);
-define_one_rw(scaling_setspeed);
+cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
+cpufreq_freq_attr_ro(cpuinfo_min_freq);
+cpufreq_freq_attr_ro(cpuinfo_max_freq);
+cpufreq_freq_attr_ro(cpuinfo_transition_latency);
+cpufreq_freq_attr_ro(scaling_available_governors);
+cpufreq_freq_attr_ro(scaling_driver);
+cpufreq_freq_attr_ro(scaling_cur_freq);
+cpufreq_freq_attr_ro(bios_limit);
+cpufreq_freq_attr_ro(related_cpus);
+cpufreq_freq_attr_ro(affected_cpus);
+cpufreq_freq_attr_rw(scaling_min_freq);
+cpufreq_freq_attr_rw(scaling_max_freq);
+cpufreq_freq_attr_rw(scaling_governor);
+cpufreq_freq_attr_rw(scaling_setspeed);
 
 static struct attribute *default_attrs[] = {
 	&cpuinfo_min_freq.attr,
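
The hunk above folds the three driver-local attribute macros into shared cpufreq_freq_attr_* helpers. The helpers themselves land in include/linux/cpufreq.h, which is outside this diffstat (limited to drivers/cpufreq); judging from the locals they replace, they presumably expand along these lines (a sketch, not the actual header):

/* Assumed expansions, mirroring the removed local macros. */
#define cpufreq_freq_attr_ro(_name)		\
static struct freq_attr _name =			\
__ATTR(_name, 0444, show_##_name, NULL)

#define cpufreq_freq_attr_ro_perm(_name, _perm)	\
static struct freq_attr _name =			\
__ATTR(_name, _perm, show_##_name, NULL)

#define cpufreq_freq_attr_rw(_name)		\
static struct freq_attr _name =			\
__ATTR(_name, 0644, show_##_name, store_##_name)

/* The _old variants used by the governors below presumably paste the
 * _old suffix themselves, so callers pass one name instead of two: */
#define cpufreq_freq_attr_rw_old(_name)		\
static struct freq_attr _name##_old =		\
__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)

The define_one_global_ro()/define_one_global_rw() helpers appearing in the governor files are presumably the same pattern built on struct global_attr rather than struct freq_attr.
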
@@ -1113,6 +1101,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 	unsigned int cpu = sys_dev->id;
 	unsigned long flags;
 	struct cpufreq_policy *data;
+	struct kobject *kobj;
+	struct completion *cmp;
 #ifdef CONFIG_SMP
 	struct sys_device *cpu_sys_dev;
 	unsigned int j;
@@ -1141,10 +1131,11 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 		dprintk("removing link\n");
 		cpumask_clear_cpu(cpu, data->cpus);
 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-		sysfs_remove_link(&sys_dev->kobj, "cpufreq");
+		kobj = &sys_dev->kobj;
 		cpufreq_cpu_put(data);
 		cpufreq_debug_enable_ratelimit();
 		unlock_policy_rwsem_write(cpu);
+		sysfs_remove_link(kobj, "cpufreq");
 		return 0;
 	}
 #endif
@@ -1181,7 +1172,10 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 				data->governor->name, CPUFREQ_NAME_LEN);
 #endif
 			cpu_sys_dev = get_cpu_sysdev(j);
-			sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
+			kobj = &cpu_sys_dev->kobj;
+			unlock_policy_rwsem_write(cpu);
+			sysfs_remove_link(kobj, "cpufreq");
+			lock_policy_rwsem_write(cpu);
 			cpufreq_cpu_put(data);
 		}
 	}
@@ -1192,19 +1186,22 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 	if (cpufreq_driver->target)
 		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
 
-	kobject_put(&data->kobj);
+	kobj = &data->kobj;
+	cmp = &data->kobj_unregister;
+	unlock_policy_rwsem_write(cpu);
+	kobject_put(kobj);
 
 	/* we need to make sure that the underlying kobj is actually
 	 * not referenced anymore by anybody before we proceed with
 	 * unloading.
 	 */
 	dprintk("waiting for dropping of refcount\n");
-	wait_for_completion(&data->kobj_unregister);
+	wait_for_completion(cmp);
 	dprintk("wait complete\n");
 
+	lock_policy_rwsem_write(cpu);
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(data);
-
 	unlock_policy_rwsem_write(cpu);
 
 	free_cpumask_var(data->related_cpus);
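
Taken together, the four cpufreq.c hunks above enforce one rule in __cpufreq_remove_dev(): never touch sysfs or block on kobject teardown while holding the per-CPU policy rwsem. Pointers to the kobject and the completion are stashed while the lock is held, the lock is dropped, and only then do sysfs_remove_link(), kobject_put() and wait_for_completion() run. The inversion this avoids, schematically (an assumed simplification, not the literal call chains):

/*
 * reader:   sysfs open/read of a cpufreq attribute
 *             -> sysfs core marks the attribute active
 *             -> the show() wrapper takes the policy rwsem (blocks on writer)
 * remover:  lock_policy_rwsem_write(cpu)
 *             -> sysfs/kobject removal waits for active readers to drain
 *                (blocks on the reader above) => AB-BA deadlock
 */

The unlock/remove/re-lock dance around sysfs_remove_link() in the for_each_cpu() loop re-takes the rwsem afterwards because the rest of the loop body still expects it held.
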
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 599a40b25cb0..526bfbf69611 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -178,12 +178,8 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj,
 	return sprintf(buf, "%u\n", min_sampling_rate);
 }
 
-#define define_one_ro(_name) \
-static struct global_attr _name = \
-__ATTR(_name, 0444, show_##_name, NULL)
-
-define_one_ro(sampling_rate_max);
-define_one_ro(sampling_rate_min);
+define_one_global_ro(sampling_rate_max);
+define_one_global_ro(sampling_rate_min);
 
 /* cpufreq_conservative Governor Tunables */
 #define show_one(file_name, object)					\
@@ -221,12 +217,8 @@ show_one_old(freq_step);
 show_one_old(sampling_rate_min);
 show_one_old(sampling_rate_max);
 
-#define define_one_ro_old(object, _name)	\
-static struct freq_attr object =		\
-__ATTR(_name, 0444, show_##_name##_old, NULL)
-
-define_one_ro_old(sampling_rate_min_old, sampling_rate_min);
-define_one_ro_old(sampling_rate_max_old, sampling_rate_max);
+cpufreq_freq_attr_ro_old(sampling_rate_min);
+cpufreq_freq_attr_ro_old(sampling_rate_max);
 
 /*** delete after deprecation time ***/
 
@@ -364,16 +356,12 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
 	return count;
 }
 
-#define define_one_rw(_name) \
-static struct global_attr _name = \
-__ATTR(_name, 0644, show_##_name, store_##_name)
-
-define_one_rw(sampling_rate);
-define_one_rw(sampling_down_factor);
-define_one_rw(up_threshold);
-define_one_rw(down_threshold);
-define_one_rw(ignore_nice_load);
-define_one_rw(freq_step);
+define_one_global_rw(sampling_rate);
+define_one_global_rw(sampling_down_factor);
+define_one_global_rw(up_threshold);
+define_one_global_rw(down_threshold);
+define_one_global_rw(ignore_nice_load);
+define_one_global_rw(freq_step);
 
 static struct attribute *dbs_attributes[] = {
 	&sampling_rate_max.attr,
@@ -409,16 +397,12 @@ write_one_old(down_threshold);
 write_one_old(ignore_nice_load);
 write_one_old(freq_step);
 
-#define define_one_rw_old(object, _name)	\
-static struct freq_attr object =		\
-__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)
-
-define_one_rw_old(sampling_rate_old, sampling_rate);
-define_one_rw_old(sampling_down_factor_old, sampling_down_factor);
-define_one_rw_old(up_threshold_old, up_threshold);
-define_one_rw_old(down_threshold_old, down_threshold);
-define_one_rw_old(ignore_nice_load_old, ignore_nice_load);
-define_one_rw_old(freq_step_old, freq_step);
+cpufreq_freq_attr_rw_old(sampling_rate);
+cpufreq_freq_attr_rw_old(sampling_down_factor);
+cpufreq_freq_attr_rw_old(up_threshold);
+cpufreq_freq_attr_rw_old(down_threshold);
+cpufreq_freq_attr_rw_old(ignore_nice_load);
+cpufreq_freq_attr_rw_old(freq_step);
 
 static struct attribute *dbs_attributes_old[] = {
 	&sampling_rate_max_old.attr,
@@ -444,6 +428,7 @@ static struct attribute_group dbs_attr_group_old = {
 static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 {
 	unsigned int load = 0;
+	unsigned int max_load = 0;
 	unsigned int freq_target;
 
 	struct cpufreq_policy *policy;
@@ -501,6 +486,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 			continue;
 
 		load = 100 * (wall_time - idle_time) / wall_time;
+
+		if (load > max_load)
+			max_load = load;
 	}
 
 	/*
@@ -511,7 +499,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		return;
 
 	/* Check for frequency increase */
-	if (load > dbs_tuners_ins.up_threshold) {
+	if (max_load > dbs_tuners_ins.up_threshold) {
 		this_dbs_info->down_skip = 0;
 
 		/* if we are already at full speed then break out early */
@@ -538,7 +526,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 	 * can support the current CPU usage without triggering the up
 	 * policy. To be safe, we focus 10 points under the threshold.
 	 */
-	if (load < (dbs_tuners_ins.down_threshold - 10)) {
+	if (max_load < (dbs_tuners_ins.down_threshold - 10)) {
 		freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
 
 		this_dbs_info->requested_freq -= freq_target;
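
The conservative-governor change above fixes a real multi-core bug: load is computed per CPU inside the for_each_cpu() loop, so after the loop it held only whichever CPU the mask happened to visit last, and up/down-scaling decisions for a shared policy depended on iteration order. Tracking max_load makes the decision follow the busiest CPU in the policy. A standalone sketch of the difference (hypothetical load values, not kernel code):

#include <stdio.h>

int main(void)
{
	/* per-CPU loads for the CPUs sharing one policy */
	unsigned int loads[] = { 92, 14, 7 };
	unsigned int load = 0, max_load = 0;

	for (unsigned int i = 0; i < sizeof loads / sizeof loads[0]; i++) {
		load = loads[i];		/* old code kept only this */
		if (load > max_load)
			max_load = load;	/* new code aggregates */
	}

	/* old: decides on load == 7 and wrongly scales down;
	 * new: decides on max_load == 92 and scales up */
	printf("last=%u max=%u\n", load, max_load);
	return 0;
}
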
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index bd444dc93cf2..e1314212d8d4 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -73,6 +73,7 @@ enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
 
 struct cpu_dbs_info_s {
 	cputime64_t prev_cpu_idle;
+	cputime64_t prev_cpu_iowait;
 	cputime64_t prev_cpu_wall;
 	cputime64_t prev_cpu_nice;
 	struct cpufreq_policy *cur_policy;
@@ -108,6 +109,7 @@ static struct dbs_tuners {
 	unsigned int down_differential;
 	unsigned int ignore_nice;
 	unsigned int powersave_bias;
+	unsigned int io_is_busy;
 } dbs_tuners_ins = {
 	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
 	.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
@@ -148,6 +150,16 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
 	return idle_time;
 }
 
+static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
+{
+	u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);
+
+	if (iowait_time == -1ULL)
+		return 0;
+
+	return iowait_time;
+}
+
 /*
  * Find right freq to be set now with powersave_bias on.
  * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
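
A note on the -1ULL check in get_cpu_iowait_time() above (inferred from the matching get_cpu_idle_time() wrapper earlier in this file, so treat it as an assumption): get_cpu_iowait_time_us() returns -1ULL when fine-grained per-CPU accounting is unavailable, e.g. on kernels without NO_HZ. The wrapper then reports zero iowait, which quietly degrades io_is_busy to a no-op on such configurations rather than corrupting the load arithmetic below.
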
@@ -234,12 +246,8 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj,
 	return sprintf(buf, "%u\n", min_sampling_rate);
 }
 
-#define define_one_ro(_name) \
-static struct global_attr _name = \
-__ATTR(_name, 0444, show_##_name, NULL)
-
-define_one_ro(sampling_rate_max);
-define_one_ro(sampling_rate_min);
+define_one_global_ro(sampling_rate_max);
+define_one_global_ro(sampling_rate_min);
 
 /* cpufreq_ondemand Governor Tunables */
 #define show_one(file_name, object)					\
@@ -249,6 +257,7 @@ static ssize_t show_##file_name \
 	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
 }
 show_one(sampling_rate, sampling_rate);
+show_one(io_is_busy, io_is_busy);
 show_one(up_threshold, up_threshold);
 show_one(ignore_nice_load, ignore_nice);
 show_one(powersave_bias, powersave_bias);
@@ -274,12 +283,8 @@ show_one_old(powersave_bias);
 show_one_old(sampling_rate_min);
 show_one_old(sampling_rate_max);
 
-#define define_one_ro_old(object, _name)	\
-static struct freq_attr object =		\
-__ATTR(_name, 0444, show_##_name##_old, NULL)
-
-define_one_ro_old(sampling_rate_min_old, sampling_rate_min);
-define_one_ro_old(sampling_rate_max_old, sampling_rate_max);
+cpufreq_freq_attr_ro_old(sampling_rate_min);
+cpufreq_freq_attr_ro_old(sampling_rate_max);
 
 /*** delete after deprecation time ***/
 
@@ -299,6 +304,23 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
 	return count;
 }
 
+static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
+				const char *buf, size_t count)
+{
+	unsigned int input;
+	int ret;
+
+	ret = sscanf(buf, "%u", &input);
+	if (ret != 1)
+		return -EINVAL;
+
+	mutex_lock(&dbs_mutex);
+	dbs_tuners_ins.io_is_busy = !!input;
+	mutex_unlock(&dbs_mutex);
+
+	return count;
+}
+
 static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
 				  const char *buf, size_t count)
 {
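
store_io_is_busy() above follows the standard pattern for ondemand's global tunables: parse one unsigned int with sscanf(), squash it to 0/1 with !!input, and update dbs_tuners_ins under dbs_mutex. Once &io_is_busy.attr is added to dbs_attributes[] below, the knob surfaces next to the other tunables, presumably as /sys/devices/system/cpu/cpufreq/ondemand/io_is_busy (path assumed from where ondemand's global tunables live in this era). A minimal user-space sketch that enables it:

#include <stdio.h>

int main(void)
{
	/* assumed sysfs path; needs root and the ondemand governor active */
	FILE *f = fopen("/sys/devices/system/cpu/cpufreq/ondemand/io_is_busy", "w");

	if (!f)
		return 1;
	fputs("1\n", f);	/* 1: count iowait as busy; 0: count it as idle */
	return fclose(f) ? 1 : 0;
}
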
@@ -376,14 +398,11 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
 	return count;
 }
 
-#define define_one_rw(_name) \
-static struct global_attr _name = \
-__ATTR(_name, 0644, show_##_name, store_##_name)
-
-define_one_rw(sampling_rate);
-define_one_rw(up_threshold);
-define_one_rw(ignore_nice_load);
-define_one_rw(powersave_bias);
+define_one_global_rw(sampling_rate);
+define_one_global_rw(io_is_busy);
+define_one_global_rw(up_threshold);
+define_one_global_rw(ignore_nice_load);
+define_one_global_rw(powersave_bias);
 
 static struct attribute *dbs_attributes[] = {
 	&sampling_rate_max.attr,
@@ -392,6 +411,7 @@ static struct attribute *dbs_attributes[] = {
 	&up_threshold.attr,
 	&ignore_nice_load.attr,
 	&powersave_bias.attr,
+	&io_is_busy.attr,
 	NULL
 };
 
@@ -415,14 +435,10 @@ write_one_old(up_threshold);
 write_one_old(ignore_nice_load);
 write_one_old(powersave_bias);
 
-#define define_one_rw_old(object, _name)	\
-static struct freq_attr object =		\
-__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)
-
-define_one_rw_old(sampling_rate_old, sampling_rate);
-define_one_rw_old(up_threshold_old, up_threshold);
-define_one_rw_old(ignore_nice_load_old, ignore_nice_load);
-define_one_rw_old(powersave_bias_old, powersave_bias);
+cpufreq_freq_attr_rw_old(sampling_rate);
+cpufreq_freq_attr_rw_old(up_threshold);
+cpufreq_freq_attr_rw_old(ignore_nice_load);
+cpufreq_freq_attr_rw_old(powersave_bias);
 
 static struct attribute *dbs_attributes_old[] = {
 	&sampling_rate_max_old.attr,
@@ -470,14 +486,15 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 
 	for_each_cpu(j, policy->cpus) {
 		struct cpu_dbs_info_s *j_dbs_info;
-		cputime64_t cur_wall_time, cur_idle_time;
-		unsigned int idle_time, wall_time;
+		cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
+		unsigned int idle_time, wall_time, iowait_time;
 		unsigned int load, load_freq;
 		int freq_avg;
 
 		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
 
 		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
+		cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);
 
 		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
 				j_dbs_info->prev_cpu_wall);
@@ -487,6 +504,10 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 				j_dbs_info->prev_cpu_idle);
 		j_dbs_info->prev_cpu_idle = cur_idle_time;
 
+		iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
+				j_dbs_info->prev_cpu_iowait);
+		j_dbs_info->prev_cpu_iowait = cur_iowait_time;
+
 		if (dbs_tuners_ins.ignore_nice) {
 			cputime64_t cur_nice;
 			unsigned long cur_nice_jiffies;
@@ -504,6 +525,16 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 			idle_time += jiffies_to_usecs(cur_nice_jiffies);
 		}
 
+		/*
+		 * For the purpose of ondemand, waiting for disk IO is an
+		 * indication that you're performance critical, and not that
+		 * the system is actually idle. So subtract the iowait time
+		 * from the cpu idle time.
+		 */
+
+		if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
+			idle_time -= iowait_time;
+
 		if (unlikely(!wall_time || wall_time < idle_time))
 			continue;
 
@@ -617,6 +648,29 @@ static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
 	cancel_delayed_work_sync(&dbs_info->work);
 }
 
+/*
+ * Not all CPUs want IO time to be accounted as busy; this depends on how
+ * efficient idling at a higher frequency/voltage is.
+ * Pavel Machek says this is not so for various generations of AMD and old
+ * Intel systems.
+ * Mike Chan (android.com) claims this is also not true for ARM.
+ * Because of this, whitelist specific known (series) of CPUs by default, and
+ * leave all others up to the user.
+ */
+static int should_io_be_busy(void)
+{
+#if defined(CONFIG_X86)
+	/*
+	 * For Intel, Core 2 (model 15) and later have an efficient idle.
+	 */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+	    boot_cpu_data.x86 == 6 &&
+	    boot_cpu_data.x86_model >= 15)
+		return 1;
+#endif
+	return 0;
+}
+
 static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 				unsigned int event)
 {
@@ -679,6 +733,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			dbs_tuners_ins.sampling_rate =
 				max(min_sampling_rate,
 				    latency * LATENCY_MULTIPLIER);
+			dbs_tuners_ins.io_is_busy = should_io_be_busy();
 		}
 		mutex_unlock(&dbs_mutex);
 
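
Finally, the hunk above wires in the default: should_io_be_busy() runs in the governor-start path inside the same one-time block that derives sampling_rate from the transition latency, i.e. when the first policy enables ondemand, so an administrator's io_is_busy setting is not clobbered each time another CPU attaches. Family 6, model 15 is the original Core 2, so the whitelist covers Core 2 and later Intel parts, where idling at a high frequency/voltage is cheap. A user-space sketch of the same test via CPUID (illustrative assumption; the kernel reads boot_cpu_data instead, and the vendor check is elided here):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	unsigned int family = (eax >> 8) & 0xf;				 /* base family */
	unsigned int model = ((eax >> 4) & 0xf) | ((eax >> 12) & 0xf0); /* + ext. model */

	/* mirrors the kernel check: family == 6 && model >= 15 */
	printf("io_is_busy default would be: %d\n", family == 6 && model >= 15);
	return 0;
}
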