about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c53
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c58
2 files changed, 38 insertions(+), 73 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index dd2f5b272a4d..3082a3fa5ec4 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -89,6 +89,15 @@ static struct dbs_tuners dbs_tuners_ins = {
89 .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, 89 .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
90}; 90};
91 91
92static inline unsigned int get_cpu_idle_time(unsigned int cpu)
93{
94 return kstat_cpu(cpu).cpustat.idle +
95 kstat_cpu(cpu).cpustat.iowait +
96 ( !dbs_tuners_ins.ignore_nice ?
97 kstat_cpu(cpu).cpustat.nice :
98 0);
99}
100
92/************************** sysfs interface ************************/ 101/************************** sysfs interface ************************/
93static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) 102static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
94{ 103{
@@ -221,16 +230,10 @@ static ssize_t store_ignore_nice(struct cpufreq_policy *policy,
221 dbs_tuners_ins.ignore_nice = input; 230 dbs_tuners_ins.ignore_nice = input;
222 231
223 /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */ 232 /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
224 for_each_cpu_mask(j, policy->cpus) { 233 for_each_online_cpu(j) {
225 struct cpu_dbs_info_s *j_dbs_info; 234 struct cpu_dbs_info_s *j_dbs_info;
226 j_dbs_info = &per_cpu(cpu_dbs_info, j); 235 j_dbs_info = &per_cpu(cpu_dbs_info, j);
227 j_dbs_info->cur_policy = policy; 236 j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
228
229 j_dbs_info->prev_cpu_idle_up =
230 kstat_cpu(j).cpustat.idle +
231 kstat_cpu(j).cpustat.iowait +
232 ( !dbs_tuners_ins.ignore_nice
233 ? kstat_cpu(j).cpustat.nice : 0 );
234 j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up; 237 j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
235 } 238 }
236 up(&dbs_sem); 239 up(&dbs_sem);
@@ -335,11 +338,7 @@ static void dbs_check_cpu(int cpu)
335 */ 338 */
336 339
337 /* Check for frequency increase */ 340 /* Check for frequency increase */
338 total_idle_ticks = kstat_cpu(cpu).cpustat.idle + 341 total_idle_ticks = get_cpu_idle_time(cpu);
339 kstat_cpu(cpu).cpustat.iowait;
340 /* consider 'nice' tasks as 'idle' time too if required */
341 if (dbs_tuners_ins.ignore_nice == 0)
342 total_idle_ticks += kstat_cpu(cpu).cpustat.nice;
343 idle_ticks = total_idle_ticks - 342 idle_ticks = total_idle_ticks -
344 this_dbs_info->prev_cpu_idle_up; 343 this_dbs_info->prev_cpu_idle_up;
345 this_dbs_info->prev_cpu_idle_up = total_idle_ticks; 344 this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
@@ -354,11 +353,7 @@ static void dbs_check_cpu(int cpu)
354 353
355 j_dbs_info = &per_cpu(cpu_dbs_info, j); 354 j_dbs_info = &per_cpu(cpu_dbs_info, j);
356 /* Check for frequency increase */ 355 /* Check for frequency increase */
357 total_idle_ticks = kstat_cpu(j).cpustat.idle + 356 total_idle_ticks = get_cpu_idle_time(j);
358 kstat_cpu(j).cpustat.iowait;
359 /* consider 'nice' too? */
360 if (dbs_tuners_ins.ignore_nice == 0)
361 total_idle_ticks += kstat_cpu(j).cpustat.nice;
362 tmp_idle_ticks = total_idle_ticks - 357 tmp_idle_ticks = total_idle_ticks -
363 j_dbs_info->prev_cpu_idle_up; 358 j_dbs_info->prev_cpu_idle_up;
364 j_dbs_info->prev_cpu_idle_up = total_idle_ticks; 359 j_dbs_info->prev_cpu_idle_up = total_idle_ticks;
@@ -373,6 +368,8 @@ static void dbs_check_cpu(int cpu)
373 usecs_to_jiffies(dbs_tuners_ins.sampling_rate); 368 usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
374 369
375 if (idle_ticks < up_idle_ticks) { 370 if (idle_ticks < up_idle_ticks) {
371 down_skip[cpu] = 0;
372 this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
376 /* if we are already at full speed then break out early */ 373 /* if we are already at full speed then break out early */
377 if (requested_freq[cpu] == policy->max) 374 if (requested_freq[cpu] == policy->max)
378 return; 375 return;
@@ -389,8 +386,6 @@ static void dbs_check_cpu(int cpu)
389 386
390 __cpufreq_driver_target(policy, requested_freq[cpu], 387 __cpufreq_driver_target(policy, requested_freq[cpu],
391 CPUFREQ_RELATION_H); 388 CPUFREQ_RELATION_H);
392 down_skip[cpu] = 0;
393 this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
394 return; 389 return;
395 } 390 }
396 391
@@ -399,11 +394,7 @@ static void dbs_check_cpu(int cpu)
399 if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor) 394 if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
400 return; 395 return;
401 396
402 total_idle_ticks = kstat_cpu(cpu).cpustat.idle + 397 total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
403 kstat_cpu(cpu).cpustat.iowait;
404 /* consider 'nice' too? */
405 if (dbs_tuners_ins.ignore_nice == 0)
406 total_idle_ticks += kstat_cpu(cpu).cpustat.nice;
407 idle_ticks = total_idle_ticks - 398 idle_ticks = total_idle_ticks -
408 this_dbs_info->prev_cpu_idle_down; 399 this_dbs_info->prev_cpu_idle_down;
409 this_dbs_info->prev_cpu_idle_down = total_idle_ticks; 400 this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
@@ -417,11 +408,7 @@ static void dbs_check_cpu(int cpu)
417 408
418 j_dbs_info = &per_cpu(cpu_dbs_info, j); 409 j_dbs_info = &per_cpu(cpu_dbs_info, j);
419 /* Check for frequency increase */ 410 /* Check for frequency decrease */
420 total_idle_ticks = kstat_cpu(j).cpustat.idle + 411 total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
421 kstat_cpu(j).cpustat.iowait;
422 /* consider 'nice' too? */
423 if (dbs_tuners_ins.ignore_nice == 0)
424 total_idle_ticks += kstat_cpu(j).cpustat.nice;
425 tmp_idle_ticks = total_idle_ticks - 412 tmp_idle_ticks = total_idle_ticks -
426 j_dbs_info->prev_cpu_idle_down; 413 j_dbs_info->prev_cpu_idle_down;
427 j_dbs_info->prev_cpu_idle_down = total_idle_ticks; 414 j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
@@ -516,11 +503,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
516 j_dbs_info = &per_cpu(cpu_dbs_info, j); 503 j_dbs_info = &per_cpu(cpu_dbs_info, j);
517 j_dbs_info->cur_policy = policy; 504 j_dbs_info->cur_policy = policy;
518 505
519 j_dbs_info->prev_cpu_idle_up = 506 j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
520 kstat_cpu(j).cpustat.idle +
521 kstat_cpu(j).cpustat.iowait +
522 ( !dbs_tuners_ins.ignore_nice
523 ? kstat_cpu(j).cpustat.nice : 0 );
524 j_dbs_info->prev_cpu_idle_down 507 j_dbs_info->prev_cpu_idle_down
525 = j_dbs_info->prev_cpu_idle_up; 508 = j_dbs_info->prev_cpu_idle_up;
526 } 509 }
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 056591612467..26cf54b11ba6 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -88,6 +88,15 @@ static struct dbs_tuners dbs_tuners_ins = {
88 .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, 88 .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
89}; 89};
90 90
91static inline unsigned int get_cpu_idle_time(unsigned int cpu)
92{
93 return kstat_cpu(cpu).cpustat.idle +
94 kstat_cpu(cpu).cpustat.iowait +
95 ( !dbs_tuners_ins.ignore_nice ?
96 kstat_cpu(cpu).cpustat.nice :
97 0);
98}
99
91/************************** sysfs interface ************************/ 100/************************** sysfs interface ************************/
92static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) 101static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
93{ 102{
@@ -220,16 +229,10 @@ static ssize_t store_ignore_nice(struct cpufreq_policy *policy,
220 dbs_tuners_ins.ignore_nice = input; 229 dbs_tuners_ins.ignore_nice = input;
221 230
222 /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */ 231 /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
223 for_each_cpu_mask(j, policy->cpus) { 232 for_each_online_cpu(j) {
224 struct cpu_dbs_info_s *j_dbs_info; 233 struct cpu_dbs_info_s *j_dbs_info;
225 j_dbs_info = &per_cpu(cpu_dbs_info, j); 234 j_dbs_info = &per_cpu(cpu_dbs_info, j);
226 j_dbs_info->cur_policy = policy; 235 j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
227
228 j_dbs_info->prev_cpu_idle_up =
229 kstat_cpu(j).cpustat.idle +
230 kstat_cpu(j).cpustat.iowait +
231 ( !dbs_tuners_ins.ignore_nice
232 ? kstat_cpu(j).cpustat.nice : 0 );
233 j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up; 236 j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
234 } 237 }
235 up(&dbs_sem); 238 up(&dbs_sem);
@@ -322,15 +325,10 @@ static void dbs_check_cpu(int cpu)
322 */ 325 */
323 326
324 /* Check for frequency increase */ 327 /* Check for frequency increase */
325 total_idle_ticks = kstat_cpu(cpu).cpustat.idle + 328 total_idle_ticks = get_cpu_idle_time(cpu);
326 kstat_cpu(cpu).cpustat.iowait;
327 /* consider 'nice' tasks as 'idle' time too if required */
328 if (dbs_tuners_ins.ignore_nice == 0)
329 total_idle_ticks += kstat_cpu(cpu).cpustat.nice;
330 idle_ticks = total_idle_ticks - 329 idle_ticks = total_idle_ticks -
331 this_dbs_info->prev_cpu_idle_up; 330 this_dbs_info->prev_cpu_idle_up;
332 this_dbs_info->prev_cpu_idle_up = total_idle_ticks; 331 this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
333
334 332
335 for_each_cpu_mask(j, policy->cpus) { 333 for_each_cpu_mask(j, policy->cpus) {
336 unsigned int tmp_idle_ticks; 334 unsigned int tmp_idle_ticks;
@@ -341,11 +339,7 @@ static void dbs_check_cpu(int cpu)
341 339
342 j_dbs_info = &per_cpu(cpu_dbs_info, j); 340 j_dbs_info = &per_cpu(cpu_dbs_info, j);
343 /* Check for frequency increase */ 341 /* Check for frequency increase */
344 total_idle_ticks = kstat_cpu(j).cpustat.idle + 342 total_idle_ticks = get_cpu_idle_time(j);
345 kstat_cpu(j).cpustat.iowait;
346 /* consider 'nice' too? */
347 if (dbs_tuners_ins.ignore_nice == 0)
348 total_idle_ticks += kstat_cpu(j).cpustat.nice;
349 tmp_idle_ticks = total_idle_ticks - 343 tmp_idle_ticks = total_idle_ticks -
350 j_dbs_info->prev_cpu_idle_up; 344 j_dbs_info->prev_cpu_idle_up;
351 j_dbs_info->prev_cpu_idle_up = total_idle_ticks; 345 j_dbs_info->prev_cpu_idle_up = total_idle_ticks;
@@ -360,14 +354,14 @@ static void dbs_check_cpu(int cpu)
360 usecs_to_jiffies(dbs_tuners_ins.sampling_rate); 354 usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
361 355
362 if (idle_ticks < up_idle_ticks) { 356 if (idle_ticks < up_idle_ticks) {
357 down_skip[cpu] = 0;
358 this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
363 /* if we are already at full speed then break out early */ 359 /* if we are already at full speed then break out early */
364 if (policy->cur == policy->max) 360 if (policy->cur == policy->max)
365 return; 361 return;
366 362
367 __cpufreq_driver_target(policy, policy->max, 363 __cpufreq_driver_target(policy, policy->max,
368 CPUFREQ_RELATION_H); 364 CPUFREQ_RELATION_H);
369 down_skip[cpu] = 0;
370 this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
371 return; 365 return;
372 } 366 }
373 367
@@ -376,11 +370,7 @@ static void dbs_check_cpu(int cpu)
376 if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor) 370 if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
377 return; 371 return;
378 372
379 total_idle_ticks = kstat_cpu(cpu).cpustat.idle + 373 total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
380 kstat_cpu(cpu).cpustat.iowait;
381 /* consider 'nice' too? */
382 if (dbs_tuners_ins.ignore_nice == 0)
383 total_idle_ticks += kstat_cpu(cpu).cpustat.nice;
384 idle_ticks = total_idle_ticks - 374 idle_ticks = total_idle_ticks -
385 this_dbs_info->prev_cpu_idle_down; 375 this_dbs_info->prev_cpu_idle_down;
386 this_dbs_info->prev_cpu_idle_down = total_idle_ticks; 376 this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
@@ -393,12 +383,8 @@ static void dbs_check_cpu(int cpu)
393 continue; 383 continue;
394 384
395 j_dbs_info = &per_cpu(cpu_dbs_info, j); 385 j_dbs_info = &per_cpu(cpu_dbs_info, j);
396 /* Check for frequency increase */ 386 /* Check for frequency decrease */
397 total_idle_ticks = kstat_cpu(j).cpustat.idle + 387 total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
398 kstat_cpu(j).cpustat.iowait;
399 /* consider 'nice' too? */
400 if (dbs_tuners_ins.ignore_nice == 0)
401 total_idle_ticks += kstat_cpu(j).cpustat.nice;
402 tmp_idle_ticks = total_idle_ticks - 388 tmp_idle_ticks = total_idle_ticks -
403 j_dbs_info->prev_cpu_idle_down; 389 j_dbs_info->prev_cpu_idle_down;
404 j_dbs_info->prev_cpu_idle_down = total_idle_ticks; 390 j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
@@ -414,7 +400,7 @@ static void dbs_check_cpu(int cpu)
414 freq_down_sampling_rate = dbs_tuners_ins.sampling_rate * 400 freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
415 dbs_tuners_ins.sampling_down_factor; 401 dbs_tuners_ins.sampling_down_factor;
416 down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) * 402 down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
417 usecs_to_jiffies(freq_down_sampling_rate); 403 usecs_to_jiffies(freq_down_sampling_rate);
418 404
419 if (idle_ticks > down_idle_ticks ) { 405 if (idle_ticks > down_idle_ticks ) {
420 /* if we are already at the lowest speed then break out early 406 /* if we are already at the lowest speed then break out early
@@ -488,11 +474,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
488 j_dbs_info = &per_cpu(cpu_dbs_info, j); 474 j_dbs_info = &per_cpu(cpu_dbs_info, j);
489 j_dbs_info->cur_policy = policy; 475 j_dbs_info->cur_policy = policy;
490 476
491 j_dbs_info->prev_cpu_idle_up = 477 j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
492 kstat_cpu(j).cpustat.idle +
493 kstat_cpu(j).cpustat.iowait +
494 ( !dbs_tuners_ins.ignore_nice
495 ? kstat_cpu(j).cpustat.nice : 0 );
496 j_dbs_info->prev_cpu_idle_down 478 j_dbs_info->prev_cpu_idle_down
497 = j_dbs_info->prev_cpu_idle_up; 479 = j_dbs_info->prev_cpu_idle_up;
498 } 480 }