diff options
author | Dave Jones <davej@redhat.com> | 2005-05-31 22:03:49 -0400 |
---|---|---|
committer | Dave Jones <davej@redhat.com> | 2005-05-31 22:03:49 -0400 |
commit | dac1c1a56279b4545a822ec7bc770003c233e546 (patch) | |
tree | 61175f7534ae731b1eaa4b75a3410a447058b4dc /drivers/cpufreq/cpufreq_ondemand.c | |
parent | 1206aaac285904e3e3995eecbf4129b6555a8973 (diff) |
[CPUFREQ] ondemand,conservative minor bug-fix and cleanup
[PATCH] [1/5] ondemand,conservative minor bug-fix and cleanup
Attached patch fixes some minor issues with Alexander's patch and related
cleanup in both ondemand and conservative governor.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>
Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')
-rw-r--r-- | drivers/cpufreq/cpufreq_ondemand.c | 58 |
1 file changed, 20 insertions, 38 deletions
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 056591612467..26cf54b11ba6 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c | |||
@@ -88,6 +88,15 @@ static struct dbs_tuners dbs_tuners_ins = { | |||
88 | .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, | 88 | .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, |
89 | }; | 89 | }; |
90 | 90 | ||
91 | static inline unsigned int get_cpu_idle_time(unsigned int cpu) | ||
92 | { | ||
93 | return kstat_cpu(cpu).cpustat.idle + | ||
94 | kstat_cpu(cpu).cpustat.iowait + | ||
95 | ( !dbs_tuners_ins.ignore_nice ? | ||
96 | kstat_cpu(cpu).cpustat.nice : | ||
97 | 0); | ||
98 | } | ||
99 | |||
91 | /************************** sysfs interface ************************/ | 100 | /************************** sysfs interface ************************/ |
92 | static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) | 101 | static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) |
93 | { | 102 | { |
@@ -220,16 +229,10 @@ static ssize_t store_ignore_nice(struct cpufreq_policy *policy, | |||
220 | dbs_tuners_ins.ignore_nice = input; | 229 | dbs_tuners_ins.ignore_nice = input; |
221 | 230 | ||
222 | /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */ | 231 | /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */ |
223 | for_each_cpu_mask(j, policy->cpus) { | 232 | for_each_online_cpu(j) { |
224 | struct cpu_dbs_info_s *j_dbs_info; | 233 | struct cpu_dbs_info_s *j_dbs_info; |
225 | j_dbs_info = &per_cpu(cpu_dbs_info, j); | 234 | j_dbs_info = &per_cpu(cpu_dbs_info, j); |
226 | j_dbs_info->cur_policy = policy; | 235 | j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j); |
227 | |||
228 | j_dbs_info->prev_cpu_idle_up = | ||
229 | kstat_cpu(j).cpustat.idle + | ||
230 | kstat_cpu(j).cpustat.iowait + | ||
231 | ( !dbs_tuners_ins.ignore_nice | ||
232 | ? kstat_cpu(j).cpustat.nice : 0 ); | ||
233 | j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up; | 236 | j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up; |
234 | } | 237 | } |
235 | up(&dbs_sem); | 238 | up(&dbs_sem); |
@@ -322,15 +325,10 @@ static void dbs_check_cpu(int cpu) | |||
322 | */ | 325 | */ |
323 | 326 | ||
324 | /* Check for frequency increase */ | 327 | /* Check for frequency increase */ |
325 | total_idle_ticks = kstat_cpu(cpu).cpustat.idle + | 328 | total_idle_ticks = get_cpu_idle_time(cpu); |
326 | kstat_cpu(cpu).cpustat.iowait; | ||
327 | /* consider 'nice' tasks as 'idle' time too if required */ | ||
328 | if (dbs_tuners_ins.ignore_nice == 0) | ||
329 | total_idle_ticks += kstat_cpu(cpu).cpustat.nice; | ||
330 | idle_ticks = total_idle_ticks - | 329 | idle_ticks = total_idle_ticks - |
331 | this_dbs_info->prev_cpu_idle_up; | 330 | this_dbs_info->prev_cpu_idle_up; |
332 | this_dbs_info->prev_cpu_idle_up = total_idle_ticks; | 331 | this_dbs_info->prev_cpu_idle_up = total_idle_ticks; |
333 | |||
334 | 332 | ||
335 | for_each_cpu_mask(j, policy->cpus) { | 333 | for_each_cpu_mask(j, policy->cpus) { |
336 | unsigned int tmp_idle_ticks; | 334 | unsigned int tmp_idle_ticks; |
@@ -341,11 +339,7 @@ static void dbs_check_cpu(int cpu) | |||
341 | 339 | ||
342 | j_dbs_info = &per_cpu(cpu_dbs_info, j); | 340 | j_dbs_info = &per_cpu(cpu_dbs_info, j); |
343 | /* Check for frequency increase */ | 341 | /* Check for frequency increase */ |
344 | total_idle_ticks = kstat_cpu(j).cpustat.idle + | 342 | total_idle_ticks = get_cpu_idle_time(j); |
345 | kstat_cpu(j).cpustat.iowait; | ||
346 | /* consider 'nice' too? */ | ||
347 | if (dbs_tuners_ins.ignore_nice == 0) | ||
348 | total_idle_ticks += kstat_cpu(j).cpustat.nice; | ||
349 | tmp_idle_ticks = total_idle_ticks - | 343 | tmp_idle_ticks = total_idle_ticks - |
350 | j_dbs_info->prev_cpu_idle_up; | 344 | j_dbs_info->prev_cpu_idle_up; |
351 | j_dbs_info->prev_cpu_idle_up = total_idle_ticks; | 345 | j_dbs_info->prev_cpu_idle_up = total_idle_ticks; |
@@ -360,14 +354,14 @@ static void dbs_check_cpu(int cpu) | |||
360 | usecs_to_jiffies(dbs_tuners_ins.sampling_rate); | 354 | usecs_to_jiffies(dbs_tuners_ins.sampling_rate); |
361 | 355 | ||
362 | if (idle_ticks < up_idle_ticks) { | 356 | if (idle_ticks < up_idle_ticks) { |
357 | down_skip[cpu] = 0; | ||
358 | this_dbs_info->prev_cpu_idle_down = total_idle_ticks; | ||
363 | /* if we are already at full speed then break out early */ | 359 | /* if we are already at full speed then break out early */ |
364 | if (policy->cur == policy->max) | 360 | if (policy->cur == policy->max) |
365 | return; | 361 | return; |
366 | 362 | ||
367 | __cpufreq_driver_target(policy, policy->max, | 363 | __cpufreq_driver_target(policy, policy->max, |
368 | CPUFREQ_RELATION_H); | 364 | CPUFREQ_RELATION_H); |
369 | down_skip[cpu] = 0; | ||
370 | this_dbs_info->prev_cpu_idle_down = total_idle_ticks; | ||
371 | return; | 365 | return; |
372 | } | 366 | } |
373 | 367 | ||
@@ -376,11 +370,7 @@ static void dbs_check_cpu(int cpu) | |||
376 | if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor) | 370 | if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor) |
377 | return; | 371 | return; |
378 | 372 | ||
379 | total_idle_ticks = kstat_cpu(cpu).cpustat.idle + | 373 | total_idle_ticks = this_dbs_info->prev_cpu_idle_up; |
380 | kstat_cpu(cpu).cpustat.iowait; | ||
381 | /* consider 'nice' too? */ | ||
382 | if (dbs_tuners_ins.ignore_nice == 0) | ||
383 | total_idle_ticks += kstat_cpu(cpu).cpustat.nice; | ||
384 | idle_ticks = total_idle_ticks - | 374 | idle_ticks = total_idle_ticks - |
385 | this_dbs_info->prev_cpu_idle_down; | 375 | this_dbs_info->prev_cpu_idle_down; |
386 | this_dbs_info->prev_cpu_idle_down = total_idle_ticks; | 376 | this_dbs_info->prev_cpu_idle_down = total_idle_ticks; |
@@ -393,12 +383,8 @@ static void dbs_check_cpu(int cpu) | |||
393 | continue; | 383 | continue; |
394 | 384 | ||
395 | j_dbs_info = &per_cpu(cpu_dbs_info, j); | 385 | j_dbs_info = &per_cpu(cpu_dbs_info, j); |
396 | /* Check for frequency increase */ | 386 | /* Check for frequency decrease */ |
397 | total_idle_ticks = kstat_cpu(j).cpustat.idle + | 387 | total_idle_ticks = j_dbs_info->prev_cpu_idle_up; |
398 | kstat_cpu(j).cpustat.iowait; | ||
399 | /* consider 'nice' too? */ | ||
400 | if (dbs_tuners_ins.ignore_nice == 0) | ||
401 | total_idle_ticks += kstat_cpu(j).cpustat.nice; | ||
402 | tmp_idle_ticks = total_idle_ticks - | 388 | tmp_idle_ticks = total_idle_ticks - |
403 | j_dbs_info->prev_cpu_idle_down; | 389 | j_dbs_info->prev_cpu_idle_down; |
404 | j_dbs_info->prev_cpu_idle_down = total_idle_ticks; | 390 | j_dbs_info->prev_cpu_idle_down = total_idle_ticks; |
@@ -414,7 +400,7 @@ static void dbs_check_cpu(int cpu) | |||
414 | freq_down_sampling_rate = dbs_tuners_ins.sampling_rate * | 400 | freq_down_sampling_rate = dbs_tuners_ins.sampling_rate * |
415 | dbs_tuners_ins.sampling_down_factor; | 401 | dbs_tuners_ins.sampling_down_factor; |
416 | down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) * | 402 | down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) * |
417 | usecs_to_jiffies(freq_down_sampling_rate); | 403 | usecs_to_jiffies(freq_down_sampling_rate); |
418 | 404 | ||
419 | if (idle_ticks > down_idle_ticks ) { | 405 | if (idle_ticks > down_idle_ticks ) { |
420 | /* if we are already at the lowest speed then break out early | 406 | /* if we are already at the lowest speed then break out early |
@@ -488,11 +474,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
488 | j_dbs_info = &per_cpu(cpu_dbs_info, j); | 474 | j_dbs_info = &per_cpu(cpu_dbs_info, j); |
489 | j_dbs_info->cur_policy = policy; | 475 | j_dbs_info->cur_policy = policy; |
490 | 476 | ||
491 | j_dbs_info->prev_cpu_idle_up = | 477 | j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j); |
492 | kstat_cpu(j).cpustat.idle + | ||
493 | kstat_cpu(j).cpustat.iowait + | ||
494 | ( !dbs_tuners_ins.ignore_nice | ||
495 | ? kstat_cpu(j).cpustat.nice : 0 ); | ||
496 | j_dbs_info->prev_cpu_idle_down | 478 | j_dbs_info->prev_cpu_idle_down |
497 | = j_dbs_info->prev_cpu_idle_up; | 479 | = j_dbs_info->prev_cpu_idle_up; |
498 | } | 480 | } |