diff options
author | Dave Jones <davej@redhat.com> | 2005-05-31 22:03:49 -0400 |
---|---|---|
committer | Dave Jones <davej@redhat.com> | 2005-05-31 22:03:49 -0400 |
commit | dac1c1a56279b4545a822ec7bc770003c233e546 (patch) | |
tree | 61175f7534ae731b1eaa4b75a3410a447058b4dc /drivers/cpufreq/cpufreq_conservative.c | |
parent | 1206aaac285904e3e3995eecbf4129b6555a8973 (diff) |
[CPUFREQ] ondemand,conservative minor bug-fix and cleanup
[PATCH] [1/5] ondemand,conservative minor bug-fix and cleanup
Attached patch fixes some minor issues with Alexander's patch and related
cleanup in both ondemand and conservative governor.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>
Diffstat (limited to 'drivers/cpufreq/cpufreq_conservative.c')
-rw-r--r-- | drivers/cpufreq/cpufreq_conservative.c | 53 |
1 file changed, 18 insertions, 35 deletions
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index dd2f5b272a4d..3082a3fa5ec4 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c | |||
@@ -89,6 +89,15 @@ static struct dbs_tuners dbs_tuners_ins = { | |||
89 | .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, | 89 | .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, |
90 | }; | 90 | }; |
91 | 91 | ||
92 | static inline unsigned int get_cpu_idle_time(unsigned int cpu) | ||
93 | { | ||
94 | return kstat_cpu(cpu).cpustat.idle + | ||
95 | kstat_cpu(cpu).cpustat.iowait + | ||
96 | ( !dbs_tuners_ins.ignore_nice ? | ||
97 | kstat_cpu(cpu).cpustat.nice : | ||
98 | 0); | ||
99 | } | ||
100 | |||
92 | /************************** sysfs interface ************************/ | 101 | /************************** sysfs interface ************************/ |
93 | static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) | 102 | static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) |
94 | { | 103 | { |
@@ -221,16 +230,10 @@ static ssize_t store_ignore_nice(struct cpufreq_policy *policy, | |||
221 | dbs_tuners_ins.ignore_nice = input; | 230 | dbs_tuners_ins.ignore_nice = input; |
222 | 231 | ||
223 | /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */ | 232 | /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */ |
224 | for_each_cpu_mask(j, policy->cpus) { | 233 | for_each_online_cpu(j) { |
225 | struct cpu_dbs_info_s *j_dbs_info; | 234 | struct cpu_dbs_info_s *j_dbs_info; |
226 | j_dbs_info = &per_cpu(cpu_dbs_info, j); | 235 | j_dbs_info = &per_cpu(cpu_dbs_info, j); |
227 | j_dbs_info->cur_policy = policy; | 236 | j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j); |
228 | |||
229 | j_dbs_info->prev_cpu_idle_up = | ||
230 | kstat_cpu(j).cpustat.idle + | ||
231 | kstat_cpu(j).cpustat.iowait + | ||
232 | ( !dbs_tuners_ins.ignore_nice | ||
233 | ? kstat_cpu(j).cpustat.nice : 0 ); | ||
234 | j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up; | 237 | j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up; |
235 | } | 238 | } |
236 | up(&dbs_sem); | 239 | up(&dbs_sem); |
@@ -335,11 +338,7 @@ static void dbs_check_cpu(int cpu) | |||
335 | */ | 338 | */ |
336 | 339 | ||
337 | /* Check for frequency increase */ | 340 | /* Check for frequency increase */ |
338 | total_idle_ticks = kstat_cpu(cpu).cpustat.idle + | 341 | total_idle_ticks = get_cpu_idle_time(cpu); |
339 | kstat_cpu(cpu).cpustat.iowait; | ||
340 | /* consider 'nice' tasks as 'idle' time too if required */ | ||
341 | if (dbs_tuners_ins.ignore_nice == 0) | ||
342 | total_idle_ticks += kstat_cpu(cpu).cpustat.nice; | ||
343 | idle_ticks = total_idle_ticks - | 342 | idle_ticks = total_idle_ticks - |
344 | this_dbs_info->prev_cpu_idle_up; | 343 | this_dbs_info->prev_cpu_idle_up; |
345 | this_dbs_info->prev_cpu_idle_up = total_idle_ticks; | 344 | this_dbs_info->prev_cpu_idle_up = total_idle_ticks; |
@@ -354,11 +353,7 @@ static void dbs_check_cpu(int cpu) | |||
354 | 353 | ||
355 | j_dbs_info = &per_cpu(cpu_dbs_info, j); | 354 | j_dbs_info = &per_cpu(cpu_dbs_info, j); |
356 | /* Check for frequency increase */ | 355 | /* Check for frequency increase */ |
357 | total_idle_ticks = kstat_cpu(j).cpustat.idle + | 356 | total_idle_ticks = get_cpu_idle_time(j); |
358 | kstat_cpu(j).cpustat.iowait; | ||
359 | /* consider 'nice' too? */ | ||
360 | if (dbs_tuners_ins.ignore_nice == 0) | ||
361 | total_idle_ticks += kstat_cpu(j).cpustat.nice; | ||
362 | tmp_idle_ticks = total_idle_ticks - | 357 | tmp_idle_ticks = total_idle_ticks - |
363 | j_dbs_info->prev_cpu_idle_up; | 358 | j_dbs_info->prev_cpu_idle_up; |
364 | j_dbs_info->prev_cpu_idle_up = total_idle_ticks; | 359 | j_dbs_info->prev_cpu_idle_up = total_idle_ticks; |
@@ -373,6 +368,8 @@ static void dbs_check_cpu(int cpu) | |||
373 | usecs_to_jiffies(dbs_tuners_ins.sampling_rate); | 368 | usecs_to_jiffies(dbs_tuners_ins.sampling_rate); |
374 | 369 | ||
375 | if (idle_ticks < up_idle_ticks) { | 370 | if (idle_ticks < up_idle_ticks) { |
371 | down_skip[cpu] = 0; | ||
372 | this_dbs_info->prev_cpu_idle_down = total_idle_ticks; | ||
376 | /* if we are already at full speed then break out early */ | 373 | /* if we are already at full speed then break out early */ |
377 | if (requested_freq[cpu] == policy->max) | 374 | if (requested_freq[cpu] == policy->max) |
378 | return; | 375 | return; |
@@ -389,8 +386,6 @@ static void dbs_check_cpu(int cpu) | |||
389 | 386 | ||
390 | __cpufreq_driver_target(policy, requested_freq[cpu], | 387 | __cpufreq_driver_target(policy, requested_freq[cpu], |
391 | CPUFREQ_RELATION_H); | 388 | CPUFREQ_RELATION_H); |
392 | down_skip[cpu] = 0; | ||
393 | this_dbs_info->prev_cpu_idle_down = total_idle_ticks; | ||
394 | return; | 389 | return; |
395 | } | 390 | } |
396 | 391 | ||
@@ -399,11 +394,7 @@ static void dbs_check_cpu(int cpu) | |||
399 | if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor) | 394 | if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor) |
400 | return; | 395 | return; |
401 | 396 | ||
402 | total_idle_ticks = kstat_cpu(cpu).cpustat.idle + | 397 | total_idle_ticks = this_dbs_info->prev_cpu_idle_up; |
403 | kstat_cpu(cpu).cpustat.iowait; | ||
404 | /* consider 'nice' too? */ | ||
405 | if (dbs_tuners_ins.ignore_nice == 0) | ||
406 | total_idle_ticks += kstat_cpu(cpu).cpustat.nice; | ||
407 | idle_ticks = total_idle_ticks - | 398 | idle_ticks = total_idle_ticks - |
408 | this_dbs_info->prev_cpu_idle_down; | 399 | this_dbs_info->prev_cpu_idle_down; |
409 | this_dbs_info->prev_cpu_idle_down = total_idle_ticks; | 400 | this_dbs_info->prev_cpu_idle_down = total_idle_ticks; |
@@ -417,11 +408,7 @@ static void dbs_check_cpu(int cpu) | |||
417 | 408 | ||
418 | j_dbs_info = &per_cpu(cpu_dbs_info, j); | 409 | j_dbs_info = &per_cpu(cpu_dbs_info, j); |
419 | /* Check for frequency increase */ | 410 | /* Check for frequency increase */ |
420 | total_idle_ticks = kstat_cpu(j).cpustat.idle + | 411 | total_idle_ticks = j_dbs_info->prev_cpu_idle_up; |
421 | kstat_cpu(j).cpustat.iowait; | ||
422 | /* consider 'nice' too? */ | ||
423 | if (dbs_tuners_ins.ignore_nice == 0) | ||
424 | total_idle_ticks += kstat_cpu(j).cpustat.nice; | ||
425 | tmp_idle_ticks = total_idle_ticks - | 412 | tmp_idle_ticks = total_idle_ticks - |
426 | j_dbs_info->prev_cpu_idle_down; | 413 | j_dbs_info->prev_cpu_idle_down; |
427 | j_dbs_info->prev_cpu_idle_down = total_idle_ticks; | 414 | j_dbs_info->prev_cpu_idle_down = total_idle_ticks; |
@@ -516,11 +503,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
516 | j_dbs_info = &per_cpu(cpu_dbs_info, j); | 503 | j_dbs_info = &per_cpu(cpu_dbs_info, j); |
517 | j_dbs_info->cur_policy = policy; | 504 | j_dbs_info->cur_policy = policy; |
518 | 505 | ||
519 | j_dbs_info->prev_cpu_idle_up = | 506 | j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j); |
520 | kstat_cpu(j).cpustat.idle + | ||
521 | kstat_cpu(j).cpustat.iowait + | ||
522 | ( !dbs_tuners_ins.ignore_nice | ||
523 | ? kstat_cpu(j).cpustat.nice : 0 ); | ||
524 | j_dbs_info->prev_cpu_idle_down | 507 | j_dbs_info->prev_cpu_idle_down |
525 | = j_dbs_info->prev_cpu_idle_up; | 508 | = j_dbs_info->prev_cpu_idle_up; |
526 | } | 509 | } |