author     Dave Jones <davej@redhat.com>   2005-05-31 22:03:49 -0400
committer  Dave Jones <davej@redhat.com>   2005-05-31 22:03:49 -0400
commit     9c7d269b9b05440dd0fe92d96f4e5d7e73dd7238
tree       4e4268cc4f075187135312d5243e24d3a4fcd155   /drivers/cpufreq/cpufreq_ondemand.c
parent     790d76fa979f55bfc49a6901bb911778949b582d
[CPUFREQ] ondemand,conservative governor idle_tick clean-up
[PATCH] [3/5] ondemand,conservative governor idle_tick clean-up
Clean up the ondemand and conservative governors by factoring out the
idle-tick measurement: the CPU being checked is no longer special-cased,
so every CPU of the policy goes through the same per-CPU loop.
Signed-off-by: Eric Piel <eric.piel@tremplin-utc.net>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>
Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 26 +++++---------------------
1 file changed, 5 insertions(+), 21 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index f239545ac1b8..0482bd49aba8 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -296,7 +296,6 @@ static struct attribute_group dbs_attr_group = {
 static void dbs_check_cpu(int cpu)
 {
         unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
-        unsigned int total_idle_ticks;
         unsigned int freq_down_step;
         unsigned int freq_down_sampling_rate;
         static int down_skip[NR_CPUS];
@@ -325,20 +324,12 @@ static void dbs_check_cpu(int cpu)
          */
 
         /* Check for frequency increase */
-        total_idle_ticks = get_cpu_idle_time(cpu);
-        idle_ticks = total_idle_ticks -
-                this_dbs_info->prev_cpu_idle_up;
-        this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
-
+        idle_ticks = UINT_MAX;
         for_each_cpu_mask(j, policy->cpus) {
-                unsigned int tmp_idle_ticks;
+                unsigned int tmp_idle_ticks, total_idle_ticks;
                 struct cpu_dbs_info_s *j_dbs_info;
 
-                if (j == cpu)
-                        continue;
-
                 j_dbs_info = &per_cpu(cpu_dbs_info, j);
-                /* Check for frequency increase */
                 total_idle_ticks = get_cpu_idle_time(j);
                 tmp_idle_ticks = total_idle_ticks -
                         j_dbs_info->prev_cpu_idle_up;
@@ -376,18 +367,11 @@ static void dbs_check_cpu(int cpu)
         if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
                 return;
 
-        total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
-        idle_ticks = total_idle_ticks -
-                this_dbs_info->prev_cpu_idle_down;
-        this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
-
+        idle_ticks = UINT_MAX;
         for_each_cpu_mask(j, policy->cpus) {
-                unsigned int tmp_idle_ticks;
+                unsigned int tmp_idle_ticks, total_idle_ticks;
                 struct cpu_dbs_info_s *j_dbs_info;
 
-                if (j == cpu)
-                        continue;
-
                 j_dbs_info = &per_cpu(cpu_dbs_info, j);
                 /* Check for frequency decrease */
                 total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
@@ -408,7 +392,7 @@ static void dbs_check_cpu(int cpu)
         down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
                 usecs_to_jiffies(freq_down_sampling_rate);
 
-        if (idle_ticks > down_idle_ticks ) {
+        if (idle_ticks > down_idle_ticks) {
                 /* if we are already at the lowest speed then break out early
                  * or if we 'cannot' reduce the speed as the user might want
                  * freq_step to be zero */
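For reference, a minimal user-space sketch of the idle-tick measurement as it looks after this clean-up. It is not part of the commit: NR_CPUS, cpu_dbs_info and get_cpu_idle_time() are simplified stand-ins, the per-policy CPU mask is reduced to a plain loop, and the final "keep the minimum" step is inferred from idle_ticks being seeded with UINT_MAX (that part lies outside the hunks shown above). Only the control flow mirrors the factored-out loop in dbs_check_cpu(): every CPU, including the one being checked, is handled identically.

/*
 * Stand-alone sketch (assumption-based, not kernel code): model of the
 * factored-out idle-tick measurement in dbs_check_cpu() after this patch.
 */
#include <limits.h>
#include <stdio.h>

#define NR_CPUS 4

struct cpu_dbs_info_s {
        unsigned int prev_cpu_idle_up;
};

static struct cpu_dbs_info_s cpu_dbs_info[NR_CPUS];

/* Stand-in for the kernel's get_cpu_idle_time(): fixed fake idle counters. */
static unsigned int get_cpu_idle_time(int cpu)
{
        static const unsigned int fake_idle[NR_CPUS] = { 120, 80, 200, 95 };
        return fake_idle[cpu];
}

/*
 * After the clean-up every CPU of the policy runs through the same loop:
 * idle_ticks starts at UINT_MAX and ends up as the smallest idle delta
 * observed on any CPU, with no special case for the local CPU.
 */
static unsigned int measure_idle_ticks(int ncpus)
{
        unsigned int idle_ticks = UINT_MAX;
        int j;

        for (j = 0; j < ncpus; j++) {
                unsigned int tmp_idle_ticks, total_idle_ticks;
                struct cpu_dbs_info_s *j_dbs_info = &cpu_dbs_info[j];

                total_idle_ticks = get_cpu_idle_time(j);
                tmp_idle_ticks = total_idle_ticks - j_dbs_info->prev_cpu_idle_up;
                j_dbs_info->prev_cpu_idle_up = total_idle_ticks;

                if (tmp_idle_ticks < idle_ticks)
                        idle_ticks = tmp_idle_ticks;
        }
        return idle_ticks;
}

int main(void)
{
        printf("policy-wide idle ticks: %u\n", measure_idle_ticks(NR_CPUS));
        return 0;
}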