author		Dave Jones <davej@redhat.com>	2005-05-31 22:03:49 -0400
committer	Dave Jones <davej@redhat.com>	2005-05-31 22:03:49 -0400
commit		9c7d269b9b05440dd0fe92d96f4e5d7e73dd7238
tree		4e4268cc4f075187135312d5243e24d3a4fcd155 /drivers
parent		790d76fa979f55bfc49a6901bb911778949b582d
[CPUFREQ] ondemand,conservative governor idle_tick clean-up
[PATCH] [3/5] ondemand,conservative governor idle_tick clean-up
Clean up the ondemand and conservative governors by factoring the idle-ticks
measurement into the per-CPU loop, so the local CPU no longer needs a
separate code path (sketched below).
Signed-off-by: Eric Piel <eric.piel@tremplin-utc.net>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>
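For orientation, a minimal sketch of what the factoring amounts to, in the
shape of the post-patch "frequency increase" loop. Before the patch, each
governor sampled the calling CPU outside the loop and skipped it inside
(if (j == cpu) continue;); afterwards the loop samples every CPU in the
policy, the caller included, and the smallest idle delta wins. The last
few lines are an assumption about the surrounding governor code, which
sits just past the hunk context shown below:

	idle_ticks = UINT_MAX;
	for_each_cpu_mask(j, policy->cpus) {
		unsigned int tmp_idle_ticks, total_idle_ticks;
		struct cpu_dbs_info_s *j_dbs_info;

		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		total_idle_ticks = get_cpu_idle_time(j);
		tmp_idle_ticks = total_idle_ticks -
			j_dbs_info->prev_cpu_idle_up;

		/* Assumed from the surrounding governor code (not shown
		 * in the hunks below): record the new total and keep the
		 * smallest per-CPU idle delta as the policy-wide value. */
		j_dbs_info->prev_cpu_idle_up = total_idle_ticks;
		if (tmp_idle_ticks < idle_ticks)
			idle_ticks = tmp_idle_ticks;
	}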
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/cpufreq/cpufreq_conservative.c	26
-rw-r--r--	drivers/cpufreq/cpufreq_ondemand.c	26
2 files changed, 10 insertions(+), 42 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c503ec14765f..e1df376e709e 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -297,7 +297,6 @@ static struct attribute_group dbs_attr_group = {
 static void dbs_check_cpu(int cpu)
 {
 	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
-	unsigned int total_idle_ticks;
 	unsigned int freq_step;
 	unsigned int freq_down_sampling_rate;
 	static int down_skip[NR_CPUS];
@@ -338,19 +337,12 @@ static void dbs_check_cpu(int cpu)
 	 */
 
 	/* Check for frequency increase */
-	total_idle_ticks = get_cpu_idle_time(cpu);
-	idle_ticks = total_idle_ticks -
-		this_dbs_info->prev_cpu_idle_up;
-	this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
-
 
+	idle_ticks = UINT_MAX;
 	for_each_cpu_mask(j, policy->cpus) {
-		unsigned int tmp_idle_ticks;
+		unsigned int tmp_idle_ticks, total_idle_ticks;
 		struct cpu_dbs_info_s *j_dbs_info;
 
-		if (j == cpu)
-			continue;
-
 		j_dbs_info = &per_cpu(cpu_dbs_info, j);
 		/* Check for frequency increase */
 		total_idle_ticks = get_cpu_idle_time(j);
@@ -400,20 +392,12 @@ static void dbs_check_cpu(int cpu)
 	if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
 		return;
 
-	total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
-	idle_ticks = total_idle_ticks -
-		this_dbs_info->prev_cpu_idle_down;
-	this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
-
+	idle_ticks = UINT_MAX;
 	for_each_cpu_mask(j, policy->cpus) {
-		unsigned int tmp_idle_ticks;
+		unsigned int tmp_idle_ticks, total_idle_ticks;
 		struct cpu_dbs_info_s *j_dbs_info;
 
-		if (j == cpu)
-			continue;
-
 		j_dbs_info = &per_cpu(cpu_dbs_info, j);
-		/* Check for frequency increase */
 		total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
 		tmp_idle_ticks = total_idle_ticks -
 			j_dbs_info->prev_cpu_idle_down;
@@ -432,7 +416,7 @@ static void dbs_check_cpu(int cpu)
 	down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
 		usecs_to_jiffies(freq_down_sampling_rate);
 
-	if (idle_ticks > down_idle_ticks ) {
+	if (idle_ticks > down_idle_ticks) {
 		/* if we are already at the lowest speed then break out early
 		 * or if we 'cannot' reduce the speed as the user might want
 		 * freq_step to be zero */
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index f239545ac1b8..0482bd49aba8 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -296,7 +296,6 @@ static struct attribute_group dbs_attr_group = {
 static void dbs_check_cpu(int cpu)
 {
 	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
-	unsigned int total_idle_ticks;
 	unsigned int freq_down_step;
 	unsigned int freq_down_sampling_rate;
 	static int down_skip[NR_CPUS];
@@ -325,20 +324,12 @@ static void dbs_check_cpu(int cpu)
 	 */
 
 	/* Check for frequency increase */
-	total_idle_ticks = get_cpu_idle_time(cpu);
-	idle_ticks = total_idle_ticks -
-		this_dbs_info->prev_cpu_idle_up;
-	this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
-
+	idle_ticks = UINT_MAX;
 	for_each_cpu_mask(j, policy->cpus) {
-		unsigned int tmp_idle_ticks;
+		unsigned int tmp_idle_ticks, total_idle_ticks;
 		struct cpu_dbs_info_s *j_dbs_info;
 
-		if (j == cpu)
-			continue;
-
 		j_dbs_info = &per_cpu(cpu_dbs_info, j);
-		/* Check for frequency increase */
 		total_idle_ticks = get_cpu_idle_time(j);
 		tmp_idle_ticks = total_idle_ticks -
 			j_dbs_info->prev_cpu_idle_up;
@@ -376,18 +367,11 @@ static void dbs_check_cpu(int cpu)
 	if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
 		return;
 
-	total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
-	idle_ticks = total_idle_ticks -
-		this_dbs_info->prev_cpu_idle_down;
-	this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
-
+	idle_ticks = UINT_MAX;
 	for_each_cpu_mask(j, policy->cpus) {
-		unsigned int tmp_idle_ticks;
+		unsigned int tmp_idle_ticks, total_idle_ticks;
 		struct cpu_dbs_info_s *j_dbs_info;
 
-		if (j == cpu)
-			continue;
-
 		j_dbs_info = &per_cpu(cpu_dbs_info, j);
 		/* Check for frequency decrease */
 		total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
@@ -408,7 +392,7 @@ static void dbs_check_cpu(int cpu)
 	down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
 		usecs_to_jiffies(freq_down_sampling_rate);
 
-	if (idle_ticks > down_idle_ticks ) {
+	if (idle_ticks > down_idle_ticks) {
 		/* if we are already at the lowest speed then break out early
 		 * or if we 'cannot' reduce the speed as the user might want
 		 * freq_step to be zero */
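A note on the design choice: seeding idle_ticks with UINT_MAX before each
loop only makes sense if the loop body, just past the context shown, keeps
the minimum idle delta seen across the policy's CPUs (as assumed in the
sketch above). With that reduction in place the governor still reacts to
the busiest CPU in the policy, but the separate sampling of the calling
CPU and the if (j == cpu) continue; special case become redundant: the
caller is now just another loop iteration.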