author		Alexander Clouter <alex@digriz.org.uk>	2006-03-22 05:00:18 -0500
committer	Dominik Brodowski <linux@dominikbrodowski.net>	2006-03-26 03:18:18 -0500
commit		a159b82770ab84e1b5e0306fa65e158188492b16
tree		93c0c6cd2438e8b20015bfb3444fa930b760452c /drivers
parent		08a28e2e98aa821cf6f15f8a267beb2f33377bb9
[PATCH] cpufreq_conservative: alternative initialise approach
Venki, author of cpufreq_ondemand, came up with a neater way to move the
initialiser code out of the main loop of my code and into the point where the
governor is actually initialised.

Not only does it look cleaner, it also feels cleaner, plus it is simpler to
understand. It also saves a bunch of pointless conditional statements in the
main loop.
Signed-off-by: Alexander Clouter <alex-kernel@digriz.org.uk>
Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
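To make the pattern concrete, here is a minimal standalone C sketch of the
approach (userspace and compilable as-is; struct dbs_info, governor_start()
and check_cpu() are illustrative stand-ins, not the kernel's definitions):
per-CPU bookkeeping moves out of function-local static arrays, which forced
an init_flag test on every pass through the hot path, into per-CPU state that
the setup path seeds exactly once.

	#include <stdio.h>

	#define NR_CPUS 4	/* stand-in for the kernel's CPU count */

	/* Mirrors the fields this patch adds to struct cpu_dbs_info_s;
	 * names are illustrative, not the kernel definitions. */
	struct dbs_info {
		unsigned int enable;
		unsigned int down_skip;
		unsigned int requested_freq;
	};

	static struct dbs_info cpu_info[NR_CPUS];

	/* Setup path: runs once per governor start, so the hot path
	 * never needs an "is this initialised yet?" conditional. */
	static void governor_start(int cpu, unsigned int cur_freq)
	{
		cpu_info[cpu].down_skip = 0;
		cpu_info[cpu].requested_freq = cur_freq;
		cpu_info[cpu].enable = 1;
	}

	/* Hot path: previously began with an init_flag check and a lazy
	 * "requested_freq == 0" probe; now it can just use the state. */
	static void check_cpu(int cpu)
	{
		struct dbs_info *info = &cpu_info[cpu];

		if (!info->enable)
			return;
		info->down_skip++;
		printf("cpu%d: requested_freq=%u, down_skip=%u\n",
		       cpu, info->requested_freq, info->down_skip);
	}

	int main(void)
	{
		governor_start(0, 1000000);	/* e.g. current freq in kHz */
		check_cpu(0);
		return 0;
	}

The hunks below apply exactly this: the static down_skip[NR_CPUS] and
requested_freq[NR_CPUS] arrays become down_skip and requested_freq fields of
struct cpu_dbs_info_s, seeded in cpufreq_governor_dbs() when the governor is
started.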
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/cpufreq/cpufreq_conservative.c | 55
1 file changed, 18 insertions(+), 37 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 7498f2506ade..a152d2c46be7 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -65,6 +65,8 @@ struct cpu_dbs_info_s {
 	unsigned int prev_cpu_idle_up;
 	unsigned int prev_cpu_idle_down;
 	unsigned int enable;
+	unsigned int down_skip;
+	unsigned int requested_freq;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
@@ -297,35 +299,12 @@ static void dbs_check_cpu(int cpu)
 	unsigned int tmp_idle_ticks, total_idle_ticks;
 	unsigned int freq_step;
 	unsigned int freq_down_sampling_rate;
-	static unsigned short down_skip[NR_CPUS];
-	static unsigned int requested_freq[NR_CPUS];
-	static unsigned int init_flag = NR_CPUS;
 	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
 	struct cpufreq_policy *policy;
 
 	if (!this_dbs_info->enable)
 		return;
 
-	if ( init_flag != 0 ) {
-		for_each_cpu(init_flag) {
-			down_skip[init_flag] = 0;
-			/* I doubt a CPU exists with a freq of 0hz :) */
-			requested_freq[init_flag] = 0;
-		}
-		init_flag = 0;
-	}
-
-	/*
-	 * If its a freshly initialised cpu we setup requested_freq.  This
-	 * check could be avoided if we did not care about a first time
-	 * stunted increase in CPU speed when there is a load.  I feel we
-	 * should be initialising this to something.  The removal of a CPU
-	 * is not a problem, after a short time the CPU should settle down
-	 * to a 'natural' frequency.
-	 */
-	if (requested_freq[cpu] == 0)
-		requested_freq[cpu] = this_dbs_info->cur_policy->cur;
-
 	policy = this_dbs_info->cur_policy;
 
 	/*
@@ -360,12 +339,12 @@ static void dbs_check_cpu(int cpu)
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 
 	if (idle_ticks < up_idle_ticks) {
-		down_skip[cpu] = 0;
+		this_dbs_info->down_skip = 0;
 		this_dbs_info->prev_cpu_idle_down =
 			this_dbs_info->prev_cpu_idle_up;
 
 		/* if we are already at full speed then break out early */
-		if (requested_freq[cpu] == policy->max)
+		if (this_dbs_info->requested_freq == policy->max)
 			return;
 
 		freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
@@ -374,18 +353,18 @@ static void dbs_check_cpu(int cpu)
 		if (unlikely(freq_step == 0))
 			freq_step = 5;
 
-		requested_freq[cpu] += freq_step;
-		if (requested_freq[cpu] > policy->max)
-			requested_freq[cpu] = policy->max;
+		this_dbs_info->requested_freq += freq_step;
+		if (this_dbs_info->requested_freq > policy->max)
+			this_dbs_info->requested_freq = policy->max;
 
-		__cpufreq_driver_target(policy, requested_freq[cpu],
+		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
 			CPUFREQ_RELATION_H);
 		return;
 	}
 
 	/* Check for frequency decrease */
-	down_skip[cpu]++;
-	if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
+	this_dbs_info->down_skip++;
+	if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor)
 		return;
 
 	/* Check for frequency decrease */
@@ -399,7 +378,7 @@ static void dbs_check_cpu(int cpu)
 
 	/* Scale idle ticks by 100 and compare with up and down ticks */
 	idle_ticks *= 100;
-	down_skip[cpu] = 0;
+	this_dbs_info->down_skip = 0;
 
 	freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
 		dbs_tuners_ins.sampling_down_factor;
@@ -412,7 +391,7 @@ static void dbs_check_cpu(int cpu)
 	 * or if we 'cannot' reduce the speed as the user might want
 	 * freq_step to be zero
 	 */
-	if (requested_freq[cpu] == policy->min
+	if (this_dbs_info->requested_freq == policy->min
 			|| dbs_tuners_ins.freq_step == 0)
 		return;
 
@@ -422,11 +401,11 @@ static void dbs_check_cpu(int cpu)
 	if (unlikely(freq_step == 0))
 		freq_step = 5;
 
-	requested_freq[cpu] -= freq_step;
-	if (requested_freq[cpu] < policy->min)
-		requested_freq[cpu] = policy->min;
+	this_dbs_info->requested_freq -= freq_step;
+	if (this_dbs_info->requested_freq < policy->min)
+		this_dbs_info->requested_freq = policy->min;
 
-	__cpufreq_driver_target(policy, requested_freq[cpu],
+	__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
 		CPUFREQ_RELATION_H);
 	return;
 }
@@ -489,6 +468,8 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 				= j_dbs_info->prev_cpu_idle_up;
 		}
 		this_dbs_info->enable = 1;
+		this_dbs_info->down_skip = 0;
+		this_dbs_info->requested_freq = policy->cur;
 		sysfs_create_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable++;
 		/*