| field | value | date |
|---|---|---|
| author | Alexander Clouter <alex@digriz.org.uk> | 2006-03-22 04:59:16 -0500 |
| committer | Dominik Brodowski <linux@dominikbrodowski.net> | 2006-03-26 03:14:54 -0500 |
| commit | 08a28e2e98aa821cf6f15f8a267beb2f33377bb9 | |
| tree | c1bc076d41c50e76cca7e1af023d1d1ace1b8326 | |
| parent | e8a02572252f9115c2b8296c40fd8b985f06f872 | |
[PATCH] cpufreq_conservative: make for_each_cpu() safe
All these changes should make cpufreq_conservative safe with respect to the x86
for_each_cpu() cpumask.h changes.
Whilst making it safe, a number of pointless for loops related to the CPU
masks were removed. I was never comfortable with all those for loops,
especially as each poll of a single CPU iterated over the same data again and
again for every CPU in the mask: an O(n^2) cost on frequency scaling.
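To make the complexity point concrete, here is a minimal user-space sketch of the two polling shapes (plain C; `NCPUS`, `get_idle_ticks()` and the `poll_*` helpers are invented stand-ins, not kernel APIs): the old shape rescans every CPU in the mask on each per-CPU poll, while the reworked shape touches only the polled CPU's own bookkeeping.

```c
#include <limits.h>
#include <stdio.h>

#define NCPUS 4

static unsigned int prev_idle_old[NCPUS];
static unsigned int prev_idle_new[NCPUS];

/* Stand-in for the kernel's get_cpu_idle_time(). */
static unsigned int get_idle_ticks(int cpu)
{
	return 100u * (unsigned int)(cpu + 1);
}

/* Old shape: every poll of one CPU walks the whole mask, so a full
 * sampling round over n CPUs does O(n^2) work. */
static unsigned int poll_old(int cpu)
{
	unsigned int idle_ticks = UINT_MAX;
	int j;

	(void)cpu;	/* the polled CPU is ignored; all CPUs are rescanned */
	for (j = 0; j < NCPUS; j++) {
		unsigned int total = get_idle_ticks(j);
		unsigned int tmp = total - prev_idle_old[j];

		prev_idle_old[j] = total;
		if (tmp < idle_ticks)
			idle_ticks = tmp;
	}
	return idle_ticks;
}

/* New shape: a poll touches only the polled CPU's state, so a full
 * sampling round is O(n). */
static unsigned int poll_new(int cpu)
{
	unsigned int total = get_idle_ticks(cpu);
	unsigned int tmp = total - prev_idle_new[cpu];

	prev_idle_new[cpu] = total;
	return tmp;
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)
		printf("cpu%d: old=%u new=%u\n",
		       cpu, poll_old(cpu), poll_new(cpu));
	return 0;
}
```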
The approach I use is to assume by default that no CPUs exist, setting each
requested_freq to zero as a kind of "uninitialised" flag; the reasoning is in
the source ;) If a CPU is queried while its requested_freq is still zero, the
variable is initialised to the current frequency and processing then continues
as if nothing had happened, giving the same net effect as before.
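The zero-as-a-flag idea is ordinary lazy initialisation on first touch. A minimal sketch follows (user-space C mirroring the requested_freq logic in the diff below rather than quoting it; `cur_freq()` and `poll()` are invented names):

```c
#include <stdio.h>

#define NR_CPUS 4

/* Zero doubles as the "never polled" marker, on the grounds that no
 * real CPU runs at 0 Hz.  A CPU that appears later simply initialises
 * itself on its first poll; no global init pass is needed. */
static unsigned int requested_freq[NR_CPUS];

/* Stand-in for reading policy->cur. */
static unsigned int cur_freq(int cpu)
{
	return 1000u + (unsigned int)cpu;
}

static void poll(int cpu)
{
	if (requested_freq[cpu] == 0)	/* first poll of this CPU */
		requested_freq[cpu] = cur_freq(cpu);
	printf("cpu%d requested_freq=%u\n", cpu, requested_freq[cpu]);
}

int main(void)
{
	poll(2);	/* lazily initialised on first touch */
	poll(2);	/* value persists on later polls */
	return 0;
}
```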
Signed-off-by: Alexander Clouter <alex-kernel@digriz.org.uk>
Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 91
1 file changed, 42 insertions(+), 49 deletions(-)
```diff
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 3ca3cf061642..7498f2506ade 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -294,31 +294,40 @@ static struct attribute_group dbs_attr_group = {
 static void dbs_check_cpu(int cpu)
 {
 	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
+	unsigned int tmp_idle_ticks, total_idle_ticks;
 	unsigned int freq_step;
 	unsigned int freq_down_sampling_rate;
-	static int down_skip[NR_CPUS];
-	static int requested_freq[NR_CPUS];
-	static unsigned short init_flag = 0;
-	struct cpu_dbs_info_s *this_dbs_info;
-	struct cpu_dbs_info_s *dbs_info;
-
+	static unsigned short down_skip[NR_CPUS];
+	static unsigned int requested_freq[NR_CPUS];
+	static unsigned int init_flag = NR_CPUS;
+	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
 	struct cpufreq_policy *policy;
-	unsigned int j;
 
-	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
 	if (!this_dbs_info->enable)
 		return;
 
-	policy = this_dbs_info->cur_policy;
-
-	if ( init_flag == 0 ) {
-		for_each_online_cpu(j) {
-			dbs_info = &per_cpu(cpu_dbs_info, j);
-			requested_freq[j] = dbs_info->cur_policy->cur;
+	if ( init_flag != 0 ) {
+		for_each_cpu(init_flag) {
+			down_skip[init_flag] = 0;
+			/* I doubt a CPU exists with a freq of 0hz :) */
+			requested_freq[init_flag] = 0;
 		}
-		init_flag = 1;
+		init_flag = 0;
 	}
 
+	/*
+	 * If its a freshly initialised cpu we setup requested_freq.  This
+	 * check could be avoided if we did not care about a first time
+	 * stunted increase in CPU speed when there is a load.  I feel we
+	 * should be initialising this to something.  The removal of a CPU
+	 * is not a problem, after a short time the CPU should settle down
+	 * to a 'natural' frequency.
+	 */
+	if (requested_freq[cpu] == 0)
+		requested_freq[cpu] = this_dbs_info->cur_policy->cur;
+
+	policy = this_dbs_info->cur_policy;
+
 	/*
 	 * The default safe range is 20% to 80%
 	 * Every sampling_rate, we check
@@ -335,20 +344,15 @@ static void dbs_check_cpu(int cpu)
 
 	/* Check for frequency increase */
 	idle_ticks = UINT_MAX;
-	for_each_cpu_mask(j, policy->cpus) {
-		unsigned int tmp_idle_ticks, total_idle_ticks;
-		struct cpu_dbs_info_s *j_dbs_info;
 
-		j_dbs_info = &per_cpu(cpu_dbs_info, j);
-		/* Check for frequency increase */
-		total_idle_ticks = get_cpu_idle_time(j);
-		tmp_idle_ticks = total_idle_ticks -
-			j_dbs_info->prev_cpu_idle_up;
-		j_dbs_info->prev_cpu_idle_up = total_idle_ticks;
-
-		if (tmp_idle_ticks < idle_ticks)
-			idle_ticks = tmp_idle_ticks;
-	}
+	/* Check for frequency increase */
+	total_idle_ticks = get_cpu_idle_time(cpu);
+	tmp_idle_ticks = total_idle_ticks -
+		this_dbs_info->prev_cpu_idle_up;
+	this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
+
+	if (tmp_idle_ticks < idle_ticks)
+		idle_ticks = tmp_idle_ticks;
 
 	/* Scale idle ticks by 100 and compare with up and down ticks */
 	idle_ticks *= 100;
@@ -357,13 +361,9 @@ static void dbs_check_cpu(int cpu)
 
 	if (idle_ticks < up_idle_ticks) {
 		down_skip[cpu] = 0;
-		for_each_cpu_mask(j, policy->cpus) {
-			struct cpu_dbs_info_s *j_dbs_info;
+		this_dbs_info->prev_cpu_idle_down =
+			this_dbs_info->prev_cpu_idle_up;
 
-			j_dbs_info = &per_cpu(cpu_dbs_info, j);
-			j_dbs_info->prev_cpu_idle_down =
-				j_dbs_info->prev_cpu_idle_up;
-		}
 		/* if we are already at full speed then break out early */
 		if (requested_freq[cpu] == policy->max)
 			return;
@@ -388,21 +388,14 @@ static void dbs_check_cpu(int cpu)
 	if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
 		return;
 
-	idle_ticks = UINT_MAX;
-	for_each_cpu_mask(j, policy->cpus) {
-		unsigned int tmp_idle_ticks, total_idle_ticks;
-		struct cpu_dbs_info_s *j_dbs_info;
+	/* Check for frequency decrease */
+	total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
+	tmp_idle_ticks = total_idle_ticks -
+		this_dbs_info->prev_cpu_idle_down;
+	this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
 
-		j_dbs_info = &per_cpu(cpu_dbs_info, j);
-		/* Check for frequency decrease */
-		total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
-		tmp_idle_ticks = total_idle_ticks -
-			j_dbs_info->prev_cpu_idle_down;
-		j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
-
-		if (tmp_idle_ticks < idle_ticks)
-			idle_ticks = tmp_idle_ticks;
-	}
+	if (tmp_idle_ticks < idle_ticks)
+		idle_ticks = tmp_idle_ticks;
 
 	/* Scale idle ticks by 100 and compare with up and down ticks */
 	idle_ticks *= 100;
@@ -491,7 +484,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
-			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
+			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu);
 			j_dbs_info->prev_cpu_idle_down
 				= j_dbs_info->prev_cpu_idle_up;
 		}
```
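One subtlety in the first hunk: the init loop reuses init_flag itself as the for_each_cpu() iterator, and in kernels of this vintage for_each_cpu() walked every *possible* CPU (cpu_possible_map) rather than only the online ones. Zeroing requested_freq for all possible CPUs means a CPU that only comes online later still hits the requested_freq[cpu] == 0 check on its first poll and initialises itself lazily, which is what makes the governor safe against the for_each_cpu()/cpumask.h changes named in the subject line.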
