author:    Rusty Russell <rusty@rustcorp.com.au>    2008-11-21 05:37:16 -0500
committer: Dave Jones <davej@redhat.com>            2008-12-05 15:20:10 -0500
commit:    9963d1aad40946b1b6d34f9bee8d8a1b9032ae22
tree:      de3f4ef50a8aefc180b874d20d3bdcf338d031d9
parent:    bbeba4c35c252b2e961f09ce6ebe76b2cd5e7e3e
[CPUFREQ] clean up speedstep-centrino and reduce cpumask_t usage
Impact: cleanup
1) The #ifdef CONFIG_HOTPLUG_CPU seems unnecessary these days.
2) The loop can simply skip over offline cpus, rather than creating a tmp mask.
3) set_mask is set to either a single cpu or all online cpus in a policy.
Since it's just used for set_cpus_allowed(), any offline cpus in a policy
don't matter, so we can just use cpumask_of_cpu() or the policy->cpus.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Dave Jones <davej@redhat.com>
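To make points (1) and (2) concrete before reading the diff: the old code built a temporary online_policy_cpus mask (under #ifdef CONFIG_HOTPLUG_CPU) just so it could iterate it; the new code walks policy->cpus and skips offline CPUs inline. Below is a minimal sketch of the new pattern, assuming the 2.6.28-era cpumask API (for_each_cpu_mask_nr, cpu_online); the helper name and the per-CPU work are placeholders, not part of the patch.

    /* Sketch only: kernel context, 2.6.28-era cpumask API. */
    #include <linux/cpumask.h>
    #include <linux/cpufreq.h>

    static void visit_policy_cpus(struct cpufreq_policy *policy)
    {
            unsigned int j;

            /* No temporary "online" mask and no #ifdef CONFIG_HOTPLUG_CPU:
             * walk the policy's mask directly and skip CPUs that are offline.
             * cpufreq holds the hotplug lock around ->target(), so the online
             * map cannot change underneath us. */
            for_each_cpu_mask_nr(j, policy->cpus) {
                    if (!cpu_online(j))
                            continue;
                    /* ... per-CPU work, e.g. cpufreq_notify_transition() ... */
            }
    }

This trades one cpu_online() test per iteration for a cpumask_t that no longer has to live on the stack or in the allocated allmasks block.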
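Likewise for point (3): instead of filling a scratch set_mask, the affinity mask becomes a pointer to either the policy's own mask or the single-CPU mask. A sketch under the same assumptions; bind_to_domain() is a hypothetical helper, the real code does this inline in centrino_target() as shown in the patch below.

    /* Sketch only: kernel context, 2.6.28-era cpumask API. */
    #include <linux/cpumask.h>
    #include <linux/cpufreq.h>
    #include <linux/sched.h>        /* current, set_cpus_allowed_ptr() */

    static void bind_to_domain(struct cpufreq_policy *policy, unsigned int j)
    {
            const cpumask_t *mask;

            if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
                    mask = &policy->cpus;           /* any CPU in the policy will do */
            else
                    mask = &cpumask_of_cpu(j);      /* must run on CPU j itself */

            /* The mask is only used for task affinity, so offline CPUs left in
             * policy->cpus are harmless: the scheduler never places us there. */
            set_cpus_allowed_ptr(current, mask);
    }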
-rw-r--r--   arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c   51
1 file changed, 24 insertions, 27 deletions
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index 3b5f06423e77..f0ea6fa2f53c 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -459,9 +459,7 @@ static int centrino_verify (struct cpufreq_policy *policy)
  * Sets a new CPUFreq policy.
  */
 struct allmasks {
-        cpumask_t               online_policy_cpus;
         cpumask_t               saved_mask;
-        cpumask_t               set_mask;
         cpumask_t               covered_cpus;
 };
 
@@ -475,9 +473,7 @@ static int centrino_target (struct cpufreq_policy *policy,
         int                     retval = 0;
         unsigned int            j, k, first_cpu, tmp;
         CPUMASK_ALLOC(allmasks);
-        CPUMASK_PTR(online_policy_cpus, allmasks);
         CPUMASK_PTR(saved_mask, allmasks);
-        CPUMASK_PTR(set_mask, allmasks);
         CPUMASK_PTR(covered_cpus, allmasks);
 
         if (unlikely(allmasks == NULL))
@@ -497,30 +493,28 @@ static int centrino_target (struct cpufreq_policy *policy,
                 goto out;
         }
 
-#ifdef CONFIG_HOTPLUG_CPU
-        /* cpufreq holds the hotplug lock, so we are safe from here on */
-        cpus_and(*online_policy_cpus, cpu_online_map, policy->cpus);
-#else
-        *online_policy_cpus = policy->cpus;
-#endif
-
         *saved_mask = current->cpus_allowed;
         first_cpu = 1;
         cpus_clear(*covered_cpus);
-        for_each_cpu_mask_nr(j, *online_policy_cpus) {
+        for_each_cpu_mask_nr(j, policy->cpus) {
+                const cpumask_t *mask;
+
+                /* cpufreq holds the hotplug lock, so we are safe here */
+                if (!cpu_online(j))
+                        continue;
+
                 /*
                  * Support for SMP systems.
                  * Make sure we are running on CPU that wants to change freq
                  */
-                cpus_clear(*set_mask);
                 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
-                        cpus_or(*set_mask, *set_mask, *online_policy_cpus);
+                        mask = &policy->cpus;
                 else
-                        cpu_set(j, *set_mask);
+                        mask = &cpumask_of_cpu(j);
 
-                set_cpus_allowed_ptr(current, set_mask);
+                set_cpus_allowed_ptr(current, mask);
                 preempt_disable();
-                if (unlikely(!cpu_isset(smp_processor_id(), *set_mask))) {
+                if (unlikely(!cpu_isset(smp_processor_id(), *mask))) {
                         dprintk("couldn't limit to CPUs in this domain\n");
                         retval = -EAGAIN;
                         if (first_cpu) {
@@ -548,7 +542,9 @@ static int centrino_target (struct cpufreq_policy *policy,
                         dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
                                 target_freq, freqs.old, freqs.new, msr);
 
-                        for_each_cpu_mask_nr(k, *online_policy_cpus) {
+                        for_each_cpu_mask_nr(k, policy->cpus) {
+                                if (!cpu_online(k))
+                                        continue;
                                 freqs.cpu = k;
                                 cpufreq_notify_transition(&freqs,
                                         CPUFREQ_PRECHANGE);
@@ -571,7 +567,9 @@ static int centrino_target (struct cpufreq_policy *policy,
                 preempt_enable();
         }
 
-        for_each_cpu_mask_nr(k, *online_policy_cpus) {
+        for_each_cpu_mask_nr(k, policy->cpus) {
+                if (!cpu_online(k))
+                        continue;
                 freqs.cpu = k;
                 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
         }
@@ -584,18 +582,17 @@ static int centrino_target (struct cpufreq_policy *policy,
          * Best effort undo..
          */
 
-        if (!cpus_empty(*covered_cpus))
-                for_each_cpu_mask_nr(j, *covered_cpus) {
-                        set_cpus_allowed_ptr(current,
-                                             &cpumask_of_cpu(j));
-                        wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
-                }
+        for_each_cpu_mask_nr(j, *covered_cpus) {
+                set_cpus_allowed_ptr(current, &cpumask_of_cpu(j));
+                wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+        }
 
         tmp = freqs.new;
         freqs.new = freqs.old;
         freqs.old = tmp;
-        for_each_cpu_mask_nr(j, *online_policy_cpus) {
-                freqs.cpu = j;
+        for_each_cpu_mask_nr(j, policy->cpus) {
+                if (!cpu_online(j))
+                        continue;
                 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
                 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
         }