author	Mike Travis <travis@sgi.com>	2008-07-15 17:14:37 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-18 16:03:00 -0400
commit	eb53fac5cafc4b2f8443ff064938b4494a28c54e (patch)
tree	d727fe840bc29b1e87277a4de6ea932876637a18 /arch
parent	77586c2bdad0798cb24e35de5a878e7c6b200574 (diff)
cpumask: Use optimized CPUMASK_ALLOC macros in the centrino_target
* Use the CPUMASK_ALLOC macros in the centrino_target() function.

Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
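For context, the CPUMASK_ALLOC/CPUMASK_VAR/CPUMASK_FREE helpers exist so that, on kernels built with a very large NR_CPUS, a function needing several cpumask_t scratch variables does not burn kilobytes of stack; the masks are grouped into one struct and obtained with a single kmalloc() instead. The sketch below is only an approximation of what those macros expand to (the authoritative definitions live in include/linux/cpumask.h, and the small-NR_CPUS variant keeps the struct on the stack); the EX_-prefixed names and the example function are hypothetical, added here only to show why the patch switches every use of the masks to pointer syntax.

/*
 * Illustrative sketch only -- NOT the kernel's exact macro definitions,
 * just a plausible expansion that matches how the patch uses them.
 * Real definitions: include/linux/cpumask.h.
 */
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>

#define EX_CPUMASK_ALLOC(m)	struct m *m = kmalloc(sizeof(*m), GFP_KERNEL)
#define EX_CPUMASK_VAR(v, m)	cpumask_t *v = &(m)->v
#define EX_CPUMASK_FREE(m)	kfree(m)

/* Same shape as the struct allmasks added by the patch. */
struct allmasks_example {
	cpumask_t	saved_mask;
	cpumask_t	set_mask;
};

static int cpumask_alloc_example(void)
{
	EX_CPUMASK_ALLOC(allmasks_example);		/* one heap allocation for all masks   */
	EX_CPUMASK_VAR(saved_mask, allmasks_example);	/* per-mask pointers into that struct  */
	EX_CPUMASK_VAR(set_mask, allmasks_example);

	if (allmasks_example == NULL)
		return -ENOMEM;

	cpus_clear(*set_mask);		/* masks are now reached through pointers,   */
	cpu_set(0, *set_mask);		/* hence the '*' dereferences in the patch   */

	EX_CPUMASK_FREE(allmasks_example);
	return 0;
}

With this pattern the function's stack footprint stays constant no matter how large cpumask_t grows, at the cost of one allocation and an extra -ENOMEM error path, which is exactly the shape of the changes in centrino_target() below.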
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c	73
1 file changed, 45 insertions(+), 28 deletions(-)
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index fd561bb26c60..470c016cb254 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -442,6 +442,13 @@ static int centrino_verify (struct cpufreq_policy *policy)
  *
  * Sets a new CPUFreq policy.
  */
+struct allmasks {
+	cpumask_t	online_policy_cpus;
+	cpumask_t	saved_mask;
+	cpumask_t	set_mask;
+	cpumask_t	covered_cpus;
+};
+
 static int centrino_target (struct cpufreq_policy *policy,
 			    unsigned int target_freq,
 			    unsigned int relation)
@@ -449,48 +456,55 @@ static int centrino_target (struct cpufreq_policy *policy,
 	unsigned int	newstate = 0;
 	unsigned int	msr, oldmsr = 0, h = 0, cpu = policy->cpu;
 	struct cpufreq_freqs	freqs;
-	cpumask_t	online_policy_cpus;
-	cpumask_t	saved_mask;
-	cpumask_t	set_mask;
-	cpumask_t	covered_cpus;
 	int		retval = 0;
 	unsigned int	j, k, first_cpu, tmp;
-
-	if (unlikely(centrino_model[cpu] == NULL))
-		return -ENODEV;
+	CPUMASK_ALLOC(allmasks);
+	CPUMASK_VAR(online_policy_cpus, allmasks);
+	CPUMASK_VAR(saved_mask, allmasks);
+	CPUMASK_VAR(set_mask, allmasks);
+	CPUMASK_VAR(covered_cpus, allmasks);
+
+	if (unlikely(allmasks == NULL))
+		return -ENOMEM;
+
+	if (unlikely(centrino_model[cpu] == NULL)) {
+		retval = -ENODEV;
+		goto out;
+	}
 
 	if (unlikely(cpufreq_frequency_table_target(policy,
 					centrino_model[cpu]->op_points,
 					target_freq,
 					relation,
 					&newstate))) {
-		return -EINVAL;
+		retval = -EINVAL;
+		goto out;
 	}
 
 #ifdef CONFIG_HOTPLUG_CPU
 	/* cpufreq holds the hotplug lock, so we are safe from here on */
-	cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
+	cpus_and(*online_policy_cpus, cpu_online_map, policy->cpus);
 #else
-	online_policy_cpus = policy->cpus;
+	*online_policy_cpus = policy->cpus;
 #endif
 
-	saved_mask = current->cpus_allowed;
+	*saved_mask = current->cpus_allowed;
 	first_cpu = 1;
-	cpus_clear(covered_cpus);
-	for_each_cpu_mask_nr(j, online_policy_cpus) {
+	cpus_clear(*covered_cpus);
+	for_each_cpu_mask_nr(j, *online_policy_cpus) {
 		/*
 		 * Support for SMP systems.
 		 * Make sure we are running on CPU that wants to change freq
 		 */
-		cpus_clear(set_mask);
+		cpus_clear(*set_mask);
 		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
-			cpus_or(set_mask, set_mask, online_policy_cpus);
+			cpus_or(*set_mask, *set_mask, *online_policy_cpus);
 		else
-			cpu_set(j, set_mask);
+			cpu_set(j, *set_mask);
 
-		set_cpus_allowed_ptr(current, &set_mask);
+		set_cpus_allowed_ptr(current, set_mask);
 		preempt_disable();
-		if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
+		if (unlikely(!cpu_isset(smp_processor_id(), *set_mask))) {
 			dprintk("couldn't limit to CPUs in this domain\n");
 			retval = -EAGAIN;
 			if (first_cpu) {
@@ -518,7 +532,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 			dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
 				target_freq, freqs.old, freqs.new, msr);
 
-			for_each_cpu_mask_nr(k, online_policy_cpus) {
+			for_each_cpu_mask_nr(k, *online_policy_cpus) {
 				freqs.cpu = k;
 				cpufreq_notify_transition(&freqs,
 						CPUFREQ_PRECHANGE);
@@ -537,11 +551,11 @@ static int centrino_target (struct cpufreq_policy *policy,
 			break;
 		}
 
-		cpu_set(j, covered_cpus);
+		cpu_set(j, *covered_cpus);
 		preempt_enable();
 	}
 
-	for_each_cpu_mask_nr(k, online_policy_cpus) {
+	for_each_cpu_mask_nr(k, *online_policy_cpus) {
 		freqs.cpu = k;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
@@ -554,10 +568,10 @@ static int centrino_target (struct cpufreq_policy *policy,
 	 * Best effort undo..
 	 */
 
-	if (!cpus_empty(covered_cpus)) {
+	if (!cpus_empty(*covered_cpus)) {
 		cpumask_of_cpu_ptr_declare(new_mask);
 
-		for_each_cpu_mask_nr(j, covered_cpus) {
+		for_each_cpu_mask_nr(j, *covered_cpus) {
 			cpumask_of_cpu_ptr_next(new_mask, j);
 			set_cpus_allowed_ptr(current, new_mask);
 			wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
@@ -567,19 +581,22 @@ static int centrino_target (struct cpufreq_policy *policy,
 		tmp = freqs.new;
 		freqs.new = freqs.old;
 		freqs.old = tmp;
-		for_each_cpu_mask_nr(j, online_policy_cpus) {
+		for_each_cpu_mask_nr(j, *online_policy_cpus) {
 			freqs.cpu = j;
 			cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 			cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 		}
 	}
-	set_cpus_allowed_ptr(current, &saved_mask);
-	return 0;
+	set_cpus_allowed_ptr(current, saved_mask);
+	retval = 0;
+	goto out;
 
 migrate_end:
 	preempt_enable();
-	set_cpus_allowed_ptr(current, &saved_mask);
-	return 0;
+	set_cpus_allowed_ptr(current, saved_mask);
+out:
+	CPUMASK_FREE(allmasks);
+	return retval;
 }
 
 static struct freq_attr* centrino_attr[] = {