author	Rusty Russell <rusty@rustcorp.com.au>	2009-06-11 09:29:58 -0400
committer	Dave Jones <davej@redhat.com>	2009-06-15 11:49:43 -0400
commit	e3f996c26ff6c4c084aaaa64dce6e54d31f517be (patch)
tree	d49ec5b8935a91766678d7b671ffff25ce597670 /arch
parent	394122ab144dae4b276d74644a2f11c44a60ac5c (diff)
[CPUFREQ] cpumask: avoid cpumask games in arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
Impact: don't play with current's cpumask
It's generally a very bad idea to mug some process's cpumask: it could
legitimately and reasonably be changed by root, which could break us
(if done before our code) or them (if we restore the wrong value).
Use rdmsr_on_cpu and wrmsr_on_cpu instead.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
To: cpufreq@vger.kernel.org
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Dave Jones <davej@redhat.com>
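
To make the conversion concrete, here is a minimal before/after sketch of the get_cur_freq() read path. This is illustrative only: get_perf_status_old() and get_perf_status_new() are made-up names, not functions in the driver.

/*
 * Sketch of the pattern this patch removes vs. the one it adopts.
 */
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <asm/msr.h>

/* Removed pattern: migrate the current task to the target CPU. */
static u32 get_perf_status_old(unsigned int cpu)
{
	cpumask_t saved_mask = current->cpus_allowed;
	u32 l, h;

	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
	if (smp_processor_id() != cpu)
		return 0;	/* migration failed */
	rdmsr(MSR_IA32_PERF_STATUS, l, h);
	/* Restoring saved_mask can clobber a concurrent
	 * sched_setaffinity() done by root. */
	set_cpus_allowed_ptr(current, &saved_mask);
	return l;
}

/* New pattern: ask the target CPU to do the read for us. */
static u32 get_perf_status_new(unsigned int cpu)
{
	u32 l, h;

	rdmsr_on_cpu(cpu, MSR_IA32_PERF_STATUS, &l, &h);
	return l;
}

rdmsr_on_cpu()/wrmsr_on_cpu() run the MSR access on the target CPU via smp_call_function_single(), so the calling task's affinity mask is never touched and root's changes to it can no longer be lost.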
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c	60
1 file changed, 17 insertions(+), 43 deletions(-)
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index 55c831ed71ce..8d672ef162ce 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -323,14 +323,8 @@ static unsigned int get_cur_freq(unsigned int cpu)
 {
 	unsigned l, h;
 	unsigned clock_freq;
-	cpumask_t saved_mask;
 
-	saved_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-	if (smp_processor_id() != cpu)
-		return 0;
-
-	rdmsr(MSR_IA32_PERF_STATUS, l, h);
+	rdmsr_on_cpu(cpu, MSR_IA32_PERF_STATUS, &l, &h);
 	clock_freq = extract_clock(l, cpu, 0);
 
 	if (unlikely(clock_freq == 0)) {
@@ -340,11 +334,9 @@ static unsigned int get_cur_freq(unsigned int cpu)
 		 * P-state transition (like TM2). Get the last freq set
 		 * in PERF_CTL.
 		 */
-		rdmsr(MSR_IA32_PERF_CTL, l, h);
+		rdmsr_on_cpu(cpu, MSR_IA32_PERF_CTL, &l, &h);
 		clock_freq = extract_clock(l, cpu, 1);
 	}
-
-	set_cpus_allowed_ptr(current, &saved_mask);
 	return clock_freq;
 }
 
@@ -467,15 +459,10 @@ static int centrino_target (struct cpufreq_policy *policy,
 	struct cpufreq_freqs freqs;
 	int retval = 0;
 	unsigned int j, k, first_cpu, tmp;
-	cpumask_var_t saved_mask, covered_cpus;
+	cpumask_var_t covered_cpus;
 
-	if (unlikely(!alloc_cpumask_var(&saved_mask, GFP_KERNEL)))
-		return -ENOMEM;
-	if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) {
-		free_cpumask_var(saved_mask);
+	if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)))
 		return -ENOMEM;
-	}
-	cpumask_copy(saved_mask, &current->cpus_allowed);
 
 	if (unlikely(per_cpu(centrino_model, cpu) == NULL)) {
 		retval = -ENODEV;
@@ -493,7 +480,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 
 	first_cpu = 1;
 	for_each_cpu(j, policy->cpus) {
-		const struct cpumask *mask;
+		int good_cpu;
 
 		/* cpufreq holds the hotplug lock, so we are safe here */
 		if (!cpu_online(j))
@@ -504,32 +491,30 @@ static int centrino_target (struct cpufreq_policy *policy,
 		 * Make sure we are running on CPU that wants to change freq
 		 */
 		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
-			mask = policy->cpus;
+			good_cpu = cpumask_any_and(policy->cpus,
+						   cpu_online_mask);
 		else
-			mask = cpumask_of(j);
+			good_cpu = j;
 
-		set_cpus_allowed_ptr(current, mask);
-		preempt_disable();
-		if (unlikely(!cpu_isset(smp_processor_id(), *mask))) {
+		if (good_cpu >= nr_cpu_ids) {
 			dprintk("couldn't limit to CPUs in this domain\n");
 			retval = -EAGAIN;
 			if (first_cpu) {
 				/* We haven't started the transition yet. */
-				goto migrate_end;
+				goto out;
 			}
-			preempt_enable();
 			break;
 		}
 
 		msr = per_cpu(centrino_model, cpu)->op_points[newstate].index;
 
 		if (first_cpu) {
-			rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+			rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h);
 			if (msr == (oldmsr & 0xffff)) {
 				dprintk("no change needed - msr was and needs "
 					"to be %x\n", oldmsr);
 				retval = 0;
-				goto migrate_end;
+				goto out;
 			}
 
 			freqs.old = extract_clock(oldmsr, cpu, 0);
@@ -553,14 +538,11 @@ static int centrino_target (struct cpufreq_policy *policy,
 			oldmsr |= msr;
 		}
 
-		wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
-		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
-			preempt_enable();
+		wrmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, oldmsr, h);
+		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
 			break;
-		}
 
-		cpu_set(j, *covered_cpus);
-		preempt_enable();
+		cpumask_set_cpu(j, covered_cpus);
 	}
 
 	for_each_cpu(k, policy->cpus) {
@@ -578,10 +560,8 @@ static int centrino_target (struct cpufreq_policy *policy,
 		 * Best effort undo..
 		 */
 
-		for_each_cpu_mask_nr(j, *covered_cpus) {
-			set_cpus_allowed_ptr(current, &cpumask_of_cpu(j));
-			wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
-		}
+		for_each_cpu(j, covered_cpus)
+			wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h);
 
 		tmp = freqs.new;
 		freqs.new = freqs.old;
@@ -593,15 +573,9 @@ static int centrino_target (struct cpufreq_policy *policy,
 			cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 		}
 	}
-	set_cpus_allowed_ptr(current, saved_mask);
 	retval = 0;
-	goto out;
 
-migrate_end:
-	preempt_enable();
-	set_cpus_allowed_ptr(current, saved_mask);
 out:
-	free_cpumask_var(saved_mask);
 	free_cpumask_var(covered_cpus);
 	return retval;
 }
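
The subtlest part of the conversion is the CPUFREQ_SHARED_TYPE_ANY case: rather than binding current to policy->cpus and checking which CPU the scheduler picked, centrino_target() now selects a target with cpumask_any_and() and treats a result >= nr_cpu_ids as "no online CPU in this domain". A minimal sketch of that idiom; pick_online_cpu() is a hypothetical helper, not part of the patch.

#include <linux/cpumask.h>

/*
 * Return an arbitrary online CPU from @candidates, or -EAGAIN if the
 * intersection with cpu_online_mask is empty -- the same check the
 * patched centrino_target() does with good_cpu >= nr_cpu_ids.
 */
static int pick_online_cpu(const struct cpumask *candidates)
{
	unsigned int cpu = cpumask_any_and(candidates, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EAGAIN;
	return cpu;
}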