path: root/drivers/acpi/processor_throttling.c
author     Zhao Yakui <yakui.zhao@intel.com>   2007-11-29 03:22:43 -0500
committer  Len Brown <len.brown@intel.com>     2007-12-01 23:27:15 -0500
commit     357dc4c3f13cb5c1e3b40a09cbe6ff1b0df2c7c3
tree       794d39137bae2073423f93b3b14717824138290c /drivers/acpi/processor_throttling.c
parent     f79f06ab9f86d7203006d2ec8992ac80df36a34e
ACPI: Delete the IRQ operation in throttling control via PTC

The IRQ operations (enable/disable) should be avoided when throttling is
controlled via the PTC method; they are replaced by migrating the calling
task to the target CPU. This fixes an oops on the T61, a regression
introduced by f79f06ab9f86, because the FixedHW support tried to read a
remote MSR with interrupts disabled.

Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
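A minimal sketch of the pattern the patch switches to, assuming the 2007-era
kernel interfaces that appear in the diff below (current->cpus_allowed,
set_cpus_allowed(), cpumask_of_cpu(), rdmsr_safe()). The helper name
read_therm_control_on_cpu() is hypothetical and only illustrates the idea:
pin the calling task to the target CPU, perform the MSR access locally, then
restore the original affinity, instead of disabling interrupts.

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <asm/msr.h>

/* Hypothetical helper: read MSR_IA32_THERM_CONTROL on a given CPU. */
static int read_therm_control_on_cpu(int cpu, u64 *value)
{
        cpumask_t saved_mask = current->cpus_allowed;
        u32 lo = 0, hi = 0;
        int ret;

        /* Migrate to the CPU whose MSR we need; no IRQ disabling involved. */
        set_cpus_allowed(current, cpumask_of_cpu(cpu));

        /* The access is now local, so the plain *_safe accessor suffices. */
        ret = rdmsr_safe(MSR_IA32_THERM_CONTROL, &lo, &hi);
        if (!ret)
                *value = ((u64)hi << 32) | lo;

        /* Restore the caller's original CPU affinity. */
        set_cpus_allowed(current, saved_mask);
        return ret;
}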
Diffstat (limited to 'drivers/acpi/processor_throttling.c')
-rw-r--r--  drivers/acpi/processor_throttling.c  36
1 file changed, 28 insertions(+), 8 deletions(-)
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index c26c61fb36c3..6742d7bc4777 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -29,6 +29,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 #include <linux/cpufreq.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
@@ -413,7 +414,7 @@ static int acpi_throttling_rdmsr(struct acpi_processor *pr,
         } else {
                 msr_low = 0;
                 msr_high = 0;
-                rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL,
+                rdmsr_safe(MSR_IA32_THERM_CONTROL,
                         (u32 *)&msr_low , (u32 *) &msr_high);
                 msr = (msr_high << 32) | msr_low;
                 *value = (acpi_integer) msr;
@@ -438,7 +439,7 @@ static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
438 "HARDWARE addr space,NOT supported yet\n"); 439 "HARDWARE addr space,NOT supported yet\n");
439 } else { 440 } else {
440 msr = value; 441 msr = value;
441 wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, 442 wrmsr_safe(MSR_IA32_THERM_CONTROL,
442 msr & 0xffffffff, msr >> 32); 443 msr & 0xffffffff, msr >> 32);
443 ret = 0; 444 ret = 0;
444 } 445 }
@@ -572,21 +573,32 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
                 return -ENODEV;

         pr->throttling.state = 0;
-        local_irq_disable();
+
         value = 0;
         ret = acpi_read_throttling_status(pr, &value);
         if (ret >= 0) {
                 state = acpi_get_throttling_state(pr, value);
                 pr->throttling.state = state;
         }
-        local_irq_enable();

         return 0;
 }

 static int acpi_processor_get_throttling(struct acpi_processor *pr)
 {
-        return pr->throttling.acpi_processor_get_throttling(pr);
+        cpumask_t saved_mask;
+        int ret;
+
+        /*
+         * Migrate task to the cpu pointed by pr.
+         */
+        saved_mask = current->cpus_allowed;
+        set_cpus_allowed(current, cpumask_of_cpu(pr->id));
+        ret = pr->throttling.acpi_processor_get_throttling(pr);
+        /* restore the previous state */
+        set_cpus_allowed(current, saved_mask);
+
+        return ret;
 }

 static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
@@ -717,21 +729,29 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
         if (state < pr->throttling_platform_limit)
                 return -EPERM;

-        local_irq_disable();
         value = 0;
         ret = acpi_get_throttling_value(pr, state, &value);
         if (ret >= 0) {
                 acpi_write_throttling_state(pr, value);
                 pr->throttling.state = state;
         }
-        local_irq_enable();

         return 0;
 }

 int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 {
-        return pr->throttling.acpi_processor_set_throttling(pr, state);
+        cpumask_t saved_mask;
+        int ret;
+        /*
+         * Migrate task to the cpu pointed by pr.
+         */
+        saved_mask = current->cpus_allowed;
+        set_cpus_allowed(current, cpumask_of_cpu(pr->id));
+        ret = pr->throttling.acpi_processor_set_throttling(pr, state);
+        /* restore the previous state */
+        set_cpus_allowed(current, saved_mask);
+        return ret;
 }

 int acpi_processor_get_throttling_info(struct acpi_processor *pr)