path: root/drivers/acpi
author	Andi Kleen <andi@firstfloor.org>	2012-02-06 11:17:11 -0500
committer	Len Brown <len.brown@intel.com>	2012-03-22 02:16:14 -0400
commit	2815ab92ba3ab27556212cc306288dc95692824b (patch)
tree	5e18864dbc2f07f7da0f552ea5e4c2624d7a8fdb /drivers/acpi
parent	c16fa4f2ad19908a47c63d8fa436a1178438c7e7 (diff)
ACPI: Do cpufreq clamping for throttling per package v2
On Intel CPUs the processor typically runs at the highest frequency
requested by any logical CPU. When the system overheats, Linux first
forces the frequency to the lowest available one to lower the
temperature. However, this was done only per logical CPU, which means
all logical CPUs in a package would need to go through this before the
frequency is actually lowered. Worse, this delay actually prevents real
throttling, because the real throttle code only proceeds once the
lowest frequency has been reached.

So when a throttle event happens, force the lowest frequency for all
CPUs in the package where it happened. The state is now kept per
package, not per logical CPU. An alternative would be to do it per
cpufreq unit, but since we want to bring down the temperature of the
complete chip it's better to do it for the whole package. In principle
it may even make sense to do it for all CPUs in the system, but I kept
it at the package level for now.

With this change the frequency is actually lowered, which in turn
allows real throttling to proceed. I also removed an unnecessary
per-cpu variable initialization.

v2: Fix package mapping

Cc: <stable@vger.kernel.org>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
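To make the failure mode concrete: the package runs at the highest
frequency any of its logical CPUs requests, so clamping one logical
CPU's limit changes nothing until every sibling is clamped too. A toy
userspace model of this (not kernel code; the CPU count and
frequencies are invented for illustration):

/*
 * Toy model: the package frequency is the max over all per-CPU
 * limits, so per-logical-CPU clamping has no effect until the
 * last sibling is clamped.
 */
#include <stdio.h>

#define NCPUS	4
#define MAX_KHZ	2000000
#define MIN_KHZ	800000

static unsigned int limit_khz[NCPUS];	/* per-logical-CPU max */

static unsigned int package_freq(void)
{
	unsigned int f = 0;
	for (int i = 0; i < NCPUS; i++)
		if (limit_khz[i] > f)
			f = limit_khz[i];
	return f;			/* highest request wins */
}

int main(void)
{
	for (int i = 0; i < NCPUS; i++)
		limit_khz[i] = MAX_KHZ;

	limit_khz[0] = MIN_KHZ;		/* old: clamp only the throttled CPU */
	printf("old: package still at %u kHz\n", package_freq());

	for (int i = 0; i < NCPUS; i++)	/* new: clamp the whole package */
		limit_khz[i] = MIN_KHZ;
	printf("new: package down to %u kHz\n", package_freq());
	return 0;
}

This prints "old: package still at 2000000 kHz" versus "new: package
down to 800000 kHz", which is exactly the difference the patch makes
in the real driver.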
Diffstat (limited to 'drivers/acpi')
-rw-r--r--	drivers/acpi/processor_thermal.c	45
1 file changed, 37 insertions(+), 8 deletions(-)
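The core of the diff below is emulating per-package state with per-CPU
storage: every logical CPU is mapped to the first online CPU of its
package, so all siblings share one slot. A standalone sketch of that
mapping (userspace C; the two-package topology and both helpers are
mocks standing in for the kernel's topology_physical_package_id() and
for_each_online_cpu()):

#include <stdio.h>

#define NCPUS 8

/* invented topology: CPUs 0-3 are package 0, CPUs 4-7 are package 1 */
static int package_id(int cpu) { return cpu / 4; }

static unsigned int pctg[NCPUS];	/* stand-in for the per-cpu data */

/* mock of the patch's phys_package_first_cpu() */
static int phys_package_first_cpu(int cpu)
{
	for (int i = 0; i < NCPUS; i++)	/* stand-in for for_each_online_cpu */
		if (package_id(i) == package_id(cpu))
			return i;
	return 0;
}

#define reduction_pctg(cpu) pctg[phys_package_first_cpu(cpu)]

int main(void)
{
	reduction_pctg(6) = 2;	/* throttle state set via any sibling... */
	printf("CPU 4 sees state %u\n", reduction_pctg(4));	/* ...is 2 */
	printf("CPU 0 sees state %u\n", reduction_pctg(0));	/* still 0 */
	return 0;
}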
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 3b599abf2b40..641b5450a0db 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -57,6 +57,27 @@ ACPI_MODULE_NAME("processor_thermal");
 static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
 static unsigned int acpi_thermal_cpufreq_is_init = 0;
 
+#define reduction_pctg(cpu) \
+	per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))
+
+/*
+ * Emulate "per package data" using per cpu data (which should really be
+ * provided elsewhere)
+ *
+ * Note we can lose a CPU on cpu hotunplug, in this case we forget the state
+ * temporarily. Fortunately that's not a big issue here (I hope)
+ */
+static int phys_package_first_cpu(int cpu)
+{
+	int i;
+	int id = topology_physical_package_id(cpu);
+
+	for_each_online_cpu(i)
+		if (topology_physical_package_id(i) == id)
+			return i;
+	return 0;
+}
+
 static int cpu_has_cpufreq(unsigned int cpu)
 {
 	struct cpufreq_policy policy;
@@ -76,7 +97,7 @@ static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
 
 	max_freq = (
 		policy->cpuinfo.max_freq *
-		(100 - per_cpu(cpufreq_thermal_reduction_pctg, policy->cpu) * 20)
+		(100 - reduction_pctg(policy->cpu) * 20)
 	) / 100;
 
 	cpufreq_verify_within_limits(policy, 0, max_freq);
@@ -102,16 +123,28 @@ static int cpufreq_get_cur_state(unsigned int cpu)
 	if (!cpu_has_cpufreq(cpu))
 		return 0;
 
-	return per_cpu(cpufreq_thermal_reduction_pctg, cpu);
+	return reduction_pctg(cpu);
 }
 
 static int cpufreq_set_cur_state(unsigned int cpu, int state)
 {
+	int i;
+
 	if (!cpu_has_cpufreq(cpu))
 		return 0;
 
-	per_cpu(cpufreq_thermal_reduction_pctg, cpu) = state;
-	cpufreq_update_policy(cpu);
+	reduction_pctg(cpu) = state;
+
+	/*
+	 * Update all the CPUs in the same package because they all
+	 * contribute to the temperature and often share the same
+	 * frequency.
+	 */
+	for_each_online_cpu(i) {
+		if (topology_physical_package_id(i) ==
+		    topology_physical_package_id(cpu))
+			cpufreq_update_policy(i);
+	}
 	return 0;
 }
 
@@ -119,10 +152,6 @@ void acpi_thermal_cpufreq_init(void)
 {
 	int i;
 
-	for (i = 0; i < nr_cpu_ids; i++)
-		if (cpu_present(i))
-			per_cpu(cpufreq_thermal_reduction_pctg, i) = 0;
-
 	i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
 				      CPUFREQ_POLICY_NOTIFIER);
 	if (!i)
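As a sanity check on the clamping arithmetic in
acpi_thermal_cpufreq_notifier() above: each cooling state removes
another 20% of cpuinfo.max_freq. A quick standalone calculation (the
2 GHz base frequency is invented, and the 0-3 state range is an
assumption for illustration):

/*
 * Worked example of the clamp in acpi_thermal_cpufreq_notifier:
 *   max_freq = cpuinfo.max_freq * (100 - state * 20) / 100
 * The base frequency is hypothetical; only the formula is from
 * the driver.
 */
#include <stdio.h>

int main(void)
{
	unsigned int cpuinfo_max_freq = 2000000;	/* kHz, hypothetical */

	for (int state = 0; state <= 3; state++)
		printf("state %d -> max %u kHz\n", state,
		       cpuinfo_max_freq * (100 - state * 20) / 100);
	return 0;
}

This prints 2000000, 1600000, 1200000 and 800000 kHz for states 0-3;
the per-package cpufreq_update_policy() loop added by this patch now
applies that clamp to every sibling in the package at once.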