Diffstat (limited to 'arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c')
-rw-r--r--	arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c	101
1 file changed, 53 insertions(+), 48 deletions(-)
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 19f6b9d27e83..208ecf6643df 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -65,13 +65,18 @@ enum {
 struct acpi_cpufreq_data {
 	struct acpi_processor_performance *acpi_data;
 	struct cpufreq_frequency_table *freq_table;
-	unsigned int max_freq;
 	unsigned int resume;
 	unsigned int cpu_feature;
 };
 
 static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
 
+struct acpi_msr_data {
+	u64 saved_aperf, saved_mperf;
+};
+
+static DEFINE_PER_CPU(struct acpi_msr_data, msr_data);
+
 DEFINE_TRACE(power_mark);
 
 /* acpi_perf_data is a pointer to percpu data. */
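
The new per-CPU msr_data slot keeps the last APERF/MPERF readings so that later samples can be turned into deltas instead of zeroing the MSRs. A minimal sketch of the DEFINE_PER_CPU()/per_cpu() pattern involved, with hypothetical names (example_counters and example_aperf_delta are not part of the patch):

/*
 * Sketch only: per-CPU storage as used by msr_data above.
 * example_counters and example_aperf_delta are hypothetical names.
 */
#include <linux/types.h>
#include <linux/percpu.h>

struct example_counters {
	u64 saved_aperf, saved_mperf;
};

static DEFINE_PER_CPU(struct example_counters, example_counters);

/* Return how far APERF moved since the last sample and remember the new value */
static u64 example_aperf_delta(int cpu, u64 new_aperf)
{
	u64 delta = new_aperf - per_cpu(example_counters, cpu).saved_aperf;

	per_cpu(example_counters, cpu).saved_aperf = new_aperf;
	return delta;
}
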
@@ -152,7 +157,8 @@ struct drv_cmd {
 	u32 val;
 };
 
-static long do_drv_read(void *_cmd)
+/* Called via smp_call_function_single(), on the target CPU */
+static void do_drv_read(void *_cmd)
 {
 	struct drv_cmd *cmd = _cmd;
 	u32 h;
@@ -169,10 +175,10 @@ static long do_drv_read(void *_cmd)
 	default:
 		break;
 	}
-	return 0;
 }
 
-static long do_drv_write(void *_cmd)
+/* Called via smp_call_function_many(), on the target CPUs */
+static void do_drv_write(void *_cmd)
 {
 	struct drv_cmd *cmd = _cmd;
 	u32 lo, hi;
@@ -191,23 +197,24 @@ static long do_drv_write(void *_cmd)
 	default:
 		break;
 	}
-	return 0;
 }
 
 static void drv_read(struct drv_cmd *cmd)
 {
 	cmd->val = 0;
 
-	work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd);
+	smp_call_function_single(cpumask_any(cmd->mask), do_drv_read, cmd, 1);
 }
 
 static void drv_write(struct drv_cmd *cmd)
 {
-	unsigned int i;
+	int this_cpu;
 
-	for_each_cpu(i, cmd->mask) {
-		work_on_cpu(i, do_drv_write, cmd);
-	}
+	this_cpu = get_cpu();
+	if (cpumask_test_cpu(this_cpu, cmd->mask))
+		do_drv_write(cmd);
+	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
+	put_cpu();
 }
 
 static u32 get_cur_val(const struct cpumask *mask)
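
For context, the cross-CPU call pattern the driver switches to works roughly as sketched below. smp_call_function_many() does not run the callback on the calling CPU even when it is in the mask, which is why drv_write() above calls do_drv_write() locally between get_cpu() and put_cpu(). The struct and function names in the sketch (example_cmd, example_write) are hypothetical:

/*
 * Sketch only: the get_cpu()/smp_call_function_many() pattern used by
 * drv_write() above.  example_cmd and example_write are hypothetical.
 */
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <asm/msr.h>

struct example_cmd {
	const struct cpumask *mask;
	u32 msr, lo, hi;
};

/* Runs on each target CPU via IPI (or directly on the local CPU) */
static void example_write(void *_cmd)
{
	struct example_cmd *cmd = _cmd;

	wrmsr(cmd->msr, cmd->lo, cmd->hi);
}

static void example_write_on_mask(struct example_cmd *cmd)
{
	int this_cpu = get_cpu();	/* disable preemption, get own CPU id */

	/* smp_call_function_many() skips the calling CPU, so handle it here */
	if (cpumask_test_cpu(this_cpu, cmd->mask))
		example_write(cmd);
	smp_call_function_many(cmd->mask, example_write, cmd, 1);
	put_cpu();
}
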
@@ -241,28 +248,23 @@ static u32 get_cur_val(const struct cpumask *mask)
 	return cmd.val;
 }
 
-struct perf_cur {
+struct perf_pair {
 	union {
 		struct {
 			u32 lo;
 			u32 hi;
 		} split;
 		u64 whole;
-	} aperf_cur, mperf_cur;
+	} aperf, mperf;
 };
 
-
-static long read_measured_perf_ctrs(void *_cur)
+/* Called via smp_call_function_single(), on the target CPU */
+static void read_measured_perf_ctrs(void *_cur)
 {
-	struct perf_cur *cur = _cur;
-
-	rdmsr(MSR_IA32_APERF, cur->aperf_cur.split.lo, cur->aperf_cur.split.hi);
-	rdmsr(MSR_IA32_MPERF, cur->mperf_cur.split.lo, cur->mperf_cur.split.hi);
+	struct perf_pair *cur = _cur;
 
-	wrmsr(MSR_IA32_APERF, 0, 0);
-	wrmsr(MSR_IA32_MPERF, 0, 0);
-
-	return 0;
+	rdmsr(MSR_IA32_APERF, cur->aperf.split.lo, cur->aperf.split.hi);
+	rdmsr(MSR_IA32_MPERF, cur->mperf.split.lo, cur->mperf.split.hi);
 }
 
 /*
@@ -281,58 +283,63 @@ static long read_measured_perf_ctrs(void *_cur)
 static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 				      unsigned int cpu)
 {
-	struct perf_cur cur;
+	struct perf_pair readin, cur;
 	unsigned int perf_percent;
 	unsigned int retval;
 
-	if (!work_on_cpu(cpu, read_measured_perf_ctrs, &cur))
+	if (smp_call_function_single(cpu, read_measured_perf_ctrs, &readin, 1))
 		return 0;
 
+	cur.aperf.whole = readin.aperf.whole -
+				per_cpu(msr_data, cpu).saved_aperf;
+	cur.mperf.whole = readin.mperf.whole -
+				per_cpu(msr_data, cpu).saved_mperf;
+	per_cpu(msr_data, cpu).saved_aperf = readin.aperf.whole;
+	per_cpu(msr_data, cpu).saved_mperf = readin.mperf.whole;
+
 #ifdef __i386__
 	/*
 	 * We dont want to do 64 bit divide with 32 bit kernel
 	 * Get an approximate value. Return failure in case we cannot get
 	 * an approximate value.
 	 */
-	if (unlikely(cur.aperf_cur.split.hi || cur.mperf_cur.split.hi)) {
+	if (unlikely(cur.aperf.split.hi || cur.mperf.split.hi)) {
 		int shift_count;
 		u32 h;
 
-		h = max_t(u32, cur.aperf_cur.split.hi, cur.mperf_cur.split.hi);
+		h = max_t(u32, cur.aperf.split.hi, cur.mperf.split.hi);
 		shift_count = fls(h);
 
-		cur.aperf_cur.whole >>= shift_count;
-		cur.mperf_cur.whole >>= shift_count;
+		cur.aperf.whole >>= shift_count;
+		cur.mperf.whole >>= shift_count;
 	}
 
-	if (((unsigned long)(-1) / 100) < cur.aperf_cur.split.lo) {
+	if (((unsigned long)(-1) / 100) < cur.aperf.split.lo) {
 		int shift_count = 7;
-		cur.aperf_cur.split.lo >>= shift_count;
-		cur.mperf_cur.split.lo >>= shift_count;
+		cur.aperf.split.lo >>= shift_count;
+		cur.mperf.split.lo >>= shift_count;
 	}
 
-	if (cur.aperf_cur.split.lo && cur.mperf_cur.split.lo)
-		perf_percent = (cur.aperf_cur.split.lo * 100) /
-				cur.mperf_cur.split.lo;
+	if (cur.aperf.split.lo && cur.mperf.split.lo)
+		perf_percent = (cur.aperf.split.lo * 100) / cur.mperf.split.lo;
 	else
 		perf_percent = 0;
 
 #else
-	if (unlikely(((unsigned long)(-1) / 100) < cur.aperf_cur.whole)) {
+	if (unlikely(((unsigned long)(-1) / 100) < cur.aperf.whole)) {
 		int shift_count = 7;
-		cur.aperf_cur.whole >>= shift_count;
-		cur.mperf_cur.whole >>= shift_count;
+		cur.aperf.whole >>= shift_count;
+		cur.mperf.whole >>= shift_count;
 	}
 
-	if (cur.aperf_cur.whole && cur.mperf_cur.whole)
-		perf_percent = (cur.aperf_cur.whole * 100) /
-				cur.mperf_cur.whole;
+	if (cur.aperf.whole && cur.mperf.whole)
+		perf_percent = (cur.aperf.whole * 100) / cur.mperf.whole;
 	else
 		perf_percent = 0;
 
 #endif
 
-	retval = per_cpu(drv_data, policy->cpu)->max_freq * perf_percent / 100;
+	retval = (policy->cpuinfo.max_freq * perf_percent) / 100;
 
 	return retval;
 }
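
The arithmetic behind the reworked get_measured_perf() is: APERF advances at the actual delivered frequency while MPERF advances at the maximum (P0) rate, so the ratio of their deltas since the previous read gives the fraction of maximum frequency delivered. A runnable user-space sketch with made-up sample values (all numbers are illustrative, not from the patch):

/*
 * Sketch only: the delta arithmetic behind get_measured_perf(), done in
 * user space with made-up counter values.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t saved_aperf = 1000000, saved_mperf = 2000000;	/* previous sample */
	uint64_t aperf = 1600000, mperf = 3000000;		/* current sample */
	unsigned int max_freq = 2400000;			/* P0 frequency in kHz */

	uint64_t delta_aperf = aperf - saved_aperf;	/* counts at delivered speed */
	uint64_t delta_mperf = mperf - saved_mperf;	/* counts at maximum speed */

	/* percentage of the maximum frequency actually delivered */
	unsigned int perf_percent = delta_mperf ?
		(unsigned int)(delta_aperf * 100 / delta_mperf) : 0;

	printf("effective frequency: %u kHz (%u%% of max)\n",
	       (max_freq * perf_percent) / 100, perf_percent);
	return 0;
}
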
@@ -685,16 +692,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
 	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
 	    policy->cpuinfo.transition_latency > 20 * 1000) {
-		static int print_once;
 		policy->cpuinfo.transition_latency = 20 * 1000;
-		if (!print_once) {
-			print_once = 1;
-			printk(KERN_INFO "Capping off P-state tranision latency"
-				" at 20 uS\n");
-		}
+		printk_once(KERN_INFO "Capping off P-state tranision"
+			    " latency at 20 uS\n");
 	}
 
-	data->max_freq = perf->states[0].core_frequency * 1000;
 	/* table init */
 	for (i = 0; i < perf->state_count; i++) {
 		if (i > 0 && perf->states[i].core_frequency >=
@@ -713,6 +715,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	if (result)
 		goto err_freqfree;
 
+	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
+		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");
+
 	switch (perf->control_register.space_id) {
 	case ACPI_ADR_SPACE_SYSTEM_IO:
 		/* Current speed is unknown and not detectable by IO port */