Diffstat (limited to 'arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c')
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 109
1 file changed, 32 insertions(+), 77 deletions(-)
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index ae9b503220ca..7bb676c533aa 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -60,7 +60,6 @@ enum {
 };
 
 #define INTEL_MSR_RANGE		(0xffff)
-#define CPUID_6_ECX_APERFMPERF_CAPABILITY	(0x1)
 
 struct acpi_cpufreq_data {
 	struct acpi_processor_performance *acpi_data;
@@ -71,11 +70,7 @@ struct acpi_cpufreq_data {
 
 static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
 
-struct acpi_msr_data {
-	u64 saved_aperf, saved_mperf;
-};
-
-static DEFINE_PER_CPU(struct acpi_msr_data, msr_data);
+static DEFINE_PER_CPU(struct aperfmperf, old_perf);
 
 DEFINE_TRACE(power_mark);
 
@@ -244,23 +239,12 @@ static u32 get_cur_val(const struct cpumask *mask)
 	return cmd.val;
 }
 
-struct perf_pair {
-	union {
-		struct {
-			u32 lo;
-			u32 hi;
-		} split;
-		u64 whole;
-	} aperf, mperf;
-};
-
 /* Called via smp_call_function_single(), on the target CPU */
 static void read_measured_perf_ctrs(void *_cur)
 {
-	struct perf_pair *cur = _cur;
+	struct aperfmperf *am = _cur;
 
-	rdmsr(MSR_IA32_APERF, cur->aperf.split.lo, cur->aperf.split.hi);
-	rdmsr(MSR_IA32_MPERF, cur->mperf.split.lo, cur->mperf.split.hi);
+	get_aperfmperf(am);
 }
 
 /*
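Note: get_aperfmperf() above is the generic helper this patch switches to; it samples both MSRs on the local CPU in one call. A minimal sketch, assuming the <asm/processor.h> definitions this driver now depends on:

struct aperfmperf {
	u64 aperf, mperf;
};

static inline void get_aperfmperf(struct aperfmperf *am)
{
	/* Sample both counters together on the current CPU so the
	 * caller can form a meaningful APERF/MPERF ratio. */
	rdmsrl(MSR_IA32_APERF, am->aperf);
	rdmsrl(MSR_IA32_MPERF, am->mperf);
}

Because rdmsrl() reads the full 64-bit value, this also removes the need for the lo/hi split that struct perf_pair existed for.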
@@ -279,63 +263,17 @@ static void read_measured_perf_ctrs(void *_cur)
 static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 				      unsigned int cpu)
 {
-	struct perf_pair readin, cur;
-	unsigned int perf_percent;
+	struct aperfmperf perf;
+	unsigned long ratio;
 	unsigned int retval;
 
-	if (smp_call_function_single(cpu, read_measured_perf_ctrs, &readin, 1))
+	if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
 		return 0;
 
-	cur.aperf.whole = readin.aperf.whole -
-				per_cpu(msr_data, cpu).saved_aperf;
-	cur.mperf.whole = readin.mperf.whole -
-				per_cpu(msr_data, cpu).saved_mperf;
-	per_cpu(msr_data, cpu).saved_aperf = readin.aperf.whole;
-	per_cpu(msr_data, cpu).saved_mperf = readin.mperf.whole;
-
-#ifdef __i386__
-	/*
-	 * We dont want to do 64 bit divide with 32 bit kernel
-	 * Get an approximate value. Return failure in case we cannot get
-	 * an approximate value.
-	 */
-	if (unlikely(cur.aperf.split.hi || cur.mperf.split.hi)) {
-		int shift_count;
-		u32 h;
-
-		h = max_t(u32, cur.aperf.split.hi, cur.mperf.split.hi);
-		shift_count = fls(h);
-
-		cur.aperf.whole >>= shift_count;
-		cur.mperf.whole >>= shift_count;
-	}
-
-	if (((unsigned long)(-1) / 100) < cur.aperf.split.lo) {
-		int shift_count = 7;
-		cur.aperf.split.lo >>= shift_count;
-		cur.mperf.split.lo >>= shift_count;
-	}
-
-	if (cur.aperf.split.lo && cur.mperf.split.lo)
-		perf_percent = (cur.aperf.split.lo * 100) / cur.mperf.split.lo;
-	else
-		perf_percent = 0;
+	ratio = calc_aperfmperf_ratio(&per_cpu(old_perf, cpu), &perf);
+	per_cpu(old_perf, cpu) = perf;
 
-#else
-	if (unlikely(((unsigned long)(-1) / 100) < cur.aperf.whole)) {
-		int shift_count = 7;
-		cur.aperf.whole >>= shift_count;
-		cur.mperf.whole >>= shift_count;
-	}
-
-	if (cur.aperf.whole && cur.mperf.whole)
-		perf_percent = (cur.aperf.whole * 100) / cur.mperf.whole;
-	else
-		perf_percent = 0;
-
-#endif
-
-	retval = (policy->cpuinfo.max_freq * perf_percent) / 100;
+	retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
 
 	return retval;
 }
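Note: calc_aperfmperf_ratio() replaces the open-coded percentage math, including the separate __i386__ path, with a single fixed-point computation. A sketch, assuming the generic helper this patch builds on:

#define APERFMPERF_SHIFT 10

static inline
unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
				    struct aperfmperf *new)
{
	u64 aperf = new->aperf - old->aperf;
	u64 mperf = new->mperf - old->mperf;
	unsigned long ratio = aperf;

	/* Pre-shift the divisor so the quotient keeps
	 * APERFMPERF_SHIFT fractional bits. */
	mperf >>= APERFMPERF_SHIFT;
	if (mperf)
		ratio = div64_u64(aperf, mperf);

	return ratio;
}

Since ratio is roughly (aperf/mperf) << APERFMPERF_SHIFT, the new expression (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT works out to max_freq * aperf/mperf, the same average frequency the old perf_percent code produced, and div64_u64() makes the 32-bit approximation paths unnecessary.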
@@ -588,6 +526,21 @@ static const struct dmi_system_id sw_any_bug_dmi_table[] = {
 	},
 	{ }
 };
+
+static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
+{
+	/* http://www.intel.com/Assets/PDF/specupdate/314554.pdf
+	 * AL30: A Machine Check Exception (MCE) Occurring during an
+	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
+	 * Both Processor Cores to Lock Up when HT is enabled*/
+	if (c->x86_vendor == X86_VENDOR_INTEL) {
+		if ((c->x86 == 15) &&
+		    (c->x86_model == 6) &&
+		    (c->x86_mask == 8) && smt_capable())
+			return -ENODEV;
+	}
+	return 0;
+}
 #endif
 
 static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
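Note: the blacklist matches the stepping covered by Intel erratum AL30 and only refuses to load when a sibling thread can actually run, hence the smt_capable() test. On x86 that macro is, roughly (assuming the <asm/smp.h> definition of this era):

/* True when the CPU reports more than one thread per core */
#define smt_capable()	(smp_num_siblings > 1)

which is also why the call site below sits under CONFIG_SMP.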
@@ -602,6 +555,12 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
 	dprintk("acpi_cpufreq_cpu_init\n");
 
+#ifdef CONFIG_SMP
+	result = acpi_cpufreq_blacklist(c);
+	if (result)
+		return result;
+#endif
+
 	data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
@@ -731,12 +690,8 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	acpi_processor_notify_smm(THIS_MODULE);
 
 	/* Check for APERF/MPERF support in hardware */
-	if (c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6) {
-		unsigned int ecx;
-		ecx = cpuid_ecx(6);
-		if (ecx & CPUID_6_ECX_APERFMPERF_CAPABILITY)
-			acpi_cpufreq_driver.getavg = get_measured_perf;
-	}
+	if (cpu_has(c, X86_FEATURE_APERFMPERF))
+		acpi_cpufreq_driver.getavg = get_measured_perf;
 
 	dprintk("CPU%u - ACPI performance management activated.\n", cpu);
 	for (i = 0; i < perf->state_count; i++)
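Note: the removed CPUID(6) probe is now done once at boot, where CPU identification sets the synthetic X86_FEATURE_APERFMPERF flag that cpu_has() tests here. The detection amounts to roughly the following sketch (the helper name init_aperfmperf and its placement in the cpu setup code are illustrative assumptions):

static void __cpuinit init_aperfmperf(struct cpuinfo_x86 *c)
{
	/* CPUID.06H:ECX[0] advertises the APERF/MPERF MSR pair */
	if (c->cpuid_level >= 6 && (cpuid_ecx(6) & 0x01))
		set_cpu_cap(c, X86_FEATURE_APERFMPERF);
}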