Diffstat (limited to 'arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c')
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 88
1 file changed, 11 insertions(+), 77 deletions(-)
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index ae9b503220ca..4109679863c1 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -60,7 +60,6 @@ enum {
 };
 
 #define INTEL_MSR_RANGE		(0xffff)
-#define CPUID_6_ECX_APERFMPERF_CAPABILITY	(0x1)
 
 struct acpi_cpufreq_data {
 	struct acpi_processor_performance *acpi_data;
@@ -71,11 +70,7 @@ struct acpi_cpufreq_data {
 
 static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
 
-struct acpi_msr_data {
-	u64 saved_aperf, saved_mperf;
-};
-
-static DEFINE_PER_CPU(struct acpi_msr_data, msr_data);
+static DEFINE_PER_CPU(struct aperfmperf, old_perf);
 
 DEFINE_TRACE(power_mark);
 
@@ -244,23 +239,12 @@ static u32 get_cur_val(const struct cpumask *mask)
 	return cmd.val;
 }
 
-struct perf_pair {
-	union {
-		struct {
-			u32 lo;
-			u32 hi;
-		} split;
-		u64 whole;
-	} aperf, mperf;
-};
-
 /* Called via smp_call_function_single(), on the target CPU */
 static void read_measured_perf_ctrs(void *_cur)
 {
-	struct perf_pair *cur = _cur;
+	struct aperfmperf *am = _cur;
 
-	rdmsr(MSR_IA32_APERF, cur->aperf.split.lo, cur->aperf.split.hi);
-	rdmsr(MSR_IA32_MPERF, cur->mperf.split.lo, cur->mperf.split.hi);
+	get_aperfmperf(am);
 }
 
 /*
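
Note: this hunk replaces the driver's open-coded rdmsr() pair with the shared get_aperfmperf() helper. For reference, a minimal sketch of the struct and helper the hunk relies on, assuming they live in the common x86 headers as the symbols used here suggest (they are not part of this diff):

struct aperfmperf {
	u64 aperf, mperf;
};

static inline void get_aperfmperf(struct aperfmperf *am)
{
	/* Assumed sketch: callers are expected to have checked
	 * X86_FEATURE_APERFMPERF before calling this. */
	WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_APERFMPERF));

	/* rdmsrl() reads the full 64-bit MSR, so no lo/hi splitting needed */
	rdmsrl(MSR_IA32_APERF, am->aperf);
	rdmsrl(MSR_IA32_MPERF, am->mperf);
}

Because both counters are read as full 64-bit values on the target CPU, the old struct perf_pair union with its lo/hi splitting becomes unnecessary.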
@@ -279,63 +263,17 @@ static void read_measured_perf_ctrs(void *_cur)
 static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 				      unsigned int cpu)
 {
-	struct perf_pair readin, cur;
-	unsigned int perf_percent;
+	struct aperfmperf perf;
+	unsigned long ratio;
 	unsigned int retval;
 
-	if (smp_call_function_single(cpu, read_measured_perf_ctrs, &readin, 1))
+	if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
 		return 0;
 
-	cur.aperf.whole = readin.aperf.whole -
-				per_cpu(msr_data, cpu).saved_aperf;
-	cur.mperf.whole = readin.mperf.whole -
-				per_cpu(msr_data, cpu).saved_mperf;
-	per_cpu(msr_data, cpu).saved_aperf = readin.aperf.whole;
-	per_cpu(msr_data, cpu).saved_mperf = readin.mperf.whole;
-
-#ifdef __i386__
-	/*
-	 * We dont want to do 64 bit divide with 32 bit kernel
-	 * Get an approximate value. Return failure in case we cannot get
-	 * an approximate value.
-	 */
-	if (unlikely(cur.aperf.split.hi || cur.mperf.split.hi)) {
-		int shift_count;
-		u32 h;
-
-		h = max_t(u32, cur.aperf.split.hi, cur.mperf.split.hi);
-		shift_count = fls(h);
-
-		cur.aperf.whole >>= shift_count;
-		cur.mperf.whole >>= shift_count;
-	}
-
-	if (((unsigned long)(-1) / 100) < cur.aperf.split.lo) {
-		int shift_count = 7;
-		cur.aperf.split.lo >>= shift_count;
-		cur.mperf.split.lo >>= shift_count;
-	}
-
-	if (cur.aperf.split.lo && cur.mperf.split.lo)
-		perf_percent = (cur.aperf.split.lo * 100) / cur.mperf.split.lo;
-	else
-		perf_percent = 0;
+	ratio = calc_aperfmperf_ratio(&per_cpu(old_perf, cpu), &perf);
+	per_cpu(old_perf, cpu) = perf;
 
-#else
-	if (unlikely(((unsigned long)(-1) / 100) < cur.aperf.whole)) {
-		int shift_count = 7;
-		cur.aperf.whole >>= shift_count;
-		cur.mperf.whole >>= shift_count;
-	}
-
-	if (cur.aperf.whole && cur.mperf.whole)
-		perf_percent = (cur.aperf.whole * 100) / cur.mperf.whole;
-	else
-		perf_percent = 0;
-
-#endif
-
-	retval = (policy->cpuinfo.max_freq * perf_percent) / 100;
+	retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
 
 	return retval;
 }
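
Note: calc_aperfmperf_ratio() subsumes both the percent arithmetic and the #ifdef __i386__ overflow workarounds removed above. A sketch of that helper, under the same assumption that it sits next to get_aperfmperf() in common code:

#define APERFMPERF_SHIFT 10

static inline unsigned long
calc_aperfmperf_ratio(struct aperfmperf *old, struct aperfmperf *new)
{
	u64 aperf = new->aperf - old->aperf;
	u64 mperf = new->mperf - old->mperf;
	unsigned long ratio = aperf;

	/*
	 * Fixed-point ratio scaled by 2^APERFMPERF_SHIFT. div64_u64()
	 * works on 32-bit kernels too, which is why the __i386__
	 * approximation path above could be deleted.
	 */
	mperf >>= APERFMPERF_SHIFT;
	if (mperf)
		ratio = div64_u64(aperf, mperf);

	return ratio;
}

Since ratio is roughly (aperf/mperf) << APERFMPERF_SHIFT, the '>> APERFMPERF_SHIFT' in get_measured_perf() yields max_freq * aperf / mperf, i.e. the average frequency over the sampling interval, replacing the old max_freq * perf_percent / 100 computation.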
@@ -731,12 +669,8 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	acpi_processor_notify_smm(THIS_MODULE);
 
 	/* Check for APERF/MPERF support in hardware */
-	if (c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6) {
-		unsigned int ecx;
-		ecx = cpuid_ecx(6);
-		if (ecx & CPUID_6_ECX_APERFMPERF_CAPABILITY)
-			acpi_cpufreq_driver.getavg = get_measured_perf;
-	}
+	if (cpu_has(c, X86_FEATURE_APERFMPERF))
+		acpi_cpufreq_driver.getavg = get_measured_perf;
 
 	dprintk("CPU%u - ACPI performance management activated.\n", cpu);
 	for (i = 0; i < perf->state_count; i++)
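
Note: with the CPUID_6_ECX_APERFMPERF_CAPABILITY probe gone, the driver depends on the feature bit being set once during common CPU setup. A hedged sketch of the detection this hunk assumes (the exact location, likely the scattered-CPUID feature setup, is not part of this diff):

/* Assumed sketch: set the synthetic flag if CPUID.06H:ECX[0] is set */
if (c->cpuid_level >= 6 && (cpuid_ecx(6) & 0x01))
	set_cpu_cap(c, X86_FEATURE_APERFMPERF);

One visible behavior change: the old code enabled getavg only for X86_VENDOR_INTEL, while cpu_has(c, X86_FEATURE_APERFMPERF) applies to any CPU whose setup code sets the flag.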