Diffstat (limited to 'drivers/cpufreq/acpi-cpufreq.c')

 drivers/cpufreq/acpi-cpufreq.c | 212 ++++++++++++-----------------
 1 file changed, 96 insertions(+), 116 deletions(-)
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 51eef87bbc37..59a7b380fbe2 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -70,6 +70,8 @@ struct acpi_cpufreq_data {
     unsigned int cpu_feature;
     unsigned int acpi_perf_cpu;
     cpumask_var_t freqdomain_cpus;
+    void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val);
+    u32 (*cpu_freq_read)(struct acpi_pct_register *reg);
 };
 
 /* acpi_perf_data is a pointer to percpu data. */
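The two new members bind the register-access method to the policy data once, at init time, so the hot path further down becomes a single indirect call instead of a switch on data->cpu_feature per read or write. As a rough standalone illustration of the pattern (plain userspace C; all names below are invented, not the driver's):

    /* Sketch: pick accessors once, then dispatch through stored pointers. */
    #include <stdint.h>
    #include <stdio.h>

    struct pct_register { uint16_t port; uint8_t bit_width; };

    struct freq_ops {
        uint32_t (*read)(struct pct_register *reg);
        void (*write)(struct pct_register *reg, uint32_t val);
    };

    static uint32_t fake_perf_ctl;    /* stands in for a PERF_CTL MSR */

    static uint32_t msr_read(struct pct_register *unused)
    {
        (void)unused;
        return fake_perf_ctl;
    }

    static void msr_write(struct pct_register *unused, uint32_t val)
    {
        (void)unused;
        fake_perf_ctl = val;
    }

    int main(void)
    {
        /* "init": backend chosen once, as acpi_cpufreq_cpu_init() does */
        struct freq_ops ops = { .read = msr_read, .write = msr_write };

        /* "hot path": no switch, just the stored callbacks */
        ops.write(NULL, 0x1a);
        printf("ctl = 0x%x\n", ops.read(NULL));
        return 0;
    }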
@@ -243,125 +245,119 @@ static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
     }
 }
 
-struct msr_addr {
-    u32 reg;
-};
-
-struct io_addr {
-    u16 port;
-    u8 bit_width;
-};
+u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
+{
+    u32 val, dummy;
+
+    rdmsr(MSR_IA32_PERF_CTL, val, dummy);
+    return val;
+}
+
+void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val)
+{
+    u32 lo, hi;
+
+    rdmsr(MSR_IA32_PERF_CTL, lo, hi);
+    lo = (lo & ~INTEL_MSR_RANGE) | (val & INTEL_MSR_RANGE);
+    wrmsr(MSR_IA32_PERF_CTL, lo, hi);
+}
+
+u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
+{
+    u32 val, dummy;
+
+    rdmsr(MSR_AMD_PERF_CTL, val, dummy);
+    return val;
+}
+
+void cpu_freq_write_amd(struct acpi_pct_register *not_used, u32 val)
+{
+    wrmsr(MSR_AMD_PERF_CTL, val, 0);
+}
+
+u32 cpu_freq_read_io(struct acpi_pct_register *reg)
+{
+    u32 val;
+
+    acpi_os_read_port(reg->address, &val, reg->bit_width);
+    return val;
+}
+
+void cpu_freq_write_io(struct acpi_pct_register *reg, u32 val)
+{
+    acpi_os_write_port(reg->address, val, reg->bit_width);
+}
 
 struct drv_cmd {
-    unsigned int type;
-    const struct cpumask *mask;
-    union {
-        struct msr_addr msr;
-        struct io_addr io;
-    } addr;
+    struct acpi_pct_register *reg;
     u32 val;
+    union {
+        void (*write)(struct acpi_pct_register *reg, u32 val);
+        u32 (*read)(struct acpi_pct_register *reg);
+    } func;
 };
 
 /* Called via smp_call_function_single(), on the target CPU */
 static void do_drv_read(void *_cmd)
 {
     struct drv_cmd *cmd = _cmd;
-    u32 h;
 
-    switch (cmd->type) {
-    case SYSTEM_INTEL_MSR_CAPABLE:
-    case SYSTEM_AMD_MSR_CAPABLE:
-        rdmsr(cmd->addr.msr.reg, cmd->val, h);
-        break;
-    case SYSTEM_IO_CAPABLE:
-        acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
-                          &cmd->val,
-                          (u32)cmd->addr.io.bit_width);
-        break;
-    default:
-        break;
-    }
+    cmd->val = cmd->func.read(cmd->reg);
 }
 
-/* Called via smp_call_function_many(), on the target CPUs */
-static void do_drv_write(void *_cmd)
+static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
 {
-    struct drv_cmd *cmd = _cmd;
-    u32 lo, hi;
+    struct acpi_processor_performance *perf = to_perf_data(data);
+    struct drv_cmd cmd = {
+        .reg = &perf->control_register,
+        .func.read = data->cpu_freq_read,
+    };
+    int err;
 
-    switch (cmd->type) {
-    case SYSTEM_INTEL_MSR_CAPABLE:
-        rdmsr(cmd->addr.msr.reg, lo, hi);
-        lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
-        wrmsr(cmd->addr.msr.reg, lo, hi);
-        break;
-    case SYSTEM_AMD_MSR_CAPABLE:
-        wrmsr(cmd->addr.msr.reg, cmd->val, 0);
-        break;
-    case SYSTEM_IO_CAPABLE:
-        acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
-                           cmd->val,
-                           (u32)cmd->addr.io.bit_width);
-        break;
-    default:
-        break;
-    }
+    err = smp_call_function_any(mask, do_drv_read, &cmd, 1);
+    WARN_ON_ONCE(err);    /* smp_call_function_any() was buggy? */
+    return cmd.val;
 }
 
-static void drv_read(struct drv_cmd *cmd)
+/* Called via smp_call_function_many(), on the target CPUs */
+static void do_drv_write(void *_cmd)
 {
-    int err;
-    cmd->val = 0;
+    struct drv_cmd *cmd = _cmd;
 
-    err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
-    WARN_ON_ONCE(err);    /* smp_call_function_any() was buggy? */
+    cmd->func.write(cmd->reg, cmd->val);
 }
 
-static void drv_write(struct drv_cmd *cmd)
+static void drv_write(struct acpi_cpufreq_data *data,
+                      const struct cpumask *mask, u32 val)
 {
+    struct acpi_processor_performance *perf = to_perf_data(data);
+    struct drv_cmd cmd = {
+        .reg = &perf->control_register,
+        .val = val,
+        .func.write = data->cpu_freq_write,
+    };
     int this_cpu;
 
     this_cpu = get_cpu();
-    if (cpumask_test_cpu(this_cpu, cmd->mask))
-        do_drv_write(cmd);
-    smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
+    if (cpumask_test_cpu(this_cpu, mask))
+        do_drv_write(&cmd);
+
+    smp_call_function_many(mask, do_drv_write, &cmd, 1);
     put_cpu();
 }
 
-static u32
-get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
+static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
 {
-    struct acpi_processor_performance *perf;
-    struct drv_cmd cmd;
+    u32 val;
 
     if (unlikely(cpumask_empty(mask)))
         return 0;
 
-    switch (data->cpu_feature) {
-    case SYSTEM_INTEL_MSR_CAPABLE:
-        cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
-        cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
-        break;
-    case SYSTEM_AMD_MSR_CAPABLE:
-        cmd.type = SYSTEM_AMD_MSR_CAPABLE;
-        cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
-        break;
-    case SYSTEM_IO_CAPABLE:
-        cmd.type = SYSTEM_IO_CAPABLE;
-        perf = to_perf_data(data);
-        cmd.addr.io.port = perf->control_register.address;
-        cmd.addr.io.bit_width = perf->control_register.bit_width;
-        break;
-    default:
-        return 0;
-    }
-
-    cmd.mask = mask;
-    drv_read(&cmd);
+    val = drv_read(data, mask);
 
-    pr_debug("get_cur_val = %u\n", cmd.val);
+    pr_debug("get_cur_val = %u\n", val);
 
-    return cmd.val;
+    return val;
 }
 
 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
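Note that cpu_freq_write_intel() keeps the old read-modify-write semantics: only the bits covered by INTEL_MSR_RANGE (0xffff in this driver) are replaced, and the upper half of MSR_IA32_PERF_CTL is preserved, while reads and writes still execute on a CPU from the target mask via smp_call_function_any()/smp_call_function_many(). A self-contained sketch of just the masked update (register value and mask invented for the assert):

    /* Sketch of the masked read-modify-write in cpu_freq_write_intel(). */
    #include <assert.h>
    #include <stdint.h>

    static uint32_t update_field(uint32_t old, uint32_t val, uint32_t mask)
    {
        /* replace only the bits under mask, keep everything else */
        return (old & ~mask) | (val & mask);
    }

    int main(void)
    {
        uint32_t reg = 0xdead0f00;

        reg = update_field(reg, 0x0000abcd, 0xffff);
        assert(reg == 0xdeadabcd);    /* upper 16 bits untouched */
        return 0;
    }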
@@ -416,7 +412,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 {
     struct acpi_cpufreq_data *data = policy->driver_data;
     struct acpi_processor_performance *perf;
-    struct drv_cmd cmd;
+    const struct cpumask *mask;
     unsigned int next_perf_state = 0; /* Index into perf table */
     int result = 0;
 
@@ -434,42 +430,21 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
         } else {
             pr_debug("Already at target state (P%d)\n",
                      next_perf_state);
-            goto out;
+            return 0;
         }
     }
 
-    switch (data->cpu_feature) {
-    case SYSTEM_INTEL_MSR_CAPABLE:
-        cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
-        cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
-        cmd.val = (u32) perf->states[next_perf_state].control;
-        break;
-    case SYSTEM_AMD_MSR_CAPABLE:
-        cmd.type = SYSTEM_AMD_MSR_CAPABLE;
-        cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
-        cmd.val = (u32) perf->states[next_perf_state].control;
-        break;
-    case SYSTEM_IO_CAPABLE:
-        cmd.type = SYSTEM_IO_CAPABLE;
-        cmd.addr.io.port = perf->control_register.address;
-        cmd.addr.io.bit_width = perf->control_register.bit_width;
-        cmd.val = (u32) perf->states[next_perf_state].control;
-        break;
-    default:
-        result = -ENODEV;
-        goto out;
-    }
-
-    /* cpufreq holds the hotplug lock, so we are safe from here on */
-    if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
-        cmd.mask = policy->cpus;
-    else
-        cmd.mask = cpumask_of(policy->cpu);
+    /*
+     * The core won't allow CPUs to go away until the governor has been
+     * stopped, so we can rely on the stability of policy->cpus.
+     */
+    mask = policy->shared_type == CPUFREQ_SHARED_TYPE_ANY ?
+        cpumask_of(policy->cpu) : policy->cpus;
 
-    drv_write(&cmd);
+    drv_write(data, mask, perf->states[next_perf_state].control);
 
     if (acpi_pstate_strict) {
-        if (!check_freqs(cmd.mask, data->freq_table[index].frequency,
+        if (!check_freqs(mask, data->freq_table[index].frequency,
                          data)) {
             pr_debug("acpi_cpufreq_target failed (%d)\n",
                      policy->cpu);
@@ -480,7 +455,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
     if (!result)
         perf->state = next_perf_state;
 
-out:
     return result;
 }
 
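With the switch gone, acpi_cpufreq_target() only picks the CPU mask (a single CPU for CPUFREQ_SHARED_TYPE_ANY, since one write then covers the whole domain; all of policy->cpus otherwise) and hands the raw control value to drv_write(). A userspace sketch of the fan-out drv_write() performs; the explicit local write exists because smp_call_function_many() does not run the callback on the calling CPU (CPU count and values invented):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NCPUS 4

    /* stand-in for do_drv_write(): just logs the register write */
    static void do_write(int cpu, uint32_t val)
    {
        printf("cpu%d: PERF_CTL <- 0x%x\n", cpu, val);
    }

    static void drv_write_sketch(const bool mask[NCPUS], int this_cpu,
                                 uint32_t val)
    {
        if (mask[this_cpu])
            do_write(this_cpu, val);    /* local write */

        for (int cpu = 0; cpu < NCPUS; cpu++)
            if (cpu != this_cpu && mask[cpu])
                do_write(cpu, val);     /* an IPI in the real driver */
    }

    int main(void)
    {
        bool shared_any = false;        /* CPUFREQ_SHARED_TYPE_ANY? */
        bool policy_cpus[NCPUS] = { true, true, false, false };
        bool one_cpu[NCPUS] = { [0] = true };

        drv_write_sketch(shared_any ? one_cpu : policy_cpus, 0, 0x1a);
        return 0;
    }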
@@ -740,15 +714,21 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
         }
         pr_debug("SYSTEM IO addr space\n");
         data->cpu_feature = SYSTEM_IO_CAPABLE;
+        data->cpu_freq_read = cpu_freq_read_io;
+        data->cpu_freq_write = cpu_freq_write_io;
         break;
     case ACPI_ADR_SPACE_FIXED_HARDWARE:
         pr_debug("HARDWARE addr space\n");
         if (check_est_cpu(cpu)) {
             data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
+            data->cpu_freq_read = cpu_freq_read_intel;
+            data->cpu_freq_write = cpu_freq_write_intel;
             break;
         }
         if (check_amd_hwpstate_cpu(cpu)) {
             data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
+            data->cpu_freq_read = cpu_freq_read_amd;
+            data->cpu_freq_write = cpu_freq_write_amd;
             break;
         }
         result = -ENODEV;
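The hunk above is the init-time half of the refactoring: the ACPI control register's address-space type is inspected once and the matching accessor pair is stored in data, so every later read and write goes straight through the pointers. A standalone sketch of that selection step (enum and function names are illustrative, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    enum reg_space { SPACE_SYSTEM_IO, SPACE_FIXED_HW };

    struct driver_data {
        uint32_t (*freq_read)(void);
        void (*freq_write)(uint32_t val);
    };

    static uint32_t io_read(void)     { puts("port read");  return 0; }
    static void io_write(uint32_t v)  { printf("port write 0x%x\n", v); }
    static uint32_t msr_read(void)    { puts("rdmsr"); return 0; }
    static void msr_write(uint32_t v) { printf("wrmsr 0x%x\n", v); }

    static int wire_accessors(struct driver_data *data, enum reg_space space)
    {
        switch (space) {
        case SPACE_SYSTEM_IO:
            data->freq_read = io_read;
            data->freq_write = io_write;
            return 0;
        case SPACE_FIXED_HW:
            data->freq_read = msr_read;
            data->freq_write = msr_write;
            return 0;
        }
        return -1;    /* -ENODEV in the driver */
    }

    int main(void)
    {
        struct driver_data data;

        if (wire_accessors(&data, SPACE_FIXED_HW))
            return 1;
        data.freq_write(0x1a);
        return 0;
    }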