author     Linus Torvalds <torvalds@linux-foundation.org>  2014-10-24 14:29:31 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-10-24 14:29:31 -0400
commit     1c45d9a920e6ef4fce38921e4fc776c2abca3197
tree       2f82ef22837b6cf62987941aacd598dbda30c61e /drivers/cpufreq
parent     8264fce6de03f3915e2301f52f181a982718a8cb
parent     a91e99e27a683608d221fb18b70d7de9d801de4a
Merge tag 'pm+acpi-3.18-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull ACPI and power management updates from Rafael Wysocki:
 "This is material that didn't make it to my 3.18-rc1 pull request for
  various reasons, mostly related to timing and travel (LinuxCon EU /
  LPC) plus a couple of fixes for recent bugs.

  The only really new thing here is the PM QoS class for memory
  bandwidth, but it is simple enough and users of it will be added in
  the next cycle.  One major change in behavior is that platform
  devices enumerated by ACPI will use a 32-bit DMA mask by default.
  Also included is an ACPICA update to a new upstream release, but
  that's mostly cleanups, changes in tools and similar.  The rest is
  fixes and cleanups mostly.

  Specifics:

   - Fix for a recent PCI power management change that overlooked the
     fact that some IRQ chips might not be able to configure PCIe PME
     for system wakeup from Lucas Stach.

   - Fix for a bug introduced in 3.17 where acpi_device_wakeup() is
     called with a wrong ordering of arguments from Zhang Rui.

   - A bunch of intel_pstate driver fixes (all -stable candidates) from
     Dirk Brandewie, Gabriele Mazzotta and Pali Rohár.

   - Fixes for a rather long-standing problem with the OOM killer and
     the freezer: frozen processes killed by the OOM killer do not
     actually release any memory until they are thawed, so OOM-killing
     them is rather pointless, with a couple of cleanups on top (Michal
     Hocko, Cong Wang, Rafael J Wysocki).

   - ACPICA update to upstream release 20140926, including mostly
     cleanups reducing differences between the upstream ACPICA and the
     kernel code, tools changes (acpidump, acpiexec) and support for
     the _DDN object (Bob Moore, Lv Zheng).

   - New PM QoS class for memory bandwidth from Tomeu Vizoso.

   - Default 32-bit DMA mask for platform devices enumerated by ACPI
     (this change is mostly needed for some drivers development in
     progress targeted at 3.19) from Heikki Krogerus.

   - ACPI EC driver cleanups, mostly related to debugging, from Lv
     Zheng.

   - cpufreq-dt driver updates from Thomas Petazzoni.

   - powernv cpuidle driver update from Preeti U Murthy"

* tag 'pm+acpi-3.18-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (34 commits)
  intel_pstate: Correct BYT VID values.
  intel_pstate: Fix BYT frequency reporting
  intel_pstate: Don't lose sysfs settings during cpu offline
  cpufreq: intel_pstate: Reflect current no_turbo state correctly
  cpufreq: expose scaling_cur_freq sysfs file for set_policy() drivers
  cpufreq: intel_pstate: Fix setting max_perf_pct in performance policy
  PCI / PM: handle failure to enable wakeup on PCIe PME
  ACPI: invoke acpi_device_wakeup() with correct parameters
  PM / freezer: Clean up code after recent fixes
  PM: convert do_each_thread to for_each_process_thread
  OOM, PM: OOM killed task shouldn't escape PM suspend
  freezer: remove obsolete comments in __thaw_task()
  freezer: Do not freeze tasks killed by OOM killer
  ACPI / platform: provide default DMA mask
  cpuidle: powernv: Populate cpuidle state details by querying the device-tree
  cpufreq: cpufreq-dt: adjust message related to regulators
  cpufreq: cpufreq-dt: extend with platform_data
  cpufreq: allow driver-specific data
  ACPI / EC: Cleanup coding style.
  ACPI / EC: Refine event/query debugging messages.
  ...
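Editorial aside: the memory-bandwidth PM QoS class mentioned above ships without in-tree users in this cycle. As a rough, hypothetical sketch of the intended usage pattern (assuming the class is exposed as PM_QOS_MEMORY_BANDWIDTH and follows the existing pm_qos_add_request()/pm_qos_update_request() API; the caller names below are made up):

#include <linux/pm_qos.h>

static struct pm_qos_request membw_req;

/* Hypothetical consumer: vote for memory bandwidth while streaming. */
static void example_start_streaming(void)
{
        /* Value units are whatever the class defines; 1000 is illustrative. */
        pm_qos_add_request(&membw_req, PM_QOS_MEMORY_BANDWIDTH, 1000);
}

static void example_stop_streaming(void)
{
        /* Drop the vote and remove the request when no longer needed. */
        pm_qos_update_request(&membw_req, PM_QOS_DEFAULT_VALUE);
        pm_qos_remove_request(&membw_req);
}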
Diffstat (limited to 'drivers/cpufreq')
 -rw-r--r--  drivers/cpufreq/cpufreq-dt.c   |  21
 -rw-r--r--  drivers/cpufreq/cpufreq.c      |  38
 -rw-r--r--  drivers/cpufreq/intel_pstate.c | 110
 3 files changed, 137 insertions(+), 32 deletions(-)
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 6bbb8b913446..92c162af5045 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -18,6 +18,7 @@
 #include <linux/cpu.h>
 #include <linux/cpu_cooling.h>
 #include <linux/cpufreq.h>
+#include <linux/cpufreq-dt.h>
 #include <linux/cpumask.h>
 #include <linux/err.h>
 #include <linux/module.h>
@@ -146,8 +147,8 @@ try_again:
                        goto try_again;
                }

-               dev_warn(cpu_dev, "failed to get cpu%d regulator: %ld\n",
-                        cpu, PTR_ERR(cpu_reg));
+               dev_dbg(cpu_dev, "no regulator for cpu%d: %ld\n",
+                       cpu, PTR_ERR(cpu_reg));
        }

        cpu_clk = clk_get(cpu_dev, NULL);
@@ -178,6 +179,7 @@ try_again:

 static int cpufreq_init(struct cpufreq_policy *policy)
 {
+       struct cpufreq_dt_platform_data *pd;
        struct cpufreq_frequency_table *freq_table;
        struct thermal_cooling_device *cdev;
        struct device_node *np;
@@ -265,9 +267,18 @@ static int cpufreq_init(struct cpufreq_policy *policy)
        policy->driver_data = priv;

        policy->clk = cpu_clk;
-       ret = cpufreq_generic_init(policy, freq_table, transition_latency);
-       if (ret)
+       ret = cpufreq_table_validate_and_show(policy, freq_table);
+       if (ret) {
+               dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
+                       ret);
                goto out_cooling_unregister;
+       }
+
+       policy->cpuinfo.transition_latency = transition_latency;
+
+       pd = cpufreq_get_driver_data();
+       if (pd && !pd->independent_clocks)
+               cpumask_setall(policy->cpus);

        of_node_put(np);

@@ -335,6 +346,8 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
        if (!IS_ERR(cpu_reg))
                regulator_put(cpu_reg);

+       dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
+
        ret = cpufreq_register_driver(&dt_cpufreq_driver);
        if (ret)
                dev_err(cpu_dev, "failed register driver: %d\n", ret);
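For context on the platform_data plumbing added above, here is a minimal sketch of how platform code might hand a struct cpufreq_dt_platform_data to the driver when all CPUs share one clock. The registration site, initcall name and values are illustrative assumptions, not taken from this series:

#include <linux/cpufreq-dt.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>

static struct cpufreq_dt_platform_data example_cpufreq_dt_pd = {
        /* All CPUs are clocked together, so one policy should span them. */
        .independent_clocks = false,
};

static int __init example_cpufreq_dt_register(void)
{
        struct platform_device *pdev;

        /* The data is copied and shows up via dev_get_platdata() in probe. */
        pdev = platform_device_register_data(NULL, "cpufreq-dt", -1,
                                             &example_cpufreq_dt_pd,
                                             sizeof(example_cpufreq_dt_pd));
        return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(example_cpufreq_dt_register);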
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 24bf76fba141..644b54e1e7d1 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -512,7 +512,18 @@ show_one(cpuinfo_max_freq, cpuinfo.max_freq);
 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
 show_one(scaling_min_freq, min);
 show_one(scaling_max_freq, max);
-show_one(scaling_cur_freq, cur);
+
+static ssize_t show_scaling_cur_freq(
+       struct cpufreq_policy *policy, char *buf)
+{
+       ssize_t ret;
+
+       if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
+               ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
+       else
+               ret = sprintf(buf, "%u\n", policy->cur);
+       return ret;
+}

 static int cpufreq_set_policy(struct cpufreq_policy *policy,
                                struct cpufreq_policy *new_policy);
@@ -906,11 +917,11 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
                if (ret)
                        goto err_out_kobj_put;
        }
-       if (has_target()) {
-               ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
-               if (ret)
-                       goto err_out_kobj_put;
-       }
+
+       ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
+       if (ret)
+               goto err_out_kobj_put;
+
        if (cpufreq_driver->bios_limit) {
                ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
                if (ret)
@@ -1731,6 +1742,21 @@ const char *cpufreq_get_current_driver(void)
 }
 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

+/**
+ * cpufreq_get_driver_data - return current driver data
+ *
+ * Return the private data of the currently loaded cpufreq
+ * driver, or NULL if no cpufreq driver is loaded.
+ */
+void *cpufreq_get_driver_data(void)
+{
+       if (cpufreq_driver)
+               return cpufreq_driver->driver_data;
+
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
+
 /*********************************************************************
  *                     NOTIFIER LISTS INTERFACE                      *
  *********************************************************************/
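The new driver_data field and cpufreq_get_driver_data() accessor added above are generic, so drivers other than cpufreq-dt can use the same pattern. A minimal sketch, with driver and field names invented purely for illustration (a real driver also needs .verify and .target or .setpolicy callbacks before it can be registered):

#include <linux/cpufreq.h>
#include <linux/cpumask.h>

struct example_cpufreq_pdata {
        bool shared_clock;
};

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
        /* Retrieve whatever the driver stashed in .driver_data. */
        struct example_cpufreq_pdata *pdata = cpufreq_get_driver_data();

        if (pdata && pdata->shared_clock)
                cpumask_setall(policy->cpus);   /* one policy for all CPUs */
        return 0;
}

static struct cpufreq_driver example_cpufreq_driver = {
        .name = "example-cpufreq",
        .init = example_cpufreq_init,
        /* .driver_data is filled in by probe code before
         * cpufreq_register_driver(), as cpufreq-dt does above. */
};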
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 0668b389c516..27bb6d3877ed 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -52,6 +52,17 @@ static inline int32_t div_fp(int32_t x, int32_t y)
        return div_s64((int64_t)x << FRAC_BITS, y);
 }

+static inline int ceiling_fp(int32_t x)
+{
+       int mask, ret;
+
+       ret = fp_toint(x);
+       mask = (1 << FRAC_BITS) - 1;
+       if (x & mask)
+               ret += 1;
+       return ret;
+}
+
 struct sample {
        int32_t core_pct_busy;
        u64 aperf;
@@ -64,6 +75,7 @@ struct pstate_data {
        int current_pstate;
        int min_pstate;
        int max_pstate;
+       int scaling;
        int turbo_pstate;
 };

@@ -113,6 +125,7 @@ struct pstate_funcs {
        int (*get_max)(void);
        int (*get_min)(void);
        int (*get_turbo)(void);
+       int (*get_scaling)(void);
        void (*set)(struct cpudata*, int pstate);
        void (*get_vid)(struct cpudata *);
 };
@@ -138,6 +151,7 @@ struct perf_limits {

 static struct perf_limits limits = {
        .no_turbo = 0,
+       .turbo_disabled = 0,
        .max_perf_pct = 100,
        .max_perf = int_tofp(1),
        .min_perf_pct = 0,
@@ -218,6 +232,18 @@ static inline void intel_pstate_reset_all_pid(void)
        }
 }

+static inline void update_turbo_state(void)
+{
+       u64 misc_en;
+       struct cpudata *cpu;
+
+       cpu = all_cpu_data[0];
+       rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
+       limits.turbo_disabled =
+               (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
+                cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
+}
+
 /************************** debugfs begin ************************/
 static int pid_param_set(void *data, u64 val)
 {
@@ -274,6 +300,20 @@ static void __init intel_pstate_debug_expose_params(void)
                return sprintf(buf, "%u\n", limits.object);     \
        }

+static ssize_t show_no_turbo(struct kobject *kobj,
+                            struct attribute *attr, char *buf)
+{
+       ssize_t ret;
+
+       update_turbo_state();
+       if (limits.turbo_disabled)
+               ret = sprintf(buf, "%u\n", limits.turbo_disabled);
+       else
+               ret = sprintf(buf, "%u\n", limits.no_turbo);
+
+       return ret;
+}
+
 static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
                              const char *buf, size_t count)
 {
@@ -283,11 +323,14 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;
-       limits.no_turbo = clamp_t(int, input, 0 , 1);
+
+       update_turbo_state();
        if (limits.turbo_disabled) {
                pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
-               limits.no_turbo = limits.turbo_disabled;
+               return -EPERM;
        }
+       limits.no_turbo = clamp_t(int, input, 0, 1);
+
        return count;
 }

@@ -323,7 +366,6 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
        return count;
 }

-show_one(no_turbo, no_turbo);
 show_one(max_perf_pct, max_perf_pct);
 show_one(min_perf_pct, min_perf_pct);

@@ -394,7 +436,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
                        cpudata->vid.ratio);

        vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
-       vid = fp_toint(vid_fp);
+       vid = ceiling_fp(vid_fp);

        if (pstate > cpudata->pstate.max_pstate)
                vid = cpudata->vid.turbo;
@@ -404,6 +446,22 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
        wrmsrl(MSR_IA32_PERF_CTL, val);
 }

+#define BYT_BCLK_FREQS 5
+static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};
+
+static int byt_get_scaling(void)
+{
+       u64 value;
+       int i;
+
+       rdmsrl(MSR_FSB_FREQ, value);
+       i = value & 0x3;
+
+       BUG_ON(i > BYT_BCLK_FREQS);
+
+       return byt_freq_table[i] * 100;
+}
+
 static void byt_get_vid(struct cpudata *cpudata)
 {
        u64 value;
@@ -449,6 +507,11 @@ static int core_get_turbo_pstate(void)
        return ret;
 }

+static inline int core_get_scaling(void)
+{
+       return 100000;
+}
+
 static void core_set_pstate(struct cpudata *cpudata, int pstate)
 {
        u64 val;
@@ -473,6 +536,7 @@ static struct cpu_defaults core_params = {
                .get_max = core_get_max_pstate,
                .get_min = core_get_min_pstate,
                .get_turbo = core_get_turbo_pstate,
+               .get_scaling = core_get_scaling,
                .set = core_set_pstate,
        },
 };
@@ -491,6 +555,7 @@ static struct cpu_defaults byt_params = {
                .get_min = byt_get_min_pstate,
                .get_turbo = byt_get_turbo_pstate,
                .set = byt_set_pstate,
+               .get_scaling = byt_get_scaling,
                .get_vid = byt_get_vid,
        },
 };
@@ -501,7 +566,7 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
        int max_perf_adj;
        int min_perf;

-       if (limits.no_turbo)
+       if (limits.no_turbo || limits.turbo_disabled)
                max_perf = cpu->pstate.max_pstate;

        max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
@@ -516,6 +581,8 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
 {
        int max_perf, min_perf;

+       update_turbo_state();
+
        intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

        pstate = clamp_t(int, pstate, min_perf, max_perf);
@@ -523,7 +590,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
        if (pstate == cpu->pstate.current_pstate)
                return;

-       trace_cpu_frequency(pstate * 100000, cpu->cpu);
+       trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);

        cpu->pstate.current_pstate = pstate;

@@ -535,6 +602,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
        cpu->pstate.min_pstate = pstate_funcs.get_min();
        cpu->pstate.max_pstate = pstate_funcs.get_max();
        cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
+       cpu->pstate.scaling = pstate_funcs.get_scaling();

        if (pstate_funcs.get_vid)
                pstate_funcs.get_vid(cpu);
@@ -550,7 +618,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu)
        core_pct = div64_u64(core_pct, int_tofp(sample->mperf));

        sample->freq = fp_toint(
-               mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct));
+               mul_fp(int_tofp(
+                       cpu->pstate.max_pstate * cpu->pstate.scaling / 100),
+                       core_pct));

        sample->core_pct_busy = (int32_t)core_pct;
 }
@@ -671,7 +741,9 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 {
        struct cpudata *cpu;

-       all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL);
+       if (!all_cpu_data[cpunum])
+               all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
+                                              GFP_KERNEL);
        if (!all_cpu_data[cpunum])
                return -ENOMEM;

@@ -714,9 +786,10 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
        if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
                limits.min_perf_pct = 100;
                limits.min_perf = int_tofp(1);
+               limits.max_policy_pct = 100;
                limits.max_perf_pct = 100;
                limits.max_perf = int_tofp(1);
-               limits.no_turbo = limits.turbo_disabled;
+               limits.no_turbo = 0;
                return 0;
        }
        limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
@@ -751,15 +824,12 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)

        del_timer_sync(&all_cpu_data[cpu_num]->timer);
        intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
-       kfree(all_cpu_data[cpu_num]);
-       all_cpu_data[cpu_num] = NULL;
 }

 static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 {
        struct cpudata *cpu;
        int rc;
-       u64 misc_en;

        rc = intel_pstate_init_cpu(policy->cpu);
        if (rc)
@@ -767,23 +837,18 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)

        cpu = all_cpu_data[policy->cpu];

-       rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
-       if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
-           cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) {
-               limits.turbo_disabled = 1;
-               limits.no_turbo = 1;
-       }
        if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
                policy->policy = CPUFREQ_POLICY_PERFORMANCE;
        else
                policy->policy = CPUFREQ_POLICY_POWERSAVE;

-       policy->min = cpu->pstate.min_pstate * 100000;
-       policy->max = cpu->pstate.turbo_pstate * 100000;
+       policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
+       policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

        /* cpuinfo and default policy values */
-       policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
-       policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000;
+       policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
+       policy->cpuinfo.max_freq =
+               cpu->pstate.turbo_pstate * cpu->pstate.scaling;
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
        cpumask_set_cpu(policy->cpu, policy->cpus);

@@ -841,6 +906,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
        pstate_funcs.get_max = funcs->get_max;
        pstate_funcs.get_min = funcs->get_min;
        pstate_funcs.get_turbo = funcs->get_turbo;
+       pstate_funcs.get_scaling = funcs->get_scaling;
        pstate_funcs.set = funcs->set;
        pstate_funcs.get_vid = funcs->get_vid;
 }