author		Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2015-10-16 16:12:02 -0400
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2015-10-16 16:12:02 -0400
commit		7855e10294efd4925b351c56d2834dc9f7cff7a2 (patch)
tree		6a23920b576108029fa79ce048af6d0a36dd897f
parent		8e601a9f97a00bab031980de34f9a81891c1f82f (diff)
parent		4ef45148701917fbc08a7c05bc6a3bb0c0573047 (diff)

Merge back earlier cpufreq material for v4.4.
-rw-r--r--	Documentation/kernel-parameters.txt	|   3
-rw-r--r--	arch/x86/include/asm/msr-index.h	|   7
-rw-r--r--	drivers/cpufreq/Kconfig.x86		|   1
-rw-r--r--	drivers/cpufreq/cpufreq.c		|   7
-rw-r--r--	drivers/cpufreq/cpufreq_conservative.c	|  31
-rw-r--r--	drivers/cpufreq/cpufreq_governor.c	|  12
-rw-r--r--	drivers/cpufreq/cpufreq_governor.h	|   1
-rw-r--r--	drivers/cpufreq/imx6q-cpufreq.c		|  50
-rw-r--r--	drivers/cpufreq/integrator-cpufreq.c	|   2
-rw-r--r--	drivers/cpufreq/intel_pstate.c		| 276
-rw-r--r--	drivers/cpufreq/powernv-cpufreq.c	|  10
-rw-r--r--	drivers/cpufreq/tegra20-cpufreq.c	|   2

12 files changed, 351 insertions(+), 51 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 22a4b687ea5b..9b75e2a760de 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1546,6 +1546,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 		hwp_only
 			Only load intel_pstate on systems which support
 			hardware P state control (HWP) if available.
+		no_acpi
+			Don't use ACPI processor performance control objects
+			_PSS and _PPC specified limits.
 
 	intremap=	[X86-64, Intel-IOMMU]
 			on	enable Interrupt Remapping (default)
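intel_pstate_setup() (see the last intel_pstate.c hunk in this patch) matches the string following "intel_pstate=" with strcmp(), one whole token at a time, so the new option is given on its own on the kernel command line:

    intel_pstate=no_acpi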
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index b8c14bb7fc8f..9f3905697f12 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -206,6 +206,13 @@
 #define MSR_GFX_PERF_LIMIT_REASONS	0x000006B0
 #define MSR_RING_PERF_LIMIT_REASONS	0x000006B1
 
+/* Config TDP MSRs */
+#define MSR_CONFIG_TDP_NOMINAL		0x00000648
+#define MSR_CONFIG_TDP_LEVEL1		0x00000649
+#define MSR_CONFIG_TDP_LEVEL2		0x0000064A
+#define MSR_CONFIG_TDP_CONTROL		0x0000064B
+#define MSR_TURBO_ACTIVATION_RATIO	0x0000064C
+
 /* Hardware P state interface */
 #define MSR_PPERF			0x0000064e
 #define MSR_PERF_LIMIT_REASONS		0x0000064f
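These registers are read with the ordinary safe MSR accessors. A minimal sketch of the access pattern, mirroring how core_get_max_pstate() in the intel_pstate.c hunk below consumes them (the 0x3 mask on the control value is an assumption of this sketch; the patch itself adds the raw control value to MSR_CONFIG_TDP_NOMINAL):

    u64 tdp_ctrl, tdp_ratio;

    /* Pick the active ConfigTDP level, then read that level's ratio. */
    if (!rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl) &&
        !rdmsrl_safe(MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3), &tdp_ratio))
            pr_debug("ConfigTDP ratio: %llu\n", tdp_ratio);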
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index c59bdcb83217..adbd1de1cea5 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -5,6 +5,7 @@
 config X86_INTEL_PSTATE
 	bool "Intel P state control"
 	depends on X86
+	select ACPI_PROCESSOR if ACPI
 	help
 	  This driver provides a P state for Intel core processors.
 	  The driver implements an internal governor and will become
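This select is what makes acpi_processor_register_performance() and acpi_processor_unregister_performance(), used by the new _PSS handling in the intel_pstate.c hunks below, available to the driver; gating it on ACPI keeps non-ACPI configurations building as before.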
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 25c4c15103a0..8701dc559850 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -843,18 +843,11 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
 
 	down_write(&policy->rwsem);
 
-	/* Updating inactive policies is invalid, so avoid doing that. */
-	if (unlikely(policy_is_inactive(policy))) {
-		ret = -EBUSY;
-		goto unlock_policy_rwsem;
-	}
-
 	if (fattr->store)
 		ret = fattr->store(policy, buf, count);
 	else
 		ret = -EIO;
 
-unlock_policy_rwsem:
 	up_write(&policy->rwsem);
 unlock:
 	put_online_cpus();
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 84a1506950a7..1fa1deb6e91f 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -23,6 +23,19 @@
 
 static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);
 
+static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
+				   unsigned int event);
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
+static
+#endif
+struct cpufreq_governor cpufreq_gov_conservative = {
+	.name			= "conservative",
+	.governor		= cs_cpufreq_governor_dbs,
+	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
+	.owner			= THIS_MODULE,
+};
+
 static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
 					   struct cpufreq_policy *policy)
 {
@@ -119,12 +132,14 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 	struct cpufreq_freqs *freq = data;
 	struct cs_cpu_dbs_info_s *dbs_info =
 					&per_cpu(cs_cpu_dbs_info, freq->cpu);
-	struct cpufreq_policy *policy;
+	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(freq->cpu);
 
-	if (!dbs_info->enable)
+	if (!policy)
 		return 0;
 
-	policy = dbs_info->cdbs.shared->policy;
+	/* policy isn't governed by conservative governor */
+	if (policy->governor != &cpufreq_gov_conservative)
+		return 0;
 
 	/*
 	 * we only care if our internally tracked freq moves outside the 'valid'
@@ -367,16 +382,6 @@ static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	return cpufreq_governor_dbs(policy, &cs_dbs_cdata, event);
 }
 
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
-static
-#endif
-struct cpufreq_governor cpufreq_gov_conservative = {
-	.name			= "conservative",
-	.governor		= cs_cpufreq_governor_dbs,
-	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
-	.owner			= THIS_MODULE,
-};
-
 static int __init cpufreq_gov_dbs_init(void)
 {
 	return cpufreq_register_governor(&cpufreq_gov_conservative);
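Moving the cpufreq_gov_conservative definition above dbs_cpufreq_notifier() (with a forward declaration for cs_cpufreq_governor_dbs()) lets the notifier simply compare policy->governor against the governor's address, which is what makes the per-CPU enable flag removable in the next two files. For context, a minimal sketch of how such a transition notifier is wired up — the conservative governor already does the equivalent elsewhere in this file, and the patch leaves those call sites untouched:

    static struct notifier_block cs_cpufreq_notifier_block = {
            .notifier_call = dbs_cpufreq_notifier,
    };

    /* invoked on every frequency transition once registered */
    cpufreq_register_notifier(&cs_cpufreq_notifier_block,
                              CPUFREQ_TRANSITION_NOTIFIER);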
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 939197ffa4ac..750626d8fb03 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -463,7 +463,6 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
 			cdata->get_cpu_dbs_info_s(cpu);
 
 		cs_dbs_info->down_skip = 0;
-		cs_dbs_info->enable = 1;
 		cs_dbs_info->requested_freq = policy->cur;
 	} else {
 		struct od_ops *od_ops = cdata->gov_ops;
@@ -482,9 +481,7 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
 static int cpufreq_governor_stop(struct cpufreq_policy *policy,
 				 struct dbs_data *dbs_data)
 {
-	struct common_dbs_data *cdata = dbs_data->cdata;
-	unsigned int cpu = policy->cpu;
-	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
+	struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(policy->cpu);
 	struct cpu_common_dbs_info *shared = cdbs->shared;
 
 	/* State should be equivalent to START */
@@ -493,13 +490,6 @@ static int cpufreq_governor_stop(struct cpufreq_policy *policy,
 
 	gov_cancel_work(dbs_data, policy);
 
-	if (cdata->governor == GOV_CONSERVATIVE) {
-		struct cs_cpu_dbs_info_s *cs_dbs_info =
-			cdata->get_cpu_dbs_info_s(cpu);
-
-		cs_dbs_info->enable = 0;
-	}
-
 	shared->policy = NULL;
 	mutex_destroy(&shared->timer_mutex);
 	return 0;
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 50f171796632..5621bb03e874 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -170,7 +170,6 @@ struct cs_cpu_dbs_info_s {
 	struct cpu_dbs_info cdbs;
 	unsigned int down_skip;
 	unsigned int requested_freq;
-	unsigned int enable:1;
 };
 
 /* Per policy Governors sysfs tunables */
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 380a90d3c57e..9b4a7bd04dea 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -30,6 +30,10 @@ static struct clk *pll1_sw_clk;
 static struct clk *step_clk;
 static struct clk *pll2_pfd2_396m_clk;
 
+/* clk used by i.MX6UL */
+static struct clk *pll2_bus_clk;
+static struct clk *secondary_sel_clk;
+
 static struct device *cpu_dev;
 static bool free_opp;
 static struct cpufreq_frequency_table *freq_table;
@@ -91,16 +95,36 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
 	 * The setpoints are selected per PLL/PDF frequencies, so we need to
 	 * reprogram PLL for frequency scaling. The procedure of reprogramming
 	 * PLL1 is as below.
-	 *
+	 * For i.MX6UL, it has a secondary clk mux, the cpu frequency change
+	 * flow is slightly different from other i.MX6 OSC.
+	 * The cpu frequeny change flow for i.MX6(except i.MX6UL) is as below:
 	 *  - Enable pll2_pfd2_396m_clk and reparent pll1_sw_clk to it
 	 *  - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it
 	 *  - Disable pll2_pfd2_396m_clk
 	 */
-	clk_set_parent(step_clk, pll2_pfd2_396m_clk);
-	clk_set_parent(pll1_sw_clk, step_clk);
-	if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
-		clk_set_rate(pll1_sys_clk, new_freq * 1000);
+	if (of_machine_is_compatible("fsl,imx6ul")) {
+		/*
+		 * When changing pll1_sw_clk's parent to pll1_sys_clk,
+		 * CPU may run at higher than 528MHz, this will lead to
+		 * the system unstable if the voltage is lower than the
+		 * voltage of 528MHz, so lower the CPU frequency to one
+		 * half before changing CPU frequency.
+		 */
+		clk_set_rate(arm_clk, (old_freq >> 1) * 1000);
 		clk_set_parent(pll1_sw_clk, pll1_sys_clk);
+		if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk))
+			clk_set_parent(secondary_sel_clk, pll2_bus_clk);
+		else
+			clk_set_parent(secondary_sel_clk, pll2_pfd2_396m_clk);
+		clk_set_parent(step_clk, secondary_sel_clk);
+		clk_set_parent(pll1_sw_clk, step_clk);
+	} else {
+		clk_set_parent(step_clk, pll2_pfd2_396m_clk);
+		clk_set_parent(pll1_sw_clk, step_clk);
+		if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
+			clk_set_rate(pll1_sys_clk, new_freq * 1000);
+			clk_set_parent(pll1_sw_clk, pll1_sys_clk);
+		}
 	}
 
 	/* Ensure the arm clock divider is what we expect */
@@ -186,6 +210,16 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
 		goto put_clk;
 	}
 
+	if (of_machine_is_compatible("fsl,imx6ul")) {
+		pll2_bus_clk = clk_get(cpu_dev, "pll2_bus");
+		secondary_sel_clk = clk_get(cpu_dev, "secondary_sel");
+		if (IS_ERR(pll2_bus_clk) || IS_ERR(secondary_sel_clk)) {
+			dev_err(cpu_dev, "failed to get clocks specific to imx6ul\n");
+			ret = -ENOENT;
+			goto put_clk;
+		}
+	}
+
 	arm_reg = regulator_get(cpu_dev, "arm");
 	pu_reg = regulator_get_optional(cpu_dev, "pu");
 	soc_reg = regulator_get(cpu_dev, "soc");
@@ -331,6 +365,10 @@ put_clk:
 		clk_put(step_clk);
 	if (!IS_ERR(pll2_pfd2_396m_clk))
 		clk_put(pll2_pfd2_396m_clk);
+	if (!IS_ERR(pll2_bus_clk))
+		clk_put(pll2_bus_clk);
+	if (!IS_ERR(secondary_sel_clk))
+		clk_put(secondary_sel_clk);
 	of_node_put(np);
 	return ret;
 }
@@ -350,6 +388,8 @@ static int imx6q_cpufreq_remove(struct platform_device *pdev)
 	clk_put(pll1_sw_clk);
 	clk_put(step_clk);
 	clk_put(pll2_pfd2_396m_clk);
+	clk_put(pll2_bus_clk);
+	clk_put(secondary_sel_clk);
 
 	return 0;
 }
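One detail worth checking in the i.MX6UL path: clk_set_rate() takes Hz while old_freq is in kHz, hence the * 1000. With an illustrative old_freq of 696000 kHz (696 MHz, not a value taken from the patch), (old_freq >> 1) * 1000 requests 348 MHz, comfortably below the 528 MHz threshold the new comment warns about.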
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
index 2faa4216bf2a..79e3ff2771a6 100644
--- a/drivers/cpufreq/integrator-cpufreq.c
+++ b/drivers/cpufreq/integrator-cpufreq.c
@@ -221,6 +221,8 @@ static const struct of_device_id integrator_cpufreq_match[] = {
 	{ },
 };
 
+MODULE_DEVICE_TABLE(of, integrator_cpufreq_match);
+
 static struct platform_driver integrator_cpufreq_driver = {
 	.driver = {
 		.name = "integrator-cpufreq",
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index aa33b92b3e3e..6a5a22192128 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -34,6 +34,10 @@
 #include <asm/cpu_device_id.h>
 #include <asm/cpufeature.h>
 
+#if IS_ENABLED(CONFIG_ACPI)
+#include <acpi/processor.h>
+#endif
+
 #define BYT_RATIOS		0x66a
 #define BYT_VIDS		0x66b
 #define BYT_TURBO_RATIOS	0x66c
@@ -43,7 +47,6 @@
 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
 #define fp_toint(X) ((X) >> FRAC_BITS)
 
-
 static inline int32_t mul_fp(int32_t x, int32_t y)
 {
 	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
@@ -78,6 +81,7 @@ struct pstate_data {
 	int	current_pstate;
 	int	min_pstate;
 	int	max_pstate;
+	int	max_pstate_physical;
 	int	scaling;
 	int	turbo_pstate;
 };
@@ -113,6 +117,9 @@ struct cpudata {
 	u64	prev_mperf;
 	u64	prev_tsc;
 	struct sample sample;
+#if IS_ENABLED(CONFIG_ACPI)
+	struct acpi_processor_performance acpi_perf_data;
+#endif
 };
 
 static struct cpudata **all_cpu_data;
@@ -127,6 +134,7 @@ struct pstate_adjust_policy {
 
 struct pstate_funcs {
 	int (*get_max)(void);
+	int (*get_max_physical)(void);
 	int (*get_min)(void);
 	int (*get_turbo)(void);
 	int (*get_scaling)(void);
@@ -142,6 +150,7 @@ struct cpu_defaults {
 static struct pstate_adjust_policy pid_params;
 static struct pstate_funcs pstate_funcs;
 static int hwp_active;
+static int no_acpi_perf;
 
 struct perf_limits {
 	int no_turbo;
@@ -154,6 +163,8 @@ struct perf_limits {
 	int max_sysfs_pct;
 	int min_policy_pct;
 	int min_sysfs_pct;
+	int max_perf_ctl;
+	int min_perf_ctl;
 };
 
 static struct perf_limits limits = {
@@ -167,8 +178,157 @@ static struct perf_limits limits = {
 	.max_sysfs_pct = 100,
 	.min_policy_pct = 0,
 	.min_sysfs_pct = 0,
+	.max_perf_ctl = 0,
+	.min_perf_ctl = 0,
 };
 
+#if IS_ENABLED(CONFIG_ACPI)
+/*
+ * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and
+ * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and
+ * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state
+ * ratio, out of it only high 8 bits are used. For example 0x1700 is setting
+ * target ratio 0x17. The _PSS control value stores in a format which can be
+ * directly written to PERF_CTL MSR. But in intel_pstate driver this shift
+ * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()).
+ * This function converts the _PSS control value to intel pstate driver format
+ * for comparison and assignment.
+ */
+static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
+{
+	return cpu->acpi_perf_data.states[index].control >> 8;
+}
+
+static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
+{
+	struct cpudata *cpu;
+	int ret;
+	bool turbo_absent = false;
+	int max_pstate_index;
+	int min_pss_ctl, max_pss_ctl, turbo_pss_ctl;
+	int i;
+
+	cpu = all_cpu_data[policy->cpu];
+
+	pr_debug("intel_pstate: default limits 0x%x 0x%x 0x%x\n",
+		 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
+		 cpu->pstate.turbo_pstate);
+
+	if (!cpu->acpi_perf_data.shared_cpu_map &&
+	    zalloc_cpumask_var_node(&cpu->acpi_perf_data.shared_cpu_map,
+				    GFP_KERNEL, cpu_to_node(policy->cpu))) {
+		return -ENOMEM;
+	}
+
+	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
+						  policy->cpu);
+	if (ret)
+		return ret;
+
+	/*
+	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
+	 * guarantee that the states returned by it map to the states in our
+	 * list directly.
+	 */
+	if (cpu->acpi_perf_data.control_register.space_id !=
+	    ACPI_ADR_SPACE_FIXED_HARDWARE)
+		return -EIO;
+
+	pr_debug("intel_pstate: CPU%u - ACPI _PSS perf data\n", policy->cpu);
+	for (i = 0; i < cpu->acpi_perf_data.state_count; i++)
+		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
+			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
+			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
+			 (u32) cpu->acpi_perf_data.states[i].power,
+			 (u32) cpu->acpi_perf_data.states[i].control);
+
+	/*
+	 * If there is only one entry _PSS, simply ignore _PSS and continue as
+	 * usual without taking _PSS into account
+	 */
+	if (cpu->acpi_perf_data.state_count < 2)
+		return 0;
+
+	turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
+	min_pss_ctl = convert_to_native_pstate_format(cpu,
+					cpu->acpi_perf_data.state_count - 1);
+	/* Check if there is a turbo freq in _PSS */
+	if (turbo_pss_ctl <= cpu->pstate.max_pstate &&
+	    turbo_pss_ctl > cpu->pstate.min_pstate) {
+		pr_debug("intel_pstate: no turbo range exists in _PSS\n");
+		limits.no_turbo = limits.turbo_disabled = 1;
+		cpu->pstate.turbo_pstate = cpu->pstate.max_pstate;
+		turbo_absent = true;
+	}
+
+	/* Check if the max non turbo p state < Intel P state max */
+	max_pstate_index = turbo_absent ? 0 : 1;
+	max_pss_ctl = convert_to_native_pstate_format(cpu, max_pstate_index);
+	if (max_pss_ctl < cpu->pstate.max_pstate &&
+	    max_pss_ctl > cpu->pstate.min_pstate)
+		cpu->pstate.max_pstate = max_pss_ctl;
+
+	/* check If min perf > Intel P State min */
+	if (min_pss_ctl > cpu->pstate.min_pstate &&
+	    min_pss_ctl < cpu->pstate.max_pstate) {
+		cpu->pstate.min_pstate = min_pss_ctl;
+		policy->cpuinfo.min_freq = min_pss_ctl * cpu->pstate.scaling;
+	}
+
+	if (turbo_absent)
+		policy->cpuinfo.max_freq = cpu->pstate.max_pstate *
+						cpu->pstate.scaling;
+	else {
+		policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate *
+						cpu->pstate.scaling;
+		/*
+		 * The _PSS table doesn't contain whole turbo frequency range.
+		 * This just contains +1 MHZ above the max non turbo frequency,
+		 * with control value corresponding to max turbo ratio. But
+		 * when cpufreq set policy is called, it will call with this
+		 * max frequency, which will cause a reduced performance as
+		 * this driver uses real max turbo frequency as the max
+		 * frequeny. So correct this frequency in _PSS table to
+		 * correct max turbo frequency based on the turbo ratio.
+		 * Also need to convert to MHz as _PSS freq is in MHz.
+		 */
+		cpu->acpi_perf_data.states[0].core_frequency =
+						turbo_pss_ctl * 100;
+	}
+
+	pr_debug("intel_pstate: Updated limits using _PSS 0x%x 0x%x 0x%x\n",
+		 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
+		 cpu->pstate.turbo_pstate);
+	pr_debug("intel_pstate: policy max_freq=%d Khz min_freq = %d KHz\n",
+		 policy->cpuinfo.max_freq, policy->cpuinfo.min_freq);
+
+	return 0;
+}
+
+static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
+{
+	struct cpudata *cpu;
+
+	if (!no_acpi_perf)
+		return 0;
+
+	cpu = all_cpu_data[policy->cpu];
+	acpi_processor_unregister_performance(policy->cpu);
+	return 0;
+}
+
+#else
+static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
+{
+	return 0;
+}
+
+static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
+{
+	return 0;
+}
+#endif
+
 static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
 			     int deadband, int integral) {
 	pid->setpoint = setpoint;
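A concrete trace of convert_to_native_pstate_format(), using the example from the comment above: a _PSS control value of 0x1700 shifted right by 8 gives ratio 0x17 (23), and since _PSS frequencies are in MHz while core ratios are in 100 MHz units, the turbo correction at the end of intel_pstate_init_perf_limits() computes 23 * 100 = 2300 MHz for states[0].core_frequency.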
@@ -591,7 +751,7 @@ static int core_get_min_pstate(void)
 	return (value >> 40) & 0xFF;
 }
 
-static int core_get_max_pstate(void)
+static int core_get_max_pstate_physical(void)
 {
 	u64 value;
 
@@ -599,6 +759,46 @@ static int core_get_max_pstate_physical(void)
 	return (value >> 8) & 0xFF;
 }
 
+static int core_get_max_pstate(void)
+{
+	u64 tar;
+	u64 plat_info;
+	int max_pstate;
+	int err;
+
+	rdmsrl(MSR_PLATFORM_INFO, plat_info);
+	max_pstate = (plat_info >> 8) & 0xFF;
+
+	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
+	if (!err) {
+		/* Do some sanity checking for safety */
+		if (plat_info & 0x600000000) {
+			u64 tdp_ctrl;
+			u64 tdp_ratio;
+			int tdp_msr;
+
+			err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
+			if (err)
+				goto skip_tar;
+
+			tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
+			err = rdmsrl_safe(tdp_msr, &tdp_ratio);
+			if (err)
+				goto skip_tar;
+
+			if (tdp_ratio - 1 == tar) {
+				max_pstate = tar;
+				pr_debug("max_pstate=TAC %x\n", max_pstate);
+			} else {
+				goto skip_tar;
+			}
+		}
+	}
+
+skip_tar:
+	return max_pstate;
+}
+
 static int core_get_turbo_pstate(void)
 {
 	u64 value;
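The plat_info & 0x600000000 guard tests PLATFORM_INFO bits 33-34, which (per Intel's SDM — an assumption, the patch does not spell it out) report how many ConfigTDP levels the part supports; with no extra levels, TAR is left alone. The tdp_ratio - 1 == tar check then only trusts TAR when it sits exactly one ratio below the active ConfigTDP level, e.g. a ConfigTDP ratio of 0x18 validates a TAR of 0x17.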
@@ -652,6 +852,7 @@ static struct cpu_defaults core_params = {
 	},
 	.funcs = {
 		.get_max = core_get_max_pstate,
+		.get_max_physical = core_get_max_pstate_physical,
 		.get_min = core_get_min_pstate,
 		.get_turbo = core_get_turbo_pstate,
 		.get_scaling = core_get_scaling,
@@ -670,6 +871,7 @@ static struct cpu_defaults byt_params = {
 	},
 	.funcs = {
 		.get_max = byt_get_max_pstate,
+		.get_max_physical = byt_get_max_pstate,
 		.get_min = byt_get_min_pstate,
 		.get_turbo = byt_get_turbo_pstate,
 		.set = byt_set_pstate,
@@ -689,6 +891,7 @@ static struct cpu_defaults knl_params = {
 	},
 	.funcs = {
 		.get_max = core_get_max_pstate,
+		.get_max_physical = core_get_max_pstate_physical,
 		.get_min = core_get_min_pstate,
 		.get_turbo = knl_get_turbo_pstate,
 		.get_scaling = core_get_scaling,
@@ -710,12 +913,23 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 	 * policy, or by cpu specific default values determined through
 	 * experimentation.
 	 */
-	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
-	*max = clamp_t(int, max_perf_adj,
-			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
+	if (limits.max_perf_ctl && limits.max_sysfs_pct >=
+						limits.max_policy_pct) {
+		*max = limits.max_perf_ctl;
+	} else {
+		max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf),
+					       limits.max_perf));
+		*max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate,
+			       cpu->pstate.turbo_pstate);
+	}
 
-	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
-	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
+	if (limits.min_perf_ctl) {
+		*min = limits.min_perf_ctl;
+	} else {
+		min_perf = fp_toint(mul_fp(int_tofp(max_perf),
+					   limits.min_perf));
+		*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
+	}
 }
 
 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
@@ -743,6 +957,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 {
 	cpu->pstate.min_pstate = pstate_funcs.get_min();
 	cpu->pstate.max_pstate = pstate_funcs.get_max();
+	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
 	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
 	cpu->pstate.scaling = pstate_funcs.get_scaling();
 
@@ -761,7 +976,8 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu)
 
 	sample->freq = fp_toint(
 		mul_fp(int_tofp(
-			cpu->pstate.max_pstate * cpu->pstate.scaling / 100),
+			cpu->pstate.max_pstate_physical *
+				cpu->pstate.scaling / 100),
 			core_pct));
 
 	sample->core_pct_busy = (int32_t)core_pct;
@@ -834,7 +1050,7 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 	 * specified pstate.
 	 */
 	core_busy = cpu->sample.core_pct_busy;
-	max_pstate = int_tofp(cpu->pstate.max_pstate);
+	max_pstate = int_tofp(cpu->pstate.max_pstate_physical);
 	current_pstate = int_tofp(cpu->pstate.current_pstate);
 	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
 
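Illustrative numbers for the rescaling above: with core_pct_busy = 50, max_pstate_physical = 32 and current_pstate = 16, core_busy becomes 50 * (32 / 16) = 100 — the observed load is normalized to what it would be at the physical maximum P state, which is exactly why both of these hunks must use max_pstate_physical rather than the possibly TAR-reduced max_pstate.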
@@ -988,6 +1204,12 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
 
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
+#if IS_ENABLED(CONFIG_ACPI)
+	struct cpudata *cpu;
+	int i;
+#endif
+	pr_debug("intel_pstate: %s max %u policy->max %u\n", __func__,
+		 policy->cpuinfo.max_freq, policy->max);
 	if (!policy->cpuinfo.max_freq)
 		return -ENODEV;
 
@@ -1000,6 +1222,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 		limits.max_perf_pct = 100;
 		limits.max_perf = int_tofp(1);
 		limits.no_turbo = 0;
+		limits.max_perf_ctl = 0;
+		limits.min_perf_ctl = 0;
 		return 0;
 	}
 
@@ -1020,6 +1244,23 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
 	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
 
+#if IS_ENABLED(CONFIG_ACPI)
+	cpu = all_cpu_data[policy->cpu];
+	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
+		int control;
+
+		control = convert_to_native_pstate_format(cpu, i);
+		if (control * cpu->pstate.scaling == policy->max)
+			limits.max_perf_ctl = control;
+		if (control * cpu->pstate.scaling == policy->min)
+			limits.min_perf_ctl = control;
+	}
+
+	pr_debug("intel_pstate: max %u policy_max %u perf_ctl [0x%x-0x%x]\n",
+		 policy->cpuinfo.max_freq, policy->max, limits.min_perf_ctl,
+		 limits.max_perf_ctl);
+#endif
+
 	if (hwp_active)
 		intel_pstate_hwp_set();
 
@@ -1074,18 +1315,30 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
 	policy->cpuinfo.max_freq =
 		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+	if (!no_acpi_perf)
+		intel_pstate_init_perf_limits(policy);
+	/*
+	 * If there is no acpi perf data or error, we ignore and use Intel P
+	 * state calculated limits, So this is not fatal error.
+	 */
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
 	cpumask_set_cpu(policy->cpu, policy->cpus);
 
 	return 0;
 }
 
+static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+{
+	return intel_pstate_exit_perf_limits(policy);
+}
+
 static struct cpufreq_driver intel_pstate_driver = {
 	.flags		= CPUFREQ_CONST_LOOPS,
 	.verify		= intel_pstate_verify_policy,
 	.setpolicy	= intel_pstate_set_policy,
 	.get		= intel_pstate_get,
 	.init		= intel_pstate_cpu_init,
+	.exit		= intel_pstate_cpu_exit,
 	.stop_cpu	= intel_pstate_stop_cpu,
 	.name		= "intel_pstate",
 };
@@ -1118,6 +1371,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
 static void copy_cpu_funcs(struct pstate_funcs *funcs)
 {
 	pstate_funcs.get_max   = funcs->get_max;
+	pstate_funcs.get_max_physical = funcs->get_max_physical;
 	pstate_funcs.get_min   = funcs->get_min;
 	pstate_funcs.get_turbo = funcs->get_turbo;
 	pstate_funcs.get_scaling = funcs->get_scaling;
@@ -1126,7 +1380,6 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
 }
 
 #if IS_ENABLED(CONFIG_ACPI)
-#include <acpi/processor.h>
 
 static bool intel_pstate_no_acpi_pss(void)
 {
@@ -1318,6 +1571,9 @@ static int __init intel_pstate_setup(char *str)
 		force_load = 1;
 	if (!strcmp(str, "hwp_only"))
 		hwp_only = 1;
+	if (!strcmp(str, "no_acpi"))
+		no_acpi_perf = 1;
+
 	return 0;
 }
 early_param("intel_pstate", intel_pstate_setup);
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 64994e10638e..cb501386eb6e 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -327,8 +327,14 @@ static void powernv_cpufreq_throttle_check(void *data)
 		if (chips[i].throttled)
 			goto next;
 		chips[i].throttled = true;
-		pr_info("CPU %d on Chip %u has Pmax reduced to %d\n", cpu,
-			chips[i].id, pmsr_pmax);
+		if (pmsr_pmax < powernv_pstate_info.nominal)
+			pr_crit("CPU %d on Chip %u has Pmax reduced below nominal frequency (%d < %d)\n",
+				cpu, chips[i].id, pmsr_pmax,
+				powernv_pstate_info.nominal);
+		else
+			pr_info("CPU %d on Chip %u has Pmax reduced below turbo frequency (%d < %d)\n",
+				cpu, chips[i].id, pmsr_pmax,
+				powernv_pstate_info.max);
 	} else if (chips[i].throttled) {
 		chips[i].throttled = false;
 		pr_info("CPU %d on Chip %u has Pmax restored to %d\n", cpu,
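The split makes log severity track impact: falling below nominal (the guaranteed sustainable frequency) is pr_crit, while merely losing turbo headroom stays at pr_info. A resulting line would read something like (values illustrative only, not taken from the patch): CPU 4 on Chip 0 has Pmax reduced below turbo frequency (80 < 96).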
diff --git a/drivers/cpufreq/tegra20-cpufreq.c b/drivers/cpufreq/tegra20-cpufreq.c
index 8084c7f7e206..2bd62845e9d5 100644
--- a/drivers/cpufreq/tegra20-cpufreq.c
+++ b/drivers/cpufreq/tegra20-cpufreq.c
@@ -175,9 +175,7 @@ static struct cpufreq_driver tegra_cpufreq_driver = {
 	.exit		= tegra_cpu_exit,
 	.name		= "tegra",
 	.attr		= cpufreq_generic_attr,
-#ifdef CONFIG_PM
 	.suspend	= cpufreq_generic_suspend,
-#endif
 };
 
 static int __init tegra_cpufreq_init(void)
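The #ifdef can go because cpufreq_generic_suspend() and the .suspend callback of struct cpufreq_driver appear to be declared unconditionally by the cpufreq core, so the driver compiles identically with or without CONFIG_PM.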