Diffstat (limited to 'drivers/cpufreq/intel_pstate.c')
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 316
1 file changed, 72 insertions(+), 244 deletions(-)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 2e31d097def6..001a532e342e 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -34,14 +34,10 @@
 #include <asm/cpu_device_id.h>
 #include <asm/cpufeature.h>
 
-#if IS_ENABLED(CONFIG_ACPI)
-#include <acpi/processor.h>
-#endif
-
-#define BYT_RATIOS		0x66a
-#define BYT_VIDS		0x66b
-#define BYT_TURBO_RATIOS	0x66c
-#define BYT_TURBO_VIDS		0x66d
+#define ATOM_RATIOS		0x66a
+#define ATOM_VIDS		0x66b
+#define ATOM_TURBO_RATIOS	0x66c
+#define ATOM_TURBO_VIDS		0x66d
 
 #define FRAC_BITS 8
 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
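
Note: the FRAC_BITS macros kept as context above define the driver's 24.8 fixed-point format. A minimal standalone sketch of how this arithmetic works (mul_fp() and div_fp() are the driver's companion helpers, but their bodies are outside this hunk, so treat the details as illustrative):

	#define FRAC_BITS 8
	#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
	#define fp_toint(X) ((X) >> FRAC_BITS)

	/* illustrative companions: multiply/divide two 24.8 fixed-point values */
	static inline int32_t mul_fp(int32_t x, int32_t y)
	{
		return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
	}

	static inline int32_t div_fp(int32_t x, int32_t y)
	{
		return ((int64_t)x << FRAC_BITS) / y;
	}

For example, 75% as a fraction is div_fp(int_tofp(75), int_tofp(100)) == 192, i.e. 0.75 * 256.
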
@@ -117,9 +113,6 @@ struct cpudata {
 	u64	prev_mperf;
 	u64	prev_tsc;
 	struct sample sample;
-#if IS_ENABLED(CONFIG_ACPI)
-	struct acpi_processor_performance acpi_perf_data;
-#endif
 };
 
 static struct cpudata **all_cpu_data;
@@ -150,7 +143,6 @@ struct cpu_defaults {
 static struct pstate_adjust_policy pid_params;
 static struct pstate_funcs pstate_funcs;
 static int hwp_active;
-static int no_acpi_perf;
 
 struct perf_limits {
 	int no_turbo;
@@ -163,8 +155,6 @@ struct perf_limits {
 	int max_sysfs_pct;
 	int min_policy_pct;
 	int min_sysfs_pct;
-	int max_perf_ctl;
-	int min_perf_ctl;
 };
 
 static struct perf_limits performance_limits = {
@@ -191,8 +181,6 @@ static struct perf_limits powersave_limits = {
 	.max_sysfs_pct = 100,
 	.min_policy_pct = 0,
 	.min_sysfs_pct = 0,
-	.max_perf_ctl = 0,
-	.min_perf_ctl = 0,
 };
 
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
@@ -201,153 +189,6 @@ static struct perf_limits *limits = &performance_limits;
 static struct perf_limits *limits = &powersave_limits;
 #endif
 
-#if IS_ENABLED(CONFIG_ACPI)
-/*
- * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and
- * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and
- * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state
- * ratio, out of it only high 8 bits are used. For example 0x1700 is setting
- * target ratio 0x17. The _PSS control value stores in a format which can be
- * directly written to PERF_CTL MSR. But in intel_pstate driver this shift
- * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()).
- * This function converts the _PSS control value to intel pstate driver format
- * for comparison and assignment.
- */
-static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
-{
-	return cpu->acpi_perf_data.states[index].control >> 8;
-}
-
-static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
-{
-	struct cpudata *cpu;
-	int ret;
-	bool turbo_absent = false;
-	int max_pstate_index;
-	int min_pss_ctl, max_pss_ctl, turbo_pss_ctl;
-	int i;
-
-	cpu = all_cpu_data[policy->cpu];
-
-	pr_debug("intel_pstate: default limits 0x%x 0x%x 0x%x\n",
-		 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
-		 cpu->pstate.turbo_pstate);
-
-	if (!cpu->acpi_perf_data.shared_cpu_map &&
-	    zalloc_cpumask_var_node(&cpu->acpi_perf_data.shared_cpu_map,
-				    GFP_KERNEL, cpu_to_node(policy->cpu))) {
-		return -ENOMEM;
-	}
-
-	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
-						  policy->cpu);
-	if (ret)
-		return ret;
-
-	/*
-	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
-	 * guarantee that the states returned by it map to the states in our
-	 * list directly.
-	 */
-	if (cpu->acpi_perf_data.control_register.space_id !=
-	    ACPI_ADR_SPACE_FIXED_HARDWARE)
-		return -EIO;
-
-	pr_debug("intel_pstate: CPU%u - ACPI _PSS perf data\n", policy->cpu);
-	for (i = 0; i < cpu->acpi_perf_data.state_count; i++)
-		pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n",
-			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
-			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
-			 (u32) cpu->acpi_perf_data.states[i].power,
-			 (u32) cpu->acpi_perf_data.states[i].control);
-
-	/*
-	 * If there is only one entry _PSS, simply ignore _PSS and continue as
-	 * usual without taking _PSS into account
-	 */
-	if (cpu->acpi_perf_data.state_count < 2)
-		return 0;
-
-	turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
-	min_pss_ctl = convert_to_native_pstate_format(cpu,
-					cpu->acpi_perf_data.state_count - 1);
-	/* Check if there is a turbo freq in _PSS */
-	if (turbo_pss_ctl <= cpu->pstate.max_pstate &&
-	    turbo_pss_ctl > cpu->pstate.min_pstate) {
-		pr_debug("intel_pstate: no turbo range exists in _PSS\n");
-		limits->no_turbo = limits->turbo_disabled = 1;
-		cpu->pstate.turbo_pstate = cpu->pstate.max_pstate;
-		turbo_absent = true;
-	}
-
-	/* Check if the max non turbo p state < Intel P state max */
-	max_pstate_index = turbo_absent ? 0 : 1;
-	max_pss_ctl = convert_to_native_pstate_format(cpu, max_pstate_index);
-	if (max_pss_ctl < cpu->pstate.max_pstate &&
-	    max_pss_ctl > cpu->pstate.min_pstate)
-		cpu->pstate.max_pstate = max_pss_ctl;
-
-	/* check If min perf > Intel P State min */
-	if (min_pss_ctl > cpu->pstate.min_pstate &&
-	    min_pss_ctl < cpu->pstate.max_pstate) {
-		cpu->pstate.min_pstate = min_pss_ctl;
-		policy->cpuinfo.min_freq = min_pss_ctl * cpu->pstate.scaling;
-	}
-
-	if (turbo_absent)
-		policy->cpuinfo.max_freq = cpu->pstate.max_pstate *
-						cpu->pstate.scaling;
-	else {
-		policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate *
-						cpu->pstate.scaling;
-		/*
-		 * The _PSS table doesn't contain whole turbo frequency range.
-		 * This just contains +1 MHZ above the max non turbo frequency,
-		 * with control value corresponding to max turbo ratio. But
-		 * when cpufreq set policy is called, it will call with this
-		 * max frequency, which will cause a reduced performance as
-		 * this driver uses real max turbo frequency as the max
-		 * frequeny. So correct this frequency in _PSS table to
-		 * correct max turbo frequency based on the turbo ratio.
-		 * Also need to convert to MHz as _PSS freq is in MHz.
-		 */
-		cpu->acpi_perf_data.states[0].core_frequency =
-						turbo_pss_ctl * 100;
-	}
-
-	pr_debug("intel_pstate: Updated limits using _PSS 0x%x 0x%x 0x%x\n",
-		 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
-		 cpu->pstate.turbo_pstate);
-	pr_debug("intel_pstate: policy max_freq=%d Khz min_freq = %d KHz\n",
-		 policy->cpuinfo.max_freq, policy->cpuinfo.min_freq);
-
-	return 0;
-}
-
-static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
-{
-	struct cpudata *cpu;
-
-	if (!no_acpi_perf)
-		return 0;
-
-	cpu = all_cpu_data[policy->cpu];
-	acpi_processor_unregister_performance(policy->cpu);
-	return 0;
-}
-
-#else
-static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
-{
-	return 0;
-}
-
-static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
-{
-	return 0;
-}
-#endif
-
 static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
 			     int deadband, int integral) {
 	pid->setpoint = setpoint;
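
Note: the deleted block's own comment documents the PERF_CTL encoding it had to undo — the target ratio sits in the high 8 bits of the 16-bit control word, so the conversion was a single shift. Restating the comment's example as a worked instance (the 100 MHz-per-ratio step is what the removed "turbo_pss_ctl * 100" fix-up assumed):

	int control = 0x1700;		/* _PSS control word in PERF_CTL format */
	int ratio = control >> 8;	/* == 0x17, i.e. target ratio 23 */
	int freq_mhz = ratio * 100;	/* 2300 MHz on a 100 MHz-scaled part */
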
@@ -687,31 +528,31 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
 }
 
-static int byt_get_min_pstate(void)
+static int atom_get_min_pstate(void)
 {
 	u64 value;
 
-	rdmsrl(BYT_RATIOS, value);
+	rdmsrl(ATOM_RATIOS, value);
 	return (value >> 8) & 0x7F;
 }
 
-static int byt_get_max_pstate(void)
+static int atom_get_max_pstate(void)
 {
 	u64 value;
 
-	rdmsrl(BYT_RATIOS, value);
+	rdmsrl(ATOM_RATIOS, value);
 	return (value >> 16) & 0x7F;
 }
 
-static int byt_get_turbo_pstate(void)
+static int atom_get_turbo_pstate(void)
 {
 	u64 value;
 
-	rdmsrl(BYT_TURBO_RATIOS, value);
+	rdmsrl(ATOM_TURBO_RATIOS, value);
 	return value & 0x7F;
 }
 
-static void byt_set_pstate(struct cpudata *cpudata, int pstate)
+static void atom_set_pstate(struct cpudata *cpudata, int pstate)
 {
 	u64 val;
 	int32_t vid_fp;
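
Note: each renamed accessor unpacks one 7-bit ratio field. The shifts imply the min ratio in bits 14:8 and the max ratio in bits 22:16 of ATOM_RATIOS, and the turbo ratio in bits 6:0 of ATOM_TURBO_RATIOS. With a made-up raw value for illustration:

	u64 v = 0x00200A00;			/* hypothetical ATOM_RATIOS contents */
	int min_ratio = (v >> 8) & 0x7F;	/* 0x0A = 10 */
	int max_ratio = (v >> 16) & 0x7F;	/* 0x20 = 32 */
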
@@ -736,27 +577,42 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
 	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
 }
 
-#define BYT_BCLK_FREQS 5
-static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};
-
-static int byt_get_scaling(void)
+static int silvermont_get_scaling(void)
 {
 	u64 value;
 	int i;
+	/* Defined in Table 35-6 from SDM (Sept 2015) */
+	static int silvermont_freq_table[] = {
+		83300, 100000, 133300, 116700, 80000};
 
 	rdmsrl(MSR_FSB_FREQ, value);
-	i = value & 0x3;
+	i = value & 0x7;
+	WARN_ON(i > 4);
 
-	BUG_ON(i > BYT_BCLK_FREQS);
+	return silvermont_freq_table[i];
+}
 
-	return byt_freq_table[i] * 100;
+static int airmont_get_scaling(void)
+{
+	u64 value;
+	int i;
+	/* Defined in Table 35-10 from SDM (Sept 2015) */
+	static int airmont_freq_table[] = {
+		83300, 100000, 133300, 116700, 80000,
+		93300, 90000, 88900, 87500};
+
+	rdmsrl(MSR_FSB_FREQ, value);
+	i = value & 0xF;
+	WARN_ON(i > 8);
+
+	return airmont_freq_table[i];
 }
 
-static void byt_get_vid(struct cpudata *cpudata)
+static void atom_get_vid(struct cpudata *cpudata)
 {
 	u64 value;
 
-	rdmsrl(BYT_VIDS, value);
+	rdmsrl(ATOM_VIDS, value);
 	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
 	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
 	cpudata->vid.ratio = div_fp(
@@ -764,7 +620,7 @@ static void byt_get_vid(struct cpudata *cpudata)
 			int_tofp(cpudata->pstate.max_pstate -
 				 cpudata->pstate.min_pstate));
 
-	rdmsrl(BYT_TURBO_VIDS, value);
+	rdmsrl(ATOM_TURBO_VIDS, value);
 	cpudata->vid.turbo = value & 0x7f;
 }
 
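
Note: atom_get_vid() caches the endpoints of the voltage range plus a per-ratio slope. The consumer is atom_set_pstate(), whose interpolation is untouched by this patch apart from the rename; the exact lines sit outside this diff, so the following is only a sketch of that path:

	/* sketch: linear VID interpolation when programming a P state */
	vid_fp = cpudata->vid.min +
		 mul_fp(int_tofp(pstate - cpudata->pstate.min_pstate),
			cpudata->vid.ratio);
	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
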
@@ -885,7 +741,7 @@ static struct cpu_defaults core_params = {
 	},
 };
 
-static struct cpu_defaults byt_params = {
+static struct cpu_defaults silvermont_params = {
 	.pid_policy = {
 		.sample_rate_ms = 10,
 		.deadband = 0,
@@ -895,13 +751,33 @@ static struct cpu_defaults byt_params = {
 		.i_gain_pct = 4,
 	},
 	.funcs = {
-		.get_max = byt_get_max_pstate,
-		.get_max_physical = byt_get_max_pstate,
-		.get_min = byt_get_min_pstate,
-		.get_turbo = byt_get_turbo_pstate,
-		.set = byt_set_pstate,
-		.get_scaling = byt_get_scaling,
-		.get_vid = byt_get_vid,
+		.get_max = atom_get_max_pstate,
+		.get_max_physical = atom_get_max_pstate,
+		.get_min = atom_get_min_pstate,
+		.get_turbo = atom_get_turbo_pstate,
+		.set = atom_set_pstate,
+		.get_scaling = silvermont_get_scaling,
+		.get_vid = atom_get_vid,
+	},
+};
+
+static struct cpu_defaults airmont_params = {
+	.pid_policy = {
+		.sample_rate_ms = 10,
+		.deadband = 0,
+		.setpoint = 60,
+		.p_gain_pct = 14,
+		.d_gain_pct = 0,
+		.i_gain_pct = 4,
+	},
+	.funcs = {
+		.get_max = atom_get_max_pstate,
+		.get_max_physical = atom_get_max_pstate,
+		.get_min = atom_get_min_pstate,
+		.get_turbo = atom_get_turbo_pstate,
+		.set = atom_set_pstate,
+		.get_scaling = airmont_get_scaling,
+		.get_vid = atom_get_vid,
 	},
 };
 
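
Note: silvermont_params and airmont_params now differ only in .get_scaling; everything else still comes from the shared atom_* helpers. The scaling value is kHz per ratio step, and cpufreq frequencies are derived as pstate * scaling (see the cpuinfo assignments later in this patch). For example, assuming a ratio of 23 on the 83.3 MHz Silvermont bus entry:

	int scaling = 83300;		/* kHz per P-state ratio step */
	int ratio = 23;			/* hypothetical turbo ratio */
	int freq_khz = ratio * scaling;	/* 1915900 kHz, about 1.92 GHz */
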
@@ -938,23 +814,12 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 	 * policy, or by cpu specific default values determined through
 	 * experimentation.
 	 */
-	if (limits->max_perf_ctl && limits->max_sysfs_pct >=
-					limits->max_policy_pct) {
-		*max = limits->max_perf_ctl;
-	} else {
-		max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf),
-					limits->max_perf));
-		*max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate,
-			       cpu->pstate.turbo_pstate);
-	}
+	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits->max_perf));
+	*max = clamp_t(int, max_perf_adj,
+			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
 
-	if (limits->min_perf_ctl) {
-		*min = limits->min_perf_ctl;
-	} else {
-		min_perf = fp_toint(mul_fp(int_tofp(max_perf),
-				    limits->min_perf));
-		*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
-	}
+	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits->min_perf));
+	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
 }
 
 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
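
Note: with the _PSS override gone, both bounds reduce to percentage math in FRAC_BITS form. A worked pass through the surviving path, assuming a turbo pstate of 32 and a 75% max limit (0.75 == 192/256):

	int max_perf = 32;	/* assumed turbo pstate */
	int32_t limit = 192;	/* div_fp(int_tofp(75), int_tofp(100)) */
	int max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limit));
	/* (8192 * 192) >> 8 == 6144; 6144 >> 8 == 24, which is then
	 * clamped to [min_pstate, turbo_pstate] */
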
@@ -1153,7 +1018,7 @@ static void intel_pstate_timer_func(unsigned long __data)
 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 	ICPU(0x2a, core_params),
 	ICPU(0x2d, core_params),
-	ICPU(0x37, byt_params),
+	ICPU(0x37, silvermont_params),
 	ICPU(0x3a, core_params),
 	ICPU(0x3c, core_params),
 	ICPU(0x3d, core_params),
@@ -1162,7 +1027,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 	ICPU(0x45, core_params),
 	ICPU(0x46, core_params),
 	ICPU(0x47, core_params),
-	ICPU(0x4c, byt_params),
+	ICPU(0x4c, airmont_params),
 	ICPU(0x4e, core_params),
 	ICPU(0x4f, core_params),
 	ICPU(0x5e, core_params),
@@ -1229,12 +1094,6 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
 
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
-#if IS_ENABLED(CONFIG_ACPI)
-	struct cpudata *cpu;
-	int i;
-#endif
-	pr_debug("intel_pstate: %s max %u policy->max %u\n", __func__,
-				policy->cpuinfo.max_freq, policy->max);
 	if (!policy->cpuinfo.max_freq)
 		return -ENODEV;
 
@@ -1270,23 +1129,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
 				  int_tofp(100));
 
-#if IS_ENABLED(CONFIG_ACPI)
-	cpu = all_cpu_data[policy->cpu];
-	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
-		int control;
-
-		control = convert_to_native_pstate_format(cpu, i);
-		if (control * cpu->pstate.scaling == policy->max)
-			limits->max_perf_ctl = control;
-		if (control * cpu->pstate.scaling == policy->min)
-			limits->min_perf_ctl = control;
-	}
-
-	pr_debug("intel_pstate: max %u policy_max %u perf_ctl [0x%x-0x%x]\n",
-		 policy->cpuinfo.max_freq, policy->max, limits->min_perf_ctl,
-		 limits->max_perf_ctl);
-#endif
-
 	if (hwp_active)
 		intel_pstate_hwp_set();
 
@@ -1341,30 +1183,18 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
 	policy->cpuinfo.max_freq =
 		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
-	if (!no_acpi_perf)
-		intel_pstate_init_perf_limits(policy);
-	/*
-	 * If there is no acpi perf data or error, we ignore and use Intel P
-	 * state calculated limits, So this is not fatal error.
-	 */
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
 	cpumask_set_cpu(policy->cpu, policy->cpus);
 
 	return 0;
 }
 
-static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
-{
-	return intel_pstate_exit_perf_limits(policy);
-}
-
 static struct cpufreq_driver intel_pstate_driver = {
 	.flags		= CPUFREQ_CONST_LOOPS,
 	.verify		= intel_pstate_verify_policy,
 	.setpolicy	= intel_pstate_set_policy,
 	.get		= intel_pstate_get,
 	.init		= intel_pstate_cpu_init,
-	.exit		= intel_pstate_cpu_exit,
 	.stop_cpu	= intel_pstate_stop_cpu,
 	.name		= "intel_pstate",
 };
@@ -1406,6 +1236,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
 }
 
 #if IS_ENABLED(CONFIG_ACPI)
+#include <acpi/processor.h>
 
 static bool intel_pstate_no_acpi_pss(void)
 {
@@ -1601,9 +1432,6 @@ static int __init intel_pstate_setup(char *str)
 		force_load = 1;
 	if (!strcmp(str, "hwp_only"))
 		hwp_only = 1;
-	if (!strcmp(str, "no_acpi"))
-		no_acpi_perf = 1;
-
 	return 0;
 }
 early_param("intel_pstate", intel_pstate_setup);
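
Note: these strcmp() checks parse the intel_pstate= early boot parameter, so after this patch a command line such as

	intel_pstate=hwp_only

still takes effect, while the removed intel_pstate=no_acpi keyword is now silently ignored.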