author	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2017-04-28 17:13:04 -0400
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2017-04-28 17:13:04 -0400
commit	2dee4b0e0b83c9d045d6cfe168d6798ead3fe33c (patch)
tree	673cc2e0abf8b17910ca1f2ff70784bfb790a65a /drivers/cpufreq
parent	939dc6f51e90c95a7d88034da48b747f01873bce (diff)
parent	630e57573efa20b586c808400005d0ebfb93fc6a (diff)
Merge intel_pstate driver updates for v4.12.
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--	drivers/cpufreq/intel_pstate.c	908
1 file changed, 408 insertions(+), 500 deletions(-)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 283491f742d3..c31b72b16c2b 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -37,6 +37,9 @@
 #include <asm/cpufeature.h>
 #include <asm/intel-family.h>
 
+#define INTEL_PSTATE_DEFAULT_SAMPLING_INTERVAL	(10 * NSEC_PER_MSEC)
+#define INTEL_PSTATE_HWP_SAMPLING_INTERVAL	(50 * NSEC_PER_MSEC)
+
 #define INTEL_CPUFREQ_TRANSITION_LATENCY	20000
 
 #ifdef CONFIG_ACPI
@@ -74,6 +77,11 @@ static inline int ceiling_fp(int32_t x)
 	return ret;
 }
 
+static inline int32_t percent_fp(int percent)
+{
+	return div_fp(percent, 100);
+}
+
 static inline u64 mul_ext_fp(u64 x, u64 y)
 {
 	return (x * y) >> EXT_FRAC_BITS;
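
The new percent_fp() helper folds the recurring div_fp(percent, 100) pattern into one place. A rough standalone sketch of the arithmetic follows, assuming the driver's usual FRAC_BITS value of 8; only the kernel source is authoritative here:

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define div_fp(x, y) (int_tofp(x) / (y))

static int32_t percent_fp(int percent)
{
	return div_fp(percent, 100);
}

int main(void)
{
	/* 97% becomes 248/256, i.e. roughly 0.97 in Q-8 fixed point. */
	printf("percent_fp(97) = %d\n", percent_fp(97));
	return 0;
}
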
@@ -186,45 +194,22 @@ struct _pid {
 };
 
 /**
- * struct perf_limits - Store user and policy limits
- * @no_turbo:		User requested turbo state from intel_pstate sysfs
- * @turbo_disabled:	Platform turbo status either from msr
- *			MSR_IA32_MISC_ENABLE or when maximum available pstate
- *			matches the maximum turbo pstate
- * @max_perf_pct:	Effective maximum performance limit in percentage, this
- *			is minimum of either limits enforced by cpufreq policy
- *			or limits from user set limits via intel_pstate sysfs
- * @min_perf_pct:	Effective minimum performance limit in percentage, this
- *			is maximum of either limits enforced by cpufreq policy
- *			or limits from user set limits via intel_pstate sysfs
- * @max_perf:		This is a scaled value between 0 to 255 for max_perf_pct
- *			This value is used to limit max pstate
- * @min_perf:		This is a scaled value between 0 to 255 for min_perf_pct
- *			This value is used to limit min pstate
- * @max_policy_pct:	The maximum performance in percentage enforced by
- *			cpufreq setpolicy interface
- * @max_sysfs_pct:	The maximum performance in percentage enforced by
- *			intel pstate sysfs interface, unused when per cpu
- *			controls are enforced
- * @min_policy_pct:	The minimum performance in percentage enforced by
- *			cpufreq setpolicy interface
- * @min_sysfs_pct:	The minimum performance in percentage enforced by
- *			intel pstate sysfs interface, unused when per cpu
- *			controls are enforced
- *
- * Storage for user and policy defined limits.
+ * struct global_params - Global parameters, mostly tunable via sysfs.
+ * @no_turbo:		Whether or not to use turbo P-states.
+ * @turbo_disabled:	Whether or not turbo P-states are available at all,
+ *			based on the MSR_IA32_MISC_ENABLE value and whether or
+ *			not the maximum reported turbo P-state is different from
+ *			the maximum reported non-turbo one.
+ * @min_perf_pct:	Minimum capacity limit in percent of the maximum turbo
+ *			P-state capacity.
+ * @max_perf_pct:	Maximum capacity limit in percent of the maximum turbo
+ *			P-state capacity.
  */
-struct perf_limits {
-	int no_turbo;
-	int turbo_disabled;
+struct global_params {
+	bool no_turbo;
+	bool turbo_disabled;
 	int max_perf_pct;
 	int min_perf_pct;
-	int32_t max_perf;
-	int32_t min_perf;
-	int max_policy_pct;
-	int max_sysfs_pct;
-	int min_policy_pct;
-	int min_sysfs_pct;
 };
 
 /**
@@ -245,9 +230,10 @@ struct perf_limits {
  * @prev_cummulative_iowait: IO Wait time difference from last and
  *			current sample
  * @sample:		Storage for storing last Sample data
- * @perf_limits:	Pointer to perf_limit unique to this CPU
- *			Not all field in the structure are applicable
- *			when per cpu controls are enforced
+ * @min_perf:		Minimum capacity limit as a fraction of the maximum
+ *			turbo P-state capacity.
+ * @max_perf:		Maximum capacity limit as a fraction of the maximum
+ *			turbo P-state capacity.
  * @acpi_perf_data:	Stores ACPI perf information read from _PSS
  * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
  * @epp_powersave:	Last saved HWP energy performance preference
@@ -279,7 +265,8 @@ struct cpudata {
 	u64	prev_tsc;
 	u64	prev_cummulative_iowait;
 	struct sample sample;
-	struct perf_limits *perf_limits;
+	int32_t	min_perf;
+	int32_t	max_perf;
 #ifdef CONFIG_ACPI
 	struct acpi_processor_performance acpi_perf_data;
 	bool valid_pss_table;
@@ -324,7 +311,7 @@ struct pstate_adjust_policy {
  * @get_scaling:	Callback to get frequency scaling factor
  * @get_val:		Callback to convert P state to actual MSR write value
  * @get_vid:		Callback to get VID data for Atom platforms
- * @get_target_pstate:	Callback to a function to calculate next P state to use
+ * @update_util:	Active mode utilization update callback.
  *
  * Core and Atom CPU models have different way to get P State limits. This
  * structure is used to store those callbacks.
@@ -337,43 +324,31 @@ struct pstate_funcs {
 	int (*get_scaling)(void);
 	u64 (*get_val)(struct cpudata*, int pstate);
 	void (*get_vid)(struct cpudata *);
-	int32_t (*get_target_pstate)(struct cpudata *);
+	void (*update_util)(struct update_util_data *data, u64 time,
+			    unsigned int flags);
 };
 
-/**
- * struct cpu_defaults - Per CPU model default config data
- * @pid_policy:	PID config data
- * @funcs:	Callback function data
- */
-struct cpu_defaults {
-	struct pstate_adjust_policy pid_policy;
-	struct pstate_funcs funcs;
+static struct pstate_funcs pstate_funcs __read_mostly;
+static struct pstate_adjust_policy pid_params __read_mostly = {
+	.sample_rate_ms = 10,
+	.sample_rate_ns = 10 * NSEC_PER_MSEC,
+	.deadband = 0,
+	.setpoint = 97,
+	.p_gain_pct = 20,
+	.d_gain_pct = 0,
+	.i_gain_pct = 0,
 };
 
-static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
-static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);
-
-static struct pstate_adjust_policy pid_params __read_mostly;
-static struct pstate_funcs pstate_funcs __read_mostly;
 static int hwp_active __read_mostly;
 static bool per_cpu_limits __read_mostly;
 
-static bool driver_registered __read_mostly;
+static struct cpufreq_driver *intel_pstate_driver __read_mostly;
 
 #ifdef CONFIG_ACPI
 static bool acpi_ppc;
 #endif
 
-static struct perf_limits global;
-
-static void intel_pstate_init_limits(struct perf_limits *limits)
-{
-	memset(limits, 0, sizeof(*limits));
-	limits->max_perf_pct = 100;
-	limits->max_perf = int_ext_tofp(1);
-	limits->max_policy_pct = 100;
-	limits->max_sysfs_pct = 100;
-}
+static struct global_params global;
 
 static DEFINE_MUTEX(intel_pstate_driver_lock);
 static DEFINE_MUTEX(intel_pstate_limits_lock);
@@ -530,29 +505,6 @@ static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
 }
 #endif
 
-static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
-			     int deadband, int integral) {
-	pid->setpoint = int_tofp(setpoint);
-	pid->deadband = int_tofp(deadband);
-	pid->integral = int_tofp(integral);
-	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
-}
-
-static inline void pid_p_gain_set(struct _pid *pid, int percent)
-{
-	pid->p_gain = div_fp(percent, 100);
-}
-
-static inline void pid_i_gain_set(struct _pid *pid, int percent)
-{
-	pid->i_gain = div_fp(percent, 100);
-}
-
-static inline void pid_d_gain_set(struct _pid *pid, int percent)
-{
-	pid->d_gain = div_fp(percent, 100);
-}
-
 static signed int pid_calc(struct _pid *pid, int32_t busy)
 {
 	signed int result;
@@ -590,23 +542,17 @@ static signed int pid_calc(struct _pid *pid, int32_t busy)
 	return (signed int)fp_toint(result);
 }
 
-static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
-{
-	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
-	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
-	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);
-
-	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
-}
-
-static inline void intel_pstate_reset_all_pid(void)
+static inline void intel_pstate_pid_reset(struct cpudata *cpu)
 {
-	unsigned int cpu;
+	struct _pid *pid = &cpu->pid;
 
-	for_each_online_cpu(cpu) {
-		if (all_cpu_data[cpu])
-			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
-	}
+	pid->p_gain = percent_fp(pid_params.p_gain_pct);
+	pid->d_gain = percent_fp(pid_params.d_gain_pct);
+	pid->i_gain = percent_fp(pid_params.i_gain_pct);
+	pid->setpoint = int_tofp(pid_params.setpoint);
+	pid->last_err = pid->setpoint - int_tofp(100);
+	pid->deadband = int_tofp(pid_params.deadband);
+	pid->integral = 0;
 }
 
 static inline void update_turbo_state(void)
@@ -621,6 +567,14 @@ static inline void update_turbo_state(void)
 		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
 }
 
+static int min_perf_pct_min(void)
+{
+	struct cpudata *cpu = all_cpu_data[0];
+
+	return DIV_ROUND_UP(cpu->pstate.min_pstate * 100,
+			    cpu->pstate.turbo_pstate);
+}
+
 static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
 {
 	u64 epb;
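
The new min_perf_pct_min() computes the lowest percentage the global minimum limit may take, so that it can never be set below the hardware's minimum P-state. A quick worked example, with made-up ratio values:

#include <stdio.h>

/* Same round-up division as the kernel's DIV_ROUND_UP(). */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int min_pstate = 8, turbo_pstate = 35;	/* hypothetical ratios */

	/* 8 of 35 ratio steps, rounded up to a whole percent: 23%. */
	printf("floor = %d%%\n", DIV_ROUND_UP(min_pstate * 100, turbo_pstate));
	return 0;
}
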
@@ -838,96 +792,80 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
 	NULL,
 };
 
-static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
+static void intel_pstate_hwp_set(unsigned int cpu)
 {
-	int min, hw_min, max, hw_max, cpu;
-	struct perf_limits *perf_limits = &global;
+	struct cpudata *cpu_data = all_cpu_data[cpu];
+	int min, hw_min, max, hw_max;
 	u64 value, cap;
+	s16 epp;
 
-	for_each_cpu(cpu, policy->cpus) {
-		struct cpudata *cpu_data = all_cpu_data[cpu];
-		s16 epp;
-
-		if (per_cpu_limits)
-			perf_limits = all_cpu_data[cpu]->perf_limits;
-
-		rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
-		hw_min = HWP_LOWEST_PERF(cap);
-		if (global.no_turbo)
-			hw_max = HWP_GUARANTEED_PERF(cap);
-		else
-			hw_max = HWP_HIGHEST_PERF(cap);
-
-		max = fp_ext_toint(hw_max * perf_limits->max_perf);
-		if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
-			min = max;
-		else
-			min = fp_ext_toint(hw_max * perf_limits->min_perf);
+	rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
+	hw_min = HWP_LOWEST_PERF(cap);
+	if (global.no_turbo)
+		hw_max = HWP_GUARANTEED_PERF(cap);
+	else
+		hw_max = HWP_HIGHEST_PERF(cap);
+
+	max = fp_ext_toint(hw_max * cpu_data->max_perf);
+	if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
+		min = max;
+	else
+		min = fp_ext_toint(hw_max * cpu_data->min_perf);
 
-		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
+	rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
 
-		value &= ~HWP_MIN_PERF(~0L);
-		value |= HWP_MIN_PERF(min);
+	value &= ~HWP_MIN_PERF(~0L);
+	value |= HWP_MIN_PERF(min);
 
-		value &= ~HWP_MAX_PERF(~0L);
-		value |= HWP_MAX_PERF(max);
+	value &= ~HWP_MAX_PERF(~0L);
+	value |= HWP_MAX_PERF(max);
 
-		if (cpu_data->epp_policy == cpu_data->policy)
-			goto skip_epp;
+	if (cpu_data->epp_policy == cpu_data->policy)
+		goto skip_epp;
 
-		cpu_data->epp_policy = cpu_data->policy;
+	cpu_data->epp_policy = cpu_data->policy;
 
-		if (cpu_data->epp_saved >= 0) {
-			epp = cpu_data->epp_saved;
-			cpu_data->epp_saved = -EINVAL;
-			goto update_epp;
-		}
+	if (cpu_data->epp_saved >= 0) {
+		epp = cpu_data->epp_saved;
+		cpu_data->epp_saved = -EINVAL;
+		goto update_epp;
+	}
 
-		if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
-			epp = intel_pstate_get_epp(cpu_data, value);
-			cpu_data->epp_powersave = epp;
-			/* If EPP read was failed, then don't try to write */
-			if (epp < 0)
-				goto skip_epp;
+	if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
+		epp = intel_pstate_get_epp(cpu_data, value);
+		cpu_data->epp_powersave = epp;
+		/* If EPP read was failed, then don't try to write */
+		if (epp < 0)
+			goto skip_epp;
 
-			epp = 0;
-		} else {
-			/* skip setting EPP, when saved value is invalid */
-			if (cpu_data->epp_powersave < 0)
-				goto skip_epp;
+		epp = 0;
+	} else {
+		/* skip setting EPP, when saved value is invalid */
+		if (cpu_data->epp_powersave < 0)
+			goto skip_epp;
 
-			/*
-			 * No need to restore EPP when it is not zero. This
-			 * means:
-			 *  - Policy is not changed
-			 *  - user has manually changed
-			 *  - Error reading EPB
-			 */
-			epp = intel_pstate_get_epp(cpu_data, value);
-			if (epp)
-				goto skip_epp;
+		/*
+		 * No need to restore EPP when it is not zero. This
+		 * means:
+		 *  - Policy is not changed
+		 *  - user has manually changed
+		 *  - Error reading EPB
+		 */
+		epp = intel_pstate_get_epp(cpu_data, value);
+		if (epp)
+			goto skip_epp;
 
-			epp = cpu_data->epp_powersave;
-		}
+		epp = cpu_data->epp_powersave;
+	}
 update_epp:
-		if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
-			value &= ~GENMASK_ULL(31, 24);
-			value |= (u64)epp << 24;
-		} else {
-			intel_pstate_set_epb(cpu, epp);
-		}
+	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+		value &= ~GENMASK_ULL(31, 24);
+		value |= (u64)epp << 24;
+	} else {
+		intel_pstate_set_epb(cpu, epp);
+	}
 skip_epp:
-		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
-	}
-}
-
-static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
-{
-	if (hwp_active)
-		intel_pstate_hwp_set(policy);
-
-	return 0;
+	wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
 }
 
 static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
@@ -944,20 +882,17 @@ static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
 
 static int intel_pstate_resume(struct cpufreq_policy *policy)
 {
-	int ret;
-
 	if (!hwp_active)
 		return 0;
 
 	mutex_lock(&intel_pstate_limits_lock);
 
 	all_cpu_data[policy->cpu]->epp_policy = 0;
-
-	ret = intel_pstate_hwp_set_policy(policy);
+	intel_pstate_hwp_set(policy->cpu);
 
 	mutex_unlock(&intel_pstate_limits_lock);
 
-	return ret;
+	return 0;
 }
 
 static void intel_pstate_update_policies(void)
@@ -971,9 +906,14 @@ static void intel_pstate_update_policies(void)
 /************************** debugfs begin ************************/
 static int pid_param_set(void *data, u64 val)
 {
+	unsigned int cpu;
+
 	*(u32 *)data = val;
 	pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
-	intel_pstate_reset_all_pid();
+	for_each_possible_cpu(cpu)
+		if (all_cpu_data[cpu])
+			intel_pstate_pid_reset(all_cpu_data[cpu]);
+
 	return 0;
 }
 
@@ -1084,7 +1024,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
 
 	mutex_lock(&intel_pstate_driver_lock);
 
-	if (!driver_registered) {
+	if (!intel_pstate_driver) {
 		mutex_unlock(&intel_pstate_driver_lock);
 		return -EAGAIN;
 	}
@@ -1109,7 +1049,7 @@ static ssize_t show_num_pstates(struct kobject *kobj,
 
 	mutex_lock(&intel_pstate_driver_lock);
 
-	if (!driver_registered) {
+	if (!intel_pstate_driver) {
 		mutex_unlock(&intel_pstate_driver_lock);
 		return -EAGAIN;
 	}
@@ -1129,7 +1069,7 @@ static ssize_t show_no_turbo(struct kobject *kobj,
 
 	mutex_lock(&intel_pstate_driver_lock);
 
-	if (!driver_registered) {
+	if (!intel_pstate_driver) {
 		mutex_unlock(&intel_pstate_driver_lock);
 		return -EAGAIN;
 	}
@@ -1157,7 +1097,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
 
 	mutex_lock(&intel_pstate_driver_lock);
 
-	if (!driver_registered) {
+	if (!intel_pstate_driver) {
 		mutex_unlock(&intel_pstate_driver_lock);
 		return -EAGAIN;
 	}
@@ -1174,6 +1114,15 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
 
 	global.no_turbo = clamp_t(int, input, 0, 1);
 
+	if (global.no_turbo) {
+		struct cpudata *cpu = all_cpu_data[0];
+		int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;
+
+		/* Squash the global minimum into the permitted range. */
+		if (global.min_perf_pct > pct)
+			global.min_perf_pct = pct;
+	}
+
 	mutex_unlock(&intel_pstate_limits_lock);
 
 	intel_pstate_update_policies();
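
When turbo gets disabled, the largest reachable capacity is max_pstate rather than turbo_pstate, so a previously valid global minimum may now exceed what is reachable; the new block above squashes it. Illustrative arithmetic, with hypothetical ratio values:

#include <stdio.h>

int main(void)
{
	int max_pstate = 24, turbo_pstate = 32;	/* hypothetical */
	int min_perf_pct = 90;			/* user-requested minimum */
	int pct = max_pstate * 100 / turbo_pstate;	/* 75 */

	if (min_perf_pct > pct)
		min_perf_pct = pct;
	printf("effective minimum: %d%%\n", min_perf_pct);	/* 75% */
	return 0;
}
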
@@ -1195,18 +1144,14 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 
 	mutex_lock(&intel_pstate_driver_lock);
 
-	if (!driver_registered) {
+	if (!intel_pstate_driver) {
 		mutex_unlock(&intel_pstate_driver_lock);
 		return -EAGAIN;
 	}
 
 	mutex_lock(&intel_pstate_limits_lock);
 
-	global.max_sysfs_pct = clamp_t(int, input, 0 , 100);
-	global.max_perf_pct = min(global.max_policy_pct, global.max_sysfs_pct);
-	global.max_perf_pct = max(global.min_policy_pct, global.max_perf_pct);
-	global.max_perf_pct = max(global.min_perf_pct, global.max_perf_pct);
-	global.max_perf = percent_ext_fp(global.max_perf_pct);
+	global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100);
 
 	mutex_unlock(&intel_pstate_limits_lock);
 
@@ -1229,18 +1174,15 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
 
 	mutex_lock(&intel_pstate_driver_lock);
 
-	if (!driver_registered) {
+	if (!intel_pstate_driver) {
 		mutex_unlock(&intel_pstate_driver_lock);
 		return -EAGAIN;
 	}
 
 	mutex_lock(&intel_pstate_limits_lock);
 
-	global.min_sysfs_pct = clamp_t(int, input, 0 , 100);
-	global.min_perf_pct = max(global.min_policy_pct, global.min_sysfs_pct);
-	global.min_perf_pct = min(global.max_policy_pct, global.min_perf_pct);
-	global.min_perf_pct = min(global.max_perf_pct, global.min_perf_pct);
-	global.min_perf = percent_ext_fp(global.min_perf_pct);
+	global.min_perf_pct = clamp_t(int, input,
+				      min_perf_pct_min(), global.max_perf_pct);
 
 	mutex_unlock(&intel_pstate_limits_lock);
 
@@ -1554,132 +1496,10 @@ static int knl_get_turbo_pstate(void)
 	return ret;
 }
 
-static struct cpu_defaults core_params = {
-	.pid_policy = {
-		.sample_rate_ms = 10,
-		.deadband = 0,
-		.setpoint = 97,
-		.p_gain_pct = 20,
-		.d_gain_pct = 0,
-		.i_gain_pct = 0,
-	},
-	.funcs = {
-		.get_max = core_get_max_pstate,
-		.get_max_physical = core_get_max_pstate_physical,
-		.get_min = core_get_min_pstate,
-		.get_turbo = core_get_turbo_pstate,
-		.get_scaling = core_get_scaling,
-		.get_val = core_get_val,
-		.get_target_pstate = get_target_pstate_use_performance,
-	},
-};
-
-static const struct cpu_defaults silvermont_params = {
-	.pid_policy = {
-		.sample_rate_ms = 10,
-		.deadband = 0,
-		.setpoint = 60,
-		.p_gain_pct = 14,
-		.d_gain_pct = 0,
-		.i_gain_pct = 4,
-	},
-	.funcs = {
-		.get_max = atom_get_max_pstate,
-		.get_max_physical = atom_get_max_pstate,
-		.get_min = atom_get_min_pstate,
-		.get_turbo = atom_get_turbo_pstate,
-		.get_val = atom_get_val,
-		.get_scaling = silvermont_get_scaling,
-		.get_vid = atom_get_vid,
-		.get_target_pstate = get_target_pstate_use_cpu_load,
-	},
-};
-
-static const struct cpu_defaults airmont_params = {
-	.pid_policy = {
-		.sample_rate_ms = 10,
-		.deadband = 0,
-		.setpoint = 60,
-		.p_gain_pct = 14,
-		.d_gain_pct = 0,
-		.i_gain_pct = 4,
-	},
-	.funcs = {
-		.get_max = atom_get_max_pstate,
-		.get_max_physical = atom_get_max_pstate,
-		.get_min = atom_get_min_pstate,
-		.get_turbo = atom_get_turbo_pstate,
-		.get_val = atom_get_val,
-		.get_scaling = airmont_get_scaling,
-		.get_vid = atom_get_vid,
-		.get_target_pstate = get_target_pstate_use_cpu_load,
-	},
-};
-
-static const struct cpu_defaults knl_params = {
-	.pid_policy = {
-		.sample_rate_ms = 10,
-		.deadband = 0,
-		.setpoint = 97,
-		.p_gain_pct = 20,
-		.d_gain_pct = 0,
-		.i_gain_pct = 0,
-	},
-	.funcs = {
-		.get_max = core_get_max_pstate,
-		.get_max_physical = core_get_max_pstate_physical,
-		.get_min = core_get_min_pstate,
-		.get_turbo = knl_get_turbo_pstate,
-		.get_scaling = core_get_scaling,
-		.get_val = core_get_val,
-		.get_target_pstate = get_target_pstate_use_performance,
-	},
-};
-
-static const struct cpu_defaults bxt_params = {
-	.pid_policy = {
-		.sample_rate_ms = 10,
-		.deadband = 0,
-		.setpoint = 60,
-		.p_gain_pct = 14,
-		.d_gain_pct = 0,
-		.i_gain_pct = 4,
-	},
-	.funcs = {
-		.get_max = core_get_max_pstate,
-		.get_max_physical = core_get_max_pstate_physical,
-		.get_min = core_get_min_pstate,
-		.get_turbo = core_get_turbo_pstate,
-		.get_scaling = core_get_scaling,
-		.get_val = core_get_val,
-		.get_target_pstate = get_target_pstate_use_cpu_load,
-	},
-};
-
-static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
+static int intel_pstate_get_base_pstate(struct cpudata *cpu)
 {
-	int max_perf = cpu->pstate.turbo_pstate;
-	int max_perf_adj;
-	int min_perf;
-	struct perf_limits *perf_limits = &global;
-
-	if (global.no_turbo || global.turbo_disabled)
-		max_perf = cpu->pstate.max_pstate;
-
-	if (per_cpu_limits)
-		perf_limits = cpu->perf_limits;
-
-	/*
-	 * performance can be limited by user through sysfs, by cpufreq
-	 * policy, or by cpu specific default values determined through
-	 * experimentation.
-	 */
-	max_perf_adj = fp_ext_toint(max_perf * perf_limits->max_perf);
-	*max = clamp_t(int, max_perf_adj,
-		       cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
-
-	min_perf = fp_ext_toint(max_perf * perf_limits->min_perf);
-	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
+	return global.no_turbo || global.turbo_disabled ?
+			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
 }
 
 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
@@ -1702,11 +1522,13 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
 
 static void intel_pstate_max_within_limits(struct cpudata *cpu)
 {
-	int min_pstate, max_pstate;
+	int pstate;
 
 	update_turbo_state();
-	intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
-	intel_pstate_set_pstate(cpu, max_pstate);
+	pstate = intel_pstate_get_base_pstate(cpu);
+	pstate = max(cpu->pstate.min_pstate,
+		     fp_ext_toint(pstate * cpu->max_perf));
+	intel_pstate_set_pstate(cpu, pstate);
 }
 
 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
@@ -1767,7 +1589,11 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
 	 * that sample.time will always be reset before setting the utilization
 	 * update hook and make the caller skip the sample then.
 	 */
-	return !!cpu->last_sample_time;
+	if (cpu->last_sample_time) {
+		intel_pstate_calc_avg_perf(cpu);
+		return true;
+	}
+	return false;
 }
 
 static inline int32_t get_avg_frequency(struct cpudata *cpu)
@@ -1788,6 +1614,9 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
 	int32_t busy_frac, boost;
 	int target, avg_pstate;
 
+	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE)
+		return cpu->pstate.turbo_pstate;
+
 	busy_frac = div_fp(sample->mperf, sample->tsc);
 
 	boost = cpu->iowait_boost;
@@ -1824,6 +1653,9 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
 	int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
 	u64 duration_ns;
 
+	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE)
+		return cpu->pstate.turbo_pstate;
+
 	/*
 	 * perf_scaled is the ratio of the average P-state during the last
 	 * sampling period to the P-state requested last time (in percent).
@@ -1858,11 +1690,13 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
 
 static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
 {
-	int max_perf, min_perf;
+	int max_pstate = intel_pstate_get_base_pstate(cpu);
+	int min_pstate;
 
-	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
-	pstate = clamp_t(int, pstate, min_perf, max_perf);
-	return pstate;
+	min_pstate = max(cpu->pstate.min_pstate,
+			 fp_ext_toint(max_pstate * cpu->min_perf));
+	max_pstate = max(min_pstate, fp_ext_toint(max_pstate * cpu->max_perf));
+	return clamp_t(int, pstate, min_pstate, max_pstate);
 }
 
 static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
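
The reworked intel_pstate_prepare_request() scales the per-CPU min_perf/max_perf fractions by the base P-state and clamps the requested P-state into the resulting window. A toy model of that clamping, with the driver's extended fixed-point fractions replaced by doubles and made-up numbers:

#include <stdio.h>

static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	int hw_min = 8, base = 32;		/* hypothetical P-state ratios */
	double min_perf = 0.25, max_perf = 0.75;	/* capacity fractions */

	int min_pstate = (int)(base * min_perf);	/* 8 */
	if (min_pstate < hw_min)
		min_pstate = hw_min;
	int max_pstate = (int)(base * max_perf);	/* 24 */
	if (max_pstate < min_pstate)
		max_pstate = min_pstate;

	/* A request for P-state 30 lands in [8, 24] and becomes 24. */
	printf("%d\n", clamp_int(30, min_pstate, max_pstate));
	return 0;
}
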
@@ -1874,16 +1708,11 @@ static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
 	wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
 }
 
-static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
+static void intel_pstate_adjust_pstate(struct cpudata *cpu, int target_pstate)
 {
-	int from, target_pstate;
+	int from = cpu->pstate.current_pstate;
 	struct sample *sample;
 
-	from = cpu->pstate.current_pstate;
-
-	target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ?
-		cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu);
-
 	update_turbo_state();
 
 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
@@ -1902,76 +1731,155 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 		fp_toint(cpu->iowait_boost * 100));
 }
 
+static void intel_pstate_update_util_hwp(struct update_util_data *data,
+					 u64 time, unsigned int flags)
+{
+	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
+	u64 delta_ns = time - cpu->sample.time;
+
+	if ((s64)delta_ns >= INTEL_PSTATE_HWP_SAMPLING_INTERVAL)
+		intel_pstate_sample(cpu, time);
+}
+
+static void intel_pstate_update_util_pid(struct update_util_data *data,
+					 u64 time, unsigned int flags)
+{
+	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
+	u64 delta_ns = time - cpu->sample.time;
+
+	if ((s64)delta_ns < pid_params.sample_rate_ns)
+		return;
+
+	if (intel_pstate_sample(cpu, time)) {
+		int target_pstate;
+
+		target_pstate = get_target_pstate_use_performance(cpu);
+		intel_pstate_adjust_pstate(cpu, target_pstate);
+	}
+}
+
 static void intel_pstate_update_util(struct update_util_data *data, u64 time,
 				     unsigned int flags)
 {
 	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
 	u64 delta_ns;
 
-	if (pstate_funcs.get_target_pstate == get_target_pstate_use_cpu_load) {
-		if (flags & SCHED_CPUFREQ_IOWAIT) {
-			cpu->iowait_boost = int_tofp(1);
-		} else if (cpu->iowait_boost) {
-			/* Clear iowait_boost if the CPU may have been idle. */
-			delta_ns = time - cpu->last_update;
-			if (delta_ns > TICK_NSEC)
-				cpu->iowait_boost = 0;
-		}
-		cpu->last_update = time;
+	if (flags & SCHED_CPUFREQ_IOWAIT) {
+		cpu->iowait_boost = int_tofp(1);
+	} else if (cpu->iowait_boost) {
+		/* Clear iowait_boost if the CPU may have been idle. */
+		delta_ns = time - cpu->last_update;
+		if (delta_ns > TICK_NSEC)
+			cpu->iowait_boost = 0;
 	}
-
+	cpu->last_update = time;
 	delta_ns = time - cpu->sample.time;
-	if ((s64)delta_ns >= pid_params.sample_rate_ns) {
-		bool sample_taken = intel_pstate_sample(cpu, time);
+	if ((s64)delta_ns < INTEL_PSTATE_DEFAULT_SAMPLING_INTERVAL)
+		return;
 
-		if (sample_taken) {
-			intel_pstate_calc_avg_perf(cpu);
-			if (!hwp_active)
-				intel_pstate_adjust_busy_pstate(cpu);
-		}
+	if (intel_pstate_sample(cpu, time)) {
+		int target_pstate;
+
+		target_pstate = get_target_pstate_use_cpu_load(cpu);
+		intel_pstate_adjust_pstate(cpu, target_pstate);
 	}
 }
 
+static struct pstate_funcs core_funcs = {
+	.get_max = core_get_max_pstate,
+	.get_max_physical = core_get_max_pstate_physical,
+	.get_min = core_get_min_pstate,
+	.get_turbo = core_get_turbo_pstate,
+	.get_scaling = core_get_scaling,
+	.get_val = core_get_val,
+	.update_util = intel_pstate_update_util_pid,
+};
+
+static const struct pstate_funcs silvermont_funcs = {
+	.get_max = atom_get_max_pstate,
+	.get_max_physical = atom_get_max_pstate,
+	.get_min = atom_get_min_pstate,
+	.get_turbo = atom_get_turbo_pstate,
+	.get_val = atom_get_val,
+	.get_scaling = silvermont_get_scaling,
+	.get_vid = atom_get_vid,
+	.update_util = intel_pstate_update_util,
+};
+
+static const struct pstate_funcs airmont_funcs = {
+	.get_max = atom_get_max_pstate,
+	.get_max_physical = atom_get_max_pstate,
+	.get_min = atom_get_min_pstate,
+	.get_turbo = atom_get_turbo_pstate,
+	.get_val = atom_get_val,
+	.get_scaling = airmont_get_scaling,
+	.get_vid = atom_get_vid,
+	.update_util = intel_pstate_update_util,
+};
+
+static const struct pstate_funcs knl_funcs = {
+	.get_max = core_get_max_pstate,
+	.get_max_physical = core_get_max_pstate_physical,
+	.get_min = core_get_min_pstate,
+	.get_turbo = knl_get_turbo_pstate,
+	.get_scaling = core_get_scaling,
+	.get_val = core_get_val,
+	.update_util = intel_pstate_update_util_pid,
+};
+
+static const struct pstate_funcs bxt_funcs = {
+	.get_max = core_get_max_pstate,
+	.get_max_physical = core_get_max_pstate_physical,
+	.get_min = core_get_min_pstate,
+	.get_turbo = core_get_turbo_pstate,
+	.get_scaling = core_get_scaling,
+	.get_val = core_get_val,
+	.update_util = intel_pstate_update_util,
+};
+
 #define ICPU(model, policy) \
 	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
 			(unsigned long)&policy }
 
 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
-	ICPU(INTEL_FAM6_SANDYBRIDGE, core_params),
-	ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_params),
-	ICPU(INTEL_FAM6_ATOM_SILVERMONT1, silvermont_params),
-	ICPU(INTEL_FAM6_IVYBRIDGE, core_params),
-	ICPU(INTEL_FAM6_HASWELL_CORE, core_params),
-	ICPU(INTEL_FAM6_BROADWELL_CORE, core_params),
-	ICPU(INTEL_FAM6_IVYBRIDGE_X, core_params),
-	ICPU(INTEL_FAM6_HASWELL_X, core_params),
-	ICPU(INTEL_FAM6_HASWELL_ULT, core_params),
-	ICPU(INTEL_FAM6_HASWELL_GT3E, core_params),
-	ICPU(INTEL_FAM6_BROADWELL_GT3E, core_params),
-	ICPU(INTEL_FAM6_ATOM_AIRMONT, airmont_params),
-	ICPU(INTEL_FAM6_SKYLAKE_MOBILE, core_params),
-	ICPU(INTEL_FAM6_BROADWELL_X, core_params),
-	ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_params),
-	ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
-	ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_params),
-	ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_params),
-	ICPU(INTEL_FAM6_ATOM_GOLDMONT, bxt_params),
+	ICPU(INTEL_FAM6_SANDYBRIDGE, core_funcs),
+	ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_funcs),
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT1, silvermont_funcs),
+	ICPU(INTEL_FAM6_IVYBRIDGE, core_funcs),
+	ICPU(INTEL_FAM6_HASWELL_CORE, core_funcs),
+	ICPU(INTEL_FAM6_BROADWELL_CORE, core_funcs),
+	ICPU(INTEL_FAM6_IVYBRIDGE_X, core_funcs),
+	ICPU(INTEL_FAM6_HASWELL_X, core_funcs),
+	ICPU(INTEL_FAM6_HASWELL_ULT, core_funcs),
+	ICPU(INTEL_FAM6_HASWELL_GT3E, core_funcs),
+	ICPU(INTEL_FAM6_BROADWELL_GT3E, core_funcs),
+	ICPU(INTEL_FAM6_ATOM_AIRMONT, airmont_funcs),
+	ICPU(INTEL_FAM6_SKYLAKE_MOBILE, core_funcs),
+	ICPU(INTEL_FAM6_BROADWELL_X, core_funcs),
+	ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_funcs),
+	ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs),
+	ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_funcs),
+	ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs),
+	ICPU(INTEL_FAM6_ATOM_GOLDMONT, bxt_funcs),
+	ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, bxt_funcs),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
 
 static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
-	ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
-	ICPU(INTEL_FAM6_BROADWELL_X, core_params),
-	ICPU(INTEL_FAM6_SKYLAKE_X, core_params),
+	ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs),
+	ICPU(INTEL_FAM6_BROADWELL_X, core_funcs),
+	ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
 	{}
 };
 
 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
-	ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_params),
+	ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_funcs),
 	{}
 };
 
+static bool pid_in_use(void);
+
 static int intel_pstate_init_cpu(unsigned int cpunum)
 {
 	struct cpudata *cpu;
@@ -1979,18 +1887,11 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 	cpu = all_cpu_data[cpunum];
 
 	if (!cpu) {
-		unsigned int size = sizeof(struct cpudata);
-
-		if (per_cpu_limits)
-			size += sizeof(struct perf_limits);
-
-		cpu = kzalloc(size, GFP_KERNEL);
+		cpu = kzalloc(sizeof(*cpu), GFP_KERNEL);
 		if (!cpu)
 			return -ENOMEM;
 
 		all_cpu_data[cpunum] = cpu;
-		if (per_cpu_limits)
-			cpu->perf_limits = (struct perf_limits *)(cpu + 1);
 
 		cpu->epp_default = -EINVAL;
 		cpu->epp_powersave = -EINVAL;
@@ -2009,14 +1910,12 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 		intel_pstate_disable_ee(cpunum);
 
 		intel_pstate_hwp_enable(cpu);
-		pid_params.sample_rate_ms = 50;
-		pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
+	} else if (pid_in_use()) {
+		intel_pstate_pid_reset(cpu);
 	}
 
 	intel_pstate_get_cpu_pstates(cpu);
 
-	intel_pstate_busy_pid_reset(cpu);
-
 	pr_debug("controlling: cpu %d\n", cpunum);
 
 	return 0;
@@ -2039,7 +1938,7 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
 	/* Prevent intel_pstate_update_util() from using stale data. */
 	cpu->sample.time = 0;
 	cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
-				     intel_pstate_update_util);
+				     pstate_funcs.update_util);
 	cpu->update_util_set = true;
 }
 
@@ -2055,46 +1954,68 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu)
 	synchronize_sched();
 }
 
+static int intel_pstate_get_max_freq(struct cpudata *cpu)
+{
+	return global.turbo_disabled || global.no_turbo ?
+			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
+}
+
 static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
-					    struct perf_limits *limits)
+					    struct cpudata *cpu)
 {
+	int max_freq = intel_pstate_get_max_freq(cpu);
 	int32_t max_policy_perf, min_policy_perf;
 
-	max_policy_perf = div_ext_fp(policy->max, policy->cpuinfo.max_freq);
+	max_policy_perf = div_ext_fp(policy->max, max_freq);
 	max_policy_perf = clamp_t(int32_t, max_policy_perf, 0, int_ext_tofp(1));
 	if (policy->max == policy->min) {
 		min_policy_perf = max_policy_perf;
 	} else {
-		min_policy_perf = div_ext_fp(policy->min,
-					     policy->cpuinfo.max_freq);
+		min_policy_perf = div_ext_fp(policy->min, max_freq);
 		min_policy_perf = clamp_t(int32_t, min_policy_perf,
 					  0, max_policy_perf);
 	}
 
 	/* Normalize user input to [min_perf, max_perf] */
-	limits->min_perf = max(min_policy_perf,
-			       percent_ext_fp(limits->min_sysfs_pct));
-	limits->min_perf = min(limits->min_perf, max_policy_perf);
-	limits->max_perf = min(max_policy_perf,
-			       percent_ext_fp(limits->max_sysfs_pct));
-	limits->max_perf = max(min_policy_perf, limits->max_perf);
+	if (per_cpu_limits) {
+		cpu->min_perf = min_policy_perf;
+		cpu->max_perf = max_policy_perf;
+	} else {
+		int32_t global_min, global_max;
+
+		/* Global limits are in percent of the maximum turbo P-state. */
+		global_max = percent_ext_fp(global.max_perf_pct);
+		global_min = percent_ext_fp(global.min_perf_pct);
+		if (max_freq != cpu->pstate.turbo_freq) {
+			int32_t turbo_factor;
+
+			turbo_factor = div_ext_fp(cpu->pstate.turbo_pstate,
+						  cpu->pstate.max_pstate);
+			global_min = mul_ext_fp(global_min, turbo_factor);
+			global_max = mul_ext_fp(global_max, turbo_factor);
+		}
+		global_min = clamp_t(int32_t, global_min, 0, global_max);
+
+		cpu->min_perf = max(min_policy_perf, global_min);
+		cpu->min_perf = min(cpu->min_perf, max_policy_perf);
+		cpu->max_perf = min(max_policy_perf, global_max);
+		cpu->max_perf = max(min_policy_perf, cpu->max_perf);
 
-	/* Make sure min_perf <= max_perf */
-	limits->min_perf = min(limits->min_perf, limits->max_perf);
+		/* Make sure min_perf <= max_perf */
+		cpu->min_perf = min(cpu->min_perf, cpu->max_perf);
+	}
 
-	limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS);
-	limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS);
-	limits->max_perf_pct = fp_ext_toint(limits->max_perf * 100);
-	limits->min_perf_pct = fp_ext_toint(limits->min_perf * 100);
+	cpu->max_perf = round_up(cpu->max_perf, EXT_FRAC_BITS);
+	cpu->min_perf = round_up(cpu->min_perf, EXT_FRAC_BITS);
 
 	pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
-		 limits->max_perf_pct, limits->min_perf_pct);
+		 fp_ext_toint(cpu->max_perf * 100),
+		 fp_ext_toint(cpu->min_perf * 100));
 }
 
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
 	struct cpudata *cpu;
-	struct perf_limits *perf_limits = &global;
 
 	if (!policy->cpuinfo.max_freq)
 		return -ENODEV;
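
In the non-per-CPU branch above, the global percent limits are defined against the maximum turbo P-state; when turbo is unavailable they apply against max_pstate instead, so they get rescaled by turbo_pstate/max_pstate to preserve the same absolute floor and ceiling. Illustrative arithmetic with hypothetical ratios, using plain doubles instead of the driver's extended fixed point:

#include <stdio.h>

int main(void)
{
	double turbo_pstate = 32.0, max_pstate = 24.0;	/* hypothetical */
	double global_min = 0.25;	/* 25% of maximum turbo capacity */

	double turbo_factor = turbo_pstate / max_pstate;
	/* 0.25 of turbo capacity is ~0.333 of non-turbo capacity. */
	printf("scaled minimum = %.3f\n", global_min * turbo_factor);
	return 0;
}
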
@@ -2105,19 +2026,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 	cpu = all_cpu_data[policy->cpu];
 	cpu->policy = policy->policy;
 
-	if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
-	    policy->max < policy->cpuinfo.max_freq &&
-	    policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
-		pr_debug("policy->max > max non turbo frequency\n");
-		policy->max = policy->cpuinfo.max_freq;
-	}
-
-	if (per_cpu_limits)
-		perf_limits = cpu->perf_limits;
-
 	mutex_lock(&intel_pstate_limits_lock);
 
-	intel_pstate_update_perf_limits(policy, perf_limits);
+	intel_pstate_update_perf_limits(policy, cpu);
 
 	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
 		/*
@@ -2130,38 +2041,38 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 
 	intel_pstate_set_update_util_hook(policy->cpu);
 
-	intel_pstate_hwp_set_policy(policy);
+	if (hwp_active)
+		intel_pstate_hwp_set(policy->cpu);
 
 	mutex_unlock(&intel_pstate_limits_lock);
 
 	return 0;
 }
 
+static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy,
+					   struct cpudata *cpu)
+{
+	if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
+	    policy->max < policy->cpuinfo.max_freq &&
+	    policy->max > cpu->pstate.max_freq) {
+		pr_debug("policy->max > max non turbo frequency\n");
+		policy->max = policy->cpuinfo.max_freq;
+	}
+}
+
 static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
 {
 	struct cpudata *cpu = all_cpu_data[policy->cpu];
 
 	update_turbo_state();
-	policy->cpuinfo.max_freq = global.turbo_disabled || global.no_turbo ?
-					cpu->pstate.max_freq :
-					cpu->pstate.turbo_freq;
-
-	cpufreq_verify_within_cpu_limits(policy);
+	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+				     intel_pstate_get_max_freq(cpu));
 
 	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
 	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
 		return -EINVAL;
 
-	/* When per-CPU limits are used, sysfs limits are not used */
-	if (!per_cpu_limits) {
-		unsigned int max_freq, min_freq;
-
-		max_freq = policy->cpuinfo.max_freq *
-					global.max_sysfs_pct / 100;
-		min_freq = policy->cpuinfo.max_freq *
-					global.min_sysfs_pct / 100;
-		cpufreq_verify_within_limits(policy, min_freq, max_freq);
-	}
+	intel_pstate_adjust_policy_max(policy, cpu);
 
 	return 0;
 }
@@ -2202,8 +2113,8 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
 
 	cpu = all_cpu_data[policy->cpu];
 
-	if (per_cpu_limits)
-		intel_pstate_init_limits(cpu->perf_limits);
+	cpu->max_perf = int_ext_tofp(1);
+	cpu->min_perf = 0;
 
 	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
 	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
@@ -2257,10 +2168,12 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
 	struct cpudata *cpu = all_cpu_data[policy->cpu];
 
 	update_turbo_state();
-	policy->cpuinfo.max_freq = global.no_turbo || global.turbo_disabled ?
-			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
+	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+				     intel_pstate_get_max_freq(cpu));
 
-	cpufreq_verify_within_cpu_limits(policy);
+	intel_pstate_adjust_policy_max(policy, cpu);
+
+	intel_pstate_update_perf_limits(policy, cpu);
 
 	return 0;
 }
@@ -2341,7 +2254,13 @@ static struct cpufreq_driver intel_cpufreq = {
 	.name		= "intel_cpufreq",
 };
 
-static struct cpufreq_driver *intel_pstate_driver = &intel_pstate;
+static struct cpufreq_driver *default_driver = &intel_pstate;
+
+static bool pid_in_use(void)
+{
+	return intel_pstate_driver == &intel_pstate &&
+		pstate_funcs.update_util == intel_pstate_update_util_pid;
+}
 
 static void intel_pstate_driver_cleanup(void)
 {
@@ -2358,26 +2277,26 @@ static void intel_pstate_driver_cleanup(void)
 		}
 	}
 	put_online_cpus();
+	intel_pstate_driver = NULL;
 }
 
-static int intel_pstate_register_driver(void)
+static int intel_pstate_register_driver(struct cpufreq_driver *driver)
 {
 	int ret;
 
-	intel_pstate_init_limits(&global);
+	memset(&global, 0, sizeof(global));
+	global.max_perf_pct = 100;
 
+	intel_pstate_driver = driver;
 	ret = cpufreq_register_driver(intel_pstate_driver);
 	if (ret) {
 		intel_pstate_driver_cleanup();
 		return ret;
 	}
 
-	mutex_lock(&intel_pstate_limits_lock);
-	driver_registered = true;
-	mutex_unlock(&intel_pstate_limits_lock);
+	global.min_perf_pct = min_perf_pct_min();
 
-	if (intel_pstate_driver == &intel_pstate && !hwp_active &&
-	    pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
+	if (pid_in_use())
 		intel_pstate_debug_expose_params();
 
 	return 0;
@@ -2388,14 +2307,9 @@ static int intel_pstate_unregister_driver(void)
 	if (hwp_active)
 		return -EBUSY;
 
-	if (intel_pstate_driver == &intel_pstate && !hwp_active &&
-	    pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
+	if (pid_in_use())
 		intel_pstate_debug_hide_params();
 
-	mutex_lock(&intel_pstate_limits_lock);
-	driver_registered = false;
-	mutex_unlock(&intel_pstate_limits_lock);
-
 	cpufreq_unregister_driver(intel_pstate_driver);
 	intel_pstate_driver_cleanup();
 
@@ -2404,7 +2318,7 @@ static int intel_pstate_unregister_driver(void)
 
 static ssize_t intel_pstate_show_status(char *buf)
 {
-	if (!driver_registered)
+	if (!intel_pstate_driver)
 		return sprintf(buf, "off\n");
 
 	return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ?
@@ -2416,11 +2330,11 @@ static int intel_pstate_update_status(const char *buf, size_t size)
 	int ret;
 
 	if (size == 3 && !strncmp(buf, "off", size))
-		return driver_registered ?
+		return intel_pstate_driver ?
 					intel_pstate_unregister_driver() : -EINVAL;
 
 	if (size == 6 && !strncmp(buf, "active", size)) {
-		if (driver_registered) {
+		if (intel_pstate_driver) {
 			if (intel_pstate_driver == &intel_pstate)
 				return 0;
 
@@ -2429,13 +2343,12 @@ static int intel_pstate_update_status(const char *buf, size_t size)
 				return ret;
 		}
 
-		intel_pstate_driver = &intel_pstate;
-		return intel_pstate_register_driver();
+		return intel_pstate_register_driver(&intel_pstate);
 	}
 
 	if (size == 7 && !strncmp(buf, "passive", size)) {
-		if (driver_registered) {
-			if (intel_pstate_driver != &intel_pstate)
+		if (intel_pstate_driver) {
+			if (intel_pstate_driver == &intel_cpufreq)
 				return 0;
 
 			ret = intel_pstate_unregister_driver();
@@ -2443,8 +2356,7 @@ static int intel_pstate_update_status(const char *buf, size_t size)
 				return ret;
 		}
 
-		intel_pstate_driver = &intel_cpufreq;
-		return intel_pstate_register_driver();
+		return intel_pstate_register_driver(&intel_cpufreq);
 	}
 
 	return -EINVAL;
@@ -2465,23 +2377,17 @@ static int __init intel_pstate_msrs_not_valid(void)
 	return 0;
 }
 
-static void __init copy_pid_params(struct pstate_adjust_policy *policy)
-{
-	pid_params.sample_rate_ms = policy->sample_rate_ms;
-	pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
-	pid_params.p_gain_pct = policy->p_gain_pct;
-	pid_params.i_gain_pct = policy->i_gain_pct;
-	pid_params.d_gain_pct = policy->d_gain_pct;
-	pid_params.deadband = policy->deadband;
-	pid_params.setpoint = policy->setpoint;
-}
-
 #ifdef CONFIG_ACPI
 static void intel_pstate_use_acpi_profile(void)
 {
-	if (acpi_gbl_FADT.preferred_profile == PM_MOBILE)
-		pstate_funcs.get_target_pstate =
-			get_target_pstate_use_cpu_load;
+	switch (acpi_gbl_FADT.preferred_profile) {
+	case PM_MOBILE:
+	case PM_TABLET:
+	case PM_APPLIANCE_PC:
+	case PM_DESKTOP:
+	case PM_WORKSTATION:
+		pstate_funcs.update_util = intel_pstate_update_util;
+	}
 }
 #else
 static void intel_pstate_use_acpi_profile(void)
@@ -2498,7 +2404,7 @@ static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
 	pstate_funcs.get_scaling = funcs->get_scaling;
 	pstate_funcs.get_val = funcs->get_val;
 	pstate_funcs.get_vid = funcs->get_vid;
-	pstate_funcs.get_target_pstate = funcs->get_target_pstate;
+	pstate_funcs.update_util = funcs->update_util;
 
 	intel_pstate_use_acpi_profile();
 }
@@ -2637,28 +2543,30 @@ static const struct x86_cpu_id hwp_support_ids[] __initconst = {
 
 static int __init intel_pstate_init(void)
 {
-	const struct x86_cpu_id *id;
-	struct cpu_defaults *cpu_def;
-	int rc = 0;
+	int rc;
 
 	if (no_load)
 		return -ENODEV;
 
-	if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
-		copy_cpu_funcs(&core_params.funcs);
-		hwp_active++;
-		intel_pstate.attr = hwp_cpufreq_attrs;
-		goto hwp_cpu_matched;
-	}
-
-	id = x86_match_cpu(intel_pstate_cpu_ids);
-	if (!id)
-		return -ENODEV;
+	if (x86_match_cpu(hwp_support_ids)) {
+		copy_cpu_funcs(&core_funcs);
+		if (no_hwp) {
+			pstate_funcs.update_util = intel_pstate_update_util;
+		} else {
+			hwp_active++;
+			intel_pstate.attr = hwp_cpufreq_attrs;
+			pstate_funcs.update_util = intel_pstate_update_util_hwp;
+			goto hwp_cpu_matched;
+		}
+	} else {
+		const struct x86_cpu_id *id;
 
-	cpu_def = (struct cpu_defaults *)id->driver_data;
+		id = x86_match_cpu(intel_pstate_cpu_ids);
+		if (!id)
+			return -ENODEV;
 
-	copy_pid_params(&cpu_def->pid_policy);
-	copy_cpu_funcs(&cpu_def->funcs);
+		copy_cpu_funcs((struct pstate_funcs *)id->driver_data);
+	}
 
 	if (intel_pstate_msrs_not_valid())
 		return -ENODEV;
@@ -2685,7 +2593,7 @@ hwp_cpu_matched:
 	intel_pstate_sysfs_expose_params();
 
 	mutex_lock(&intel_pstate_driver_lock);
-	rc = intel_pstate_register_driver();
+	rc = intel_pstate_register_driver(default_driver);
 	mutex_unlock(&intel_pstate_driver_lock);
 	if (rc)
 		return rc;
@@ -2706,7 +2614,7 @@ static int __init intel_pstate_setup(char *str)
 		no_load = 1;
 	} else if (!strcmp(str, "passive")) {
 		pr_info("Passive mode enabled\n");
-		intel_pstate_driver = &intel_cpufreq;
+		default_driver = &intel_cpufreq;
 		no_hwp = 1;
 	}
 	if (!strcmp(str, "no_hwp")) {