 drivers/cpufreq/intel_pstate.c | 142 ++++++++++++++++++++++++++---------------------
 1 file changed, 85 insertions(+), 57 deletions(-)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 6a5a22192128..93a3c635ea27 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -167,7 +167,20 @@ struct perf_limits {
         int min_perf_ctl;
 };
 
-static struct perf_limits limits = {
+static struct perf_limits performance_limits = {
+        .no_turbo = 0,
+        .turbo_disabled = 0,
+        .max_perf_pct = 100,
+        .max_perf = int_tofp(1),
+        .min_perf_pct = 100,
+        .min_perf = int_tofp(1),
+        .max_policy_pct = 100,
+        .max_sysfs_pct = 100,
+        .min_policy_pct = 0,
+        .min_sysfs_pct = 0,
+};
+
+static struct perf_limits powersave_limits = {
         .no_turbo = 0,
         .turbo_disabled = 0,
         .max_perf_pct = 100,
@@ -182,6 +195,12 @@ static struct perf_limits limits = {
         .min_perf_ctl = 0,
 };
 
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
+static struct perf_limits *limits = &performance_limits;
+#else
+static struct perf_limits *limits = &powersave_limits;
+#endif
+
 #if IS_ENABLED(CONFIG_ACPI)
 /*
  * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and
@@ -256,7 +275,7 @@ static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
         if (turbo_pss_ctl <= cpu->pstate.max_pstate &&
             turbo_pss_ctl > cpu->pstate.min_pstate) {
                 pr_debug("intel_pstate: no turbo range exists in _PSS\n");
-                limits.no_turbo = limits.turbo_disabled = 1;
+                limits->no_turbo = limits->turbo_disabled = 1;
                 cpu->pstate.turbo_pstate = cpu->pstate.max_pstate;
                 turbo_absent = true;
         }
@@ -415,7 +434,7 @@ static inline void update_turbo_state(void)
 
         cpu = all_cpu_data[0];
         rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
-        limits.turbo_disabled =
+        limits->turbo_disabled =
                 (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
                  cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
 }
@@ -434,14 +453,14 @@ static void intel_pstate_hwp_set(void)
 
         for_each_online_cpu(cpu) {
                 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
-                adj_range = limits.min_perf_pct * range / 100;
+                adj_range = limits->min_perf_pct * range / 100;
                 min = hw_min + adj_range;
                 value &= ~HWP_MIN_PERF(~0L);
                 value |= HWP_MIN_PERF(min);
 
-                adj_range = limits.max_perf_pct * range / 100;
+                adj_range = limits->max_perf_pct * range / 100;
                 max = hw_min + adj_range;
-                if (limits.no_turbo) {
+                if (limits->no_turbo) {
                         hw_max = HWP_GUARANTEED_PERF(cap);
                         if (hw_max < max)
                                 max = hw_max;
@@ -510,7 +529,7 @@ static void __init intel_pstate_debug_expose_params(void)
 static ssize_t show_##file_name					\
         (struct kobject *kobj, struct attribute *attr, char *buf)	\
 {									\
-        return sprintf(buf, "%u\n", limits.object);			\
+        return sprintf(buf, "%u\n", limits->object);			\
 }
 
 static ssize_t show_turbo_pct(struct kobject *kobj,
@@ -546,10 +565,10 @@ static ssize_t show_no_turbo(struct kobject *kobj,
         ssize_t ret;
 
         update_turbo_state();
-        if (limits.turbo_disabled)
-                ret = sprintf(buf, "%u\n", limits.turbo_disabled);
+        if (limits->turbo_disabled)
+                ret = sprintf(buf, "%u\n", limits->turbo_disabled);
         else
-                ret = sprintf(buf, "%u\n", limits.no_turbo);
+                ret = sprintf(buf, "%u\n", limits->no_turbo);
 
         return ret;
 }
@@ -565,12 +584,12 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
                 return -EINVAL;
 
         update_turbo_state();
-        if (limits.turbo_disabled) {
+        if (limits->turbo_disabled) {
                 pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
                 return -EPERM;
         }
 
-        limits.no_turbo = clamp_t(int, input, 0, 1);
+        limits->no_turbo = clamp_t(int, input, 0, 1);
 
         if (hwp_active)
                 intel_pstate_hwp_set();
@@ -588,11 +607,15 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
         if (ret != 1)
                 return -EINVAL;
 
-        limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
-        limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
-        limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
-        limits.max_perf_pct = max(limits.min_perf_pct, limits.max_perf_pct);
-        limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
+        limits->max_sysfs_pct = clamp_t(int, input, 0 , 100);
+        limits->max_perf_pct = min(limits->max_policy_pct,
+                                   limits->max_sysfs_pct);
+        limits->max_perf_pct = max(limits->min_policy_pct,
+                                   limits->max_perf_pct);
+        limits->max_perf_pct = max(limits->min_perf_pct,
+                                   limits->max_perf_pct);
+        limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
+                                  int_tofp(100));
 
         if (hwp_active)
                 intel_pstate_hwp_set();
@@ -609,11 +632,15 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
         if (ret != 1)
                 return -EINVAL;
 
-        limits.min_sysfs_pct = clamp_t(int, input, 0 , 100);
-        limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
-        limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
-        limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
-        limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
+        limits->min_sysfs_pct = clamp_t(int, input, 0 , 100);
+        limits->min_perf_pct = max(limits->min_policy_pct,
+                                   limits->min_sysfs_pct);
+        limits->min_perf_pct = min(limits->max_policy_pct,
+                                   limits->min_perf_pct);
+        limits->min_perf_pct = min(limits->max_perf_pct,
+                                   limits->min_perf_pct);
+        limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
+                                  int_tofp(100));
 
         if (hwp_active)
                 intel_pstate_hwp_set();
@@ -693,7 +720,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
         u32 vid;
 
         val = (u64)pstate << 8;
-        if (limits.no_turbo && !limits.turbo_disabled)
+        if (limits->no_turbo && !limits->turbo_disabled)
                 val |= (u64)1 << 32;
 
         vid_fp = cpudata->vid.min + mul_fp(
@@ -822,7 +849,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
         u64 val;
 
         val = (u64)pstate << 8;
-        if (limits.no_turbo && !limits.turbo_disabled)
+        if (limits->no_turbo && !limits->turbo_disabled)
                 val |= (u64)1 << 32;
 
         wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
@@ -905,7 +932,7 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
         int max_perf_adj;
         int min_perf;
 
-        if (limits.no_turbo || limits.turbo_disabled)
+        if (limits->no_turbo || limits->turbo_disabled)
                 max_perf = cpu->pstate.max_pstate;
 
         /*
@@ -913,21 +940,21 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
          * policy, or by cpu specific default values determined through
          * experimentation.
          */
-        if (limits.max_perf_ctl && limits.max_sysfs_pct >=
-                                                limits.max_policy_pct) {
-                *max = limits.max_perf_ctl;
+        if (limits->max_perf_ctl && limits->max_sysfs_pct >=
+                                                limits->max_policy_pct) {
+                *max = limits->max_perf_ctl;
         } else {
                 max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf),
-                                        limits.max_perf));
+                                        limits->max_perf));
                 *max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate,
                                cpu->pstate.turbo_pstate);
         }
 
-        if (limits.min_perf_ctl) {
-                *min = limits.min_perf_ctl;
+        if (limits->min_perf_ctl) {
+                *min = limits->min_perf_ctl;
         } else {
                 min_perf = fp_toint(mul_fp(int_tofp(max_perf),
-                                                limits.min_perf));
+                                                limits->min_perf));
                 *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
         }
 }
@@ -1215,34 +1242,35 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 
         if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
             policy->max >= policy->cpuinfo.max_freq) {
-                limits.min_policy_pct = 100;
-                limits.min_perf_pct = 100;
-                limits.min_perf = int_tofp(1);
-                limits.max_policy_pct = 100;
-                limits.max_perf_pct = 100;
-                limits.max_perf = int_tofp(1);
-                limits.no_turbo = 0;
-                limits.max_perf_ctl = 0;
-                limits.min_perf_ctl = 0;
+                pr_debug("intel_pstate: set performance\n");
+                limits = &performance_limits;
                 return 0;
         }
 
-        limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
-        limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0 , 100);
-        limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
-        limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
+        pr_debug("intel_pstate: set powersave\n");
+        limits = &powersave_limits;
+        limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
+        limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100);
+        limits->max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
+        limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0 , 100);
 
         /* Normalize user input to [min_policy_pct, max_policy_pct] */
-        limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
-        limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
-        limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
-        limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
+        limits->min_perf_pct = max(limits->min_policy_pct,
+                                   limits->min_sysfs_pct);
+        limits->min_perf_pct = min(limits->max_policy_pct,
+                                   limits->min_perf_pct);
+        limits->max_perf_pct = min(limits->max_policy_pct,
+                                   limits->max_sysfs_pct);
+        limits->max_perf_pct = max(limits->min_policy_pct,
+                                   limits->max_perf_pct);
 
         /* Make sure min_perf_pct <= max_perf_pct */
-        limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
+        limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
 
-        limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
-        limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
+        limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
+                                  int_tofp(100));
+        limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
+                                  int_tofp(100));
 
 #if IS_ENABLED(CONFIG_ACPI)
         cpu = all_cpu_data[policy->cpu];
@@ -1251,14 +1279,14 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 
                 control = convert_to_native_pstate_format(cpu, i);
                 if (control * cpu->pstate.scaling == policy->max)
-                        limits.max_perf_ctl = control;
+                        limits->max_perf_ctl = control;
                 if (control * cpu->pstate.scaling == policy->min)
-                        limits.min_perf_ctl = control;
+                        limits->min_perf_ctl = control;
         }
 
         pr_debug("intel_pstate: max %u policy_max %u perf_ctl [0x%x-0x%x]\n",
-                 policy->cpuinfo.max_freq, policy->max, limits.min_perf_ctl,
-                 limits.max_perf_ctl);
+                 policy->cpuinfo.max_freq, policy->max, limits->min_perf_ctl,
+                 limits->max_perf_ctl);
 #endif
 
         if (hwp_active)
@@ -1303,7 +1331,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 
         cpu = all_cpu_data[policy->cpu];
 
-        if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
+        if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
                 policy->policy = CPUFREQ_POLICY_PERFORMANCE;
         else
                 policy->policy = CPUFREQ_POLICY_POWERSAVE;