aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSrinivas Pandruvada <srinivas.pandruvada@linux.intel.com>2016-11-24 19:07:10 -0500
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2016-11-28 08:23:56 -0500
commit8442885fca09b2d26375b9fe507759879a6f661e (patch)
tree765281d711d05823b71141512d76f3755c95a72d
parentd5dd33d9de0d50db7f3ba221f9c4e4f74e61a69d (diff)
cpufreq: intel_pstate: Set EPP/EPB to 0 in performance mode
When the user has selected the performance policy, set the EPP (Energy Performance Preference) or EPB (Energy Performance Bias) to maximum performance mode. Also, when the user switches back to powersave, restore EPP/EPB to the last EPP/EPB value in effect before entering performance mode. If the user has not changed EPP/EPB manually, this will be the power-on default value. Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-rw-r--r--drivers/cpufreq/intel_pstate.c106
1 file changed, 105 insertions, 1 deletion
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 7159dbde0160..0b90a63de46d 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -249,6 +249,9 @@ struct perf_limits {
249 * when per cpu controls are enforced 249 * when per cpu controls are enforced
250 * @acpi_perf_data: Stores ACPI perf information read from _PSS 250 * @acpi_perf_data: Stores ACPI perf information read from _PSS
251 * @valid_pss_table: Set to true for valid ACPI _PSS entries found 251 * @valid_pss_table: Set to true for valid ACPI _PSS entries found
252 * @epp_saved: Last saved HWP energy performance preference
253 * (EPP) or energy performance bias (EPB)
254 * @epp_policy: Last saved policy used to set EPP/EPB
252 * 255 *
253 * This structure stores per CPU instance data for all CPUs. 256 * This structure stores per CPU instance data for all CPUs.
254 */ 257 */
@@ -276,6 +279,8 @@ struct cpudata {
276 bool valid_pss_table; 279 bool valid_pss_table;
277#endif 280#endif
278 unsigned int iowait_boost; 281 unsigned int iowait_boost;
282 s16 epp_saved;
283 s16 epp_policy;
279}; 284};
280 285
281static struct cpudata **all_cpu_data; 286static struct cpudata **all_cpu_data;
@@ -574,6 +579,48 @@ static inline void update_turbo_state(void)
574 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); 579 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
575} 580}
576 581
582static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
583{
584 u64 epb;
585 int ret;
586
587 if (!static_cpu_has(X86_FEATURE_EPB))
588 return -ENXIO;
589
590 ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
591 if (ret)
592 return (s16)ret;
593
594 return (s16)(epb & 0x0f);
595}
596
597static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
598{
599 s16 epp;
600
601 if (static_cpu_has(X86_FEATURE_HWP_EPP))
602 epp = (hwp_req_data >> 24) & 0xff;
603 else
604 /* When there is no EPP present, HWP uses EPB settings */
605 epp = intel_pstate_get_epb(cpu_data);
606
607 return epp;
608}
609
610static void intel_pstate_set_epb(int cpu, s16 pref)
611{
612 u64 epb;
613
614 if (!static_cpu_has(X86_FEATURE_EPB))
615 return;
616
617 if (rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb))
618 return;
619
620 epb = (epb & ~0x0f) | pref;
621 wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
622}
623
577static void intel_pstate_hwp_set(const struct cpumask *cpumask) 624static void intel_pstate_hwp_set(const struct cpumask *cpumask)
578{ 625{
579 int min, hw_min, max, hw_max, cpu, range, adj_range; 626 int min, hw_min, max, hw_max, cpu, range, adj_range;
@@ -582,6 +629,8 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask)
582 629
583 for_each_cpu(cpu, cpumask) { 630 for_each_cpu(cpu, cpumask) {
584 int max_perf_pct, min_perf_pct; 631 int max_perf_pct, min_perf_pct;
632 struct cpudata *cpu_data = all_cpu_data[cpu];
633 s16 epp;
585 634
586 if (per_cpu_limits) 635 if (per_cpu_limits)
587 perf_limits = all_cpu_data[cpu]->perf_limits; 636 perf_limits = all_cpu_data[cpu]->perf_limits;
@@ -610,6 +659,48 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask)
610 659
611 value &= ~HWP_MAX_PERF(~0L); 660 value &= ~HWP_MAX_PERF(~0L);
612 value |= HWP_MAX_PERF(max); 661 value |= HWP_MAX_PERF(max);
662
663 if (cpu_data->epp_policy == cpu_data->policy)
664 goto skip_epp;
665
666 cpu_data->epp_policy = cpu_data->policy;
667
668 if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
669 epp = intel_pstate_get_epp(cpu_data, value);
 670                        /* If the EPP read failed, don't try to write */
671 if (epp < 0) {
672 cpu_data->epp_saved = epp;
673 goto skip_epp;
674 }
675
676 cpu_data->epp_saved = epp;
677
678 epp = 0;
679 } else {
680 /* skip setting EPP, when saved value is invalid */
681 if (cpu_data->epp_saved < 0)
682 goto skip_epp;
683
684 /*
685 * No need to restore EPP when it is not zero. This
686 * means:
687 * - Policy is not changed
688 * - user has manually changed
689 * - Error reading EPB
690 */
691 epp = intel_pstate_get_epp(cpu_data, value);
692 if (epp)
693 goto skip_epp;
694
695 epp = cpu_data->epp_saved;
696 }
697 if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
698 value &= ~GENMASK_ULL(31, 24);
699 value |= (u64)epp << 24;
700 } else {
701 intel_pstate_set_epb(cpu, epp);
702 }
703skip_epp:
613 wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); 704 wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
614 } 705 }
615} 706}
@@ -622,6 +713,17 @@ static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
622 return 0; 713 return 0;
623} 714}
624 715
716static int intel_pstate_resume(struct cpufreq_policy *policy)
717{
718 if (!hwp_active)
719 return 0;
720
721 all_cpu_data[policy->cpu]->epp_policy = 0;
722 all_cpu_data[policy->cpu]->epp_saved = -EINVAL;
723
724 return intel_pstate_hwp_set_policy(policy);
725}
726
625static void intel_pstate_hwp_set_online_cpus(void) 727static void intel_pstate_hwp_set_online_cpus(void)
626{ 728{
627 get_online_cpus(); 729 get_online_cpus();
@@ -872,6 +974,8 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
872 wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); 974 wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
873 975
874 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); 976 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
977 cpudata->epp_policy = 0;
978 cpudata->epp_saved = -EINVAL;
875} 979}
876 980
877static int atom_get_min_pstate(void) 981static int atom_get_min_pstate(void)
@@ -1767,7 +1871,7 @@ static struct cpufreq_driver intel_pstate = {
1767 .flags = CPUFREQ_CONST_LOOPS, 1871 .flags = CPUFREQ_CONST_LOOPS,
1768 .verify = intel_pstate_verify_policy, 1872 .verify = intel_pstate_verify_policy,
1769 .setpolicy = intel_pstate_set_policy, 1873 .setpolicy = intel_pstate_set_policy,
1770 .resume = intel_pstate_hwp_set_policy, 1874 .resume = intel_pstate_resume,
1771 .get = intel_pstate_get, 1875 .get = intel_pstate_get,
1772 .init = intel_pstate_cpu_init, 1876 .init = intel_pstate_cpu_init,
1773 .exit = intel_pstate_cpu_exit, 1877 .exit = intel_pstate_cpu_exit,