aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/cpufreq/intel_pstate.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/cpufreq/intel_pstate.c')
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 38 ++++++++++++++++++++++++++++++++++--
 1 file changed, 36 insertions(+), 2 deletions(-)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 1405b393c93d..742eefba12c2 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -199,7 +199,14 @@ static signed int pid_calc(struct _pid *pid, int32_t busy)
 
 	pid->integral += fp_error;
 
-	/* limit the integral term */
+	/*
+	 * We limit the integral here so that it will never
+	 * get higher than 30. This prevents it from becoming
+	 * too large an input over long periods of time and allows
+	 * it to get factored out sooner.
+	 *
+	 * The value of 30 was chosen through experimentation.
+	 */
 	integral_limit = int_tofp(30);
 	if (pid->integral > integral_limit)
 		pid->integral = integral_limit;
@@ -616,6 +623,11 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 	if (limits.no_turbo || limits.turbo_disabled)
 		max_perf = cpu->pstate.max_pstate;
 
+	/*
+	 * performance can be limited by user through sysfs, by cpufreq
+	 * policy, or by cpu specific default values determined through
+	 * experimentation.
+	 */
 	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
 	*max = clamp_t(int, max_perf_adj,
 			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
@@ -717,11 +729,29 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 	u32 duration_us;
 	u32 sample_time;
 
+	/*
+	 * core_busy is the ratio of actual performance to max
+	 * max_pstate is the max non turbo pstate available
+	 * current_pstate was the pstate that was requested during
+	 * the last sample period.
+	 *
+	 * We normalize core_busy, which was our actual percent
+	 * performance to what we requested during the last sample
+	 * period. The result will be a percentage of busy at a
+	 * specified pstate.
+	 */
 	core_busy = cpu->sample.core_pct_busy;
 	max_pstate = int_tofp(cpu->pstate.max_pstate);
 	current_pstate = int_tofp(cpu->pstate.current_pstate);
 	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
 
+	/*
+	 * Since we have a deferred timer, it will not fire unless
+	 * we are in C0. So, determine if the actual elapsed time
+	 * is significantly greater (3x) than our sample interval. If it
+	 * is, then we were idle for a long enough period of time
+	 * to adjust our busyness.
+	 */
 	sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
 	duration_us = (u32) ktime_us_delta(cpu->sample.time,
 		cpu->last_sample_time);
@@ -948,6 +978,7 @@ static struct cpufreq_driver intel_pstate_driver = {
 
 static int __initdata no_load;
 static int __initdata no_hwp;
+static unsigned int force_load;
 
 static int intel_pstate_msrs_not_valid(void)
 {
@@ -1094,7 +1125,8 @@ static bool intel_pstate_platform_pwr_mgmt_exists(void)
 		case PSS:
 			return intel_pstate_no_acpi_pss();
 		case PPC:
-			return intel_pstate_has_acpi_ppc();
+			return intel_pstate_has_acpi_ppc() &&
+				(!force_load);
 		}
 	}
 
@@ -1175,6 +1207,8 @@ static int __init intel_pstate_setup(char *str)
 		no_load = 1;
 	if (!strcmp(str, "no_hwp"))
 		no_hwp = 1;
+	if (!strcmp(str, "force"))
+		force_load = 1;
 	return 0;
 }
 early_param("intel_pstate", intel_pstate_setup);