diff options
author | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2016-05-11 13:11:26 -0400 |
---|---|---|
committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2016-05-11 16:58:38 -0400 |
commit | 1aa7a6e2b8105f22a5f7d6def281f776459c95ba (patch) | |
tree | 5b386334f71df41f50d601973d9a6f69262d2479 | |
parent | 8edb0a6e48d147bb2aa466c58e03c52d2b0d6ee7 (diff) |
intel_pstate: Clean up get_target_pstate_use_performance()
The comments and the core_busy variable name in
get_target_pstate_use_performance() are totally confusing,
so modify them to reflect what's going on.
The results of the computations should be the same as before.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-rw-r--r-- | drivers/cpufreq/intel_pstate.c | 27 |
1 file changed, 11 insertions(+), 16 deletions(-)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index ff5c591578ee..b76a98dd9988 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
@@ -1259,43 +1259,38 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) | |||
1259 | 1259 | ||
1260 | static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu) | 1260 | static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu) |
1261 | { | 1261 | { |
1262 | int32_t core_busy, max_pstate, current_pstate, sample_ratio; | 1262 | int32_t perf_scaled, max_pstate, current_pstate, sample_ratio; |
1263 | u64 duration_ns; | 1263 | u64 duration_ns; |
1264 | 1264 | ||
1265 | /* | 1265 | /* |
1266 | * core_busy is the ratio of actual performance to max | 1266 | * perf_scaled is the average performance during the last sampling |
1267 | * max_pstate is the max non turbo pstate available | 1267 | * period scaled by the ratio of the maximum P-state to the P-state |
1268 | * current_pstate was the pstate that was requested during | 1268 | * requested last time (in percent). That measures the system's |
1269 | * the last sample period. | 1269 | * response to the previous P-state selection. |
1270 | * | ||
1271 | * We normalize core_busy, which was our actual percent | ||
1272 | * performance to what we requested during the last sample | ||
1273 | * period. The result will be a percentage of busy at a | ||
1274 | * specified pstate. | ||
1275 | */ | 1270 | */ |
1276 | max_pstate = cpu->pstate.max_pstate_physical; | 1271 | max_pstate = cpu->pstate.max_pstate_physical; |
1277 | current_pstate = cpu->pstate.current_pstate; | 1272 | current_pstate = cpu->pstate.current_pstate; |
1278 | core_busy = mul_ext_fp(cpu->sample.core_avg_perf, | 1273 | perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf, |
1279 | div_fp(100 * max_pstate, current_pstate)); | 1274 | div_fp(100 * max_pstate, current_pstate)); |
1280 | 1275 | ||
1281 | /* | 1276 | /* |
1282 | * Since our utilization update callback will not run unless we are | 1277 | * Since our utilization update callback will not run unless we are |
1283 | * in C0, check if the actual elapsed time is significantly greater (3x) | 1278 | * in C0, check if the actual elapsed time is significantly greater (3x) |
1284 | * than our sample interval. If it is, then we were idle for a long | 1279 | * than our sample interval. If it is, then we were idle for a long |
1285 | * enough period of time to adjust our busyness. | 1280 | * enough period of time to adjust our performance metric. |
1286 | */ | 1281 | */ |
1287 | duration_ns = cpu->sample.time - cpu->last_sample_time; | 1282 | duration_ns = cpu->sample.time - cpu->last_sample_time; |
1288 | if ((s64)duration_ns > pid_params.sample_rate_ns * 3) { | 1283 | if ((s64)duration_ns > pid_params.sample_rate_ns * 3) { |
1289 | sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns); | 1284 | sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns); |
1290 | core_busy = mul_fp(core_busy, sample_ratio); | 1285 | perf_scaled = mul_fp(perf_scaled, sample_ratio); |
1291 | } else { | 1286 | } else { |
1292 | sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc); | 1287 | sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc); |
1293 | if (sample_ratio < int_tofp(1)) | 1288 | if (sample_ratio < int_tofp(1)) |
1294 | core_busy = 0; | 1289 | perf_scaled = 0; |
1295 | } | 1290 | } |
1296 | 1291 | ||
1297 | cpu->sample.busy_scaled = core_busy; | 1292 | cpu->sample.busy_scaled = perf_scaled; |
1298 | return cpu->pstate.current_pstate - pid_calc(&cpu->pid, core_busy); | 1293 | return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled); |
1299 | } | 1294 | } |
1300 | 1295 | ||
1301 | static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) | 1296 | static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) |