aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRafael J. Wysocki <rafael.j.wysocki@intel.com>2016-04-08 19:25:58 -0400
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2016-04-08 19:25:58 -0400
commit22590efb98ae0c84f798a9938c0b6d97bc89adf5 (patch)
tree0ee9817ec59901350d6deef093d613e427284619
parent2249c00a0bf854adf49e8e3c2973feddfbaae71f (diff)
intel_pstate: Avoid pointless FRAC_BITS shifts under div_fp()
There are multiple places in intel_pstate where int_tofp() is applied to both arguments of div_fp(), but this is pointless, because int_tofp() simply shifts its argument to the left by FRAC_BITS which mathematically is equivalent to multiplication by 2^FRAC_BITS, so if this is done to both arguments of a division, the extra factors will cancel each other during that operation anyway. Drop the pointless int_tofp() applied to div_fp() arguments throughout the driver. Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-rw-r--r--drivers/cpufreq/intel_pstate.c31
1 file changed, 13 insertions(+), 18 deletions(-)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 6c7cff13f0ed..8a368d2ee25c 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -341,17 +341,17 @@ static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
341 341
342static inline void pid_p_gain_set(struct _pid *pid, int percent) 342static inline void pid_p_gain_set(struct _pid *pid, int percent)
343{ 343{
344 pid->p_gain = div_fp(int_tofp(percent), int_tofp(100)); 344 pid->p_gain = div_fp(percent, 100);
345} 345}
346 346
347static inline void pid_i_gain_set(struct _pid *pid, int percent) 347static inline void pid_i_gain_set(struct _pid *pid, int percent)
348{ 348{
349 pid->i_gain = div_fp(int_tofp(percent), int_tofp(100)); 349 pid->i_gain = div_fp(percent, 100);
350} 350}
351 351
352static inline void pid_d_gain_set(struct _pid *pid, int percent) 352static inline void pid_d_gain_set(struct _pid *pid, int percent)
353{ 353{
354 pid->d_gain = div_fp(int_tofp(percent), int_tofp(100)); 354 pid->d_gain = div_fp(percent, 100);
355} 355}
356 356
357static signed int pid_calc(struct _pid *pid, int32_t busy) 357static signed int pid_calc(struct _pid *pid, int32_t busy)
@@ -529,7 +529,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
529 529
530 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; 530 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
531 no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1; 531 no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
532 turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total)); 532 turbo_fp = div_fp(no_turbo, total);
533 turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100))); 533 turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
534 return sprintf(buf, "%u\n", turbo_pct); 534 return sprintf(buf, "%u\n", turbo_pct);
535} 535}
@@ -600,8 +600,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
600 limits->max_perf_pct); 600 limits->max_perf_pct);
601 limits->max_perf_pct = max(limits->min_perf_pct, 601 limits->max_perf_pct = max(limits->min_perf_pct,
602 limits->max_perf_pct); 602 limits->max_perf_pct);
603 limits->max_perf = div_fp(int_tofp(limits->max_perf_pct), 603 limits->max_perf = div_fp(limits->max_perf_pct, 100);
604 int_tofp(100));
605 604
606 if (hwp_active) 605 if (hwp_active)
607 intel_pstate_hwp_set_online_cpus(); 606 intel_pstate_hwp_set_online_cpus();
@@ -625,8 +624,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
625 limits->min_perf_pct); 624 limits->min_perf_pct);
626 limits->min_perf_pct = min(limits->max_perf_pct, 625 limits->min_perf_pct = min(limits->max_perf_pct,
627 limits->min_perf_pct); 626 limits->min_perf_pct);
628 limits->min_perf = div_fp(int_tofp(limits->min_perf_pct), 627 limits->min_perf = div_fp(limits->min_perf_pct, 100);
629 int_tofp(100));
630 628
631 if (hwp_active) 629 if (hwp_active)
632 intel_pstate_hwp_set_online_cpus(); 630 intel_pstate_hwp_set_online_cpus();
@@ -1011,8 +1009,8 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu)
1011 struct sample *sample = &cpu->sample; 1009 struct sample *sample = &cpu->sample;
1012 int64_t core_pct; 1010 int64_t core_pct;
1013 1011
1014 core_pct = int_tofp(sample->aperf) * int_tofp(100); 1012 core_pct = sample->aperf * int_tofp(100);
1015 core_pct = div64_u64(core_pct, int_tofp(sample->mperf)); 1013 core_pct = div64_u64(core_pct, sample->mperf);
1016 1014
1017 sample->core_pct_busy = (int32_t)core_pct; 1015 sample->core_pct_busy = (int32_t)core_pct;
1018} 1016}
@@ -1115,8 +1113,8 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
1115 * specified pstate. 1113 * specified pstate.
1116 */ 1114 */
1117 core_busy = cpu->sample.core_pct_busy; 1115 core_busy = cpu->sample.core_pct_busy;
1118 max_pstate = int_tofp(cpu->pstate.max_pstate_physical); 1116 max_pstate = cpu->pstate.max_pstate_physical;
1119 current_pstate = int_tofp(cpu->pstate.current_pstate); 1117 current_pstate = cpu->pstate.current_pstate;
1120 core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate)); 1118 core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
1121 1119
1122 /* 1120 /*
@@ -1127,8 +1125,7 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
1127 */ 1125 */
1128 duration_ns = cpu->sample.time - cpu->last_sample_time; 1126 duration_ns = cpu->sample.time - cpu->last_sample_time;
1129 if ((s64)duration_ns > pid_params.sample_rate_ns * 3) { 1127 if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
1130 sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns), 1128 sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
1131 int_tofp(duration_ns));
1132 core_busy = mul_fp(core_busy, sample_ratio); 1129 core_busy = mul_fp(core_busy, sample_ratio);
1133 } 1130 }
1134 1131
@@ -1328,10 +1325,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1328 /* Make sure min_perf_pct <= max_perf_pct */ 1325 /* Make sure min_perf_pct <= max_perf_pct */
1329 limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct); 1326 limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
1330 1327
1331 limits->min_perf = div_fp(int_tofp(limits->min_perf_pct), 1328 limits->min_perf = div_fp(limits->min_perf_pct, 100);
1332 int_tofp(100)); 1329 limits->max_perf = div_fp(limits->max_perf_pct, 100);
1333 limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
1334 int_tofp(100));
1335 1330
1336 out: 1331 out:
1337 intel_pstate_set_update_util_hook(policy->cpu); 1332 intel_pstate_set_update_util_hook(policy->cpu);