aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChen Yu <yu.c.chen@intel.com>2018-06-07 21:07:33 -0400
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2018-06-08 05:40:44 -0400
commit7592019634f8473f0b0973ce79297183077bdbc2 (patch)
treeca24a1ed9dff3c4eb4e70e27e382aaf51163eb3f
parent41ab43c9c89e06ff08a4750d1b09e227ea97894f (diff)
cpufreq: governors: Fix long idle detection logic in load calculation
According to the current code implementation, detecting a long idle period is done by checking whether the interval between two adjacent utilization update handlers is long enough. Although this mechanism can detect whether the idle period is long enough (no utilization hooks invoked during the idle period), it does not cover a corner case: if a task has occupied the CPU for too long, causing no context switches during that period, then no utilization handler will be launched until this high-priority task is scheduled out. As a result, the idle_periods field might be calculated incorrectly, because it regards the 100% load as 0% and confuses the conservative governor, which uses this field. Change the detection to compare the idle_time with sampling_rate directly. Reported-by: Artem S. Tashkinov <t.artem@mailcity.com> Signed-off-by: Chen Yu <yu.c.chen@intel.com> Acked-by: Viresh Kumar <viresh.kumar@linaro.org> Cc: All applicable <stable@vger.kernel.org> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-rw-r--r-- drivers/cpufreq/cpufreq_governor.c | 12
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 871bf9cf55cf..1d50e97d49f1 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -165,7 +165,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
 			 * calls, so the previous load value can be used then.
 			 */
 			load = j_cdbs->prev_load;
-		} else if (unlikely(time_elapsed > 2 * sampling_rate &&
+		} else if (unlikely((int)idle_time > 2 * sampling_rate &&
 				    j_cdbs->prev_load)) {
 			/*
 			 * If the CPU had gone completely idle and a task has
@@ -185,10 +185,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
 			 * clear prev_load to guarantee that the load will be
 			 * computed again next time.
 			 *
-			 * Detecting this situation is easy: the governor's
-			 * utilization update handler would not have run during
-			 * CPU-idle periods. Hence, an unusually large
-			 * 'time_elapsed' (as compared to the sampling rate)
+			 * Detecting this situation is easy: an unusually large
+			 * 'idle_time' (as compared to the sampling rate)
 			 * indicates this scenario.
 			 */
 			load = j_cdbs->prev_load;
@@ -217,8 +215,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
 		j_cdbs->prev_load = load;
 	}
 
-	if (time_elapsed > 2 * sampling_rate) {
-		unsigned int periods = time_elapsed / sampling_rate;
+	if (unlikely((int)idle_time > 2 * sampling_rate)) {
+		unsigned int periods = idle_time / sampling_rate;
 
 		if (periods < idle_periods)
 			idle_periods = periods;