about · summary · refs · log · tree · commit · diff · stats
diff options
context:
space:
mode:
authorPatrick Bellasi <patrick.bellasi@arm.com>2018-05-24 10:10:22 -0400
committerIngo Molnar <mingo@kernel.org>2018-05-25 02:04:52 -0400
commit8ecf04e11283a28ca88b8b8049ac93c3a99fcd2c (patch)
tree9f8a78cd56eb5117f6c9e6b43b2e9e3d96bd3915
parent0548dc5cde19e88b8495cb74e3893d8c8713392a (diff)
sched/cpufreq: Modify aggregate utilization to always include blocked FAIR utilization
Since the refactoring introduced by:

   commit 8f111bc357aa ("cpufreq/schedutil: Rewrite CPUFREQ_RT support")

we aggregate FAIR utilization only if this class has runnable tasks.
This was mainly due to avoid the risk to stay on an high frequency just
because of the blocked utilization of a CPU not being properly decayed
while the CPU was idle.

However, since:

   commit 31e77c93e432 ("sched/fair: Update blocked load when newly idle")

the FAIR blocked utilization is properly decayed also for IDLE CPUs.

This allows us to use the FAIR blocked utilization as a safe mechanism
to gracefully reduce the frequency only if no FAIR tasks show up on a
CPU for a reasonable period of time.

Moreover, we also reduce the frequency drops of CPUs running periodic
tasks which, depending on the task periodicity and the time required
for a frequency switch, was increasing the chances to introduce some
undesirable performance variations.

Reported-by: Vincent Guittot <vincent.guittot@linaro.org>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Patrick Bellasi <patrick.bellasi@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Acked-by: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Joel Fernandes <joelaf@google.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rafael J . Wysocki <rafael.j.wysocki@intel.com>
Cc: Steve Muckle <smuckle@google.com>
Link: http://lkml.kernel.org/r/20180524141023.13765-2-patrick.bellasi@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  kernel/sched/cpufreq_schedutil.c | 17 +++++++----------
 1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index e13df951aca7..28592b62b1d5 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -183,22 +183,21 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
 static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
 {
 	struct rq *rq = cpu_rq(sg_cpu->cpu);
-	unsigned long util;
 
-	if (rq->rt.rt_nr_running) {
-		util = sg_cpu->max;
-	} else {
-		util = sg_cpu->util_dl;
-		if (rq->cfs.h_nr_running)
-			util += sg_cpu->util_cfs;
-	}
+	if (rq->rt.rt_nr_running)
+		return sg_cpu->max;
 
 	/*
+	 * Utilization required by DEADLINE must always be granted while, for
+	 * FAIR, we use blocked utilization of IDLE CPUs as a mechanism to
+	 * gracefully reduce the frequency when no tasks show up for longer
+	 * periods of time.
+	 *
 	 * Ideally we would like to set util_dl as min/guaranteed freq and
 	 * util_cfs + util_dl as requested freq. However, cpufreq is not yet
 	 * ready for such an interface. So, we only do the latter for now.
 	 */
-	return min(util, sg_cpu->max);
+	return min(sg_cpu->max, (sg_cpu->util_dl + sg_cpu->util_cfs));
 }
 
 static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time, unsigned int flags)