author     Claudio Scordino <claudio@evidence.eu.com>  2018-03-13 06:35:40 -0400
committer  Thomas Gleixner <tglx@linutronix.de>        2018-03-23 17:48:22 -0400
commit     e97a90f7069b740575bcb1dae86596e0484b8957 (patch)
tree       9886e76139ec6e5406b9cd341e9bb518551c3d1f
parent     d519329f72a6f36bc4f2b85452640cfe583b4f81 (diff)
sched/cpufreq: Rate limits for SCHED_DEADLINE
When the SCHED_DEADLINE scheduling class increases the CPU utilization, it
should not wait for the rate limit, otherwise it may miss some deadlines.

Tests using rt-app on Exynos5422 with up to 10 SCHED_DEADLINE tasks have
shown reductions of up to 10% in deadline misses, with a negligible
increase in energy consumption (measured through Baylibre Cape).
Signed-off-by: Claudio Scordino <claudio@evidence.eu.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Joel Fernandes <joelaf@google.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: linux-pm@vger.kernel.org
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Patrick Bellasi <patrick.bellasi@arm.com>
Cc: Todd Kjos <tkjos@android.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Link: https://lkml.kernel.org/r/1520937340-2755-1-git-send-email-claudio@evidence.eu.com
 kernel/sched/cpufreq_schedutil.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 89fe78ecb88c..2b124811947d 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -267,6 +267,16 @@ static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
 static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
 #endif /* CONFIG_NO_HZ_COMMON */
 
+/*
+ * Make sugov_should_update_freq() ignore the rate limit when DL
+ * has increased the utilization.
+ */
+static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
+{
+        if (cpu_util_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->util_dl)
+                sg_policy->need_freq_update = true;
+}
+
 static void sugov_update_single(struct update_util_data *hook, u64 time,
                                 unsigned int flags)
 {
@@ -279,6 +289,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
         sugov_set_iowait_boost(sg_cpu, time, flags);
         sg_cpu->last_update = time;
 
+        ignore_dl_rate_limit(sg_cpu, sg_policy);
+
         if (!sugov_should_update_freq(sg_policy, time))
                 return;
 
@@ -356,6 +368,8 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
         sugov_set_iowait_boost(sg_cpu, time, flags);
         sg_cpu->last_update = time;
 
+        ignore_dl_rate_limit(sg_cpu, sg_policy);
+
         if (sugov_should_update_freq(sg_policy, time)) {
                 next_f = sugov_next_freq_shared(sg_cpu, time);
                 sugov_update_commit(sg_policy, time, next_f);
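
For context (not part of this patch): the new helper works by setting
sg_policy->need_freq_update, which sugov_should_update_freq() checks before
applying the rate limit. A simplified sketch of that check is shown below;
it assumes the sugov_policy fields last_freq_update_time and
freq_update_delay_ns of this era of kernel/sched/cpufreq_schedutil.c, and it
omits the remote-update and fast-switch handling present in the real function.

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
        s64 delta_ns;

        /*
         * An increase in DL utilization has set need_freq_update, so skip
         * the rate limit below and re-evaluate the frequency immediately.
         */
        if (unlikely(sg_policy->need_freq_update))
                return true;

        /* Otherwise honor the rate limit derived from rate_limit_us. */
        delta_ns = time - sg_policy->last_freq_update_time;
        return delta_ns >= sg_policy->freq_update_delay_ns;
}

Since both sugov_update_single() and sugov_update_shared() call
ignore_dl_rate_limit() right before this check, a DL utilization increase
forces a frequency update on the very next callback instead of waiting out
freq_update_delay_ns.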