diff options
| author | Vincent Guittot <vincent.guittot@linaro.org> | 2018-06-26 09:53:22 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2018-07-03 03:17:28 -0400 |
| commit | 296b2ffe7fa9ed756c41415c6b1512bc4ad687b1 (patch) | |
| tree | 11267161f1a90159ac9a47dcf48dbadfd44ef9cc | |
| parent | d9c0ffcabd6aae7ff1e34e8078354c13bb9f1183 (diff) | |
sched/rt: Fix call to cpufreq_update_util()
With commit:
8f111bc357aa ("cpufreq/schedutil: Rewrite CPUFREQ_RT support")
the schedutil governor uses rq->rt.rt_nr_running to detect whether an
RT task is currently running on the CPU and to set frequency to max
if necessary.
cpufreq_update_util() is called in enqueue/dequeue_top_rt_rq() but
rq->rt.rt_nr_running has not been updated yet when dequeue_top_rt_rq() is
called so schedutil still considers that an RT task is running when the
last task is dequeued. The update of rq->rt.rt_nr_running happens later
in dequeue_rt_stack().
In fact, we can take advantage of the sequence in which rt entities are
dequeued and then re-enqueued whenever an rt task is enqueued or dequeued;
As a result enqueue_top_rt_rq() is always called when a task is
enqueued or dequeued and also when groups are throttled or unthrottled.
The only place that does not use enqueue_top_rt_rq() is when the root rt_rq
is throttled.
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: efault@gmx.de
Cc: juri.lelli@redhat.com
Cc: patrick.bellasi@arm.com
Cc: viresh.kumar@linaro.org
Fixes: 8f111bc357aa ('cpufreq/schedutil: Rewrite CPUFREQ_RT support')
Link: http://lkml.kernel.org/r/1530021202-21695-1-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
| -rw-r--r-- | kernel/sched/cpufreq_schedutil.c | 2 | ||||
| -rw-r--r-- | kernel/sched/rt.c | 16 | ||||
| -rw-r--r-- | kernel/sched/sched.h | 5 |
3 files changed, 16 insertions, 7 deletions
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 3cde46483f0a..c907fde01eaa 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c | |||
| @@ -192,7 +192,7 @@ static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu) | |||
| 192 | { | 192 | { |
| 193 | struct rq *rq = cpu_rq(sg_cpu->cpu); | 193 | struct rq *rq = cpu_rq(sg_cpu->cpu); |
| 194 | 194 | ||
| 195 | if (rq->rt.rt_nr_running) | 195 | if (rt_rq_is_runnable(&rq->rt)) |
| 196 | return sg_cpu->max; | 196 | return sg_cpu->max; |
| 197 | 197 | ||
| 198 | /* | 198 | /* |
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 47556b0c9a95..572567078b60 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c | |||
| @@ -508,8 +508,11 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) | |||
| 508 | 508 | ||
| 509 | rt_se = rt_rq->tg->rt_se[cpu]; | 509 | rt_se = rt_rq->tg->rt_se[cpu]; |
| 510 | 510 | ||
| 511 | if (!rt_se) | 511 | if (!rt_se) { |
| 512 | dequeue_top_rt_rq(rt_rq); | 512 | dequeue_top_rt_rq(rt_rq); |
| 513 | /* Kick cpufreq (see the comment in kernel/sched/sched.h). */ | ||
| 514 | cpufreq_update_util(rq_of_rt_rq(rt_rq), 0); | ||
| 515 | } | ||
| 513 | else if (on_rt_rq(rt_se)) | 516 | else if (on_rt_rq(rt_se)) |
| 514 | dequeue_rt_entity(rt_se, 0); | 517 | dequeue_rt_entity(rt_se, 0); |
| 515 | } | 518 | } |
| @@ -1001,8 +1004,6 @@ dequeue_top_rt_rq(struct rt_rq *rt_rq) | |||
| 1001 | sub_nr_running(rq, rt_rq->rt_nr_running); | 1004 | sub_nr_running(rq, rt_rq->rt_nr_running); |
| 1002 | rt_rq->rt_queued = 0; | 1005 | rt_rq->rt_queued = 0; |
| 1003 | 1006 | ||
| 1004 | /* Kick cpufreq (see the comment in kernel/sched/sched.h). */ | ||
| 1005 | cpufreq_update_util(rq, 0); | ||
| 1006 | } | 1007 | } |
| 1007 | 1008 | ||
| 1008 | static void | 1009 | static void |
| @@ -1014,11 +1015,14 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq) | |||
| 1014 | 1015 | ||
| 1015 | if (rt_rq->rt_queued) | 1016 | if (rt_rq->rt_queued) |
| 1016 | return; | 1017 | return; |
| 1017 | if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running) | 1018 | |
| 1019 | if (rt_rq_throttled(rt_rq)) | ||
| 1018 | return; | 1020 | return; |
| 1019 | 1021 | ||
| 1020 | add_nr_running(rq, rt_rq->rt_nr_running); | 1022 | if (rt_rq->rt_nr_running) { |
| 1021 | rt_rq->rt_queued = 1; | 1023 | add_nr_running(rq, rt_rq->rt_nr_running); |
| 1024 | rt_rq->rt_queued = 1; | ||
| 1025 | } | ||
| 1022 | 1026 | ||
| 1023 | /* Kick cpufreq (see the comment in kernel/sched/sched.h). */ | 1027 | /* Kick cpufreq (see the comment in kernel/sched/sched.h). */ |
| 1024 | cpufreq_update_util(rq, 0); | 1028 | cpufreq_update_util(rq, 0); |
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 6601baf2361c..27ddec334601 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h | |||
| @@ -609,6 +609,11 @@ struct rt_rq { | |||
| 609 | #endif | 609 | #endif |
| 610 | }; | 610 | }; |
| 611 | 611 | ||
| 612 | static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq) | ||
| 613 | { | ||
| 614 | return rt_rq->rt_queued && rt_rq->rt_nr_running; | ||
| 615 | } | ||
| 616 | |||
| 612 | /* Deadline class' related fields in a runqueue */ | 617 | /* Deadline class' related fields in a runqueue */ |
| 613 | struct dl_rq { | 618 | struct dl_rq { |
| 614 | /* runqueue is an rbtree, ordered by deadline */ | 619 | /* runqueue is an rbtree, ordered by deadline */ |
