path: root/kernel/sched/fair.c
author		bsegall@google.com <bsegall@google.com>	2019-06-06 13:21:01 -0400
committer	Ingo Molnar <mingo@kernel.org>	2019-06-17 06:16:01 -0400
commit		66567fcbaecac455caa1b13643155d686b51ce63 (patch)
tree		8abd2701725d26e700bbd1c00dee312a3158769a /kernel/sched/fair.c
parent		aacedf26fb7601222f2452cf0a54cab4fee160c5 (diff)
sched/fair: Don't push cfs_bandwith slack timers forward
When a cfs_rq sleeps and returns its quota, we delay for 5ms before
waking any throttled cfs_rqs to coalesce with other cfs_rqs going to
sleep, as this has to be done outside of the rq lock we hold.

The current code waits for 5ms without any sleeps, instead of waiting
for 5ms from the first sleep, which can delay the unthrottle more than
we want. Switch this around so that we can't push this forward forever.

This requires an extra flag rather than using hrtimer_active, since we
need to start a new timer if the current one is in the process of
finishing.

Signed-off-by: Ben Segall <bsegall@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Xunlei Pang <xlpang@linux.alibaba.com>
Acked-by: Phil Auld <pauld@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/xm26a7euy6iq.fsf_-_@bsegall-linux.svl.corp.google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
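[Editor's note: the following is a minimal userspace sketch of the "don't push the slack timer forward" idea, not the kernel implementation. The names struct bandwidth, arm_slack() and slack_fired() are hypothetical stand-ins for struct cfs_bandwidth, start_cfs_slack_bandwidth() and do_sched_cfs_slack_timer(); only the slack_started flag mirrors the patch. Without the flag, every sleep would re-arm the timer and push the deferred unthrottle out indefinitely; with it, only the first sleep after the timer last fired arms a new 5ms window.]

#include <stdbool.h>
#include <stdio.h>

struct bandwidth {
	bool slack_started;      /* mirrors cfs_b->slack_started */
	long long timer_expiry;  /* 0 = not armed; otherwise absolute ms */
};

#define SLACK_PERIOD_MS 5

/* Called whenever a runqueue goes to sleep and returns its quota. */
static void arm_slack(struct bandwidth *b, long long now_ms)
{
	if (b->slack_started)
		return;                       /* don't push an existing deferral forward */
	b->slack_started = true;
	b->timer_expiry = now_ms + SLACK_PERIOD_MS;
}

/* Called from the timer callback; in the kernel this runs under cfs_b->lock. */
static void slack_fired(struct bandwidth *b)
{
	b->slack_started = false;             /* a later sleep may arm a fresh timer */
	b->timer_expiry = 0;
	/* ... distribute remaining runtime to throttled runqueues ... */
}

int main(void)
{
	struct bandwidth b = { 0 };

	/* Sleeps at t=0, 2, 4 ms: the timer still fires at t=5, not t=9. */
	arm_slack(&b, 0);
	arm_slack(&b, 2);
	arm_slack(&b, 4);
	printf("timer expires at t=%lldms\n", b.timer_expiry);

	slack_fired(&b);
	arm_slack(&b, 7);                     /* next sleep opens a new 5ms window */
	printf("timer expires at t=%lldms\n", b.timer_expiry);
	return 0;
}

[This also shows why a plain hrtimer_active() check would not be enough in the kernel: when the timer callback is in the process of finishing, hrtimer_active() can still report true even though the deferral is over, so a fresh timer would never be started; clearing an explicit flag under the lock at the top of the callback avoids that.]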
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--	kernel/sched/fair.c	7
1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4c8f45ed093c..3c11dcdedcbc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4729,6 +4729,11 @@ static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
 	if (runtime_refresh_within(cfs_b, min_left))
 		return;
 
+	/* don't push forwards an existing deferred unthrottle */
+	if (cfs_b->slack_started)
+		return;
+	cfs_b->slack_started = true;
+
 	hrtimer_start(&cfs_b->slack_timer,
 			ns_to_ktime(cfs_bandwidth_slack_period),
 			HRTIMER_MODE_REL);
@@ -4782,6 +4787,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 
 	/* confirm we're still not at a refresh boundary */
 	raw_spin_lock_irqsave(&cfs_b->lock, flags);
+	cfs_b->slack_started = false;
 	if (cfs_b->distribute_running) {
 		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 		return;
@@ -4945,6 +4951,7 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	cfs_b->slack_timer.function = sched_cfs_slack_timer;
 	cfs_b->distribute_running = 0;
+	cfs_b->slack_started = false;
 }
 
 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)