path: root/kernel/sched/rt.c
author    Peter Zijlstra <peterz@infradead.org>    2019-05-29 16:36:44 -0400
committer Peter Zijlstra <peterz@infradead.org>    2019-08-08 03:09:31 -0400
commit    67692435c411e5c53a1c588ecca2037aebd81f2e (patch)
tree      53d9c44e6e80654a60c652493d4b0cc948ce5818 /kernel/sched/rt.c
parent    5f2a45fc9e89e022233085e6f0f352eb6ff770bb (diff)
sched: Rework pick_next_task() slow-path
Avoid the RETRY_TASK case in the pick_next_task() slow path.

By doing the put_prev_task() early, we get the rt/deadline pull done, and by
testing rq->nr_running we know if we need newidle_balance(). This then gives
a stable state to pick a task from.

Since the fast path is fair-only, the other classes will always be called
with pick_next_task(.prev=NULL, .rf=NULL) and we can simplify.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Aaron Lu <aaron.lwe@gmail.com>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: mingo@kernel.org
Cc: Phil Auld <pauld@redhat.com>
Cc: Julien Desfossez <jdesfossez@digitalocean.com>
Cc: Nishanth Aravamudan <naravamudan@digitalocean.com>
Link: https://lkml.kernel.org/r/aa34d24b36547139248f32a30138791ac6c02bd6.1559129225.git.vpillai@digitalocean.com
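For illustration, here is a minimal sketch of the reworked slow path the
message describes: put the previous task first so the rt/deadline pull
happens, balance if the runqueue went empty, then pick from the now-stable
state with .prev=NULL, .rf=NULL. This is a simplified paraphrase, not the
literal core.c hunk; the exact put_prev_task() call chain and the restart
label placement are assumptions.

	/*
	 * Sketch of the reworked pick_next_task() slow path (simplified;
	 * not the literal core.c change from this commit).
	 */
restart:
	/* Put prev first, so rt/deadline can pull when rq priority drops. */
	put_prev_task(rq, prev);

	/* After the put, rq->nr_running tells us if we need to balance. */
	if (!rq->nr_running)
		newidle_balance(rq, rf);	/* may drop and retake rq->lock */

	/* Stable state: every class is picked with .prev=NULL, .rf=NULL. */
	for_each_class(class) {
		p = class->pick_next_task(rq, NULL, NULL);
		if (p)
			return p;
	}

	BUG();	/* the idle class should always have a runnable task */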
Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--    kernel/sched/rt.c    29
1 file changed, 1 insertion(+), 28 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index dbdabd76f192..858c4cc6f99b 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1553,38 +1553,11 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	struct task_struct *p;
 	struct rt_rq *rt_rq = &rq->rt;
 
-	if (need_pull_rt_task(rq, prev)) {
-		/*
-		 * This is OK, because current is on_cpu, which avoids it being
-		 * picked for load-balance and preemption/IRQs are still
-		 * disabled avoiding further scheduler activity on it and we're
-		 * being very careful to re-start the picking loop.
-		 */
-		rq_unpin_lock(rq, rf);
-		pull_rt_task(rq);
-		rq_repin_lock(rq, rf);
-		/*
-		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
-		 * means a dl or stop task can slip in, in which case we need
-		 * to re-start task selection.
-		 */
-		if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
-			     rq->dl.dl_nr_running))
-			return RETRY_TASK;
-	}
-
-	/*
-	 * We may dequeue prev's rt_rq in put_prev_task().
-	 * So, we update time before rt_queued check.
-	 */
-	if (prev->sched_class == &rt_sched_class)
-		update_curr_rt(rq);
+	WARN_ON_ONCE(prev || rf);
 
 	if (!rt_rq->rt_queued)
 		return NULL;
 
-	put_prev_task(rq, prev);
-
 	p = _pick_next_task_rt(rq);
 
 	set_next_task_rt(rq, p);
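Reconstructed from the hunk above, the function after this patch reduces to
the following. The trailing return lies outside the hunk's context, so it is
an assumption here, not part of the shown diff.

static struct task_struct *
pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	struct task_struct *p;
	struct rt_rq *rt_rq = &rq->rt;

	/* The reworked slow path always calls in with .prev=NULL, .rf=NULL. */
	WARN_ON_ONCE(prev || rf);

	if (!rt_rq->rt_queued)
		return NULL;

	p = _pick_next_task_rt(rq);
	set_next_task_rt(rq, p);

	return p;	/* assumed tail, outside the hunk's context */
}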