commit f95d4eaee6d0207bff2dc93371133d31227d4cfb
tree a0225a0b9c0c9785130f8149331821ff1132144c
parent 5feeb7837a448f659e0aaa19fb446b1d9a4b323a
author    Peter Zijlstra <peterz@infradead.org>  2019-05-29 16:36:40 -0400
committer Peter Zijlstra <peterz@infradead.org>  2019-08-08 03:09:30 -0400
sched/{rt,deadline}: Fix set_next_task vs pick_next_task
Because pick_next_task() implies set_curr_task() and some of the
details haven't mattered too much, some of what _should_ be in
set_curr_task() ended up in pick_next_task(); correct this.

This prepares the way for a pick_next_task() variant that does not
affect the current state, allowing remote picking.
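
For illustration only (not part of the patch), here is a minimal
user-space sketch of the call structure the patch establishes; struct
rq, the string task representation, and main() are stand-ins, and only
the funneling of both entry points through a single set_next_task_*()
helper mirrors the kernel code:

	#include <stdio.h>

	struct rq { const char *curr; };

	/*
	 * Everything that makes @p the current task belongs here, and
	 * only here, so picking a task and re-setting the current task
	 * perform the exact same state transition.
	 */
	static void set_next_task_dl(struct rq *rq, const char *p)
	{
		rq->curr = p;
		printf("set_next_task_dl: %s (clock, push/pull, hrtick)\n", p);
	}

	static const char *pick_next_task_dl(struct rq *rq)
	{
		const char *p = "dl_task";	/* stand-in for dl_task_of(dl_se) */

		set_next_task_dl(rq, p);	/* no pick-only state left behind */
		return p;
	}

	static void set_curr_task_dl(struct rq *rq)
	{
		set_next_task_dl(rq, rq->curr);	/* identical transition */
	}

	int main(void)
	{
		struct rq rq = { .curr = "idle" };

		pick_next_task_dl(&rq);		/* pick implies set */
		set_curr_task_dl(&rq);		/* set reuses the same helper */
		return 0;
	}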
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Aaron Lu <aaron.lwe@gmail.com>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: mingo@kernel.org
Cc: Phil Auld <pauld@redhat.com>
Cc: Julien Desfossez <jdesfossez@digitalocean.com>
Cc: Nishanth Aravamudan <naravamudan@digitalocean.com>
Link: https://lkml.kernel.org/r/38c61d5240553e043c27c5e00b9dd0d184dd6081.1559129225.git.vpillai@digitalocean.com
 kernel/sched/deadline.c | 22 +++++++++++-----------
 kernel/sched/rt.c       | 26 +++++++++++++-------------
 2 files changed, 24 insertions(+), 24 deletions(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 039dde2b1dac..2dc2784b196c 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1727,12 +1727,20 @@ static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
 }
 #endif
 
-static inline void set_next_task(struct rq *rq, struct task_struct *p)
+static void set_next_task_dl(struct rq *rq, struct task_struct *p)
 {
 	p->se.exec_start = rq_clock_task(rq);
 
 	/* You can't push away the running task */
 	dequeue_pushable_dl_task(rq, p);
+
+	if (hrtick_enabled(rq))
+		start_hrtick_dl(rq, p);
+
+	if (rq->curr->sched_class != &dl_sched_class)
+		update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
+
+	deadline_queue_push_tasks(rq);
 }
 
 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
@@ -1791,15 +1799,7 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 	p = dl_task_of(dl_se);
 
-	set_next_task(rq, p);
-
-	if (hrtick_enabled(rq))
-		start_hrtick_dl(rq, p);
-
-	deadline_queue_push_tasks(rq);
-
-	if (rq->curr->sched_class != &dl_sched_class)
-		update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
+	set_next_task_dl(rq, p);
 
 	return p;
 }
@@ -1846,7 +1846,7 @@ static void task_fork_dl(struct task_struct *p)
 
 static void set_curr_task_dl(struct rq *rq)
 {
-	set_next_task(rq, rq->curr);
+	set_next_task_dl(rq, rq->curr);
 }
 
 #ifdef CONFIG_SMP
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index a532558a5176..40bb71004325 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1498,12 +1498,22 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
 #endif
 }
 
-static inline void set_next_task(struct rq *rq, struct task_struct *p)
+static inline void set_next_task_rt(struct rq *rq, struct task_struct *p)
 {
 	p->se.exec_start = rq_clock_task(rq);
 
 	/* The running task is never eligible for pushing */
 	dequeue_pushable_task(rq, p);
+
+	/*
+	 * If prev task was rt, put_prev_task() has already updated the
+	 * utilization. We only care of the case where we start to schedule a
+	 * rt task
+	 */
+	if (rq->curr->sched_class != &rt_sched_class)
+		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
+
+	rt_queue_push_tasks(rq);
 }
 
 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
@@ -1577,17 +1587,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 	p = _pick_next_task_rt(rq);
 
-	set_next_task(rq, p);
-
-	rt_queue_push_tasks(rq);
-
-	/*
-	 * If prev task was rt, put_prev_task() has already updated the
-	 * utilization. We only care of the case where we start to schedule a
-	 * rt task
-	 */
-	if (rq->curr->sched_class != &rt_sched_class)
-		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
+	set_next_task_rt(rq, p);
 
 	return p;
 }
@@ -2356,7 +2356,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 
 static void set_curr_task_rt(struct rq *rq)
 {
-	set_next_task(rq, rq->curr);
+	set_next_task_rt(rq, rq->curr);
 }
 
 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)