aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched/deadline.c
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2014-01-23 14:32:21 -0500
committerIngo Molnar <mingo@kernel.org>2014-02-11 03:58:10 -0500
commit38033c37faab850ed5d33bb675c4de6c66be84d8 (patch)
tree7a00530a9a1346f29f8899ff949bf07a9e7db7ee /kernel/sched/deadline.c
parent6c3b4d44ba2838f00614a5a2d777d4401e0bfd71 (diff)
sched: Push down pre_schedule() and idle_balance()
This patch both merges idle_balance() and pre_schedule() and pushes both of them into pick_next_task(). Conceptually pre_schedule() and idle_balance() are rather similar, both are used to pull more work onto the current CPU. We cannot however first move idle_balance() into pre_schedule_fair() since there is no guarantee the last runnable task is a fair task, and thus we would miss newidle balances. Similarly, the dl and rt pre_schedule calls must be run before idle_balance() since their respective tasks have higher priority and it would not do to delay their execution searching for less important tasks first. However, by noticing that pick_next_task() already traverses the sched_class hierarchy in the right order, we can get the right behaviour and do away with both calls. We must however change the special case optimization to also require that prev is of sched_class_fair, otherwise we can miss doing a dl or rt pull where we needed one. Signed-off-by: Peter Zijlstra <peterz@infradead.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/n/tip-a8k6vvaebtn64nie345kx1je@git.kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/deadline.c')
-rw-r--r--kernel/sched/deadline.c15
1 file changed, 7 insertions, 8 deletions
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 50797d576080..ed31ef66ab9d 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -944,6 +944,8 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
944 resched_task(rq->curr); 944 resched_task(rq->curr);
945} 945}
946 946
947static int pull_dl_task(struct rq *this_rq);
948
947#endif /* CONFIG_SMP */ 949#endif /* CONFIG_SMP */
948 950
949/* 951/*
@@ -998,6 +1000,11 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
998 1000
999 dl_rq = &rq->dl; 1001 dl_rq = &rq->dl;
1000 1002
1003#ifdef CONFIG_SMP
1004 if (dl_task(prev))
1005 pull_dl_task(rq);
1006#endif
1007
1001 if (unlikely(!dl_rq->dl_nr_running)) 1008 if (unlikely(!dl_rq->dl_nr_running))
1002 return NULL; 1009 return NULL;
1003 1010
@@ -1429,13 +1436,6 @@ skip:
1429 return ret; 1436 return ret;
1430} 1437}
1431 1438
1432static void pre_schedule_dl(struct rq *rq, struct task_struct *prev)
1433{
1434 /* Try to pull other tasks here */
1435 if (dl_task(prev))
1436 pull_dl_task(rq);
1437}
1438
1439static void post_schedule_dl(struct rq *rq) 1439static void post_schedule_dl(struct rq *rq)
1440{ 1440{
1441 push_dl_tasks(rq); 1441 push_dl_tasks(rq);
@@ -1628,7 +1628,6 @@ const struct sched_class dl_sched_class = {
1628 .set_cpus_allowed = set_cpus_allowed_dl, 1628 .set_cpus_allowed = set_cpus_allowed_dl,
1629 .rq_online = rq_online_dl, 1629 .rq_online = rq_online_dl,
1630 .rq_offline = rq_offline_dl, 1630 .rq_offline = rq_offline_dl,
1631 .pre_schedule = pre_schedule_dl,
1632 .post_schedule = post_schedule_dl, 1631 .post_schedule = post_schedule_dl,
1633 .task_woken = task_woken_dl, 1632 .task_woken = task_woken_dl,
1634#endif 1633#endif