author    Wanpeng Li <kernellwp@gmail.com>    2015-12-03 04:42:10 -0500
committer Ingo Molnar <mingo@kernel.org>      2016-01-06 05:05:56 -0500
commit    7d92de3a8285ab3dfd68aa3a99823acd5b190444 (patch)
tree      d94027b610ef0795ec1f31d4148e32d7d14bcd7e
parent    567bee2803cb46caeb6011de5b738fde33dc3896 (diff)
sched/deadline: Fix the earliest_dl.next logic
earliest_dl.next should cache the deadline of the earliest ready task
that is also enqueued in the pushable rbtree, as the pull algorithm uses
this information to find candidates for migration: if the
earliest_dl.next deadline of the source rq is earlier than the
earliest_dl.curr deadline of the destination rq, a task from the source
rq can be pulled.

However, the current implementation only guarantees that
earliest_dl.next is the deadline of the next ready task, not of the
next pushable task; this can result in holding both rqs' locks and
finding nothing to migrate because of affinity constraints. In
addition, the current logic doesn't update the next candidate for
pushing in pick_next_task_dl(), even though the running task is never
eligible.

This patch fixes both problems by updating earliest_dl.next when a
pushable dl task is enqueued/dequeued, similar to what we already do
for RT.

Tested-by: Luca Abeni <luca.abeni@unitn.it>
Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Juri Lelli <juri.lelli@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1449135730-27202-1-git-send-email-wanpeng.li@hotmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
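For context (not part of the patch): the consumer of earliest_dl.next is the
pull path. Below is a minimal sketch modeled on the kernel's pull_dl_task();
locking, the double-lock dance and the actual migration are elided, so treat
it purely as an illustration of the earliest_dl.next vs. earliest_dl.curr
comparison, not the real implementation. sketch_pull_dl_task() is an
illustrative name.

/*
 * Illustrative sketch only: why earliest_dl.next must track the
 * earliest *pushable* task. Modeled on pull_dl_task(); error paths,
 * locking and the migration itself are omitted.
 */
static void sketch_pull_dl_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, cpu;
	struct rq *src_rq;

	/* Scan every CPU in the root domain that has deadline tasks. */
	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
		if (cpu == this_cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * Only go for src_rq's lock when its earliest pushable
		 * deadline (earliest_dl.next) is earlier than our
		 * earliest running deadline (earliest_dl.curr). If
		 * earliest_dl.next were merely the next *ready* task's
		 * deadline, this test could pass for a task we cannot
		 * migrate (affinity), and we would take both rq locks
		 * for nothing: exactly the problem this patch fixes.
		 */
		if (src_rq->dl.dl_nr_running &&
		    dl_time_before(src_rq->dl.earliest_dl.next,
				   this_rq->dl.earliest_dl.curr)) {
			/* double_lock_balance(), pick task, migrate ... */
		}
	}
}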
 kernel/sched/deadline.c | 59 +++++++----------------------------------------------------
 1 file changed, 7 insertions(+), 52 deletions(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 8b0a15e285f9..cd64c979d0e1 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -176,8 +176,10 @@ static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 		}
 	}
 
-	if (leftmost)
+	if (leftmost) {
 		dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
+		dl_rq->earliest_dl.next = p->dl.deadline;
+	}
 
 	rb_link_node(&p->pushable_dl_tasks, parent, link);
 	rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
@@ -195,6 +197,10 @@ static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 
 		next_node = rb_next(&p->pushable_dl_tasks);
 		dl_rq->pushable_dl_tasks_leftmost = next_node;
+		if (next_node) {
+			dl_rq->earliest_dl.next = rb_entry(next_node,
+				struct task_struct, pushable_dl_tasks)->dl.deadline;
+		}
 	}
 
 	rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
@@ -782,42 +788,14 @@ static void update_curr_dl(struct rq *rq)
 
 #ifdef CONFIG_SMP
 
-static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);
-
-static inline u64 next_deadline(struct rq *rq)
-{
-	struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);
-
-	if (next && dl_prio(next->prio))
-		return next->dl.deadline;
-	else
-		return 0;
-}
-
 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 {
 	struct rq *rq = rq_of_dl_rq(dl_rq);
 
 	if (dl_rq->earliest_dl.curr == 0 ||
 	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
-		/*
-		 * If the dl_rq had no -deadline tasks, or if the new task
-		 * has shorter deadline than the current one on dl_rq, we
-		 * know that the previous earliest becomes our next earliest,
-		 * as the new task becomes the earliest itself.
-		 */
-		dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
 		dl_rq->earliest_dl.curr = deadline;
 		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
-	} else if (dl_rq->earliest_dl.next == 0 ||
-		   dl_time_before(deadline, dl_rq->earliest_dl.next)) {
-		/*
-		 * On the other hand, if the new -deadline task has a
-		 * a later deadline than the earliest one on dl_rq, but
-		 * it is earlier than the next (if any), we must
-		 * recompute the next-earliest.
-		 */
-		dl_rq->earliest_dl.next = next_deadline(rq);
 	}
 }
 
@@ -839,7 +817,6 @@ static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 
 		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
 		dl_rq->earliest_dl.curr = entry->deadline;
-		dl_rq->earliest_dl.next = next_deadline(rq);
 		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
 	}
 }
@@ -1274,28 +1251,6 @@ static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
 	return 0;
 }
 
-/* Returns the second earliest -deadline task, NULL otherwise */
-static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
-{
-	struct rb_node *next_node = rq->dl.rb_leftmost;
-	struct sched_dl_entity *dl_se;
-	struct task_struct *p = NULL;
-
-next_node:
-	next_node = rb_next(next_node);
-	if (next_node) {
-		dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
-		p = dl_task_of(dl_se);
-
-		if (pick_dl_task(rq, p, cpu))
-			return p;
-
-		goto next_node;
-	}
-
-	return NULL;
-}
-
 /*
  * Return the earliest pushable rq's task, which is suitable to be executed
  * on the CPU, NULL otherwise: