author		Peter Zijlstra <peterz@infradead.org>	2014-02-14 06:25:08 -0500
committer	Ingo Molnar <mingo@kernel.org>		2014-02-27 06:41:02 -0500
commit		37e117c07b89194aae7062bc63bde1104c03db02
tree		770312bf789e367b8f2102e9f8de743f05efeeac /kernel/sched/core.c
parent		06d50c65b1043b166d102accc081093f79d8f7e5
sched: Guarantee task priority in pick_next_task()
Michael spotted that the idle_balance() push down created a task
priority problem.
Previously, when idle_balance() was called before pick_next_task(), an
rt/dl task slipping in while rq->lock was dropped was not a problem:
pick_next_task() would still begin at the highest priority class and
pick it up.
Similarly for pre_schedule(): during the rt class's pre_schedule() a
dl task could slip in.
But now that both are pulled into the pick_next_task() loop, we never
go back and retry the higher priority classes.
Cure this by creating a re-start condition in pick_next_task() and
triggering it from pick_next_task_{rt,fair}().
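The re-start is signalled with a RETRY_TASK sentinel. For reference, a
minimal sketch of the class-side trigger, paraphrasing the companion
kernel/sched/sched.h and kernel/sched/rt.c changes (not shown on this
page, whose diffstat is limited to core.c); treat the exact shape as
approximate rather than the verbatim patch:

	/* kernel/sched/sched.h: sentinel distinct from NULL and any task */
	#define RETRY_TASK	((void *)-1UL)

	/* kernel/sched/rt.c: sketch of pick_next_task_rt() */
	static struct task_struct *
	pick_next_task_rt(struct rq *rq, struct task_struct *prev)
	{
		if (need_pull_rt_task(rq, prev)) {
			pull_rt_task(rq);
			/*
			 * pull_rt_task() can drop (and re-acquire) rq->lock;
			 * a dl task can slip in meanwhile, so ask the core
			 * loop to re-start selection from the highest class.
			 */
			if (unlikely(rq->dl.dl_nr_running))
				return RETRY_TASK;
		}

		/* put_prev_task() handling elided from this sketch */
		return _pick_next_task_rt(rq);
	}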
It also fixes a live-lock where we get stuck in pick_next_task_fair()
due to idle_balance() seeing !0 nr_running but there not actually
being any fair tasks about.
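The fair side follows the same pattern; roughly, at the tail of
pick_next_task_fair() (again a sketch under the same caveat,
new_tasks being an int declared at the top of the function):

	idle:
		new_tasks = idle_balance(rq);
		/*
		 * idle_balance() releases (and re-acquires) rq->lock, so a
		 * higher priority task may have appeared; re-start the whole
		 * pick rather than looping in here forever.
		 */
		if (rq->nr_running != rq->cfs.h_nr_running)
			return RETRY_TASK;

		if (new_tasks)
			goto again;	/* fair tasks were pulled; retry CFS */

		return NULL;		/* truly nothing to do */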
Reported-by: Michael Wang <wangyun@linux.vnet.ibm.com>
Fixes: 38033c37faab ("sched: Push down pre_schedule() and idle_balance()")
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20140224121218.GR15586@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c	12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a8a73b8897bf..cde573d3f12e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2586,24 +2586,28 @@ static inline void schedule_debug(struct task_struct *prev)
 static inline struct task_struct *
 pick_next_task(struct rq *rq, struct task_struct *prev)
 {
-	const struct sched_class *class;
+	const struct sched_class *class = &fair_sched_class;
 	struct task_struct *p;
 
 	/*
 	 * Optimization: we know that if all tasks are in
 	 * the fair class we can call that function directly:
 	 */
-	if (likely(prev->sched_class == &fair_sched_class &&
+	if (likely(prev->sched_class == class &&
 		   rq->nr_running == rq->cfs.h_nr_running)) {
 		p = fair_sched_class.pick_next_task(rq, prev);
-		if (likely(p))
+		if (likely(p && p != RETRY_TASK))
 			return p;
 	}
 
+again:
 	for_each_class(class) {
 		p = class->pick_next_task(rq, prev);
-		if (p)
+		if (p) {
+			if (unlikely(p == RETRY_TASK))
+				goto again;
 			return p;
+		}
 	}
 
 	BUG(); /* the idle class will always have a runnable task */
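The re-start gives the priority guarantee because for_each_class()
walks the classes in priority order, so a dl task that slipped in
while a lower class dropped rq->lock is seen first on the next pass,
and the idle class always returns a task, which terminates the walk
(hence the BUG() stays unreachable). In kernels of this era the
iterator reads, from kernel/sched/sched.h:

	#define sched_class_highest	(&stop_sched_class)

	#define for_each_class(class) \
		for (class = sched_class_highest; class; class = class->next)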