 kernel/sched/core.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index bbfb917a9b49..6699d43a8843 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3273,10 +3273,15 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	struct task_struct *p;
 
 	/*
-	 * Optimization: we know that if all tasks are in
-	 * the fair class we can call that function directly:
+	 * Optimization: we know that if all tasks are in the fair class we can
+	 * call that function directly, but only if the @prev task wasn't of a
+	 * higher scheduling class, because otherwise those lose the
+	 * opportunity to pull in more work from other CPUs.
 	 */
-	if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
+	if (likely((prev->sched_class == &idle_sched_class ||
+		    prev->sched_class == &fair_sched_class) &&
+		   rq->nr_running == rq->cfs.h_nr_running)) {
+
 		p = fair_sched_class.pick_next_task(rq, prev, rf);
 		if (unlikely(p == RETRY_TASK))
 			goto again;
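
For readers who want to exercise the guard on its own, here is a stand-alone
userspace model of the patched condition. It is a sketch, not kernel code:
struct rq, struct sched_class, struct task_struct and the class singletons are
reduced to minimal stubs, and rt_sched_class is added here purely as a
stand-in for a higher scheduling class.

#include <stdbool.h>
#include <stdio.h>

/* Minimal stub: the kernel's struct sched_class is far richer. */
struct sched_class { const char *name; };

/* Stand-ins for the kernel's per-class singletons. */
static const struct sched_class idle_sched_class = { "idle" };
static const struct sched_class fair_sched_class = { "fair" };
static const struct sched_class rt_sched_class   = { "rt" }; /* illustrative */

struct task_struct { const struct sched_class *sched_class; };

struct rq {
	unsigned int nr_running;                   /* all runnable tasks    */
	struct { unsigned int h_nr_running; } cfs; /* fair-class tasks only */
};

/*
 * The fast path may call fair_sched_class.pick_next_task() directly only
 * when every runnable task is in the fair class AND @prev was idle or
 * fair. If @prev belonged to a higher class, the slow path must run so
 * that class keeps its opportunity to pull in work from other CPUs.
 */
static bool can_take_fair_fast_path(const struct rq *rq,
				    const struct task_struct *prev)
{
	return (prev->sched_class == &idle_sched_class ||
		prev->sched_class == &fair_sched_class) &&
	       rq->nr_running == rq->cfs.h_nr_running;
}

int main(void)
{
	struct rq rq = { .nr_running = 2, .cfs = { .h_nr_running = 2 } };
	struct task_struct fair_prev = { &fair_sched_class };
	struct task_struct rt_prev   = { &rt_sched_class };

	printf("prev=fair: %d\n", can_take_fair_fast_path(&rq, &fair_prev));
	printf("prev=rt:   %d\n", can_take_fair_fast_path(&rq, &rt_prev));
	return 0;
}

Both calls see identical counts (nr_running == cfs.h_nr_running), yet only the
fair @prev may skip straight to fair_sched_class.pick_next_task(); with the
pre-patch check alone, the RT case would wrongly take the fast path too.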