Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index deeb1f8e0c30..6107a0cd6325 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1682,6 +1682,11 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 
 	p->prio = effective_prio(p);
 
+	if (rt_prio(p->prio))
+		p->sched_class = &rt_sched_class;
+	else
+		p->sched_class = &fair_sched_class;
+
 	if (!p->sched_class->task_new || !sysctl_sched_child_runs_first ||
 			(clone_flags & CLONE_VM) || task_cpu(p) != this_cpu ||
 			!current->se.on_rq) {
@@ -4550,10 +4555,7 @@ asmlinkage long sys_sched_yield(void)
 	struct rq *rq = this_rq_lock();
 
 	schedstat_inc(rq, yld_cnt);
-	if (unlikely(rq->nr_running == 1))
-		schedstat_inc(rq, yld_act_empty);
-	else
-		current->sched_class->yield_task(rq, current);
+	current->sched_class->yield_task(rq, current);
 
 	/*
 	 * Since we are going to call schedule() anyway, there's
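
For illustration, a minimal standalone sketch of the scheduler-class dispatch pattern these hunks rely on: each task carries a pointer to a table of scheduling hooks, the table is chosen from the task's priority, and callers such as sys_sched_yield() invoke the hook through that pointer instead of branching on scheduler internals. The struct layout, hook signatures, and the MAX_RT_PRIO cutoff below are simplified assumptions for the sketch, not the kernel's actual definitions.

/*
 * Simplified, standalone illustration (not kernel code) of the dispatch
 * pattern used by the patch: pick a sched_class from the task's priority,
 * then call through its function pointers.
 */
#include <stdio.h>

#define MAX_RT_PRIO 100		/* assumed realtime cutoff for this sketch */

struct task;

struct sched_class {
	const char *name;
	void (*yield_task)(struct task *p);	/* hypothetical signature */
};

struct task {
	int prio;
	const struct sched_class *sched_class;
};

static int rt_prio(int prio)
{
	return prio < MAX_RT_PRIO;	/* below the cutoff counts as realtime */
}

static void rt_yield(struct task *p)
{
	printf("prio %d: realtime yield policy\n", p->prio);
}

static void fair_yield(struct task *p)
{
	printf("prio %d: fair yield policy\n", p->prio);
}

static const struct sched_class rt_sched_class   = { "rt",   rt_yield };
static const struct sched_class fair_sched_class = { "fair", fair_yield };

int main(void)
{
	struct task p = { .prio = 120, .sched_class = NULL };

	/* pick the class from the priority, as wake_up_new_task() now does */
	if (rt_prio(p.prio))
		p.sched_class = &rt_sched_class;
	else
		p.sched_class = &fair_sched_class;

	/* delegate unconditionally, as the new sys_sched_yield() does */
	p.sched_class->yield_task(&p);
	return 0;
}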