author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-12-04 03:59:02 -0500
committer  Ingo Molnar <mingo@elte.hu>                2009-12-09 04:03:10 -0500
commit     57785df5ac53c70da9fb53696130f3c551bfe1f9 (patch)
tree       5653c894d74a1555f480835b013fdb7b512f8b69 /kernel
parent     cd8ad40de36c2fe75f3b731bd70198b385895246 (diff)
sched: Fix task priority bug
83f9ac removed a call to effective_prio() in wake_up_new_task(), which
leads to tasks running at MAX_PRIO.

This is caused by the idle thread being set to MAX_PRIO before forking
off init. O(1) used that to make sure idle was always preempted; CFS
uses check_preempt_curr_idle() for that, so we can safely remove this
bit of legacy code.

Reported-by: Mike Galbraith <efault@gmx.de>
Tested-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1259754383.4003.610.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
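[Editor's note] For readers less familiar with the scheduler internals, the stand-alone C model below is a hypothetical sketch (not kernel source) of the mechanism the message relies on: check_preempt_curr() dispatches through the current task's scheduling class, and the idle class's hook unconditionally requests a reschedule, so idle is always preempted regardless of its priority value. The struct and function names mirror the kernel's but are heavily simplified.

/*
 * Stand-alone model (not kernel code) of the dispatch described above:
 * check_preempt_curr() defers to the current task's scheduling class,
 * and the idle class always asks to be preempted, so the idle thread
 * no longer needs an artificial MAX_PRIO to lose every comparison.
 */
#include <stdbool.h>
#include <stdio.h>

struct rq;
struct task_struct;

struct sched_class {
	/* decide whether the newly woken task p should preempt rq->curr */
	void (*check_preempt_curr)(struct rq *rq, struct task_struct *p);
};

struct task_struct {
	const char *comm;
	const struct sched_class *sched_class;
	int prio;	/* no special-casing of MAX_PRIO needed */
};

struct rq {
	struct task_struct *curr;
	bool need_resched;
};

/* idle class: anything that wakes up preempts the idle task */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p)
{
	(void)p;
	rq->need_resched = true;
}

static const struct sched_class idle_sched_class = {
	.check_preempt_curr = check_preempt_curr_idle,
};

/* fair class (toy version): preempt only on better (numerically lower) prio */
static void check_preempt_curr_fair(struct rq *rq, struct task_struct *p)
{
	if (p->prio < rq->curr->prio)
		rq->need_resched = true;
}

static const struct sched_class fair_sched_class = {
	.check_preempt_curr = check_preempt_curr_fair,
};

/* mirrors the call left in pull_task(): the class decides, not a prio test */
static void check_preempt_curr(struct rq *rq, struct task_struct *p)
{
	rq->curr->sched_class->check_preempt_curr(rq, p);
}

int main(void)
{
	struct task_struct idle = { "swapper", &idle_sched_class, 120 };
	struct task_struct init = { "init",    &fair_sched_class, 120 };
	struct rq rq = { &idle, false };

	check_preempt_curr(&rq, &init);	/* init wakes while idle runs */
	printf("need_resched after wakeup over idle: %d\n", rq.need_resched);
	return 0;
}

Because preemption of idle is decided by the class hook rather than by a priority comparison, the MAX_PRIO assignments removed in the diff below are no longer needed.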
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c | 6 ------
1 file changed, 0 insertions(+), 6 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 71eb0622f548..3878f5018007 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3158,10 +3158,6 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
 	deactivate_task(src_rq, p, 0);
 	set_task_cpu(p, this_cpu);
 	activate_task(this_rq, p, 0);
-	/*
-	 * Note that idle threads have a prio of MAX_PRIO, for this test
-	 * to be always true for them.
-	 */
 	check_preempt_curr(this_rq, p, 0);
 }
 
@@ -6992,7 +6988,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	__sched_fork(idle);
 	idle->se.exec_start = sched_clock();
 
-	idle->prio = idle->normal_prio = MAX_PRIO;
 	cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
 	__set_task_cpu(idle, cpu);
 
@@ -7696,7 +7691,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		spin_lock_irq(&rq->lock);
 		update_rq_clock(rq);
 		deactivate_task(rq, rq->idle, 0);
-		rq->idle->static_prio = MAX_PRIO;
 		__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
 		rq->idle->sched_class = &idle_sched_class;
 		migrate_dead_tasks(cpu);