aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched.c
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2011-01-17 11:03:27 -0500
committerIngo Molnar <mingo@elte.hu>2011-01-26 06:33:22 -0500
commitda7a735e51f9622eb3e1672594d4a41da01d7e4f (patch)
tree27623dcd39c52a80b79e0ee86ab426fc9c7e2b46 /kernel/sched.c
parenta8941d7ec81678fb69aea7183338175f112f3e0d (diff)
sched: Fix switch_from_fair()
When a task is taken out of the fair class we must ensure the vruntime is
properly normalized because when we put it back in it will assume to be
normalized.

The case that goes wrong is when changing away from the fair class while
sleeping. Sleeping tasks have non-normalized vruntime in order to make
sleeper-fairness work. So treat the switch away from fair as a wakeup and
preserve the relative vruntime.

Also update sysrq-n to call the ->switch_{to,from} methods.

Reported-by: Onkalo Samu <samu.p.onkalo@nokia.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--kernel/sched.c25
1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 8b718b59b09f..78fa75394011 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2057,14 +2057,14 @@ inline int task_curr(const struct task_struct *p)
 
 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 				       const struct sched_class *prev_class,
-				       int oldprio, int running)
+				       int oldprio)
 {
 	if (prev_class != p->sched_class) {
 		if (prev_class->switched_from)
-			prev_class->switched_from(rq, p, running);
-		p->sched_class->switched_to(rq, p, running);
-	} else
-		p->sched_class->prio_changed(rq, p, oldprio, running);
+			prev_class->switched_from(rq, p);
+		p->sched_class->switched_to(rq, p);
+	} else if (oldprio != p->prio)
+		p->sched_class->prio_changed(rq, p, oldprio);
 }
 
 static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
@@ -2598,6 +2598,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.sum_exec_runtime		= 0;
 	p->se.prev_sum_exec_runtime	= 0;
 	p->se.nr_migrations		= 0;
+	p->se.vruntime			= 0;
 
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
@@ -4696,11 +4697,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
-	if (on_rq) {
+	if (on_rq)
 		enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
 
-		check_class_changed(rq, p, prev_class, oldprio, running);
-	}
+	check_class_changed(rq, p, prev_class, oldprio);
 	task_rq_unlock(rq, &flags);
 }
 
@@ -5028,11 +5028,10 @@ recheck:
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
-	if (on_rq) {
+	if (on_rq)
 		activate_task(rq, p, 0);
 
-		check_class_changed(rq, p, prev_class, oldprio, running);
-	}
+	check_class_changed(rq, p, prev_class, oldprio);
 	__task_rq_unlock(rq);
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
@@ -8237,6 +8236,8 @@ EXPORT_SYMBOL(__might_sleep);
 #ifdef CONFIG_MAGIC_SYSRQ
 static void normalize_task(struct rq *rq, struct task_struct *p)
 {
+	const struct sched_class *prev_class = p->sched_class;
+	int old_prio = p->prio;
 	int on_rq;
 
 	on_rq = p->se.on_rq;
@@ -8247,6 +8248,8 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 		activate_task(rq, p, 0);
 		resched_task(rq->curr);
 	}
+
+	check_class_changed(rq, p, prev_class, old_prio);
 }
 
 void normalize_rt_tasks(void)