aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2010-02-17 03:05:48 -0500
committerThomas Gleixner <tglx@linutronix.de>2010-02-17 05:58:18 -0500
commit83ab0aa0d5623d823444db82c3b3c34d7ec364ae (patch)
tree7ec14a5e163cb2e17fbfe06b368b4969b78879bd /kernel
parent6e40f5bbbc734231bc5809d3eb785e3c21f275d7 (diff)
sched: Don't use possibly stale sched_class
setscheduler() saves task->sched_class outside of the rq->lock held region for a check after the setscheduler changes have become effective. That might result in checking a stale value. rtmutex_setprio() has the same problem, though it is protected by p->pi_lock against setscheduler(), but for correctness sake (and to avoid bad examples) it needs to be fixed as well. Retrieve task->sched_class inside of the rq->lock held region. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Acked-by: Peter Zijlstra <peterz@infradead.org> Cc: stable@kernel.org
Diffstat (limited to 'kernel')
-rw-r--r-- kernel/sched.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index af5fa239804d..0b914fc90a55 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4249,7 +4249,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	unsigned long flags;
 	int oldprio, on_rq, running;
 	struct rq *rq;
-	const struct sched_class *prev_class = p->sched_class;
+	const struct sched_class *prev_class;
 
 	BUG_ON(prio < 0 || prio > MAX_PRIO);
 
@@ -4257,6 +4257,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	update_rq_clock(rq);
 
 	oldprio = p->prio;
+	prev_class = p->sched_class;
 	on_rq = p->se.on_rq;
 	running = task_current(rq, p);
 	if (on_rq)
@@ -4476,7 +4477,7 @@ static int __sched_setscheduler(struct task_struct *p, int policy,
 {
 	int retval, oldprio, oldpolicy = -1, on_rq, running;
 	unsigned long flags;
-	const struct sched_class *prev_class = p->sched_class;
+	const struct sched_class *prev_class;
 	struct rq *rq;
 	int reset_on_fork;
 
@@ -4590,6 +4591,7 @@ recheck:
 	p->sched_reset_on_fork = reset_on_fork;
 
 	oldprio = p->prio;
+	prev_class = p->sched_class;
 	__setscheduler(rq, p, policy, param->sched_priority);
 
 	if (running)