aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2015-05-05 13:49:49 -0400
committerIngo Molnar <mingo@kernel.org>2015-05-08 05:53:55 -0400
commit0782e63bc6fe7e2d3408d250df11d388b7799c6b (patch)
treec8b370c5093a2c89bd9ed384971e189e09ec49b7 /kernel
parent3e0283a53f7d2f2dae7bc4aa7f3104cb5988018f (diff)
sched: Handle priority boosted tasks proper in setscheduler()
Ronny reported that the following scenario is not handled correctly: T1 (prio = 10) lock(rtmutex); T2 (prio = 20) lock(rtmutex) boost T1 T1 (prio = 20) sys_set_scheduler(prio = 30) T1 prio = 30 .... sys_set_scheduler(prio = 10) T1 prio = 30 The last step is wrong as T1 should now be back at prio 20. Commit c365c292d059 ("sched: Consider pi boosting in setscheduler()") only handles the case where a boosted task tries to lower its priority. Fix it by taking the new effective priority into account for the decision whether a change of the priority is required. Reported-by: Ronny Meeus <ronny.meeus@gmail.com> Tested-by: Steven Rostedt <rostedt@goodmis.org> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Reviewed-by: Steven Rostedt <rostedt@goodmis.org> Cc: <stable@vger.kernel.org> Cc: Borislav Petkov <bp@alien8.de> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Mike Galbraith <umgwanakikbuti@gmail.com> Fixes: c365c292d059 ("sched: Consider pi boosting in setscheduler()") Link: http://lkml.kernel.org/r/alpine.DEB.2.11.1505051806060.4225@nanos Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/locking/rtmutex.c12
-rw-r--r--kernel/sched/core.c26
2 files changed, 21 insertions, 17 deletions
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index b73279367087..b025295f4966 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -265,15 +265,17 @@ struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
265} 265}
266 266
267/* 267/*
268 * Called by sched_setscheduler() to check whether the priority change 268 * Called by sched_setscheduler() to get the priority which will be
269 * is overruled by a possible priority boosting. 269 * effective after the change.
270 */ 270 */
271int rt_mutex_check_prio(struct task_struct *task, int newprio) 271int rt_mutex_get_effective_prio(struct task_struct *task, int newprio)
272{ 272{
273 if (!task_has_pi_waiters(task)) 273 if (!task_has_pi_waiters(task))
274 return 0; 274 return newprio;
275 275
276 return task_top_pi_waiter(task)->task->prio <= newprio; 276 if (task_top_pi_waiter(task)->task->prio <= newprio)
277 return task_top_pi_waiter(task)->task->prio;
278 return newprio;
277} 279}
278 280
279/* 281/*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fe22f7510bce..34db9bf892a3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3300,15 +3300,18 @@ static void __setscheduler_params(struct task_struct *p,
3300 3300
3301/* Actually do priority change: must hold pi & rq lock. */ 3301/* Actually do priority change: must hold pi & rq lock. */
3302static void __setscheduler(struct rq *rq, struct task_struct *p, 3302static void __setscheduler(struct rq *rq, struct task_struct *p,
3303 const struct sched_attr *attr) 3303 const struct sched_attr *attr, bool keep_boost)
3304{ 3304{
3305 __setscheduler_params(p, attr); 3305 __setscheduler_params(p, attr);
3306 3306
3307 /* 3307 /*
3308 * If we get here, there was no pi waiters boosting the 3308 * Keep a potential priority boosting if called from
3309 * task. It is safe to use the normal prio. 3309 * sched_setscheduler().
3310 */ 3310 */
3311 p->prio = normal_prio(p); 3311 if (keep_boost)
3312 p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
3313 else
3314 p->prio = normal_prio(p);
3312 3315
3313 if (dl_prio(p->prio)) 3316 if (dl_prio(p->prio))
3314 p->sched_class = &dl_sched_class; 3317 p->sched_class = &dl_sched_class;
@@ -3408,7 +3411,7 @@ static int __sched_setscheduler(struct task_struct *p,
3408 int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 : 3411 int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
3409 MAX_RT_PRIO - 1 - attr->sched_priority; 3412 MAX_RT_PRIO - 1 - attr->sched_priority;
3410 int retval, oldprio, oldpolicy = -1, queued, running; 3413 int retval, oldprio, oldpolicy = -1, queued, running;
3411 int policy = attr->sched_policy; 3414 int new_effective_prio, policy = attr->sched_policy;
3412 unsigned long flags; 3415 unsigned long flags;
3413 const struct sched_class *prev_class; 3416 const struct sched_class *prev_class;
3414 struct rq *rq; 3417 struct rq *rq;
@@ -3590,15 +3593,14 @@ change:
3590 oldprio = p->prio; 3593 oldprio = p->prio;
3591 3594
3592 /* 3595 /*
3593 * Special case for priority boosted tasks. 3596 * Take priority boosted tasks into account. If the new
3594 * 3597 * effective priority is unchanged, we just store the new
3595 * If the new priority is lower or equal (user space view)
3596 * than the current (boosted) priority, we just store the new
3597 * normal parameters and do not touch the scheduler class and 3598 * normal parameters and do not touch the scheduler class and
3598 * the runqueue. This will be done when the task deboost 3599 * the runqueue. This will be done when the task deboost
3599 * itself. 3600 * itself.
3600 */ 3601 */
3601 if (rt_mutex_check_prio(p, newprio)) { 3602 new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
3603 if (new_effective_prio == oldprio) {
3602 __setscheduler_params(p, attr); 3604 __setscheduler_params(p, attr);
3603 task_rq_unlock(rq, p, &flags); 3605 task_rq_unlock(rq, p, &flags);
3604 return 0; 3606 return 0;
@@ -3612,7 +3614,7 @@ change:
3612 put_prev_task(rq, p); 3614 put_prev_task(rq, p);
3613 3615
3614 prev_class = p->sched_class; 3616 prev_class = p->sched_class;
3615 __setscheduler(rq, p, attr); 3617 __setscheduler(rq, p, attr, true);
3616 3618
3617 if (running) 3619 if (running)
3618 p->sched_class->set_curr_task(rq); 3620 p->sched_class->set_curr_task(rq);
@@ -7346,7 +7348,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
7346 queued = task_on_rq_queued(p); 7348 queued = task_on_rq_queued(p);
7347 if (queued) 7349 if (queued)
7348 dequeue_task(rq, p, 0); 7350 dequeue_task(rq, p, 0);
7349 __setscheduler(rq, p, &attr); 7351 __setscheduler(rq, p, &attr, false);
7350 if (queued) { 7352 if (queued) {
7351 enqueue_task(rq, p, 0); 7353 enqueue_task(rq, p, 0);
7352 resched_curr(rq); 7354 resched_curr(rq);