author    Linus Torvalds <torvalds@linux-foundation.org>  2015-05-15 15:42:33 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-05-15 15:42:33 -0400
commit    14db1e8dc08ca1b1ad8aa20e6ebfe0b395bbe5bf (patch)
tree      04bc01379de22f1346e06cb6eb0dd46d44c51492 /kernel
parent    ef4a293a4417fc5f50b0b83b62deb9ead8423256 (diff)
parent    533445c6e53368569e50ab3fb712230c03d523f3 (diff)
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Two fixes: a suspend/resume related regression fix, and an RT
  priority boosting fix"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/core: Fix regression in cpuset_cpu_inactive() for suspend
  sched: Handle priority boosted tasks proper in setscheduler()
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/locking/rtmutex.c  12
-rw-r--r--  kernel/sched/core.c       54
2 files changed, 33 insertions(+), 33 deletions(-)
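The priority-boosting fix hinges on one semantic change in the rtmutex hunk below: the boolean rt_mutex_check_prio() becomes rt_mutex_get_effective_prio(), which returns the priority that will actually be in effect after sched_setscheduler(). A minimal standalone sketch of that semantics, not the kernel function itself (kernel convention: a lower numeric prio value means a higher priority):

#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified model of the new rt_mutex_get_effective_prio() behaviour.
 * 'top_waiter_prio' stands in for task_top_pi_waiter(task)->task->prio.
 */
static int effective_prio(bool has_pi_waiters, int top_waiter_prio, int newprio)
{
        if (!has_pi_waiters)
                return newprio;
        if (top_waiter_prio <= newprio)
                return top_waiter_prio;         /* the boost still dominates */
        return newprio;                         /* the requested priority dominates */
}

int main(void)
{
        /* Boosted to prio 10, caller asks for 20: the boost is kept. */
        printf("%d\n", effective_prio(true, 10, 20));   /* 10 */
        /* Boosted to prio 10, caller asks for 5: the request wins. */
        printf("%d\n", effective_prio(true, 10, 5));    /* 5 */
        /* No PI waiters: the requested priority is used directly. */
        printf("%d\n", effective_prio(false, 0, 30));   /* 30 */
        return 0;
}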
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index b73279367087..b025295f4966 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -265,15 +265,17 @@ struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
 }
 
 /*
- * Called by sched_setscheduler() to check whether the priority change
- * is overruled by a possible priority boosting.
+ * Called by sched_setscheduler() to get the priority which will be
+ * effective after the change.
  */
-int rt_mutex_check_prio(struct task_struct *task, int newprio)
+int rt_mutex_get_effective_prio(struct task_struct *task, int newprio)
 {
         if (!task_has_pi_waiters(task))
-                return 0;
+                return newprio;
 
-        return task_top_pi_waiter(task)->task->prio <= newprio;
+        if (task_top_pi_waiter(task)->task->prio <= newprio)
+                return task_top_pi_waiter(task)->task->prio;
+        return newprio;
 }
 
 /*
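The helper above and the __sched_setscheduler() changes below are exercised when a task holding a priority-inheritance mutex is boosted by a higher-priority waiter while someone changes its scheduling parameters. A rough userspace sketch of such a scenario using POSIX PI mutexes (illustrative only: priorities and timing are arbitrary, error checking is omitted, and SCHED_FIFO needs root or CAP_SYS_NICE):

/* Build: gcc -O2 -pthread pi_boost_demo.c -o pi_boost_demo */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t pi_lock;

static void *holder(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&pi_lock);   /* low-prio holder takes the PI mutex */
        sleep(2);                       /* stays boosted once the waiter blocks */
        pthread_mutex_unlock(&pi_lock);
        return NULL;
}

static void *waiter(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&pi_lock);   /* SCHED_FIFO waiter -> boosts the holder */
        pthread_mutex_unlock(&pi_lock);
        return NULL;
}

int main(void)
{
        pthread_mutexattr_t ma;
        pthread_attr_t wa;
        struct sched_param sp;
        pthread_t h, w;

        pthread_mutexattr_init(&ma);
        pthread_mutexattr_setprotocol(&ma, PTHREAD_PRIO_INHERIT);
        pthread_mutex_init(&pi_lock, &ma);

        pthread_create(&h, NULL, holder, NULL);
        sleep(1);                               /* let the holder take the lock */

        pthread_attr_init(&wa);
        pthread_attr_setinheritsched(&wa, PTHREAD_EXPLICIT_SCHED);
        pthread_attr_setschedpolicy(&wa, SCHED_FIFO);
        sp.sched_priority = 80;
        pthread_attr_setschedparam(&wa, &sp);
        pthread_create(&w, &wa, waiter, NULL);  /* waiter blocks, holder is boosted */
        usleep(200000);

        /* Change the boosted holder's parameters while it is boosted:
         * this ends up in the __sched_setscheduler() path patched below. */
        sp.sched_priority = 10;
        pthread_setschedparam(h, SCHED_FIFO, &sp);

        pthread_join(w, NULL);
        pthread_join(h, NULL);
        return 0;
}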
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fe22f7510bce..57bd333bc4ab 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3300,15 +3300,18 @@ static void __setscheduler_params(struct task_struct *p,
 
 /* Actually do priority change: must hold pi & rq lock. */
 static void __setscheduler(struct rq *rq, struct task_struct *p,
-                           const struct sched_attr *attr)
+                           const struct sched_attr *attr, bool keep_boost)
 {
         __setscheduler_params(p, attr);
 
         /*
-         * If we get here, there was no pi waiters boosting the
-         * task. It is safe to use the normal prio.
+         * Keep a potential priority boosting if called from
+         * sched_setscheduler().
          */
-        p->prio = normal_prio(p);
+        if (keep_boost)
+                p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
+        else
+                p->prio = normal_prio(p);
 
         if (dl_prio(p->prio))
                 p->sched_class = &dl_sched_class;
@@ -3408,7 +3411,7 @@ static int __sched_setscheduler(struct task_struct *p,
         int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
                       MAX_RT_PRIO - 1 - attr->sched_priority;
         int retval, oldprio, oldpolicy = -1, queued, running;
-        int policy = attr->sched_policy;
+        int new_effective_prio, policy = attr->sched_policy;
         unsigned long flags;
         const struct sched_class *prev_class;
         struct rq *rq;
@@ -3590,15 +3593,14 @@ change:
         oldprio = p->prio;
 
         /*
-         * Special case for priority boosted tasks.
-         *
-         * If the new priority is lower or equal (user space view)
-         * than the current (boosted) priority, we just store the new
+         * Take priority boosted tasks into account. If the new
+         * effective priority is unchanged, we just store the new
          * normal parameters and do not touch the scheduler class and
          * the runqueue. This will be done when the task deboost
          * itself.
          */
-        if (rt_mutex_check_prio(p, newprio)) {
+        new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
+        if (new_effective_prio == oldprio) {
                 __setscheduler_params(p, attr);
                 task_rq_unlock(rq, p, &flags);
                 return 0;
@@ -3612,7 +3614,7 @@ change:
         put_prev_task(rq, p);
 
         prev_class = p->sched_class;
-        __setscheduler(rq, p, attr);
+        __setscheduler(rq, p, attr, true);
 
         if (running)
                 p->sched_class->set_curr_task(rq);
@@ -6997,27 +6999,23 @@ static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
         unsigned long flags;
         long cpu = (long)hcpu;
         struct dl_bw *dl_b;
+        bool overflow;
+        int cpus;
 
-        switch (action & ~CPU_TASKS_FROZEN) {
+        switch (action) {
         case CPU_DOWN_PREPARE:
-                /* explicitly allow suspend */
-                if (!(action & CPU_TASKS_FROZEN)) {
-                        bool overflow;
-                        int cpus;
-
-                        rcu_read_lock_sched();
-                        dl_b = dl_bw_of(cpu);
+                rcu_read_lock_sched();
+                dl_b = dl_bw_of(cpu);
 
-                        raw_spin_lock_irqsave(&dl_b->lock, flags);
-                        cpus = dl_bw_cpus(cpu);
-                        overflow = __dl_overflow(dl_b, cpus, 0, 0);
-                        raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+                raw_spin_lock_irqsave(&dl_b->lock, flags);
+                cpus = dl_bw_cpus(cpu);
+                overflow = __dl_overflow(dl_b, cpus, 0, 0);
+                raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 
-                        rcu_read_unlock_sched();
+                rcu_read_unlock_sched();
 
-                        if (overflow)
-                                return notifier_from_errno(-EBUSY);
-                }
+                if (overflow)
+                        return notifier_from_errno(-EBUSY);
                 cpuset_update_active_cpus(false);
                 break;
         case CPU_DOWN_PREPARE_FROZEN:
@@ -7346,7 +7344,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
         queued = task_on_rq_queued(p);
         if (queued)
                 dequeue_task(rq, p, 0);
-        __setscheduler(rq, p, &attr);
+        __setscheduler(rq, p, &attr, false);
         if (queued) {
                 enqueue_task(rq, p, 0);
                 resched_curr(rq);
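The cpuset_cpu_inactive() change above works by dispatching on the raw notifier action again: CPU_DOWN_PREPARE_FROZEN is just CPU_DOWN_PREPARE with the CPU_TASKS_FROZEN bit set, so masking that bit off sent the suspend notification through the regular hot-unplug branch as well, and the fix separates the two cases again while dropping the inner CPU_TASKS_FROZEN guard. A small standalone model of the dispatch difference (the constant values mirror include/linux/cpu.h of that era and are reproduced here only for illustration):

#include <stdio.h>

/* Notifier action values as in include/linux/cpu.h around v4.1 (illustrative). */
#define CPU_DOWN_PREPARE        0x0005
#define CPU_TASKS_FROZEN        0x0010
#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)

/* Pre-fix dispatch: the frozen (suspend) bit is masked off, so suspend
 * lands in the ordinary hot-unplug branch. */
static const char *old_dispatch(unsigned long action)
{
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DOWN_PREPARE:
                return "CPU_DOWN_PREPARE branch (regular hot-unplug handling)";
        default:
                return "other";
        }
}

/* Post-fix dispatch: suspend keeps its own CPU_DOWN_PREPARE_FROZEN branch. */
static const char *new_dispatch(unsigned long action)
{
        switch (action) {
        case CPU_DOWN_PREPARE:
                return "CPU_DOWN_PREPARE branch (DL bandwidth check + cpuset update)";
        case CPU_DOWN_PREPARE_FROZEN:
                return "CPU_DOWN_PREPARE_FROZEN branch (suspend-specific handling)";
        default:
                return "other";
        }
}

int main(void)
{
        printf("suspend, old: %s\n", old_dispatch(CPU_DOWN_PREPARE_FROZEN));
        printf("suspend, new: %s\n", new_dispatch(CPU_DOWN_PREPARE_FROZEN));
        printf("hotplug, new: %s\n", new_dispatch(CPU_DOWN_PREPARE));
        return 0;
}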