aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--kernel/sched.c22
1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index b2688ce54b11..6d1892192e21 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3915,8 +3915,8 @@ EXPORT_SYMBOL(sleep_on_timeout);
  */
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
+	int oldprio, on_rq, running;
 	unsigned long flags;
-	int oldprio, on_rq;
 	struct rq *rq;
 
 	BUG_ON(prio < 0 || prio > MAX_PRIO);
@@ -3926,9 +3926,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 	oldprio = p->prio;
 	on_rq = p->se.on_rq;
+	running = task_running(rq, p);
 	if (on_rq) {
 		dequeue_task(rq, p, 0);
-		if (task_running(rq, p))
+		if (running)
 			p->sched_class->put_prev_task(rq, p);
 	}
 
@@ -3940,16 +3941,17 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	p->prio = prio;
 
 	if (on_rq) {
+		if (running)
+			p->sched_class->set_curr_task(rq);
 		enqueue_task(rq, p, 0);
 		/*
 		 * Reschedule if we are currently running on this runqueue and
 		 * our priority decreased, or if we are not currently running on
 		 * this runqueue and our priority is higher than the current's
 		 */
-		if (task_running(rq, p)) {
+		if (running) {
 			if (p->prio > oldprio)
 				resched_task(rq->curr);
-			p->sched_class->set_curr_task(rq);
 		} else {
 			check_preempt_curr(rq, p);
 		}
@@ -4153,7 +4155,7 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
 int sched_setscheduler(struct task_struct *p, int policy,
 		       struct sched_param *param)
 {
-	int retval, oldprio, oldpolicy = -1, on_rq;
+	int retval, oldprio, oldpolicy = -1, on_rq, running;
 	unsigned long flags;
 	struct rq *rq;
 
@@ -4235,24 +4237,26 @@ recheck:
 	}
 	update_rq_clock(rq);
 	on_rq = p->se.on_rq;
+	running = task_running(rq, p);
 	if (on_rq) {
 		deactivate_task(rq, p, 0);
-		if (task_running(rq, p))
+		if (running)
 			p->sched_class->put_prev_task(rq, p);
 	}
 	oldprio = p->prio;
 	__setscheduler(rq, p, policy, param->sched_priority);
 	if (on_rq) {
+		if (running)
+			p->sched_class->set_curr_task(rq);
 		activate_task(rq, p, 0);
 		/*
 		 * Reschedule if we are currently running on this runqueue and
 		 * our priority decreased, or if we are not currently running on
 		 * this runqueue and our priority is higher than the current's
 		 */
-		if (task_running(rq, p)) {
+		if (running) {
 			if (p->prio > oldprio)
 				resched_task(rq->curr);
-			p->sched_class->set_curr_task(rq);
 		} else {
 			check_preempt_curr(rq, p);
 		}
@@ -6861,9 +6865,9 @@ static void sched_move_task(struct container_subsys *ss, struct container *cont,
 	set_task_cfs_rq(tsk);
 
 	if (on_rq) {
-		enqueue_task(rq, tsk, 0);
 		if (unlikely(running))
 			tsk->sched_class->set_curr_task(rq);
+		enqueue_task(rq, tsk, 0);
 	}
 
 done: