Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 34
1 files changed, 26 insertions, 8 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 72c936d3e332..ee7ac71b12f8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3916,7 +3916,7 @@ EXPORT_SYMBOL(sleep_on_timeout);
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
         unsigned long flags;
-        int oldprio, on_rq;
+        int oldprio, on_rq, running;
         struct rq *rq;
 
         BUG_ON(prio < 0 || prio > MAX_PRIO);
@@ -3926,8 +3926,12 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
         oldprio = p->prio;
         on_rq = p->se.on_rq;
-        if (on_rq)
+        running = task_running(rq, p);
+        if (on_rq) {
                 dequeue_task(rq, p, 0);
+                if (running)
+                        p->sched_class->put_prev_task(rq, p);
+        }
 
         if (rt_prio(prio))
                 p->sched_class = &rt_sched_class;
@@ -3937,13 +3941,15 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
         p->prio = prio;
 
         if (on_rq) {
+                if (running)
+                        p->sched_class->set_curr_task(rq);
                 enqueue_task(rq, p, 0);
                 /*
                  * Reschedule if we are currently running on this runqueue and
                  * our priority decreased, or if we are not currently running on
                  * this runqueue and our priority is higher than the current's
                  */
-                if (task_running(rq, p)) {
+                if (running) {
                         if (p->prio > oldprio)
                                 resched_task(rq->curr);
                 } else {
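The hunks above repeat one pattern: when the task whose priority (and possibly scheduling class) is being changed is the one currently running, the old class is told to put_prev_task() it right after the dequeue, and the new class is told to set_curr_task() it right before the enqueue, so per-class bookkeeping about "the running task" never goes stale. The fragment below is not part of the patch: it is a self-contained user-space model of that bracket, with every name (struct sched_class_model, setprio_running, ...) made up for illustration; only the ordering mirrors rt_mutex_setprio() above.

#include <stdio.h>

struct task;

struct sched_class_model {
        const char *name;
        struct task *curr;      /* class-private notion of "my running task" */
};

struct task {
        struct sched_class_model *class;
        int prio;
};

/* mirrors p->sched_class->put_prev_task(rq, p): the old class lets go of p */
static void put_prev_task(struct task *p)
{
        p->class->curr = NULL;
}

/* mirrors p->sched_class->set_curr_task(rq): the new class adopts p as curr */
static void set_curr_task(struct task *p)
{
        p->class->curr = p;
}

static struct sched_class_model rt_class   = { "rt",   NULL };
static struct sched_class_model fair_class = { "fair", NULL };

/* boost a *running* task to an RT priority, using the patched ordering */
static void setprio_running(struct task *p, int prio)
{
        put_prev_task(p);       /* after dequeue_task() in the real code   */
        p->class = &rt_class;   /* the actual class/priority change        */
        p->prio = prio;
        set_curr_task(p);       /* before enqueue_task() in the real code  */
}

int main(void)
{
        struct task t = { &fair_class, 120 };

        fair_class.curr = &t;   /* t is "running" in the fair class */
        setprio_running(&t, 10);
        printf("class=%s prio=%d curr-is-t=%d\n",
               t.class->name, t.prio, t.class->curr == &t);
        return 0;
}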
@@ -4149,7 +4155,7 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
 int sched_setscheduler(struct task_struct *p, int policy,
                        struct sched_param *param)
 {
-        int retval, oldprio, oldpolicy = -1, on_rq;
+        int retval, oldprio, oldpolicy = -1, on_rq, running;
         unsigned long flags;
         struct rq *rq;
 
@@ -4231,20 +4237,26 @@ recheck:
         }
         update_rq_clock(rq);
         on_rq = p->se.on_rq;
-        if (on_rq)
+        running = task_running(rq, p);
+        if (on_rq) {
                 deactivate_task(rq, p, 0);
+                if (running)
+                        p->sched_class->put_prev_task(rq, p);
+        }
 
         oldprio = p->prio;
         __setscheduler(rq, p, policy, param->sched_priority);
 
         if (on_rq) {
+                if (running)
+                        p->sched_class->set_curr_task(rq);
                 activate_task(rq, p, 0);
                 /*
                  * Reschedule if we are currently running on this runqueue and
                  * our priority decreased, or if we are not currently running on
                  * this runqueue and our priority is higher than the current's
                  */
-                if (task_running(rq, p)) {
+                if (running) {
                         if (p->prio > oldprio)
                                 resched_task(rq->curr);
                 } else {
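The same bracket is applied to the kernel-side sched_setscheduler(). As a usage note rather than part of the patch: this path is reached from the sched_setscheduler(2) system call, and a caller that changes its own policy is by definition a task for which task_running() is true, so it should exercise the new running branches. A minimal caller, assuming root or CAP_SYS_NICE for SCHED_FIFO:

#include <sched.h>
#include <stdio.h>

int main(void)
{
        struct sched_param sp = { .sched_priority = 10 };

        /* pid 0 means the calling task, i.e. one that is currently running */
        if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1) {
                perror("sched_setscheduler");
                return 1;
        }
        printf("now SCHED_FIFO, static priority %d\n", sp.sched_priority);
        return 0;
}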
@@ -6845,13 +6857,19 @@ static void sched_move_task(struct container_subsys *ss, struct container *cont,
         running = task_running(rq, tsk);
         on_rq = tsk->se.on_rq;
 
-        if (on_rq)
+        if (on_rq) {
                 dequeue_task(rq, tsk, 0);
+                if (unlikely(running))
+                        tsk->sched_class->put_prev_task(rq, tsk);
+        }
 
         set_task_cfs_rq(tsk);
 
-        if (on_rq)
+        if (on_rq) {
+                if (unlikely(running))
+                        tsk->sched_class->set_curr_task(rq);
                 enqueue_task(rq, tsk, 0);
+        }
 
 done:
         task_rq_unlock(rq, &flags);
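Judging by its container_subsys arguments, sched_move_task() is the callback the cpu task-group (container/cgroup) controller runs when a task is attached to a different group, typically triggered by writing a pid into the target group's tasks file. The snippet below is only an illustration of how that path might be reached from user space; the /dev/cpuctl mount point and the group name grp are assumptions, not anything defined by this patch.

#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* hypothetical mount point and group; adjust to the actual hierarchy */
        FILE *f = fopen("/dev/cpuctl/grp/tasks", "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        /* writing our own pid moves a task that is currently running,
         * i.e. the unlikely(running) case in the hunk above */
        fprintf(f, "%d\n", (int)getpid());
        fclose(f);
        return 0;
}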