author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-10-15 09:24:15 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-10-22 08:16:45 -0400
commit		b2b5ce022acf5e9f52f7b78c5579994fdde191d4 (patch)
tree		15e7d1742b530cdbe7b0734378e7f043b9a5faac /kernel
parent		b7dadc38797584f6203386da1947ed5edf516646 (diff)
sched, cgroup: Fixup broken cgroup movement
Dima noticed that we fail to correct the ->vruntime of sleeping tasks
when we move them between cgroups.

Reported-by: Dima Zavin <dima@android.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Mike Galbraith <efault@gmx.de>
LKML-Reference: <1287150604.29097.1513.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
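For context, here is a minimal user-space sketch of the convention the fix restores (toy values and a toy struct, not kernel code): a runnable entity's ->vruntime is kept relative to its cfs_rq's min_vruntime, while a sleeper's stays absolute on its old queue, so moving a sleeper to a queue with a different min_vruntime without rebasing makes the new queue misread its position.

/*
 * Toy illustration, not kernel code: the min_vruntime name mirrors
 * CFS, the numbers are invented.
 */
#include <stdio.h>

struct toy_cfs_rq { unsigned long long min_vruntime; };

int main(void)
{
	struct toy_cfs_rq old_rq = { .min_vruntime = 1000000 };
	struct toy_cfs_rq new_rq = { .min_vruntime = 9000000 };
	unsigned long long vruntime = old_rq.min_vruntime + 2000;	/* sleeper */

	/* broken move: queue changes, vruntime keeps the old base */
	printf("unnormalized lag: %lld ns\n",
	       (long long)(vruntime - new_rq.min_vruntime));

	/* fixed move: rebase the relative position around the switch */
	vruntime -= old_rq.min_vruntime;	/* relative: 2000 */
	vruntime += new_rq.min_vruntime;	/* absolute on the new rq */
	printf("normalized lag:   %lld ns\n",
	       (long long)(vruntime - new_rq.min_vruntime));
	return 0;
}

With these made-up numbers the unnormalized sleeper appears roughly 8 ms behind the new queue's min_vruntime and collects a correspondingly outsized slice, while the rebased one keeps its true 2000 ns of lag.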
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	8
-rw-r--r--	kernel/sched_fair.c	25
2 files changed, 23 insertions(+), 10 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 5998222f901c..3fe253e6a6e9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8498,12 +8498,12 @@ void sched_move_task(struct task_struct *tsk)
 	if (unlikely(running))
 		tsk->sched_class->put_prev_task(rq, tsk);
 
-	set_task_rq(tsk, task_cpu(tsk));
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	if (tsk->sched_class->moved_group)
-		tsk->sched_class->moved_group(tsk, on_rq);
+	if (tsk->sched_class->task_move_group)
+		tsk->sched_class->task_move_group(tsk, on_rq);
+	else
 #endif
+		set_task_rq(tsk, task_cpu(tsk));
 
 	if (unlikely(running))
 		tsk->sched_class->set_curr_task(rq);
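One structural note on the hunk above: the `else` placed directly before the `#endif` is deliberate. Without CONFIG_FAIR_GROUP_SCHED the preprocessor leaves only the plain set_task_rq() call; with it, a class providing task_move_group takes over that call so it can renormalize around the queue switch, and set_task_rq() stays the fallback for classes that leave the hook NULL. A stand-alone sketch of that hook-with-fallback shape, using invented toy names rather than the kernel's types:

#include <stdio.h>

struct task;	/* opaque for the sketch */

struct toy_class {
	/* optional hook; NULL means "use the default move" */
	void (*task_move_group)(struct task *t, int on_rq);
};

static void set_task_rq_default(struct task *t)
{
	(void)t;
	printf("default: set_task_rq()\n");
}

static void task_move_group_fair_sketch(struct task *t, int on_rq)
{
	(void)t;
	printf("fair hook, on_rq=%d (does its own set_task_rq)\n", on_rq);
}

static void move_task(const struct toy_class *cls, struct task *t, int on_rq)
{
	if (cls->task_move_group)
		cls->task_move_group(t, on_rq);	/* hook owns the rq switch */
	else
		set_task_rq_default(t);		/* everyone else */
}

int main(void)
{
	const struct toy_class fair = { .task_move_group = task_move_group_fair_sketch };
	const struct toy_class rt = { 0 };	/* no hook */

	move_task(&fair, NULL, 1);
	move_task(&rt, NULL, 0);
	return 0;
}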
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 74cccfae87a8..3acc2a487c18 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -3866,13 +3866,26 @@ static void set_curr_task_fair(struct rq *rq)
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static void moved_group_fair(struct task_struct *p, int on_rq)
+static void task_move_group_fair(struct task_struct *p, int on_rq)
 {
-	struct cfs_rq *cfs_rq = task_cfs_rq(p);
-
-	update_curr(cfs_rq);
+	/*
+	 * If the task was not on the rq at the time of this cgroup movement
+	 * it must have been asleep, sleeping tasks keep their ->vruntime
+	 * absolute on their old rq until wakeup (needed for the fair sleeper
+	 * bonus in place_entity()).
+	 *
+	 * If it was on the rq, we've just 'preempted' it, which does convert
+	 * ->vruntime to a relative base.
+	 *
+	 * Make sure both cases convert their relative position when migrating
+	 * to another cgroup's rq. This does somewhat interfere with the
+	 * fair sleeper stuff for the first placement, but who cares.
+	 */
+	if (!on_rq)
+		p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
+	set_task_rq(p, task_cpu(p));
 	if (!on_rq)
-		place_entity(cfs_rq, &p->se, 1);
+		p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
 }
 #endif
 
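To follow the new task_move_group_fair() with concrete numbers, here is a toy user-space trace of both branches (invented values; in the kernel, vruntime counts nanoseconds). Only the sleeping (!on_rq) case carries an absolute ->vruntime across the move, so only it is rebased around the queue switch; a queued task was already made relative at dequeue and will be re-based at enqueue:

#include <stdio.h>

/* mirrors the branch structure of task_move_group_fair(); toy values */
static unsigned long long move_group(unsigned long long vruntime, int on_rq,
				     unsigned long long old_min,
				     unsigned long long new_min)
{
	if (!on_rq)
		vruntime -= old_min;	/* absolute -> relative */
	/* set_task_rq() switches the cfs_rq at this point in the kernel */
	if (!on_rq)
		vruntime += new_min;	/* relative -> absolute on the new rq */
	return vruntime;
}

int main(void)
{
	/* sleeper: absolute 1002000 on the old rq -> absolute 5002000 */
	printf("sleeper: %llu\n", move_group(1002000, 0, 1000000, 5000000));
	/* queued task: already relative (2000), passes through unchanged */
	printf("queued:  %llu\n", move_group(2000, 1, 1000000, 5000000));
	return 0;
}

Keeping the subtraction and the addition on opposite sides of the queue switch is what lets the same 2000 ns of relative position survive the move.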
@@ -3924,7 +3937,7 @@ static const struct sched_class fair_sched_class = {
 	.get_rr_interval	= get_rr_interval_fair,
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	.moved_group		= moved_group_fair,
+	.task_move_group	= task_move_group_fair,
 #endif
 };
 
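Finally, the renamed member lives in the sched_class ops table initialized above. As a quick illustration of that designated-initializer pattern (a toy struct, not the kernel's sched_class), members that are not named stay NULL, which is exactly what the `if (tsk->sched_class->task_move_group)` test in sched.c relies on:

#include <stdio.h>

struct task;

struct toy_sched_class {
	void (*set_curr_task)(void);
	void (*task_move_group)(struct task *t, int on_rq);	/* optional */
};

static void set_curr_task_fair_sketch(void)
{
	printf("set_curr_task_fair\n");
}

static void task_move_group_fair_sketch(struct task *t, int on_rq)
{
	(void)t;
	printf("task_move_group_fair, on_rq=%d\n", on_rq);
}

static const struct toy_sched_class fair = {
	.set_curr_task		= set_curr_task_fair_sketch,
	.task_move_group	= task_move_group_fair_sketch,
};

static const struct toy_sched_class rt = {
	.set_curr_task		= set_curr_task_fair_sketch,
	/* .task_move_group left NULL: no cgroup-move hook */
};

int main(void)
{
	if (fair.task_move_group)
		fair.task_move_group(NULL, 1);
	if (rt.task_move_group)
		rt.task_move_group(NULL, 1);	/* skipped */
	return 0;
}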