about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author: Peter Zijlstra <a.p.zijlstra@chello.nl> 2008-02-29 15:21:01 -0500
committer: Ingo Molnar <mingo@elte.hu> 2008-03-07 10:42:59 -0500
commit 810b38179e9e4d4f57b4b733767bb08f8291a965 (patch)
tree 91692de4fbd46879b29d803839b5bf7e25f30cd8
parent ce4796d1e16cf3761dc2a02b8d588667d05b3078 (diff)
sched: retain vruntime
Kei Tokunaga reported an interactivity problem when moving tasks between control groups. Tasks would retain their old vruntime when moved between groups; this could cause noticeable lags. Re-set the vruntime on group move to fit within the new tree. Reported-by: Kei Tokunaga <tokunaga.keiich@jp.fujitsu.com> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--include/linux/sched.h4
-rw-r--r--kernel/sched.c5
-rw-r--r--kernel/sched_fair.c14
3 files changed, 23 insertions, 0 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9ae4030067a9..11d8e9a74eff 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -899,6 +899,10 @@ struct sched_class {
899 int running); 899 int running);
900 void (*prio_changed) (struct rq *this_rq, struct task_struct *task, 900 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
901 int oldprio, int running); 901 int oldprio, int running);
902
903#ifdef CONFIG_FAIR_GROUP_SCHED
904 void (*moved_group) (struct task_struct *p);
905#endif
902}; 906};
903 907
904struct load_weight { 908struct load_weight {
diff --git a/kernel/sched.c b/kernel/sched.c
index dcd553cc4ee8..0b949c4e73ad 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7625,6 +7625,11 @@ void sched_move_task(struct task_struct *tsk)
7625 7625
7626 set_task_rq(tsk, task_cpu(tsk)); 7626 set_task_rq(tsk, task_cpu(tsk));
7627 7627
7628#ifdef CONFIG_FAIR_GROUP_SCHED
7629 if (tsk->sched_class->moved_group)
7630 tsk->sched_class->moved_group(tsk);
7631#endif
7632
7628 if (on_rq) { 7633 if (on_rq) {
7629 if (unlikely(running)) 7634 if (unlikely(running))
7630 tsk->sched_class->set_curr_task(rq); 7635 tsk->sched_class->set_curr_task(rq);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 3df4d46994ca..e2a530515619 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1353,6 +1353,16 @@ static void set_curr_task_fair(struct rq *rq)
1353 set_next_entity(cfs_rq_of(se), se); 1353 set_next_entity(cfs_rq_of(se), se);
1354} 1354}
1355 1355
1356#ifdef CONFIG_FAIR_GROUP_SCHED
1357static void moved_group_fair(struct task_struct *p)
1358{
1359 struct cfs_rq *cfs_rq = task_cfs_rq(p);
1360
1361 update_curr(cfs_rq);
1362 place_entity(cfs_rq, &p->se, 1);
1363}
1364#endif
1365
1356/* 1366/*
1357 * All the scheduling class methods: 1367 * All the scheduling class methods:
1358 */ 1368 */
@@ -1381,6 +1391,10 @@ static const struct sched_class fair_sched_class = {
1381 1391
1382 .prio_changed = prio_changed_fair, 1392 .prio_changed = prio_changed_fair,
1383 .switched_to = switched_to_fair, 1393 .switched_to = switched_to_fair,
1394
1395#ifdef CONFIG_FAIR_GROUP_SCHED
1396 .moved_group = moved_group_fair,
1397#endif
1384}; 1398};
1385 1399
1386#ifdef CONFIG_SCHED_DEBUG 1400#ifdef CONFIG_SCHED_DEBUG