author		Paul Turner <pjt@google.com>	2010-11-15 18:47:10 -0500
committer	Ingo Molnar <mingo@elte.hu>	2010-11-18 07:27:50 -0500
commit		9437178f623a19af5951808d880a8599f66ac150 (patch)
tree		a2f315092f0b7b31cf023a82175c6cf68fd5e04c /kernel/sched.c
parent		d6b5591829bd348a5fbe1c428d28dea00621cdba (diff)
sched: Update tg->shares after cpu.shares write
Formerly sched_group_set_shares would force a rebalance by overflowing
domain share sums.  Now that per-cpu averages are maintained we can set
the true value by issuing an update_cfs_shares() following a tg->shares
update.

Also initialize tg se->load to 0 for consistency since we'll now set
correct weights on enqueue.

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101115234938.465521344@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
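For readers skimming the diff below: update_cfs_shares() recomputes a group
entity's weight from its cfs_rq's load contribution, and
for_each_sched_entity() walks an entity up through its parents to the root.
A minimal sketch of the new update path, assuming the 2.6.37-era
CONFIG_FAIR_GROUP_SCHED definitions; tg_reweigh_cpu is a hypothetical name
used for illustration only, not a function in the patch:

	/* Actual kernel macro: walk a sched_entity up the group hierarchy. */
	#define for_each_sched_entity(se) \
			for (; se; se = se->parent)

	/* Hypothetical helper showing what the patched loop does per cpu. */
	static void tg_reweigh_cpu(struct task_group *tg, int cpu)
	{
		struct sched_entity *se = tg->se[cpu];

		/* Re-weigh this group and every ancestor group on this cpu. */
		for_each_sched_entity(se)
			update_cfs_shares(group_cfs_rq(se), 0);
	}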
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	42
1 file changed, 11 insertions(+), 31 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index e914a716e1d4..550cf3a02377 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7646,7 +7646,7 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 	se->cfs_rq = parent->my_q;
 
 	se->my_q = cfs_rq;
-	update_load_set(&se->load, tg->shares);
+	update_load_set(&se->load, 0);
 	se->parent = parent;
 }
 #endif
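Note on the hunk above: update_load_set() overwrites an entity's weight and
invalidates its cached inverse weight, so initializing group entities to 0
instead of tg->shares is safe; the real weight is set by update_cfs_shares()
at enqueue. From memory of the same patch series, the helper is
approximately:

	static inline void update_load_set(struct load_weight *lw, unsigned long w)
	{
		lw->weight = w;
		lw->inv_weight = 0;	/* cached inverse; recomputed on demand */
	}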
@@ -8274,37 +8274,12 @@ void sched_move_task(struct task_struct *tsk)
 #endif /* CONFIG_CGROUP_SCHED */
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static void __set_se_shares(struct sched_entity *se, unsigned long shares)
-{
-	struct cfs_rq *cfs_rq = se->cfs_rq;
-	int on_rq;
-
-	on_rq = se->on_rq;
-	if (on_rq)
-		dequeue_entity(cfs_rq, se, 0);
-
-	update_load_set(&se->load, shares);
-
-	if (on_rq)
-		enqueue_entity(cfs_rq, se, 0);
-}
-
-static void set_se_shares(struct sched_entity *se, unsigned long shares)
-{
-	struct cfs_rq *cfs_rq = se->cfs_rq;
-	struct rq *rq = cfs_rq->rq;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&rq->lock, flags);
-	__set_se_shares(se, shares);
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
-}
-
 static DEFINE_MUTEX(shares_mutex);
 
 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 {
 	int i;
+	unsigned long flags;
 
 	/*
 	 * We can't change the weight of the root cgroup.
@@ -8323,10 +8298,15 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 
 	tg->shares = shares;
 	for_each_possible_cpu(i) {
-		/*
-		 * force a rebalance
-		 */
-		set_se_shares(tg->se[i], shares);
+		struct rq *rq = cpu_rq(i);
+		struct sched_entity *se;
+
+		se = tg->se[i];
+		/* Propagate contribution to hierarchy */
+		raw_spin_lock_irqsave(&rq->lock, flags);
+		for_each_sched_entity(se)
+			update_cfs_shares(group_cfs_rq(se), 0);
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
 	}
 
 done:
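For context, sched_group_set_shares() is reached when userspace writes a
group's cpu.shares file; the cgroup write handler in this file was,
approximately (quoted from memory, details may differ):

	static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
					u64 shareval)
	{
		return sched_group_set_shares(cgroup_tg(cgrp),
					      (unsigned long)shareval);
	}

With this patch, such a write takes each cpu's rq->lock and re-weighs the
affected hierarchy immediately via update_cfs_shares(), instead of
overflowing domain share sums to force a rebalance.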