about summary refs log tree commit diff stats
path: root/kernel/sched.c
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2009-06-03 09:41:20 -0400
committerIngo Molnar <mingo@elte.hu>2009-08-02 08:26:07 -0400
commite709715915d69b6a929d77e7652c9c3fea61c317 (patch)
tree69ed9845b4b4c412cc17d59469e46ebb41de0c31 /kernel/sched.c
parenta5004278f0525dcb9aa43703ef77bf371ea837cd (diff)
sched: Optimize unused cgroup configuration
When cgroup group scheduling is built in, skip some code paths if we don't have any (but the root) cgroups configured. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--kernel/sched.c16
1 file changed, 14 insertions, 2 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 26976cd8be0f..ca1f76ba7773 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1629,8 +1629,14 @@ static int tg_load_down(struct task_group *tg, void *data)
 
 static void update_shares(struct sched_domain *sd)
 {
-	u64 now = cpu_clock(raw_smp_processor_id());
-	s64 elapsed = now - sd->last_update;
+	s64 elapsed;
+	u64 now;
+
+	if (root_task_group_empty())
+		return;
+
+	now = cpu_clock(raw_smp_processor_id());
+	elapsed = now - sd->last_update;
 
 	if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
 		sd->last_update = now;
@@ -1640,6 +1646,9 @@ static void update_shares(struct sched_domain *sd)
 
 static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 {
+	if (root_task_group_empty())
+		return;
+
 	spin_unlock(&rq->lock);
 	update_shares(sd);
 	spin_lock(&rq->lock);
@@ -1647,6 +1656,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 
 static void update_h_load(long cpu)
 {
+	if (root_task_group_empty())
+		return;
+
 	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
 }
 