author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2011-07-13 07:09:25 -0400
committer  Ingo Molnar <mingo@elte.hu>                2011-07-21 12:01:46 -0400
commit     9763b67fb9f3050c6da739105888327587c30c4d
tree       822e6a5243c3d872f86d9c9b980896bc4cd8a491 /kernel/sched.c
parent     9598c82dcacadc3b9daa8170613fd054c6124d30
sched, cgroup: Optimize load_balance_fair()
Use for_each_leaf_cfs_rq() instead of list_for_each_entry_rcu(). This
way load_balance_fair() iterates only those task_groups that actually
have tasks on busiest, and iterates them bottom-up, trying to move the
light groups before the heavier ones.
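
As a rough sketch of the shape of this change on the sched_fair.c side
(paraphrased, with the task-pulling logic elided; not the verbatim hunk):

	/* Old: walk every task_group in the system, whether or not it
	 * has anything queued on the busiest rq. */
	list_for_each_entry_rcu(tg, &task_groups, list) {
		struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[cpu_of(busiest)];
		/* ... try to pull tasks from busiest_cfs_rq ... */
	}

	/* New: walk only the cfs_rqs actually enqueued on busiest, via
	 * its leaf list, which yields them bottom-up. */
	for_each_leaf_cfs_rq(busiest, busiest_cfs_rq) {
		/* ... try to pull tasks from busiest_cfs_rq ... */
	}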
No idea if it will actually work out to be beneficial in practice;
does anybody have a cgroup workload that might show a difference one
way or the other?
[ Also move update_h_load to sched_fair.c, losing the #ifdef-ery ]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: Paul Turner <pjt@google.com>
Link: http://lkml.kernel.org/r/1310557009.2586.28.camel@twins
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 32 --------------------------------
 1 file changed, 0 insertions(+), 32 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index b0e7ad796d3b..474f341d6f91 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1568,38 +1568,6 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 	return rq->avg_load_per_task;
 }
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-
-/*
- * Compute the cpu's hierarchical load factor for each task group.
- * This needs to be done in a top-down fashion because the load of a child
- * group is a fraction of its parents load.
- */
-static int tg_load_down(struct task_group *tg, void *data)
-{
-	unsigned long load;
-	long cpu = (long)data;
-
-	if (!tg->parent) {
-		load = cpu_rq(cpu)->load.weight;
-	} else {
-		load = tg->parent->cfs_rq[cpu]->h_load;
-		load *= tg->se[cpu]->load.weight;
-		load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
-	}
-
-	tg->cfs_rq[cpu]->h_load = load;
-
-	return 0;
-}
-
-static void update_h_load(long cpu)
-{
-	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
-}
-
-#endif
-
 #ifdef CONFIG_PREEMPT
 
 static void double_rq_lock(struct rq *rq1, struct rq *rq2);
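
The block removed above is moved rather than dropped: tg_load_down()
computes each group's hierarchical load factor top-down, scaling the
parent's h_load by the group's share of the parent's queue weight. A
minimal standalone illustration of that arithmetic with made-up weights;
the variable names mirror the kernel fields, but this is not kernel code:

#include <stdio.h>

int main(void)
{
	/* Hypothetical example weights, not taken from the kernel: */
	unsigned long rq_load = 3072;       /* cpu_rq(cpu)->load.weight  */
	unsigned long se_weight = 1024;     /* tg->se[cpu]->load.weight  */
	unsigned long parent_weight = 2048; /* parent cfs_rq load.weight */

	/* Root group: h_load is simply the runqueue load. */
	unsigned long parent_h_load = rq_load;

	/* Child group: the parent's h_load scaled by this group's share
	 * of the parent's queue weight; +1 avoids dividing by zero. */
	unsigned long h_load = parent_h_load * se_weight / (parent_weight + 1);

	printf("h_load = %lu\n", h_load); /* prints: h_load = 1535 */
	return 0;
}

Because each child's h_load depends on its parent's, the computation has
to run top-down, which is why tg_load_down() is driven by walk_tg_tree().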