author    | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2008-06-27 07:41:23 -0400
committer | Ingo Molnar <mingo@elte.hu>             | 2008-06-27 08:31:36 -0400
commit    | c8cba857b4997d5b00451d01474638f6a153f713 (patch)
tree      | a784dce37d72ae20a0efb81b8e498b504a207650 /kernel/sched_fair.c
parent    | a25b5aca8740ea99d5e18dfc71235a52b685dcf7 (diff)
sched: simplify the group load balancer
While thinking about the previous patch, I realized that using per-domain
aggregate load values in load_balance_fair() is wrong; we should use the
load value for that CPU instead.
Since we no longer need per-domain hierarchical load values, we don't have to
store per-domain aggregate shares either, which greatly simplifies all the math.
It basically splits into two separate computations (see the sketches below):
- a per-domain update of the shares
- a per-CPU update of the hierarchical load
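
The second of these, the per-CPU hierarchical load, can be illustrated with a
small stand-alone model. This is only a sketch under invented names (toy_tg,
queue_weight, shares are not kernel identifiers); it just shows the top-down
idea that a group's h_load is its parent's h_load scaled by the group's
fraction of the parent's queue weight:

	/*
	 * Toy model of the per-CPU hierarchical load update.
	 * Not kernel code: names are invented for illustration only.
	 */
	#include <stdio.h>

	struct toy_tg {
		struct toy_tg *parent;
		unsigned long shares;		/* this group's weight on the parent's queue */
		unsigned long queue_weight;	/* total weight queued inside this group on this CPU */
		unsigned long h_load;		/* computed: contribution to the CPU's root-level load */
	};

	static void compute_h_load(struct toy_tg *tg, unsigned long cpu_load)
	{
		if (!tg->parent)
			tg->h_load = cpu_load;	/* the root group carries the full CPU load */
		else				/* scale the parent's h_load by our slice of its queue */
			tg->h_load = tg->parent->h_load * tg->shares /
				     (tg->parent->queue_weight + 1);	/* +1 avoids division by zero */
	}

	int main(void)
	{
		struct toy_tg root  = { .parent = NULL,  .queue_weight = 3072 };
		struct toy_tg child = { .parent = &root, .shares = 1024, .queue_weight = 2048 };

		compute_h_load(&root, 3072);	/* must run top-down: parents before children */
		compute_h_load(&child, 0);

		/* prints 1023: the child's 1024 shares out of the root's 3073 total */
		printf("child h_load = %lu\n", child.h_load);
		return 0;
	}

Because each step only needs the parent's already-computed value, a single
top-down walk per CPU is enough; that is the role update_h_load(busiest_cpu)
plays in the hunk below.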
Also get rid of the move_group_shares() machinery; simply re-compute the
shares after a successful load balance.
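
The first computation, the per-domain share update that replaces
move_group_shares(), can likewise be sketched as a plain proportional split.
Again a toy model with invented names (recompute_shares, rq_weight), not the
kernel code:

	#include <stdio.h>

	/*
	 * Toy sketch of re-computing a group's per-CPU shares after a balance:
	 * instead of shuffling shares between CPUs when tasks move, each CPU's
	 * slice of the group weight is re-derived from the current queue weights.
	 * Not kernel code; names and numbers are invented.
	 */
	static void recompute_shares(unsigned long tg_weight,
				     const unsigned long *rq_weight,
				     unsigned long *shares, int nr_cpus)
	{
		unsigned long total = 0;
		int i;

		for (i = 0; i < nr_cpus; i++)
			total += rq_weight[i];

		for (i = 0; i < nr_cpus; i++)	/* proportional split; +1 avoids division by zero */
			shares[i] = tg_weight * rq_weight[i] / (total + 1);
	}

	int main(void)
	{
		unsigned long rq_weight[2] = { 3072, 1024 };	/* per-CPU queue weights after a pull */
		unsigned long shares[2];

		recompute_shares(1024, rq_weight, shares, 2);
		/* roughly a 3:1 split of the group's weight across the two CPUs */
		printf("shares: cpu0=%lu cpu1=%lu\n", shares[0], shares[1]);
		return 0;
	}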
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r-- | kernel/sched_fair.c | 15
1 file changed, 8 insertions, 7 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 03b9fbd9d648..7b8d664d6f22 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1421,17 +1421,20 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	struct task_group *tg;
 
 	rcu_read_lock();
+	update_h_load(busiest_cpu);
+
 	list_for_each_entry(tg, &task_groups, list) {
+		struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
 		long rem_load, moved_load;
 
 		/*
 		 * empty group
 		 */
-		if (!tg->cfs_rq[busiest_cpu]->task_weight)
+		if (!busiest_cfs_rq->task_weight)
 			continue;
 
-		rem_load = rem_load_move * aggregate(tg, this_cpu)->rq_weight;
-		rem_load /= aggregate(tg, this_cpu)->load + 1;
+		rem_load = rem_load_move * busiest_cfs_rq->load.weight;
+		rem_load /= busiest_cfs_rq->h_load + 1;
 
 		moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
 				rem_load, sd, idle, all_pinned, this_best_prio,
@@ -1440,10 +1443,8 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		if (!moved_load)
 			continue;
 
-		move_group_shares(tg, this_cpu, sd, busiest_cpu, this_cpu);
-
-		moved_load *= aggregate(tg, this_cpu)->load;
-		moved_load /= aggregate(tg, this_cpu)->rq_weight + 1;
+		moved_load *= busiest_cfs_rq->h_load;
+		moved_load /= busiest_cfs_rq->load.weight + 1;
 
 		rem_load_move -= moved_load;
 		if (rem_load_move < 0)
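
For reference, a hedged worked example of the unit conversion introduced in
the hunks above; the values are invented, only the arithmetic mirrors the
patch:

	#include <stdio.h>

	/*
	 * Worked example of the scaling done in load_balance_fair() after this
	 * patch. The numbers are made up:
	 *   h_load     - the group's contribution to the busiest CPU's root-level
	 *                load (filled in by update_h_load())
	 *   cfs_weight - busiest_cfs_rq->load.weight, the weight queued inside
	 *                the group on that CPU
	 */
	int main(void)
	{
		unsigned long h_load = 1024, cfs_weight = 2048;
		unsigned long rem_load_move = 512;	/* root-level load still to move */

		/* root-level units -> group-internal units before pulling tasks */
		unsigned long rem_load = rem_load_move * cfs_weight / (h_load + 1);

		/* pretend __load_balance_fair() managed to move all of it ... */
		unsigned long moved = rem_load;

		/* ... then convert back to root-level units to charge rem_load_move */
		unsigned long moved_load = moved * h_load / (cfs_weight + 1);

		/* prints rem_load=1023 moved_load=511, i.e. about the 512 requested */
		printf("rem_load=%lu moved_load=%lu\n", rem_load, moved_load);
		return 0;
	}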