author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2008-06-27 07:41:18 -0400
committer Ingo Molnar <mingo@elte.hu>              2008-06-27 08:31:32 -0400
commit    b6a86c746f5b708012809958462234d19e9c8177 (patch)
tree      38654c70da6382f50779ede1e973d2d395f38e54 /kernel/sched_fair.c
parent    32df2ee86a580f70f2dbb90cf81f413aa655f838 (diff)
sched: fix sched_domain aggregation
Keeping the aggregate on the first cpu of the sched domain has two problems:
- it could collide between different sched domains on different cpus
- it could slow things down because of the remote accesses
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
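To make the commit message's two problems concrete, here is a minimal, self-contained C sketch; the array layout and the names aggregate_old()/aggregate_new() are hypothetical illustrations, not the kernel's data structures. Storing the per-group aggregate at the domain's first cpu means overlapping domains can land on the same slot, and every other cpu in the domain has to touch a remote cache line; indexing by the balancing cpu keeps each access local.

/* Illustrative sketch only -- hypothetical layout, not the kernel's code. */
#define NR_CPUS		8
#define NR_GROUPS	4

struct aggregate_stats {
	unsigned long task_weight;
	unsigned long rq_weight;
	unsigned long load;
};

/* One aggregate slot per (group, cpu). */
static struct aggregate_stats agg[NR_GROUPS][NR_CPUS];

/*
 * Old scheme: the aggregate lives at the first cpu of the sched domain.
 * Two different domains that share a first cpu collide on the same slot,
 * and every cpu other than first_cpu accesses remote memory.
 */
static struct aggregate_stats *aggregate_old(int group, int domain_first_cpu)
{
	return &agg[group][domain_first_cpu];
}

/*
 * New scheme: the cpu doing the balancing uses its own slot, so domains
 * no longer collide and every access stays local to this_cpu.
 */
static struct aggregate_stats *aggregate_new(int group, int this_cpu)
{
	return &agg[group][this_cpu];
}

This is the direction of the change below: every aggregate(tg, sd) lookup in load_balance_fair() becomes aggregate(tg, this_cpu).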
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c | 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 509092af0330..40cf24ab4de8 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1429,11 +1429,11 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		/*
 		 * empty group
 		 */
-		if (!aggregate(tg, sd)->task_weight)
+		if (!aggregate(tg, this_cpu)->task_weight)
 			continue;
 
-		rem_load = rem_load_move * aggregate(tg, sd)->rq_weight;
-		rem_load /= aggregate(tg, sd)->load + 1;
+		rem_load = rem_load_move * aggregate(tg, this_cpu)->rq_weight;
+		rem_load /= aggregate(tg, this_cpu)->load + 1;
 
 		this_weight = tg->cfs_rq[this_cpu]->task_weight;
 		busiest_weight = tg->cfs_rq[busiest_cpu]->task_weight;
@@ -1451,10 +1451,10 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		if (!moved_load)
 			continue;
 
-		move_group_shares(tg, sd, busiest_cpu, this_cpu);
+		move_group_shares(tg, this_cpu, sd, busiest_cpu, this_cpu);
 
-		moved_load *= aggregate(tg, sd)->load;
-		moved_load /= aggregate(tg, sd)->rq_weight + 1;
+		moved_load *= aggregate(tg, this_cpu)->load;
+		moved_load /= aggregate(tg, this_cpu)->rq_weight + 1;
 
 		rem_load_move -= moved_load;
 		if (rem_load_move < 0)
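The hunks above only touch the call sites in kernel/sched_fair.c; the aggregate() accessor and move_group_shares() themselves are defined outside this diffstat-limited view. A hedged sketch of the signatures the new call sites imply follows; the struct fields and parameter names are assumptions for illustration, not taken from this patch.

/*
 * Implied companion change outside kernel/sched_fair.c; not part of this
 * limited diff.  Field and parameter names are assumptions.
 */
struct task_group;
struct sched_domain;

struct aggregate_stats {
	unsigned long task_weight;	/* weight of the group's runnable tasks */
	unsigned long rq_weight;	/* combined runqueue weight */
	unsigned long load;		/* load contributed by the group */
};

/*
 * Before: aggregate(struct task_group *tg, struct sched_domain *sd),
 * keyed on the domain, i.e. stored at the domain's first cpu.
 * After: keyed on a cpu, so each cpu works on its own local copy.
 */
struct aggregate_stats *aggregate(struct task_group *tg, int cpu);

/* move_group_shares() likewise gains the cpu whose aggregate it should use. */
void move_group_shares(struct task_group *tg, int this_cpu,
		       struct sched_domain *sd, int scpu, int dcpu);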