author	Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>	2008-06-27 07:41:20 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-06-27 08:31:33 -0400
commit	53fecd8ae1900fb571086f54f664051004665b55 (patch)
tree	2dfd5aba9d974f0f114e96cbdc2aef82a32078a9	/kernel/sched_fair.c
parent	4d8d595dfa69e1c807bf928f364668a7f30da5dc (diff)
sched: kill task_group balancing
The idea was to balance groups until we've reached the global goal, however
Vatsa rightly pointed out that we might never reach that goal this way -
hence take out this logic.

[ the initial rationale for this 'feature' was to promote max concurrency
  within a group - it does not however affect fairness ]

Reported-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
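A minimal standalone sketch of the arithmetic behind this (not kernel code;
the numbers and plain variables below are invented for illustration and stand
in for the aggregate()/task_weight values used in load_balance_fair()): with
the removed logic, a single group's pull target could exceed the global
rem_load_move goal, so a pass could keep moving load without ever satisfying
that goal; after the change, each group is asked for at most its proportional
share of the goal.

/* sketch: old max(rem_load, imbalance) target vs. new per-group share */
#include <stdio.h>

int main(void)
{
	long rem_load_move = 100;	/* global goal for this balance pass (made up) */
	long tg_rq_weight  = 200;	/* example aggregate rq_weight of one group */
	long tg_load       = 1000;	/* example aggregate load of that group */

	/* new behaviour: move only the group's proportional share of the goal */
	long rem_load = rem_load_move * tg_rq_weight / (tg_load + 1);

	/* old behaviour: also consider half the task_weight gap between CPUs */
	long busiest_weight = 800, this_weight = 100;	/* made-up weights */
	long imbalance = (busiest_weight - this_weight) / 2;
	if (imbalance < 0)
		imbalance = busiest_weight;
	long max_load = imbalance > rem_load ? imbalance : rem_load;

	printf("goal=%ld per-group share=%ld old target=%ld\n",
	       rem_load_move, rem_load, max_load);
	return 0;
}

With these example numbers the sketch prints goal=100 per-group share=19
old target=350: the old per-group target alone overshoots the global goal,
which is the convergence problem the patch removes.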
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	15
1 file changed, 2 insertions(+), 13 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 40cf24ab4de8..b10c0d61a2a9 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1422,9 +1422,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 
 	rcu_read_lock();
 	list_for_each_entry(tg, &task_groups, list) {
-		long imbalance;
-		unsigned long this_weight, busiest_weight;
-		long rem_load, max_load, moved_load;
+		long rem_load, moved_load;
 
 		/*
 		 * empty group
@@ -1435,17 +1433,8 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		rem_load = rem_load_move * aggregate(tg, this_cpu)->rq_weight;
 		rem_load /= aggregate(tg, this_cpu)->load + 1;
 
-		this_weight = tg->cfs_rq[this_cpu]->task_weight;
-		busiest_weight = tg->cfs_rq[busiest_cpu]->task_weight;
-
-		imbalance = (busiest_weight - this_weight) / 2;
-
-		if (imbalance < 0)
-			imbalance = busiest_weight;
-
-		max_load = max(rem_load, imbalance);
 		moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
-				max_load, sd, idle, all_pinned, this_best_prio,
+				rem_load, sd, idle, all_pinned, this_best_prio,
 				tg->cfs_rq[busiest_cpu]);
 
 		if (!moved_load)