diff options
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r-- | kernel/sched_fair.c | 15 |
1 files changed, 2 insertions, 13 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 40cf24ab4de8..b10c0d61a2a9 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1422,9 +1422,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1422 | 1422 | ||
1423 | rcu_read_lock(); | 1423 | rcu_read_lock(); |
1424 | list_for_each_entry(tg, &task_groups, list) { | 1424 | list_for_each_entry(tg, &task_groups, list) { |
1425 | long imbalance; | 1425 | long rem_load, moved_load; |
1426 | unsigned long this_weight, busiest_weight; | ||
1427 | long rem_load, max_load, moved_load; | ||
1428 | 1426 | ||
1429 | /* | 1427 | /* |
1430 | * empty group | 1428 | * empty group |
@@ -1435,17 +1433,8 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
1435 | rem_load = rem_load_move * aggregate(tg, this_cpu)->rq_weight; | 1433 | rem_load = rem_load_move * aggregate(tg, this_cpu)->rq_weight; |
1436 | rem_load /= aggregate(tg, this_cpu)->load + 1; | 1434 | rem_load /= aggregate(tg, this_cpu)->load + 1; |
1437 | 1435 | ||
1438 | this_weight = tg->cfs_rq[this_cpu]->task_weight; | ||
1439 | busiest_weight = tg->cfs_rq[busiest_cpu]->task_weight; | ||
1440 | |||
1441 | imbalance = (busiest_weight - this_weight) / 2; | ||
1442 | |||
1443 | if (imbalance < 0) | ||
1444 | imbalance = busiest_weight; | ||
1445 | |||
1446 | max_load = max(rem_load, imbalance); | ||
1447 | moved_load = __load_balance_fair(this_rq, this_cpu, busiest, | 1436 | moved_load = __load_balance_fair(this_rq, this_cpu, busiest, |
1448 | max_load, sd, idle, all_pinned, this_best_prio, | 1437 | rem_load, sd, idle, all_pinned, this_best_prio, |
1449 | tg->cfs_rq[busiest_cpu]); | 1438 | tg->cfs_rq[busiest_cpu]); |
1450 | 1439 | ||
1451 | if (!moved_load) | 1440 | if (!moved_load) |