-rw-r--r--	kernel/sched.c	47
1 file changed, 2 insertions, 45 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 0014b03adaca..85e1721594f0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1657,42 +1657,6 @@ void aggregate_group_weight(struct task_group *tg, struct sched_domain *sd)
 }
 
 /*
- * Redistribute tg->shares amongst all tg->cfs_rq[]s.
- */
-static void __aggregate_redistribute_shares(struct task_group *tg)
-{
-	int i, max_cpu = smp_processor_id();
-	unsigned long rq_weight = 0;
-	unsigned long shares, max_shares = 0, shares_rem = tg->shares;
-
-	for_each_possible_cpu(i)
-		rq_weight += tg->cfs_rq[i]->load.weight;
-
-	for_each_possible_cpu(i) {
-		/*
-		 * divide shares proportional to the rq_weights.
-		 */
-		shares = tg->shares * tg->cfs_rq[i]->load.weight;
-		shares /= rq_weight + 1;
-
-		tg->cfs_rq[i]->shares = shares;
-
-		if (shares > max_shares) {
-			max_shares = shares;
-			max_cpu = i;
-		}
-		shares_rem -= shares;
-	}
-
-	/*
-	 * Ensure it all adds up to tg->shares; we can loose a few
-	 * due to rounding down when computing the per-cpu shares.
-	 */
-	if (shares_rem)
-		tg->cfs_rq[max_cpu]->shares += shares_rem;
-}
-
-/*
  * Compute the weight of this group on the given cpus.
  */
 static
@@ -1701,18 +1665,11 @@ void aggregate_group_shares(struct task_group *tg, struct sched_domain *sd)
 	unsigned long shares = 0;
 	int i;
 
-again:
 	for_each_cpu_mask(i, sd->span)
 		shares += tg->cfs_rq[i]->shares;
 
-	/*
-	 * When the span doesn't have any shares assigned, but does have
-	 * tasks to run do a machine wide rebalance (should be rare).
-	 */
-	if (unlikely(!shares && aggregate(tg, sd)->rq_weight)) {
-		__aggregate_redistribute_shares(tg);
-		goto again;
-	}
+	if ((!shares && aggregate(tg, sd)->rq_weight) || shares > tg->shares)
+		shares = tg->shares;
 
 	aggregate(tg, sd)->shares = shares;
 }
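
The removed helper split tg->shares across the per-cpu cfs_rq's in proportion to their load weight; because each per-cpu value is computed with integer division, the sum can come up short, so the leftover was handed to the cpu that received the largest share. Below is a minimal userspace sketch of that arithmetic, not kernel code: NR_CPUS, the weight[] values and tg_shares are made-up numbers for illustration only.

/*
 * Standalone sketch of the proportional split performed by the removed
 * __aggregate_redistribute_shares(); weights and cpu count are illustrative.
 */
#include <stdio.h>

#define NR_CPUS 4

int main(void)
{
	unsigned long weight[NR_CPUS] = { 300, 200, 100, 0 };	/* hypothetical per-rq load weights */
	unsigned long tg_shares = 1024;				/* hypothetical tg->shares */
	unsigned long per_cpu[NR_CPUS];
	unsigned long rq_weight = 0, shares_rem = tg_shares, max_shares = 0;
	int i, max_cpu = 0;

	for (i = 0; i < NR_CPUS; i++)
		rq_weight += weight[i];

	for (i = 0; i < NR_CPUS; i++) {
		/* divide shares proportional to the rq weights; rounds down */
		unsigned long shares = tg_shares * weight[i] / (rq_weight + 1);

		per_cpu[i] = shares;
		if (shares > max_shares) {
			max_shares = shares;
			max_cpu = i;
		}
		shares_rem -= shares;
	}

	/* rounding down loses a few shares; give the remainder to the biggest cpu */
	if (shares_rem)
		per_cpu[max_cpu] += shares_rem;

	for (i = 0; i < NR_CPUS; i++)
		printf("cpu%d: %lu\n", i, per_cpu[i]);
	return 0;
}

With the helper gone, aggregate_group_shares() no longer does the rebalance-and-retry loop; as the second hunk shows, it simply falls back to tg->shares when the span has runnable weight but no shares, or when the summed per-cpu shares exceed tg->shares.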