path: root/kernel/sched.c
author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2008-06-27 07:41:21 -0400
committer  Ingo Molnar <mingo@elte.hu>                2008-06-27 08:31:34 -0400
commit     d3f40dbab954d83383b6a516582d5c09cc216dcc
tree       db50344cb0dfa11a214f7758c4254b9ffeb9f118 /kernel/sched.c
parent     53fecd8ae1900fb571086f54f664051004665b55
sched: don't micro-manage share losses
We used to try and contain the loss of 'shares' by playing arithmetic
games.  Replace that by noticing that at the top sched_domain we'll
always have the full weight in shares to distribute.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
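As a rough illustration of the rounding loss the removed code was compensating for, the toy userspace sketch below (not part of the patch; tg_shares, rq_weight and total_weight are made-up values standing in for tg->shares and the per-cpu runqueue weights) shows how distributing shares proportionally with integer division comes up a little short:

#include <stdio.h>

int main(void)
{
        unsigned long tg_shares = 1024;            /* group's configured shares */
        unsigned long rq_weight[3] = { 3, 3, 3 };  /* per-cpu runqueue weight   */
        unsigned long total_weight = 9;
        unsigned long distributed = 0;

        /* proportional split with integer division: 1024 * 3 / 9 = 341 per cpu */
        for (int cpu = 0; cpu < 3; cpu++)
                distributed += tg_shares * rq_weight[cpu] / total_weight;

        /* 3 * 341 = 1023, so one share is lost to rounding */
        printf("distributed %lu of %lu shares (%lu lost to rounding)\n",
               distributed, tg_shares, tg_shares - distributed);
        return 0;
}

The old code added such remainders back by hand after every redistribution; the check this patch adds to aggregate_group_shares() (first hunk below) instead hands the group its full ->shares once the top-level sched_domain is reached (no parent, or a parent without SD_LOAD_BALANCE), so the per-call bookkeeping becomes unnecessary.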
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--   kernel/sched.c   26
1 file changed, 3 insertions(+), 23 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index dae20199dc9c..28229c5d4983 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1551,6 +1551,9 @@ aggregate_group_shares(struct task_group *tg, int cpu, struct sched_domain *sd)
 	if ((!shares && aggregate(tg, cpu)->rq_weight) || shares > tg->shares)
 		shares = tg->shares;
 
+	if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
+		shares = tg->shares;
+
 	aggregate(tg, cpu)->shares = shares;
 }
 
@@ -1642,20 +1645,8 @@ static void
 __move_group_shares(struct task_group *tg, int cpu, struct sched_domain *sd,
 		int scpu, int dcpu)
 {
-	unsigned long shares;
-
-	shares = tg->cfs_rq[scpu]->shares + tg->cfs_rq[dcpu]->shares;
-
 	__update_group_shares_cpu(tg, cpu, sd, scpu);
 	__update_group_shares_cpu(tg, cpu, sd, dcpu);
-
-	/*
-	 * ensure we never loose shares due to rounding errors in the
-	 * above redistribution.
-	 */
-	shares -= tg->cfs_rq[scpu]->shares + tg->cfs_rq[dcpu]->shares;
-	if (shares)
-		tg->cfs_rq[dcpu]->shares += shares;
 }
 
 /*
@@ -1675,7 +1666,6 @@ move_group_shares(struct task_group *tg, int cpu, struct sched_domain *sd,
 static void
 aggregate_group_set_shares(struct task_group *tg, int cpu, struct sched_domain *sd)
 {
-	unsigned long shares = aggregate(tg, cpu)->shares;
 	int i;
 
 	for_each_cpu_mask(i, sd->span) {
@@ -1688,16 +1678,6 @@ aggregate_group_set_shares(struct task_group *tg, int cpu, struct sched_domain *
 	}
 
 	aggregate_group_shares(tg, cpu, sd);
-
-	/*
-	 * ensure we never loose shares due to rounding errors in the
-	 * above redistribution.
-	 */
-	shares -= aggregate(tg, cpu)->shares;
-	if (shares) {
-		tg->cfs_rq[cpu]->shares += shares;
-		aggregate(tg, cpu)->shares += shares;
-	}
 }
 
 /*