author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-12-03 12:00:07 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-12-09 04:03:09 -0500
commit	cd8ad40de36c2fe75f3b731bd70198b385895246 (patch)
tree	c1e6d5c8dddfa0d9ad643a1df8e92f26700e3143 /kernel/sched.c
parent	fb58bac5c75bfff8bbf7d02071a10a62f32fe28b (diff)
sched: cgroup: Implement different treatment for idle shares
When setting the weight for a per-cpu task-group, we have to put in a
phantom weight when there is no work on that cpu; otherwise that cpu
will not be serviced when new work gets placed there, until we again
update the per-cpu weights.

We used to add these phantom weights to the total so that the idle
per-cpu shares would not get inflated; this, however, caused the
non-idle parts to get deflated, resulting in unexpected weight
distributions.

Reverse this, so that the non-idle shares are correct but the idle
shares are inflated.
Reported-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Tested-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1257934048.23203.76.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
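To make the deflation concrete, here is a small self-contained model of
the share split (a sketch in plain C, not the kernel code; the four-cpu
load figures and the group's 1024 shares are invented for illustration,
while the phantom-weight logic mirrors the tg_shares_up() hunks below):

/*
 * Hedged sketch, not kernel code: models the proportional share split
 * in tg_shares_up().  The 4-cpu load figures and the group's 1024
 * shares are invented for illustration.
 */
#include <stdio.h>

#define NICE_0_LOAD	1024UL	/* stand-in for the kernel's define */

int main(void)
{
	unsigned long tg_shares = 1024;
	/* per-cpu cfs_rq load; cpus 2 and 3 are idle */
	unsigned long load[4] = { 3072, 1024, 0, 0 };
	unsigned long rq_weight = 0, sum_weight = 0;
	int i;

	for (i = 0; i < 4; i++) {
		unsigned long w = load[i];

		rq_weight += w;			/* new: real weights only */
		if (!w)
			w = NICE_0_LOAD;	/* phantom weight for an idle cpu */
		sum_weight += w;		/* old: phantoms included */
	}
	if (!rq_weight)				/* fully idle group: fall back */
		rq_weight = sum_weight;

	for (i = 0; i < 4; i++) {
		unsigned long w = load[i] ? load[i] : NICE_0_LOAD;

		printf("cpu%d: old share %4lu, new share %4lu\n", i,
		       tg_shares * w / sum_weight,	/* deflates busy cpus */
		       tg_shares * w / rq_weight);	/* exact for busy cpus */
	}
	return 0;
}

With these numbers the old denominator (6144, phantoms included) hands
the busy cpu0 only 512 of the group's 1024 shares instead of its
proportional 768; the new denominator (4096, real weights only) restores
768 and 256 for the busy cpus, while each idle cpu keeps an inflated 256.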
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 0170735bdafc..71eb0622f548 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1614,7 +1614,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
  */
 static int tg_shares_up(struct task_group *tg, void *data)
 {
-	unsigned long weight, rq_weight = 0, shares = 0;
+	unsigned long weight, rq_weight = 0, sum_weight = 0, shares = 0;
 	unsigned long *usd_rq_weight;
 	struct sched_domain *sd = data;
 	unsigned long flags;
@@ -1630,6 +1630,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		weight = tg->cfs_rq[i]->load.weight;
 		usd_rq_weight[i] = weight;
 
+		rq_weight += weight;
 		/*
 		 * If there are currently no tasks on the cpu pretend there
 		 * is one of average load so that when a new task gets to
@@ -1638,10 +1639,13 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		if (!weight)
 			weight = NICE_0_LOAD;
 
-		rq_weight += weight;
+		sum_weight += weight;
 		shares += tg->cfs_rq[i]->shares;
 	}
 
+	if (!rq_weight)
+		rq_weight = sum_weight;
+
 	if ((!shares && rq_weight) || shares > tg->shares)
 		shares = tg->shares;
 
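For context, the denominator computed above is consumed per-cpu when the
shares are written back. A simplified sketch of that consumer follows
(modeled on the update_group_shares_cpu() named in the hunk headers;
locking is omitted, the real function also clamps the result, and
NICE_0_LOAD is defined here only to keep the sketch self-contained):

#define NICE_0_LOAD	1024UL	/* stand-in for the kernel's define */

/*
 * Simplified model of the per-cpu consumer, in the style of
 * update_group_shares_cpu(); not the actual kernel function.
 */
static unsigned long group_share_for_cpu(unsigned long tg_shares,
					 unsigned long sd_rq_weight,
					 const unsigned long *usd_rq_weight,
					 int cpu)
{
	unsigned long rq_weight = usd_rq_weight[cpu];

	/* idle cpu: boost with the phantom weight */
	if (!rq_weight)
		rq_weight = NICE_0_LOAD;

	/* shares_i = tg->shares * rq_weight_i / \Sum_j rq_weight_j */
	return tg_shares * rq_weight / sd_rq_weight;
}

Because usd_rq_weight[] stores the real (possibly zero) weight while the
new rq_weight denominator sums only real weights, a busy cpu's share is
now exact, and an idle cpu's NICE_0_LOAD boost is precisely the inflated
idle share the changelog accepts.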