aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched.c
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2009-08-21 07:58:54 -0400
committerIngo Molnar <mingo@elte.hu>2009-08-21 08:15:10 -0400
commita8af7246c114bfd939e539f9566b872c06f6225c (patch)
treed624ea07739b92d3ff1a676a3244fd7188d8198f /kernel/sched.c
parentcde7e5ca4e329a157108769d1f752d191cbb71c6 (diff)
sched: Avoid division by zero
Patch a5004278f0525dcb9aa43703ef77bf371ea837cd (sched: Fix cgroup smp fairness)
introduced the possibility of a divide-by-zero because load-balancing is not
synchronized between sched_domains.

This can cause the state of cpus to change between the first and second loop
over the sched domain in tg_shares_up().

Reported-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Jes Sorensen <jes@sgi.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
LKML-Reference: <1250855934.7538.30.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--kernel/sched.c23
1 files changed, 10 insertions, 13 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 1b529efe8872..8f8a98eab9db 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1522,7 +1522,8 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares);
  */
 static void
 update_group_shares_cpu(struct task_group *tg, int cpu,
-			unsigned long sd_shares, unsigned long sd_rq_weight)
+			unsigned long sd_shares, unsigned long sd_rq_weight,
+			unsigned long sd_eff_weight)
 {
 	unsigned long rq_weight;
 	unsigned long shares;
@@ -1535,13 +1536,15 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
 	if (!rq_weight) {
 		boost = 1;
 		rq_weight = NICE_0_LOAD;
+		if (sd_rq_weight == sd_eff_weight)
+			sd_eff_weight += NICE_0_LOAD;
+		sd_rq_weight = sd_eff_weight;
 	}
 
 	/*
-	 *           \Sum shares * rq_weight
-	 * shares =  -----------------------
-	 *               \Sum rq_weight
-	 *
+	 *             \Sum_j shares_j * rq_weight_i
+	 * shares_i =  -----------------------------
+	 *                  \Sum_j rq_weight_j
 	 */
 	shares = (sd_shares * rq_weight) / sd_rq_weight;
 	shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
@@ -1593,14 +1596,8 @@ static int tg_shares_up(struct task_group *tg, void *data)
 	if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
 		shares = tg->shares;
 
-	for_each_cpu(i, sched_domain_span(sd)) {
-		unsigned long sd_rq_weight = rq_weight;
-
-		if (!tg->cfs_rq[i]->rq_weight)
-			sd_rq_weight = eff_weight;
-
-		update_group_shares_cpu(tg, i, shares, sd_rq_weight);
-	}
+	for_each_cpu(i, sched_domain_span(sd))
+		update_group_shares_cpu(tg, i, shares, rq_weight, eff_weight);
 
 	return 0;
 }