path: root/kernel/sched_fair.c
author	Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>	2008-06-27 07:41:36 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-06-27 08:31:45 -0400
commit	243e0e7b7d3b54749ece2e879ecd7e2a11874443 (patch)
tree	0dd6af7eb63d261d15d3720f77a9430387e3db42 /kernel/sched_fair.c
parent	2398f2c6d34b43025f274fc42eaca34d23ec2320 (diff)
sched: fix mult overflow
It was observed these mults can overflow.

Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
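For context on where the overflow comes from: on 32-bit builds "unsigned long" is 32 bits, and both operands are scheduler load values scaled by weight units (1024 for a nice-0 task), so rem_load_move * busiest_weight can exceed 2^32 and wrap. The sketch below is userspace illustration only, not kernel code; the weight values are made up, and div_u64() here is a local stand-in mirroring the kernel helper's 64-bit-dividend / 32-bit-divisor shape.

/*
 * Userspace sketch of the overflow (assumed values, not from a real run).
 * The old code multiplied and divided in native "long"; the patch widens
 * the product to u64 and divides with div_u64().
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Stand-in for the kernel's div_u64(): 64-bit dividend, 32-bit divisor. */
static uint64_t div_u64(uint64_t dividend, uint32_t divisor)
{
	return dividend / divisor;
}

int main(void)
{
	/* Illustrative values; real ones depend on group weights. */
	uint32_t rem_load_move = 3 * 1024 * 1024;	/* ~3072 nice-0 tasks' worth of load */
	uint32_t busiest_weight = 8 * 1024;		/* busiest_cfs_rq->load.weight */
	uint32_t busiest_h_load = 2 * 1024;		/* busiest_cfs_rq->h_load */

	/* Old code: product computed in 32 bits, wraps past 2^32 (here to 0). */
	uint32_t wrapped = (rem_load_move * busiest_weight) / (busiest_h_load + 1);

	/* Patched code: widen before multiplying, then do a 64-by-32 divide. */
	uint64_t correct = div_u64((uint64_t)rem_load_move * busiest_weight,
				   busiest_h_load + 1);

	printf("32-bit arithmetic: rem_load = %" PRIu32 "\n", wrapped);
	printf("64-bit arithmetic: rem_load = %" PRIu64 "\n", correct);
	return 0;
}

In the kernel itself, div_u64() comes from include/linux/math64.h; it is used instead of a plain 64-bit "/" because the latter would pull in a libgcc division helper on 32-bit architectures.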
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	8
1 file changed, 4 insertions, 4 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0d197be3e3e9..26ebe180cdea 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1477,7 +1477,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
 		unsigned long busiest_h_load = busiest_cfs_rq->h_load;
 		unsigned long busiest_weight = busiest_cfs_rq->load.weight;
-		long rem_load, moved_load;
+		u64 rem_load, moved_load;
 
 		/*
 		 * empty group
@@ -1485,8 +1485,8 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		if (!busiest_cfs_rq->task_weight)
 			continue;
 
-		rem_load = rem_load_move * busiest_weight;
-		rem_load /= busiest_h_load + 1;
+		rem_load = (u64)rem_load_move * busiest_weight;
+		rem_load = div_u64(rem_load, busiest_h_load + 1);
 
 		moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
 				rem_load, sd, idle, all_pinned, this_best_prio,
@@ -1496,7 +1496,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 			continue;
 
 		moved_load *= busiest_h_load;
-		moved_load /= busiest_weight + 1;
+		moved_load = div_u64(moved_load, busiest_weight + 1);
 
 		rem_load_move -= moved_load;
 		if (rem_load_move < 0)