diff options
author | Balbir Singh <balbir@linux.vnet.ibm.com> | 2008-11-12 05:49:00 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-11-12 06:33:50 -0500 |
commit | a2d477778e82a60a0b7114cefdb70aa43af28782 (patch) | |
tree | 52fb05ee2d63765ddddcf4a2e06f310510607735 /kernel/sched.c | |
parent | f21f237cf55494c3a4209de323281a3b0528da10 (diff) |
sched: fix stale value in average load per task
Impact: fix load balancer load average calculation accuracy
cpu_avg_load_per_task() returns a stale value when nr_running is 0.
It returns an older, stale value (calculated when nr_running was non-zero).
This patch returns and sets rq->avg_load_per_task to zero when nr_running
is 0.
Compile and boot tested on an x86_64 box.
Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 2 |
1 files changed, 2 insertions, 0 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index 50a21f964679..3bafbe350f4f 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -1456,6 +1456,8 @@ static unsigned long cpu_avg_load_per_task(int cpu) | |||
1456 | 1456 | ||
1457 | if (rq->nr_running) | 1457 | if (rq->nr_running) |
1458 | rq->avg_load_per_task = rq->load.weight / rq->nr_running; | 1458 | rq->avg_load_per_task = rq->load.weight / rq->nr_running; |
1459 | else | ||
1460 | rq->avg_load_per_task = 0; | ||
1459 | 1461 | ||
1460 | return rq->avg_load_per_task; | 1462 | return rq->avg_load_per_task; |
1461 | } | 1463 | } |