| author | Ingo Molnar <mingo@elte.hu> | 2009-09-16 15:09:13 -0400 |
| --- | --- | --- |
| committer | Ingo Molnar <mingo@elte.hu> | 2009-09-17 03:51:20 -0400 |
| commit | eb24073bc1fe3e569a855cf38d529fb650c35524 (patch) | |
| tree | 1dfc4a556f5b78c59cb1accf96666d3105a1cd78 | |
| parent | 182a85f8a119c789610a9d464f4129ded9f3c107 (diff) | |
sched: Fix TASK_WAKING & loadaverage breakage
Fix this:
top - 21:54:00 up 2:59, 1 user, load average: 432512.33, 426421.74, 417432.74
Which happens because we now set TASK_WAKING before activate_task().
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
| mode | file | lines changed |
| --- | --- | --- |
| -rw-r--r-- | kernel/sched.c | 4 |

1 file changed, 4 insertions, 0 deletions
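To make the one-line explanation in the commit message concrete: the per-runqueue load accounting bumps nr_uninterruptible when a task in uninterruptible sleep is dequeued, and drops it again when the task is enqueued on wakeup, but only while task_contributes_to_load() still sees TASK_UNINTERRUPTIBLE in p->state. Once try_to_wake_up() flips the task to TASK_WAKING before activate_task() runs, that check fails, the decrement is skipped, and the counter only ever climbs. Below is a standalone userspace sketch of that imbalance; the names mirror the kernel's, but the bodies are simplified assumptions, not the real kernel/sched.c.

```c
/* Simplified, userspace model of the nr_uninterruptible accounting bug.
 * Names mirror the kernel ones, but this is a standalone sketch, not the
 * real kernel/sched.c. */
#include <stdio.h>

#define TASK_RUNNING		0
#define TASK_UNINTERRUPTIBLE	2
#define TASK_WAKING		256	/* placeholder value for the sketch */

struct task_struct { long state; };
struct rq { unsigned long nr_running, nr_uninterruptible; };

/* Mirrors the spirit of the kernel macro: only tasks still marked
 * TASK_UNINTERRUPTIBLE count towards the load average. */
static int task_contributes_to_load(struct task_struct *p)
{
	return (p->state & TASK_UNINTERRUPTIBLE) != 0;
}

static void deactivate_task(struct rq *rq, struct task_struct *p)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;
	rq->nr_running--;
}

static void activate_task(struct rq *rq, struct task_struct *p)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible--;
	rq->nr_running++;
}

/* Buggy wakeup path: TASK_WAKING is set *before* activate_task(),
 * so the decrement above never fires. */
static void try_to_wake_up_buggy(struct rq *rq, struct task_struct *p)
{
	p->state = TASK_WAKING;
	activate_task(rq, p);
	p->state = TASK_RUNNING;
}

int main(void)
{
	struct rq rq = { .nr_running = 1 };
	struct task_struct p = { .state = TASK_RUNNING };

	for (int i = 0; i < 5; i++) {
		p.state = TASK_UNINTERRUPTIBLE;
		deactivate_task(&rq, &p);	/* counter goes up ...     */
		try_to_wake_up_buggy(&rq, &p);	/* ... but never back down */
	}
	/* Prints 5: every sleep/wake cycle leaks one "uninterruptible" task,
	 * and the load average is derived from nr_running + nr_uninterruptible. */
	printf("nr_uninterruptible = %lu\n", rq.nr_uninterruptible);
	return 0;
}
```

Run, the sketch reports nr_uninterruptible = 5 after five sleep/wake cycles even though no task is actually asleep, which is the drift behind the load-average report above. The patch below restores balance by doing the decrement in try_to_wake_up() itself, while p->state still reflects the uninterruptible sleep.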
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index 5049d959bb26..969dfaef2465 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2343,7 +2343,11 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	/*
 	 * In order to handle concurrent wakeups and release the rq->lock
 	 * we put the task in TASK_WAKING state.
+	 *
+	 * First fix up the nr_uninterruptible count:
 	 */
+	if (task_contributes_to_load(p))
+		rq->nr_uninterruptible--;
 	p->state = TASK_WAKING;
 	task_rq_unlock(rq, &flags);
 
```
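For scale on the numbers in the report: the load average samples the sum of nr_running and nr_uninterruptible and folds it into fixed-point averages every few seconds, so a counter that leaks one unit per uninterruptible sleep/wake cycle shows up directly as load. The following is a standalone sketch of that folding; the constants and the shape of calc_load() follow the kernel's long-standing fixed-point scheme, but treat the details as illustrative assumptions, not a quote of this tree.

```c
/* Standalone sketch of the load-average folding; constants and the
 * calc_load() shape follow the kernel's fixed-point scheme, but this is
 * an illustration, not the verbatim source. Assumes 64-bit unsigned long. */
#include <stdio.h>

#define FSHIFT	11			/* bits of fixed-point precision */
#define FIXED_1	(1 << FSHIFT)		/* 1.0 in fixed point */
#define EXP_1	1884			/* 1/exp(5s/1min) in fixed point */

static unsigned long calc_load(unsigned long load, unsigned long exp,
			       unsigned long active)
{
	load *= exp;
	load += active * (FIXED_1 - exp);
	return load >> FSHIFT;
}

int main(void)
{
	/* "active" stands in for nr_running + nr_uninterruptible; if the
	 * latter leaks on every uninterruptible sleep/wake cycle, active
	 * grows without bound and so does the reported 1-minute average. */
	unsigned long avenrun_1 = 0, leaked = 0;

	for (int tick = 0; tick < 2000; tick++) {	/* one 5s sample per tick */
		leaked += 100;				/* pretend 100 leaks per sample */
		unsigned long active = (1 + leaked) << FSHIFT;
		avenrun_1 = calc_load(avenrun_1, EXP_1, active);
	}
	printf("load average: %lu.%02lu\n",
	       avenrun_1 >> FSHIFT,
	       ((avenrun_1 & (FIXED_1 - 1)) * 100) >> FSHIFT);
	return 0;
}
```

With a steadily leaking active count, the 1-minute average simply chases the leak upward, which is how an otherwise lightly loaded box ends up reporting six-figure load averages like the one quoted in the commit message.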