diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2008-06-27 07:41:38 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-06-27 08:31:47 -0400 |
commit | f1d239f73200a5803a89e5929fb3abc1596b7589 (patch) | |
tree | 33c59b6f2621284af91825ea7fbab718ffe65ade /kernel/sched.c | |
parent | 83378269a5fad98f562ebc0f09c349575e6cbfe1 (diff) |
sched: incremental effective_load()
Increase the accuracy of the effective_load values.
Consider not only the current increment (as per the attempted wakeup), but
also the delta between the load at the time we last adjusted the shares and
the current situation.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 6 |
1 file changed, 6 insertions, 0 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index 01d3e51b7116..7613f69f0978 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -427,6 +427,11 @@ struct cfs_rq { | |||
427 | * this cpu's part of tg->shares | 427 | * this cpu's part of tg->shares |
428 | */ | 428 | */ |
429 | unsigned long shares; | 429 | unsigned long shares; |
430 | |||
431 | /* | ||
432 | * load.weight at the time we set shares | ||
433 | */ | ||
434 | unsigned long rq_weight; | ||
430 | #endif | 435 | #endif |
431 | #endif | 436 | #endif |
432 | }; | 437 | }; |
@@ -1527,6 +1532,7 @@ __update_group_shares_cpu(struct task_group *tg, int cpu, | |||
1527 | * record the actual number of shares, not the boosted amount. | 1532 | * record the actual number of shares, not the boosted amount. |
1528 | */ | 1533 | */ |
1529 | tg->cfs_rq[cpu]->shares = boost ? 0 : shares; | 1534 | tg->cfs_rq[cpu]->shares = boost ? 0 : shares; |
1535 | tg->cfs_rq[cpu]->rq_weight = rq_weight; | ||
1530 | 1536 | ||
1531 | if (shares < MIN_SHARES) | 1537 | if (shares < MIN_SHARES) |
1532 | shares = MIN_SHARES; | 1538 | shares = MIN_SHARES; |