author     Paul Turner <pjt@google.com>   2011-01-14 20:57:50 -0500
committer  Ingo Molnar <mingo@elte.hu>    2011-01-18 09:09:38 -0500
commit     977dda7c9b540f48b228174346d8b31542c1e99f
tree       77d40bcc72c1590dfcaeadb60e92a270bd204c52 /kernel
parent     c9b5f501ef1580faa30c40c644b7691870462201
sched: Update effective_load() to use global share weights
Previously, effective_load() approximated the global load weight present on
a group by taking advantage of:

  entity_weight = tg->shares * (lw / global_lw)

where entity_weight was provided by tg_shares_up.
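As an illustration with made-up numbers: for tg->shares = 1024, lw = 512 and
global_lw = 2048, this approximation gives entity_weight = 1024 * (512 / 2048) = 256.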
This worked (approximately) for an 'empty' (at the tg level) cpu, since we
would place a boost load representative of what a newly woken task would
receive. However, now that load is updated instantaneously, this assumption
no longer holds and the load calculation is rather incorrect in this case.
Fix this (and improve the general case) by re-writing effective_load to take
advantage of the new shares distribution code.
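For illustration only, here is a minimal user-space sketch of the per-entity
step that the rewritten effective_load() performs, as shown in the hunk below.
Plain longs stand in for the kernel's atomic_t and cfs_rq/sched_entity fields,
and the struct name, field names and numbers are invented for this example:

#include <stdio.h>

#define MIN_SHARES 2L   /* lower clamp, mirroring the kernel constant */

/* stand-in values describing one sched_entity level of the hierarchy */
struct level {
        long tg_shares;          /* tg->shares */
        long tg_load_weight;     /* atomic_read(&tg->load_weight) */
        long load_contribution;  /* se->my_q->load_contribution */
        long cfs_rq_weight;      /* se->my_q->load.weight */
        long se_weight;          /* se->load.weight */
};

/* one iteration of the for_each_sched_entity() loop in the new code */
static long effective_load_step(const struct level *l, long wl, long *wg)
{
        long w = l->cfs_rq_weight;

        /* use this cpu's instantaneous contribution */
        long lw = l->tg_load_weight;
        lw -= l->load_contribution;
        lw += w + *wg;

        wl += w;

        /* scale the combined weight into the group's share allotment */
        if (lw > 0 && wl < lw)
                wl = (wl * l->tg_shares) / lw;
        else
                wl = l->tg_shares;

        /* zero point is MIN_SHARES; report the delta vs. current weight */
        if (wl < MIN_SHARES)
                wl = MIN_SHARES;
        wl -= l->se_weight;

        *wg = 0;   /* the group weight change only applies at the first level */
        return wl;
}

int main(void)
{
        /* illustrative numbers only */
        struct level l = {
                .tg_shares         = 1024,
                .tg_load_weight    = 3072,
                .load_contribution = 1024,
                .cfs_rq_weight     = 1024,
                .se_weight         = 512,
        };
        long wg = 0;
        long wl = effective_load_step(&l, 1024, &wg);

        printf("effective load delta at this level: %ld\n", wl);
        return 0;
}

With these numbers the sketch prints 170: the group's share of effective
weight comes out to (2048 * 1024) / 3072 = 682, from which the entity's own
weight of 512, already accounted for, is subtracted.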
Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20110115015817.069769529@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched_fair.c | 32 ++++++++++++++++----------------
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c62ebae65cf0..414145cf5344 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1362,27 +1362,27 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
                 return wl;
 
         for_each_sched_entity(se) {
-                long S, rw, s, a, b;
+                long lw, w;
 
-                S = se->my_q->tg->shares;
-                s = se->load.weight;
-                rw = se->my_q->load.weight;
+                tg = se->my_q->tg;
+                w = se->my_q->load.weight;
 
-                a = S*(rw + wl);
-                b = S*rw + s*wg;
+                /* use this cpu's instantaneous contribution */
+                lw = atomic_read(&tg->load_weight);
+                lw -= se->my_q->load_contribution;
+                lw += w + wg;
 
-                wl = s*(a-b);
+                wl += w;
 
-                if (likely(b))
-                        wl /= b;
+                if (lw > 0 && wl < lw)
+                        wl = (wl * tg->shares) / lw;
+                else
+                        wl = tg->shares;
 
-                /*
-                 * Assume the group is already running and will
-                 * thus already be accounted for in the weight.
-                 *
-                 * That is, moving shares between CPUs, does not
-                 * alter the group weight.
-                 */
+                /* zero point is MIN_SHARES */
+                if (wl < MIN_SHARES)
+                        wl = MIN_SHARES;
+                wl -= se->load.weight;
                 wg = 0;
         }
 