 kernel/sched_fair.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c62ebae65cf0..414145cf5344 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1362,27 +1362,27 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 		return wl;
 
 	for_each_sched_entity(se) {
-		long S, rw, s, a, b;
+		long lw, w;
 
-		S = se->my_q->tg->shares;
-		s = se->load.weight;
-		rw = se->my_q->load.weight;
+		tg = se->my_q->tg;
+		w = se->my_q->load.weight;
 
-		a = S*(rw + wl);
-		b = S*rw + s*wg;
+		/* use this cpu's instantaneous contribution */
+		lw = atomic_read(&tg->load_weight);
+		lw -= se->my_q->load_contribution;
+		lw += w + wg;
 
-		wl = s*(a-b);
+		wl += w;
 
-		if (likely(b))
-			wl /= b;
+		if (lw > 0 && wl < lw)
+			wl = (wl * tg->shares) / lw;
+		else
+			wl = tg->shares;
 
-		/*
-		 * Assume the group is already running and will
-		 * thus already be accounted for in the weight.
-		 *
-		 * That is, moving shares between CPUs, does not
-		 * alter the group weight.
-		 */
+		/* zero point is MIN_SHARES */
+		if (wl < MIN_SHARES)
+			wl = MIN_SHARES;
+		wl -= se->load.weight;
 		wg = 0;
 	}
 
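
The hunk replaces the old closed-form estimate built from this runqueue's weight alone (a = S*(rw + wl), b = S*rw + s*wg, wl = s*(a-b)/b) with one based on the group's global load_weight minus this cpu's contribution, scaled into tg->shares and clamped at MIN_SHARES. The following user-space sketch models one iteration of the rewritten loop; it is not kernel code. The atomic tg->load_weight, se->my_q->load_contribution, se->my_q->load.weight and se->load.weight fields are passed in as plain longs, MIN_SHARES is assumed to be 2, and the numbers in main() are purely illustrative.

/*
 * Standalone model (not kernel code) of one level of the rewritten
 * effective_load() walk.  The kernel's atomic and per-cfs_rq bookkeeping
 * is passed in as plain longs; MIN_SHARES is assumed to be 2 here.
 */
#include <stdio.h>

#define MIN_SHARES	2

/*
 * Estimate the weight change seen one level up when 'wl' weight (plus a
 * pending group adjustment 'wg') is added to this cpu's queue of a group
 * with the given global shares and load bookkeeping.
 */
static long level_delta(long shares, long tg_load_weight,
			long cpu_load_contribution, long cpu_cfs_weight,
			long se_weight, long wl, long wg)
{
	long lw, w;

	w = cpu_cfs_weight;		/* stands in for se->my_q->load.weight */

	/* use this cpu's instantaneous contribution */
	lw = tg_load_weight;		/* atomic_read(&tg->load_weight) */
	lw -= cpu_load_contribution;	/* se->my_q->load_contribution */
	lw += w + wg;

	wl += w;

	if (lw > 0 && wl < lw)
		wl = (wl * shares) / lw;
	else
		wl = shares;

	/* zero point is MIN_SHARES */
	if (wl < MIN_SHARES)
		wl = MIN_SHARES;

	/* report the change relative to the entity's current weight */
	return wl - se_weight;		/* stands in for se->load.weight */
}

int main(void)
{
	/*
	 * Hypothetical numbers: shares = 1024, global group load 3072 with
	 * 1024 contributed by this cpu, group entity currently weighted 341
	 * in its parent; ask what adding wl = 512 here is worth one level up.
	 */
	printf("wl at next level: %ld\n",
	       level_delta(1024, 3072, 1024, 1024, 341, 512, 0));
	return 0;
}

The visible effect of the hunk is that the divisor is now the group's global weight (with this cpu's portion replaced by its instantaneous value) rather than just the local runqueue weight, so the projected delta follows how tg->shares is distributed across all cpus instead of tracking local weight alone.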