Diffstat (limited to 'kernel/sched_fair.c')
 kernel/sched_fair.c | 35 +++++++++++++++++++++----------------
 1 file changed, 19 insertions(+), 16 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c62ebae65cf0..77e9166d7bbf 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1062,6 +1062,9 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 		struct sched_entity *se = __pick_next_entity(cfs_rq);
 		s64 delta = curr->vruntime - se->vruntime;
 
+		if (delta < 0)
+			return;
+
 		if (delta > ideal_runtime)
 			resched_task(rq_of(cfs_rq)->curr);
 	}
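
The hunk above guards check_preempt_tick() against a signed/unsigned comparison: delta is s64, while ideal_runtime is an unsigned long, so when curr is running behind the leftmost entity the negative delta is converted to a huge unsigned value and "delta > ideal_runtime" fires a spurious resched_task(). The early return handles the negative case explicitly. A minimal userspace sketch of that promotion follows (not kernel code; the values are invented, and the effect shown needs an LP64 target where unsigned long is 64-bit):

/*
 * Sketch of the pitfall the guard removes. Types mirror
 * check_preempt_tick(): delta is signed 64-bit, ideal_runtime
 * is unsigned long.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t delta = -1000;			/* curr is behind the leftmost task */
	unsigned long ideal_runtime = 4000000;	/* ~4ms slice, invented value */

	/* Without the guard: delta is promoted to unsigned long,
	 * so -1000 compares as a huge positive number and the
	 * preemption test fires. */
	if (delta > ideal_runtime)
		printf("spurious preempt: delta compares as %lu\n",
		       (unsigned long)delta);

	/* With the guard, the negative case returns early instead. */
	if (delta < 0)
		printf("guard taken: curr keeps running\n");

	return 0;
}
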
@@ -1362,27 +1365,27 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 		return wl;
 
 	for_each_sched_entity(se) {
-		long S, rw, s, a, b;
+		long lw, w;
 
-		S = se->my_q->tg->shares;
-		s = se->load.weight;
-		rw = se->my_q->load.weight;
+		tg = se->my_q->tg;
+		w = se->my_q->load.weight;
 
-		a = S*(rw + wl);
-		b = S*rw + s*wg;
+		/* use this cpu's instantaneous contribution */
+		lw = atomic_read(&tg->load_weight);
+		lw -= se->my_q->load_contribution;
+		lw += w + wg;
 
-		wl = s*(a-b);
+		wl += w;
 
-		if (likely(b))
-			wl /= b;
+		if (lw > 0 && wl < lw)
+			wl = (wl * tg->shares) / lw;
+		else
+			wl = tg->shares;
 
-		/*
-		 * Assume the group is already running and will
-		 * thus already be accounted for in the weight.
-		 *
-		 * That is, moving shares between CPUs, does not
-		 * alter the group weight.
-		 */
+		/* zero point is MIN_SHARES */
+		if (wl < MIN_SHARES)
+			wl = MIN_SHARES;
+		wl -= se->load.weight;
 		wg = 0;
 	}
 
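
The second hunk rewrites the per-level estimate in effective_load(). The old code worked from this cpu's runqueue alone: a = S*(rw + wl), b = S*rw + s*wg, wl = s*(a-b)/b. The new code starts from the group's global weight instead: it reads tg->load_weight, swaps out this cpu's possibly stale load_contribution for the instantaneous runqueue weight w plus the incoming weight wg, projects the group entity's new weight as tg->shares scaled by this cpu's fraction of that total (saturating at the full shares value, clamped below at MIN_SHARES), and finally reports the change by subtracting the entity's current se->load.weight. A standalone sketch of one loop iteration, with scalar parameters standing in for the scheduler structures (every name below is invented for illustration; only the arithmetic follows the hunk):

/*
 * Userspace sketch of one iteration of the new effective_load()
 * math. Scalar stand-ins replace the scheduler structures; the
 * names are hypothetical.
 */
#include <stdio.h>

#define MIN_SHARES	2	/* assumed floor for group shares, as in the kernel */

static long effective_step(long shares,		/* tg->shares */
			   long tg_load,	/* atomic_read(&tg->load_weight) */
			   long contrib,	/* se->my_q->load_contribution */
			   long w,		/* se->my_q->load.weight */
			   long se_weight,	/* se->load.weight */
			   long wl, long wg)
{
	/* Global group weight, with this cpu's stale contribution
	 * replaced by its instantaneous load plus the new weight. */
	long lw = tg_load - contrib + w + wg;

	/* This cpu's runqueue weight after the addition. */
	wl += w;

	/* New group-entity weight: shares scaled by this cpu's
	 * fraction of the group load, saturating at full shares. */
	if (lw > 0 && wl < lw)
		wl = (wl * shares) / lw;
	else
		wl = shares;

	/* Clamp at the floor, then return the weight *change*. */
	if (wl < MIN_SHARES)
		wl = MIN_SHARES;
	return wl - se_weight;
}

int main(void)
{
	long delta = effective_step(1024, 4096, 1024, 1024, 256, 1024, 1024);

	printf("group-entity weight delta on this cpu: %ld\n", delta);
	return 0;
}

With these made-up numbers (a 1024-share group spread evenly over four cpus, so a per-cpu group-entity weight of 256, gaining a nice-0 task of weight 1024 on this cpu), the sketch reports a delta of 153: the group entity here would grow from weight 256 to 409.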