author	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-20 19:37:55 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-20 19:37:55 -0500
commit	466c19063b4b426d5c362572787cb249fbf4296b (patch)
tree	214c9d9e914c80eb6b46c9ccd0cd261167393b44 /kernel/sched_fair.c
parent	67290f41b2715de0e0ae93c9285fcbe37ffc5b22 (diff)
parent	068c5cc5ac7414a8e9eb7856b4bf3cc4d4744267 (diff)
Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched, cgroup: Use exit hook to avoid use-after-free crash
  sched: Fix signed unsigned comparison in check_preempt_tick()
  sched: Replace rq->bkl_count with rq->rq_sched_info.bkl_count
  sched, autogroup: Fix CONFIG_RT_GROUP_SCHED sched_setscheduler() failure
  sched: Display autogroup names in /proc/sched_debug
  sched: Reinstate group names in /proc/sched_debug
  sched: Update effective_load() to use global share weights
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	35
1 file changed, 19 insertions(+), 16 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c62ebae65cf..77e9166d7bb 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1062,6 +1062,9 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 		struct sched_entity *se = __pick_next_entity(cfs_rq);
 		s64 delta = curr->vruntime - se->vruntime;
 
+		if (delta < 0)
+			return;
+
 		if (delta > ideal_runtime)
 			resched_task(rq_of(cfs_rq)->curr);
 	}
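The hunk above is the "sched: Fix signed unsigned comparison in check_preempt_tick()" change from the merge: delta is s64 while ideal_runtime is unsigned long, so on a 64-bit build a negative delta (the leftmost entity sits to the right of curr) is converted to a huge unsigned value and the old comparison triggered a spurious preemption; the added early return filters that case out. A minimal standalone sketch of the underlying C pitfall, with illustrative values rather than kernel code:

#include <stdio.h>

int main(void)
{
	long long delta = -5;               /* stands in for the s64 vruntime delta  */
	unsigned long ideal_runtime = 1000; /* stands in for the unsigned slice size */

	/*
	 * On a 64-bit build the usual arithmetic conversions make this an
	 * unsigned comparison, so the negative delta compares as a huge
	 * value and the branch is (wrongly) taken.
	 */
	if (delta > ideal_runtime)
		printf("spurious preempt: %lld compared as %llu\n",
		       delta, (unsigned long long)delta);

	return 0;
}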
@@ -1362,27 +1365,27 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 		return wl;
 
 	for_each_sched_entity(se) {
-		long S, rw, s, a, b;
+		long lw, w;
 
-		S = se->my_q->tg->shares;
-		s = se->load.weight;
-		rw = se->my_q->load.weight;
+		tg = se->my_q->tg;
+		w = se->my_q->load.weight;
 
-		a = S*(rw + wl);
-		b = S*rw + s*wg;
+		/* use this cpu's instantaneous contribution */
+		lw = atomic_read(&tg->load_weight);
+		lw -= se->my_q->load_contribution;
+		lw += w + wg;
 
-		wl = s*(a-b);
+		wl += w;
 
-		if (likely(b))
-			wl /= b;
+		if (lw > 0 && wl < lw)
+			wl = (wl * tg->shares) / lw;
+		else
+			wl = tg->shares;
 
-		/*
-		 * Assume the group is already running and will
-		 * thus already be accounted for in the weight.
-		 *
-		 * That is, moving shares between CPUs, does not
-		 * alter the group weight.
-		 */
+		/* zero point is MIN_SHARES */
+		if (wl < MIN_SHARES)
+			wl = MIN_SHARES;
+		wl -= se->load.weight;
 		wg = 0;
 	}
 
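The second hunk is the "sched: Update effective_load() to use global share weights" change: each level now projects the group entity's share from the global tg->load_weight (with this cpu's stale load_contribution replaced by its instantaneous queue weight plus the incoming weight), clamps at MIN_SHARES, and propagates only the difference against the current se->load.weight to the parent level. A standalone sketch of one iteration of that arithmetic, using an assumed helper name and made-up example numbers, not the kernel function itself:

#include <stdio.h>

#define MIN_SHARES	2

/* One level of the reworked effective_load() step; parameter names mirror
 * the diff above, the helper itself is purely illustrative. */
static long project_level(long shares,     /* tg->shares                    */
			  long global_lw,  /* atomic_read(&tg->load_weight) */
			  long contrib,    /* se->my_q->load_contribution   */
			  long queue_w,    /* se->my_q->load.weight         */
			  long wl,         /* weight being added so far     */
			  long wg,         /* group weight being added      */
			  long old_se_w)   /* se->load.weight               */
{
	/* use this cpu's instantaneous contribution */
	long lw = global_lw - contrib + queue_w + wg;

	wl += queue_w;

	if (lw > 0 && wl < lw)
		wl = (wl * shares) / lw;	/* projected entity share */
	else
		wl = shares;

	/* zero point is MIN_SHARES */
	if (wl < MIN_SHARES)
		wl = MIN_SHARES;

	return wl - old_se_w;			/* delta seen by the parent level */
}

int main(void)
{
	/*
	 * Assumed example: a 1024-share group spread evenly over three busy
	 * cpus (global load_weight 3072, each cpu contributing 1024, so this
	 * entity currently weighs about 1024*1024/3072 = 341), waking one
	 * more nice-0 task of weight 1024 onto this cpu.
	 */
	long delta = project_level(1024, 3072, 1024, 1024, 1024, 1024, 341);

	printf("weight delta propagated upward: %ld\n", delta);	/* prints 171 */
	return 0;
}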