author		Lin Ming <ming.m.lin@intel.com>	2009-01-15 11:17:15 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-15 15:07:57 -0500
commit		6272d68cc6a5f90c6b1a2228cf0f67b895305d17 (patch)
tree		9f7e6635d4ffb9d713d3b2be83a4cd4bd4749080
parent		e17036dac189dd034c092a91df56aa740db7146d (diff)
sched: sched_slice() fixlet
Mike's change 0a582440f ("sched: fix sched_slice()") broke group
scheduling by forgetting to reload cfs_rq on each loop iteration.

This patch fixes the aim7 regression, and the specjbb2005 regression
drops below 1.5% on an 8-core Stokley machine.

Signed-off-by: Lin Ming <ming.m.lin@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Jayson King <dev@jaysonking.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	kernel/sched_fair.c	5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 16b419bb8b0a..5cc1c162044f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -429,7 +429,10 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
 
 	for_each_sched_entity(se) {
-		struct load_weight *load = &cfs_rq->load;
+		struct load_weight *load;
+
+		cfs_rq = cfs_rq_of(se);
+		load = &cfs_rq->load;
 
 		if (unlikely(!se->on_rq)) {
 			struct load_weight lw = cfs_rq->load;
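
For illustration, here is a minimal user-space sketch of the bug this patch
fixes. It is not kernel code: struct cfs_rq, struct sched_entity, the
my_rq/parent fields, and the two slice_*() functions are simplified stand-ins
invented for this sketch, and the arithmetic only approximates the real slice
calculation. Only the looping pattern mirrors sched_slice(): the buggy variant
keeps dividing by the load of the innermost cfs_rq at every level of the group
hierarchy, while the fixed variant reloads the entity's own runqueue on each
iteration, as the hunk above does with cfs_rq = cfs_rq_of(se).

#include <stdio.h>

struct cfs_rq {
	unsigned long load;          /* total load queued on this rq */
};

struct sched_entity {
	struct cfs_rq *my_rq;        /* rq this entity is queued on (cfs_rq_of analogue) */
	struct sched_entity *parent; /* NULL at the top of the hierarchy */
	unsigned long weight;
};

/* Buggy: every level divides by the load of the *initial* cfs_rq. */
static unsigned long slice_buggy(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	unsigned long slice = 1000;

	for (; se; se = se->parent)
		slice = slice * se->weight / cfs_rq->load; /* cfs_rq never reloaded */

	return slice;
}

/* Fixed: reload the entity's own rq on each level, as the patch does. */
static unsigned long slice_fixed(struct sched_entity *se)
{
	unsigned long slice = 1000;

	for (; se; se = se->parent)
		slice = slice * se->weight / se->my_rq->load;

	return slice;
}

int main(void)
{
	struct cfs_rq root_rq  = { .load = 4 };
	struct cfs_rq group_rq = { .load = 2 };

	/* One task inside one group: task_se sits on group_rq, group_se on root_rq. */
	struct sched_entity group_se = { &root_rq,  NULL,      2 };
	struct sched_entity task_se  = { &group_rq, &group_se, 1 };

	printf("buggy: %lu\n", slice_buggy(&group_rq, &task_se)); /* 500: group level wrongly uses group_rq */
	printf("fixed: %lu\n", slice_fixed(&task_se));            /* 250: group level uses root_rq */
	return 0;
}

In the buggy version the group-level step divides by the group's own load
instead of the root runqueue's, so a grouped task's slice is computed as if
the group competed only against its own children, which is what broke group
scheduling and the aim7 numbers.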