path: root/kernel/sched_rt.c
author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-02-25 11:34:02 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-03-04 11:54:06 -0500
commit	62fb185130e4d420f71a30ff59d8b16b74ef5d2b (patch)
tree	474c0824a5bf90950b0a430a11a52b358c9e1f31 /kernel/sched_rt.c
parent	976dde010e513a9c7c3117a32b7b015f84b37430 (diff)
sched: revert load_balance_monitor() changes
The following commits cause a number of regressions:

  commit 58e2d4ca581167c2a079f4ee02be2f0bc52e8729
  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
  Date:   Fri Jan 25 21:08:00 2008 +0100

      sched: group scheduling, change how cpu load is calculated

  commit 6b2d7700266b9402e12824e11e0099ae6a4a6a79
  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
  Date:   Fri Jan 25 21:08:00 2008 +0100

      sched: group scheduler, fix fairness of cpu bandwidth allocation for task groups

Namely:
 - very frequent wakeups on SMP, reported by PowerTop users.
 - cacheline thrashing on (large) SMP
 - some latencies larger than 500ms

While there is a mergeable patch to fix the latter, the former issues
are not fixable in a manner suitable for .25 (we're at -rc3 now).

Hence we revert them and try again in v2.6.26.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
CC: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Tested-by: Alexey Zaytsev <alexey.zaytsev@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
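For reference, inc_cpu_load() and dec_cpu_load(), whose RT-path call sites this revert removes (see the diff below), are small helpers in kernel/sched.c that fold a task's load weight into the per-runqueue load sum. A minimal sketch, assuming the 2.6.25-era helper names and the rq->load field; this is not quoted from this page:

/*
 * Sketch of the helpers whose call sites are removed below
 * (assumed from the 2.6.25-era kernel/sched.c).
 */
static void inc_cpu_load(struct rq *rq, unsigned long load)
{
	update_load_add(&rq->load, load);	/* rq->load.weight += load */
}

static void dec_cpu_load(struct rq *rq, unsigned long load)
{
	update_load_sub(&rq->load, load);	/* rq->load.weight -= load */
}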
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c	4
1 file changed, 0 insertions, 4 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index f54792b175b2..76e828517541 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -393,8 +393,6 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 	 */
 	for_each_sched_rt_entity(rt_se)
 		enqueue_rt_entity(rt_se);
-
-	inc_cpu_load(rq, p->se.load.weight);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
@@ -414,8 +412,6 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 		if (rt_rq && rt_rq->rt_nr_running)
 			enqueue_rt_entity(rt_se);
 	}
-
-	dec_cpu_load(rq, p->se.load.weight);
 }
 
 /*
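The rq->load sum those helpers maintain is what the SMP load balancer samples when comparing CPUs; the full revert (of which this page shows only the kernel/sched_rt.c portion) moves that accounting out of the per-class enqueue/dequeue paths rather than dropping it. A minimal sketch of the consumer side, assuming the 2.6.25-era weighted_cpuload() in kernel/sched.c; the exact body is an assumption, not taken from this page:

/*
 * Sketch: how the load balancer samples a CPU's load in this era.
 * rq->load.weight is the sum kept up to date by update_load_add()/
 * update_load_sub() -- the accounting this revert relocates.
 */
static unsigned long weighted_cpuload(const int cpu)
{
	return cpu_rq(cpu)->load.weight;
}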