path: root/kernel/sched_rt.c
author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-09-20 17:38:02 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-09-22 10:28:32 -0400
commit		15afe09bf496ae10c989e1a375a6b5da7bd3e16e (patch)
tree		4565659d1084e357eea42e6321a4d304ac950faa	/kernel/sched_rt.c
parent		09b22a2f678ae733801b888c44756d0abd686b8a (diff)
sched: wakeup preempt when small overlap
Lin Ming reported a 10% OLTP regression against 2.6.27-rc4.

The difference seems to come from different preemption aggressiveness, which affects the cache footprint of the workload and its effective cache thrashing.

Aggressively preempt a task if its avg overlap is very small; this should avoid the task going to sleep and then finding it still running when we schedule back to it - saving a wakeup.

Reported-by: Lin Ming <ming.m.lin@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
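The kernel/sched_rt.c hunk below is only the signature side of this change; the overlap heuristic itself lives in the fair class. As a rough illustration of the decision the commit message describes, here is a small self-contained userspace model - a sketch, not the kernel patch: the threshold value, type, and function names are hypothetical, while the real check compares avg_overlap against sysctl_sched_migration_cost in kernel/sched_fair.c.

/* Illustrative userspace model (not kernel code) of the "preempt when the
 * average overlap is small" wakeup heuristic described in the commit
 * message.  Names and threshold are hypothetical. */
#include <stdbool.h>
#include <stdio.h>

struct task_model {
	unsigned long long avg_overlap_ns;	/* how long waker and wakee tend to run concurrently */
};

/* Stand-in for sysctl_sched_migration_cost (~0.5ms in that era). */
static const unsigned long long overlap_threshold_ns = 500000ULL;

/*
 * Preempt on a synchronous wakeup, or when both the current task and the
 * woken task historically overlap only briefly: the current task would
 * likely go to sleep right away anyway, so preempting now saves a later
 * wakeup.
 */
static bool wakeup_preempt_overlap(const struct task_model *curr,
				   const struct task_model *wakee, bool sync)
{
	if (sync)
		return true;
	return curr->avg_overlap_ns < overlap_threshold_ns &&
	       wakee->avg_overlap_ns < overlap_threshold_ns;
}

int main(void)
{
	struct task_model curr  = { .avg_overlap_ns = 120000 };
	struct task_model wakee = { .avg_overlap_ns =  80000 };

	printf("preempt current task: %s\n",
	       wakeup_preempt_overlap(&curr, &wakee, false) ? "yes" : "no");
	return 0;
}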
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c	2
1 file changed, 1 insertion, 1 deletion
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 552310798dad..6d2d0a5d030b 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -783,7 +783,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 /*
  * Preempt the current task with a newly woken task if needed:
  */
-static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
+static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync)
 {
 	if (p->prio < rq->curr->prio) {
 		resched_task(rq->curr);
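The hunk above only widens the RT hook's signature: the RT class still preempts purely on priority and ignores the new flag. The parameter exists because every scheduling class shares the same check_preempt_curr hook, which the core scheduler now calls with the wakeup's sync hint. A minimal sketch of that shared hook, assuming the sched_class layout of that era (simplified, other members omitted):

/* Sketch (assumed, simplified): the per-class preemption hook after this
 * commit.  The core scheduler invokes it with the wakeup's sync hint, and
 * each class decides whether the woken task p should preempt rq->curr;
 * the fair class consults the hint for its overlap heuristic, the RT
 * class does not. */
struct rq;
struct task_struct;

struct sched_class {
	void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int sync);
	/* ...other hooks omitted... */
};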