author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-09-20 17:38:02 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-09-22 10:28:32 -0400
commit	15afe09bf496ae10c989e1a375a6b5da7bd3e16e (patch)
tree	4565659d1084e357eea42e6321a4d304ac950faa /include/linux
parent	09b22a2f678ae733801b888c44756d0abd686b8a (diff)
sched: wakeup preempt when small overlap
Lin Ming reported a 10% OLTP regression against 2.6.27-rc4.

The difference seems to come from different preemption aggressiveness, which affects the cache footprint of the workload and its effective cache thrashing.

Aggressively preempt a task if its avg overlap is very small; this should avoid the task going to sleep and us finding it still running when we schedule back to it - saving a wakeup.

Reported-by: Lin Ming <ming.m.lin@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
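The heuristic itself lives in kernel/sched_fair.c and kernel/sched.c, outside this include/linux-limited view. As a rough model of the decision the message describes: on a synchronous wakeup, preempt the running task when both waker and wakee have a small average overlap. The standalone C sketch below illustrates that reading; the field name avg_overlap and the 0.5 ms threshold (the 2.6.27-era sysctl_sched_migration_cost default) are assumptions drawn from the scheduler of that period, not quoted from this patch.

/* Sketch of the wakeup-preemption heuristic described above.
 * Standalone model, not the kernel patch body: the avg_overlap field
 * and the migration-cost threshold follow the 2.6.27-era fair
 * scheduler, but the types here are stubs. */
#include <stdbool.h>
#include <stdio.h>

struct task {
	const char *comm;
	unsigned long long avg_overlap_ns; /* avg runtime overlap with its wakee/waker */
};

/* Assumed 2.6.27 default for sysctl_sched_migration_cost: 0.5 ms. */
static const unsigned long long sched_migration_cost_ns = 500000ULL;

/* Preempt the current task for a synchronously woken one when both
 * tasks historically run for less than the migration cost: the woken
 * task would otherwise go back to sleep and need another wakeup. */
static bool should_preempt_on_sync_wakeup(const struct task *curr,
					  const struct task *wakee,
					  int sync)
{
	return sync &&
	       curr->avg_overlap_ns < sched_migration_cost_ns &&
	       wakee->avg_overlap_ns < sched_migration_cost_ns;
}

int main(void)
{
	struct task server = { "oltp-server", 100000ULL }; /* 0.10 ms */
	struct task client = { "oltp-client", 150000ULL }; /* 0.15 ms */

	printf("preempt on sync wakeup: %s\n",
	       should_preempt_on_sync_wakeup(&server, &client, 1) ? "yes" : "no");
	return 0;
}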
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/sched.h	| 2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b3b7a8f32477..d8e699b55858 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -897,7 +897,7 @@ struct sched_class {
 	void (*yield_task) (struct rq *rq);
 	int (*select_task_rq)(struct task_struct *p, int sync);
 
-	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
+	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
 
 	struct task_struct * (*pick_next_task) (struct rq *rq);
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
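The hook's new int sync argument must be supplied by every call site. Outside the include/linux slice shown here, the dispatch wrapper in kernel/sched.c would look roughly like the sketch below, reconstructed from the new signature rather than quoted from the patch:

static inline void check_preempt_curr(struct rq *rq, struct task_struct *p,
				      int sync)
{
	/* Forward the sync-wakeup hint to the running task's scheduling
	 * class so that, e.g., CFS can preempt aggressively when the
	 * waker/wakee average overlap is small. */
	rq->curr->sched_class->check_preempt_curr(rq, p, sync);
}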