author	Mike Galbraith <efault@gmx.de>	2008-10-17 09:33:21 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-10-17 09:36:58 -0400
commit	b0aa51b999c449e5e3f9faa1ee406e052d407fe7 (patch)
tree	c3c21be0aa90658469ef039065b260172ab9b974 /kernel
parent	b968905292eaa52b25abb7b3e6c0841dac9f03ae (diff)
sched: minor fast-path overhead reduction
Greetings,

103638d added a bit of avoidable overhead to the fast-path.

Use sysctl_sched_min_granularity instead of sched_slice() to restrict
buddy wakeups.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
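[Editor's note: the message's reasoning -- that sched_slice() costs more per
wakeup than reading a sysctl -- can be illustrated with a small userspace
sketch. The bodies below are simplified assumptions, not the kernel's
implementation: the real sched_slice() goes through __sched_period() and
calc_delta_mine() with proper load weights, but the shape of the cost
(a 64-bit multiply and divide per call versus a plain global load) is the
point of the patch.]

/*
 * Userspace sketch, NOT kernel code.  Names mirror the kernel's, but
 * the struct layouts and sched_slice() body are simplified assumptions.
 */
#include <stdint.h>
#include <stdio.h>

struct sched_entity { unsigned long weight; };
struct cfs_rq       { unsigned long load; unsigned int nr_running; };

static const uint64_t sysctl_sched_latency         = 20000000ULL; /* 20 ms */
static const uint64_t sysctl_sched_min_granularity =  4000000ULL; /*  4 ms */

/*
 * Rough shape of sched_slice(): a latency period shared out in
 * proportion to the entity's weight.  Note the 64-bit multiply and
 * divide executed on every call -- this is the per-wakeup overhead
 * the patch removes from the fast-path.
 */
static uint64_t sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	uint64_t period = sysctl_sched_latency * cfs_rq->nr_running;

	return period * se->weight / cfs_rq->load;
}

int main(void)
{
	struct cfs_rq rq = { .load = 3072, .nr_running = 3 };
	struct sched_entity se = { .weight = 1024 };

	/* Old cutoff: computed per wakeup. */
	printf("sched_slice cutoff:     %llu ns\n",
	       (unsigned long long)sched_slice(&rq, &se));

	/* New cutoff: a single global read. */
	printf("min_granularity cutoff: %llu ns\n",
	       (unsigned long long)sysctl_sched_min_granularity);
	return 0;
}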
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched_fair.c	2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 18fd17172eb6..67084936b602 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -747,7 +747,7 @@ pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	struct rq *rq = rq_of(cfs_rq);
 	u64 pair_slice = rq->clock - cfs_rq->pair_start;
 
-	if (!cfs_rq->next || pair_slice > sched_slice(cfs_rq, cfs_rq->next)) {
+	if (!cfs_rq->next || pair_slice > sysctl_sched_min_granularity) {
 		cfs_rq->pair_start = rq->clock;
 		return se;
 	}