author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-09-20 17:38:02 -0400
committer	Ingo Molnar <mingo@elte.hu>			2008-09-22 10:28:32 -0400
commit		15afe09bf496ae10c989e1a375a6b5da7bd3e16e
tree		4565659d1084e357eea42e6321a4d304ac950faa	/kernel/sched_fair.c
parent		09b22a2f678ae733801b888c44756d0abd686b8a
sched: wakeup preempt when small overlap
Lin Ming reported a 10% OLTP regression against 2.6.27-rc4.
The difference seems to come from different preemption aggressiveness,
which affects the cache footprint of the workload and its effective
cache thrashing.
Aggressively preempt a task if its avg overlap is very small; this
should avoid the task going to sleep and finding it still running when
we schedule back to it, saving a wakeup.
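
For context, se->avg_overlap is a per-entity running estimate of how
long the task tends to keep executing after waking another task. A
minimal sketch of that style of estimator, assuming a 1/8-weight
exponential moving average; the helper name, types, and shift below
are illustrative, not the kernel source:

#include <stdint.h>

/*
 * Illustrative sketch: pull a running average towards a new sample
 * with weight 1/8, the general style used for avg_overlap tracking.
 */
static void update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = (int64_t)(sample - *avg);	/* signed delta */

	*avg += diff >> 3;				/* avg += diff / 8 */
}

With an estimate like this in hand, the check added below fires only
when both the waking and the woken task report an overlap shorter than
sysctl_sched_migration_cost (0.5 ms by default in kernels of this
vintage).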
Reported-by: Lin Ming <ming.m.lin@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
 kernel/sched_fair.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a10ac0bcee6..7328383690f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1331,7 +1331,7 @@ static inline int depth_se(struct sched_entity *se)
 /*
  * Preempt the current task with a newly woken task if needed:
  */
-static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
+static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 {
 	struct task_struct *curr = rq->curr;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
@@ -1367,6 +1367,13 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
+	if (sched_feat(WAKEUP_OVERLAP) && sync &&
+	    se->avg_overlap < sysctl_sched_migration_cost &&
+	    pse->avg_overlap < sysctl_sched_migration_cost) {
+		resched_task(curr);
+		return;
+	}
+
 	/*
 	 * preemption test can be made between sibling entities who are in the
 	 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
@@ -1649,7 +1656,7 @@ static void prio_changed_fair(struct rq *rq, struct task_struct *p,
 		if (p->prio > oldprio)
 			resched_task(rq->curr);
 	} else
-		check_preempt_curr(rq, p);
+		check_preempt_curr(rq, p, 0);
 }
 
 /*
@@ -1666,7 +1673,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p,
 	if (running)
 		resched_task(rq->curr);
 	else
-		check_preempt_curr(rq, p);
+		check_preempt_curr(rq, p, 0);
 }
 
 /* Account for a task changing its policy or group.
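
Note that the two call sites converted above, prio_changed_fair() and
switched_to_fair(), pass 0 for the new sync argument: they are not
wakeup paths, so the WAKEUP_OVERLAP shortcut can never fire there and
their preemption behaviour is unchanged.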