author    Mike Galbraith <efault@gmx.de>  2010-03-11 11:15:51 -0500
committer Ingo Molnar <mingo@elte.hu>     2010-03-11 12:32:50 -0500
commit    e12f31d3e5d36328c7fbd0fce40a95e70b59152c (patch)
tree      3eaee7fede5ba830395d2e527fdfe60f1aba73f4 /kernel/sched_fair.c
parent    b42e0c41a422a212ddea0666d5a3a0e3c35206db (diff)
sched: Remove avg_overlap
Both avg_overlap and avg_wakeup had an inherent problem in that their accuracy was detrimentally affected by cross-cpu wakeups; this is because we are missing the necessary call to update_curr(). This can't be fixed without increasing overhead in our already too fat fastpath.

Additionally, with recent load balancing changes making us prefer to place tasks in an idle cache domain (which is good for compute bound loads), communicating tasks suffer when a sync wakeup, which would enable affine placement, is turned into a non-sync wakeup by SYNC_LESS. With one task on the runqueue, wake_affine() rejects the affine wakeup request, leaving the unfortunate task where it was placed, taking frequent cache misses.

Remove it, and recover some fastpath cycles.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1268301121.6785.30.camel@marge.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
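For context, avg_overlap tracked a decaying average of how long a task ran while overlapping with its waker, and the average only stayed meaningful while fresh samples kept arriving; as the changelog notes, cross-cpu wakeups miss the update_curr() call that would provide those samples. The user-space sketch below only illustrates that failure mode: the 1/8 decay and the clamp to twice the migration cost are modeled on the era's update_avg() usage, and the names here (migration_cost_ns, update_avg, the sample values) are illustrative, not the kernel's API.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for sysctl_sched_migration_cost, in nanoseconds. */
static const uint64_t migration_cost_ns = 500000;

/*
 * Decaying average in the spirit of the old update_avg() helper:
 * avg moves 1/8th of the way toward each new sample.
 */
static void update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = (int64_t)sample - (int64_t)*avg;

	*avg += diff / 8;
}

int main(void)
{
	uint64_t avg_overlap = 0;
	uint64_t runtime = 100000;	/* ~100us of overlap per run */

	/* Same-cpu wakeups: each run feeds the average a fresh sample. */
	for (int i = 0; i < 8; i++) {
		uint64_t sample = runtime;

		if (sample > 2 * migration_cost_ns)	/* clamp large samples */
			sample = 2 * migration_cost_ns;
		update_avg(&avg_overlap, sample);
	}
	printf("after sampled wakeups:  %llu ns\n",
	       (unsigned long long)avg_overlap);

	/*
	 * Cross-cpu wakeups: with no update_curr() call, no sample is taken
	 * at all, so the average stops tracking the task's real behaviour
	 * and any heuristic built on it (SYNC_LESS, SYNC_MORE,
	 * WAKEUP_OVERLAP) decides on stale data.
	 */
	printf("after unsampled phase:  %llu ns (unchanged, stale)\n",
	       (unsigned long long)avg_overlap);
	return 0;
}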
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c | 18 ------------------
1 file changed, 0 insertions(+), 18 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 6fc62854422c..c3b69d4b5d65 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1241,7 +1241,6 @@ static inline unsigned long effective_load(struct task_group *tg, int cpu,
 
 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 {
-	struct task_struct *curr = current;
 	unsigned long this_load, load;
 	int idx, this_cpu, prev_cpu;
 	unsigned long tl_per_task;
@@ -1256,18 +1255,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	load = source_load(prev_cpu, idx);
 	this_load = target_load(this_cpu, idx);
 
-	if (sync) {
-		if (sched_feat(SYNC_LESS) &&
-		    (curr->se.avg_overlap > sysctl_sched_migration_cost ||
-		     p->se.avg_overlap > sysctl_sched_migration_cost))
-			sync = 0;
-	} else {
-		if (sched_feat(SYNC_MORE) &&
-		    (curr->se.avg_overlap < sysctl_sched_migration_cost &&
-		     p->se.avg_overlap < sysctl_sched_migration_cost))
-			sync = 1;
-	}
-
 	/*
 	 * If sync wakeup then subtract the (maximum possible)
 	 * effect of the currently running task from the load
@@ -1711,11 +1698,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	if (sched_feat(WAKEUP_SYNC) && sync)
 		goto preempt;
 
-	if (sched_feat(WAKEUP_OVERLAP) &&
-	    se->avg_overlap < sysctl_sched_migration_cost &&
-	    pse->avg_overlap < sysctl_sched_migration_cost)
-		goto preempt;
-
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
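The hunks above gate the removed heuristics on sched_feat() bits (SYNC_LESS, SYNC_MORE, WAKEUP_OVERLAP). As a rough, self-contained illustration of that pattern only -- the real definitions live in kernel/sched_features.h and are not reproduced here -- a runtime-togglable feature mask tested with a bit check can be modeled like this:

#include <stdio.h>

/* Illustrative feature bits; not the kernel's actual feature list. */
enum {
	__FEAT_SYNC_LESS,
	__FEAT_SYNC_MORE,
	__FEAT_WAKEUP_OVERLAP,
};

/* A runtime-togglable mask, in the spirit of sysctl_sched_features. */
static unsigned long feature_mask =
	(1UL << __FEAT_SYNC_LESS) | (1UL << __FEAT_WAKEUP_OVERLAP);

/* feat(SYNC_LESS) expands to a test of the matching bit. */
#define feat(x)	(!!(feature_mask & (1UL << __FEAT_##x)))

int main(void)
{
	printf("SYNC_LESS enabled:      %d\n", feat(SYNC_LESS));
	printf("SYNC_MORE enabled:      %d\n", feat(SYNC_MORE));
	printf("WAKEUP_OVERLAP enabled: %d\n", feat(WAKEUP_OVERLAP));
	return 0;
}

These bit checks are cheap, but the avg_overlap comparisons they guarded were the heuristics the changelog describes as no longer worth their cost, which is why the patch drops the users rather than tuning the flags.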