author    Peter Zijlstra <peterz@infradead.org>  2008-10-08 03:16:04 -0400
committer Ingo Molnar <mingo@elte.hu>            2008-10-08 06:20:26 -0400
commit    2fb7635c4cea310992a39580133099dd99ad151c (patch)
tree      e898c690d05c3dfeaa733fb3472eff400669be3a /kernel/sched_fair.c
parent    990d0f2ced23052abc7efa09bd05bff34e00cf73 (diff)
sched: sync wakeups vs avg_overlap
While looking at the code I wondered why we always do:

    sync && avg_overlap < migration_cost

which is a bit odd, since the overlap test was meant to detect sync
wakeups, so using it to specialize already-flagged sync wakeups doesn't
make much sense. Hence change the code to do:

    sync || avg_overlap < migration_cost

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
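[Annotation] A minimal standalone sketch of the wake_affine() side of the change. The stub types and the promote_to_sync() helper name are illustrative assumptions (in the kernel this logic is inlined in wake_affine(), additionally gated on sched_feat(SYNC_WAKEUPS)); the 0.5 ms value is the customary default for sysctl_sched_migration_cost of that era, not taken from this page:

#include <stdio.h>

/* Stub types; the real ones live in the kernel headers. */
struct sched_entity { unsigned long avg_overlap; };
struct task_struct  { struct sched_entity se; };

/* 0.5 ms in ns: the usual default for sysctl_sched_migration_cost
 * (assumption for this sketch). */
static unsigned long sysctl_sched_migration_cost = 500000UL;

/* After the patch, a wakeup that was not flagged sync is promoted to
 * sync when both tasks historically run for less than the migration
 * cost before sleeping: the overlap heuristic now *detects* sync
 * wakeups rather than narrowing already-flagged ones. This sketch
 * omits the sched_feat(SYNC_WAKEUPS) gate from the real code. */
static int promote_to_sync(int sync, const struct task_struct *curr,
                           const struct task_struct *p)
{
	if (!sync &&
	    curr->se.avg_overlap < sysctl_sched_migration_cost &&
	    p->se.avg_overlap < sysctl_sched_migration_cost)
		sync = 1;
	return sync;
}

int main(void)
{
	struct task_struct curr = { .se = { .avg_overlap = 100000 } };
	struct task_struct p    = { .se = { .avg_overlap = 200000 } };

	/* A pipe-style wakeup without the sync flag now counts as sync. */
	printf("promoted: %d\n", promote_to_sync(0, &curr, &p));
	return 0;
}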
Diffstat (limited to 'kernel/sched_fair.c')
 kernel/sched_fair.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index fcbe850a5a90..18fd17172eb6 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1103,6 +1103,11 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
 	if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
 		return 0;
 
+	if (!sync && sched_feat(SYNC_WAKEUPS) &&
+	    curr->se.avg_overlap < sysctl_sched_migration_cost &&
+	    p->se.avg_overlap < sysctl_sched_migration_cost)
+		sync = 1;
+
 	/*
 	 * If sync wakeup then subtract the (maximum possible)
 	 * effect of the currently running task from the load
@@ -1127,11 +1132,8 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
 	 * a reasonable amount of time then attract this newly
 	 * woken task:
 	 */
-	if (sync && balanced) {
-		if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
-		    p->se.avg_overlap < sysctl_sched_migration_cost)
-			return 1;
-	}
+	if (sync && balanced)
+		return 1;
 
 	schedstat_inc(p, se.nr_wakeups_affine_attempts);
 	tl_per_task = cpu_avg_load_per_task(this_cpu);
@@ -1268,9 +1270,9 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
-	if (sched_feat(WAKEUP_OVERLAP) && sync &&
-	    se->avg_overlap < sysctl_sched_migration_cost &&
-	    pse->avg_overlap < sysctl_sched_migration_cost) {
+	if (sched_feat(WAKEUP_OVERLAP) && (sync ||
+	    (se->avg_overlap < sysctl_sched_migration_cost &&
+	     pse->avg_overlap < sysctl_sched_migration_cost))) {
 		resched_task(curr);
 		return;
 	}
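[Annotation] For the check_preempt_wakeup() hunk, the effect of swapping && for || is easiest to see as a truth table. A small illustrative program (hypothetical, not kernel code; "small_overlap" stands for both avg_overlap comparisons passing):

#include <stdio.h>

/* Before the patch: the overlap test narrowed flagged-sync wakeups. */
static int old_pred(int sync, int small_overlap)
{
	return sync && small_overlap;
}

/* After the patch: short overlap is itself treated as a sync wakeup. */
static int new_pred(int sync, int small_overlap)
{
	return sync || small_overlap;
}

int main(void)
{
	for (int sync = 0; sync <= 1; sync++)
		for (int small = 0; small <= 1; small++)
			printf("sync=%d small_overlap=%d  old=%d new=%d\n",
			       sync, small,
			       old_pred(sync, small), new_pred(sync, small));
	return 0;
}

The output shows the old predicate firing only for sync=1 with small overlap, while the new one also preempts for unflagged wakeups whose overlap history looks synchronous, matching the intent described in the commit message.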