Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c		10 ----------
-rw-r--r--	kernel/sched_fair.c	11 +++++++++--
2 files changed, 9 insertions(+), 12 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index e1fc67d0674c..f11c02b86c73 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2266,16 +2266,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 	if (!sched_feat(SYNC_WAKEUPS))
 		sync = 0;
 
-	if (!sync) {
-		if (current->se.avg_overlap < sysctl_sched_migration_cost &&
-		    p->se.avg_overlap < sysctl_sched_migration_cost)
-			sync = 1;
-	} else {
-		if (current->se.avg_overlap >= sysctl_sched_migration_cost ||
-		    p->se.avg_overlap >= sysctl_sched_migration_cost)
-			sync = 0;
-	}
-
 #ifdef CONFIG_SMP
 	if (sched_feat(LB_WAKEUP_UPDATE)) {
 		struct sched_domain *sd;
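
The hunk above removes the blanket overlap heuristic from try_to_wake_up(); the sched_fair.c hunks below reintroduce the same signal at its two consumers, wake_affine() and check_preempt_wakeup(). For context, se.avg_overlap is a decaying average of how long waker and wakee tend to run overlapped, and sysctl_sched_migration_cost acts as the "tightly coupled" threshold. A minimal userspace model of the removed logic, with flat parameters standing in for the kernel fields (an illustrative sketch, not kernel code):

#include <stdbool.h>

/*
 * Model of the heuristic deleted above.  "waker"/"wakee" stand in for
 * current->se.avg_overlap and p->se.avg_overlap, "threshold" for
 * sysctl_sched_migration_cost.  Note that both branches collapse to
 * the same test, so the incoming hint was effectively ignored, which
 * is part of why the check moves into the individual consumers below.
 */
static bool old_sync_hint(unsigned long waker, unsigned long wakee,
			  unsigned long threshold, bool sync)
{
	if (!sync)	/* promote when both overlaps are short */
		return waker < threshold && wakee < threshold;
	/* demote when either overlap is long */
	return !(waker >= threshold || wakee >= threshold);
}
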
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a7e50ba185ac..0566f2a03c42 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1191,15 +1191,20 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
 		  int idx, unsigned long load, unsigned long this_load,
 		  unsigned int imbalance)
 {
+	struct task_struct *curr = this_rq->curr;
+	struct task_group *tg;
 	unsigned long tl = this_load;
 	unsigned long tl_per_task;
-	struct task_group *tg;
 	unsigned long weight;
 	int balanced;
 
 	if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
 		return 0;
 
+	if (sync && (curr->se.avg_overlap > sysctl_sched_migration_cost ||
+			p->se.avg_overlap > sysctl_sched_migration_cost))
+		sync = 0;
+
 	/*
 	 * If sync wakeup then subtract the (maximum possible)
 	 * effect of the currently running task from the load
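
In wake_affine() the hint is only ever demoted: a sync wakeup whose waker or wakee shows a long average overlap is treated as non-sync before the load math below subtracts the waker's contribution. A simplified model of that discount (the real code routes the adjustment through effective_load() and task groups; the flat parameters are assumptions for the sketch):

#include <stdbool.h>

/*
 * Sketch of the sync load discount guarded by the demotion above.
 * "tl" models this CPU's load and "waker_weight" the waker's load
 * contribution; a sync waker is about to sleep, so its weight is
 * subtracted before the two CPUs' loads are compared.
 */
static unsigned long sync_adjusted_load(unsigned long tl,
					unsigned long waker_weight,
					bool sync)
{
	if (sync && tl >= waker_weight)
		tl -= waker_weight;
	return tl;
}
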
@@ -1426,7 +1431,9 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
-	if (sched_feat(WAKEUP_OVERLAP) && sync) {
+	if (sched_feat(WAKEUP_OVERLAP) && (sync ||
+			(se->avg_overlap < sysctl_sched_migration_cost &&
+			 pse->avg_overlap < sysctl_sched_migration_cost))) {
 		resched_task(curr);
 		return;
 	}
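
On the preemption side the check widens instead: behind the WAKEUP_OVERLAP feature gate, the current task is now rescheduled either on an explicit sync wakeup or whenever both the current and the waking entity show a short average overlap. A model of the resulting predicate, with se/pse flattened into plain parameters (names mirror the kernel's, the shape is a simplification):

#include <stdbool.h>

/*
 * Model of the new preemption test: preempt on an explicit sync hint,
 * or when both tasks look tightly coupled, i.e. both average overlaps
 * sit below the migration-cost threshold.
 */
static bool wakeup_overlap_preempts(bool sync, unsigned long se_overlap,
				    unsigned long pse_overlap,
				    unsigned long threshold)
{
	return sync || (se_overlap < threshold && pse_overlap < threshold);
}
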