-rw-r--r--	kernel/sched/fair.c	30
1 files changed, 6 insertions, 24 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 01856a8bcd4c..391eaf25a2aa 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4285,7 +4285,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 {
 	s64 this_load, load;
 	int idx, this_cpu, prev_cpu;
-	unsigned long tl_per_task;
 	struct task_group *tg;
 	unsigned long weight;
 	int balanced;
@@ -4343,32 +4342,15 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 		balanced = this_eff_load <= prev_eff_load;
 	} else
 		balanced = true;
-
-	/*
-	 * If the currently running task will sleep within
-	 * a reasonable amount of time then attract this newly
-	 * woken task:
-	 */
-	if (sync && balanced)
-		return 1;
-
 	schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
-	tl_per_task = cpu_avg_load_per_task(this_cpu);
 
-	if (balanced ||
-	    (this_load <= load &&
-	     this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
-		/*
-		 * This domain has SD_WAKE_AFFINE and
-		 * p is cache cold in this domain, and
-		 * there is no bad imbalance.
-		 */
-		schedstat_inc(sd, ttwu_move_affine);
-		schedstat_inc(p, se.statistics.nr_wakeups_affine);
+	if (!balanced)
+		return 0;
 
-		return 1;
-	}
-	return 0;
+	schedstat_inc(sd, ttwu_move_affine);
+	schedstat_inc(p, se.statistics.nr_wakeups_affine);
+
+	return 1;
 }
 
 /*
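Net effect, as a reading aid for the diff above: wake_affine() no longer takes the sync-wakeup shortcut and no longer compares against cpu_avg_load_per_task() (hence the tl_per_task removal in the first hunk); it now returns 1 exactly when the effective-load comparison left 'balanced' true. The standalone C sketch below paraphrases the post-patch tail of the function under those assumptions; the helper name wake_affine_after_patch() and the plain counters are illustrative stand-ins, not kernel code, which uses schedstat_inc() on per-task and per-domain statistics.

/*
 * Reading aid (not part of the commit): a standalone sketch of the
 * decision wake_affine() makes after this patch.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long nr_wakeups_affine_attempts;	/* stand-in for the per-task schedstat */
static unsigned long nr_wakeups_affine;		/* stand-in for the per-task schedstat */

/* 'balanced' is the outcome of the effective-load comparison above. */
static int wake_affine_after_patch(bool balanced)
{
	nr_wakeups_affine_attempts++;

	if (!balanced)
		return 0;	/* leave the task on its previous CPU */

	nr_wakeups_affine++;
	return 1;		/* pull the task to the waking CPU */
}

int main(void)
{
	printf("balanced   -> %d\n", wake_affine_after_patch(true));
	printf("unbalanced -> %d\n", wake_affine_after_patch(false));
	return 0;
}

The early "return 0" mirrors the patch's restructuring: bail out as soon as the placement is known to be unbalanced, and bump the affine-move statistics only on the success path.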