aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorVincent Guittot <vincent.guittot@linaro.org>2014-08-26 07:06:45 -0400
committerIngo Molnar <mingo@kernel.org>2014-09-19 06:35:25 -0400
commit05bfb65f52cbdabe26ebb629959416a6cffb034d (patch)
treefff575820fd06b3a17aa3769ad98d7515069b2e6 /kernel
parentafdeee0510db918b31bb4aba47452df2ddbdbcf2 (diff)
sched: Remove a wake_affine() condition
In wake_affine() I have tried to understand the meaning of the condition: (this_load <= load && this_load + target_load(prev_cpu, idx) <= tl_per_task) but I failed to find a use case that can take advantage of it and I haven't found clear description in the previous commit's log. Futhermore, the comment of the condition refers to the task_hot function that was used before being replaced by the current condition: /* * This domain has SD_WAKE_AFFINE and * p is cache cold in this domain, and * there is no bad imbalance. */ If we look more deeply the below condition: this_load + target_load(prev_cpu, idx) <= tl_per_task When sync is clear, we have: tl_per_task = runnable_load_avg / nr_running this_load = max(runnable_load_avg, cpuload[idx]) target_load = max(runnable_load_avg', cpuload'[idx]) It implies that runnable_load_avg == 0 and nr_running <= 1 in order to match the condition. This implies that runnable_load_avg == 0 too because of the condition: this_load <= load. but if this _load is null, 'balanced' is already set and the test is redundant. If sync is set, it's not as straight forward as above (especially if cgroup are involved) but the policy should be similar as we have removed a task that's going to sleep in order to get a more accurate load and this_load values. The current conclusion is that these additional condition don't give any benefit so we can remove them. Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: preeti@linux.vnet.ibm.com Cc: riel@redhat.com Cc: Morten.Rasmussen@arm.com Cc: efault@gmx.de Cc: nicolas.pitre@linaro.org Cc: daniel.lezcano@linaro.org Cc: dietmar.eggemann@arm.com Cc: Linus Torvalds <torvalds@linux-foundation.org> Link: http://lkml.kernel.org/r/1409051215-16788-3-git-send-email-vincent.guittot@linaro.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched/fair.c30
1 file changed, 6 insertions, 24 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 01856a8bcd4c..391eaf25a2aa 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4285,7 +4285,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
4285{ 4285{
4286 s64 this_load, load; 4286 s64 this_load, load;
4287 int idx, this_cpu, prev_cpu; 4287 int idx, this_cpu, prev_cpu;
4288 unsigned long tl_per_task;
4289 struct task_group *tg; 4288 struct task_group *tg;
4290 unsigned long weight; 4289 unsigned long weight;
4291 int balanced; 4290 int balanced;
@@ -4343,32 +4342,15 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
4343 balanced = this_eff_load <= prev_eff_load; 4342 balanced = this_eff_load <= prev_eff_load;
4344 } else 4343 } else
4345 balanced = true; 4344 balanced = true;
4346
4347 /*
4348 * If the currently running task will sleep within
4349 * a reasonable amount of time then attract this newly
4350 * woken task:
4351 */
4352 if (sync && balanced)
4353 return 1;
4354
4355 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts); 4345 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
4356 tl_per_task = cpu_avg_load_per_task(this_cpu);
4357 4346
4358 if (balanced || 4347 if (!balanced)
4359 (this_load <= load && 4348 return 0;
4360 this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
4361 /*
4362 * This domain has SD_WAKE_AFFINE and
4363 * p is cache cold in this domain, and
4364 * there is no bad imbalance.
4365 */
4366 schedstat_inc(sd, ttwu_move_affine);
4367 schedstat_inc(p, se.statistics.nr_wakeups_affine);
4368 4349
4369 return 1; 4350 schedstat_inc(sd, ttwu_move_affine);
4370 } 4351 schedstat_inc(p, se.statistics.nr_wakeups_affine);
4371 return 0; 4352
4353 return 1;
4372} 4354}
4373 4355
4374/* 4356/*