author	Vincent Guittot <vincent.guittot@linaro.org>	2014-08-26 07:06:50 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-09-19 06:35:28 -0400
commit	bd61c98f9b3f142cd63f9e15acfe203bec9e5f5a (patch)
tree	22f81a29271fae693da87fc6df170828775c1be4 /kernel/sched
parent	d3bfca1a7b028a57d648dbc0985492c6a4466ccf (diff)
sched: Test the CPU's capacity in wake_affine()
Currently the task always wakes affine on this_cpu if the latter is idle. Before waking up the task on this_cpu, we check that this_cpu's capacity is not significantly reduced because of RT tasks or irq activity.

Use cases where the number of irqs and/or the time spent under irq is significant will benefit from this, because the task that is woken up by an irq or softirq will not use the same CPU as the irq (and softirq) but an idle one.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: preeti@linux.vnet.ibm.com
Cc: riel@redhat.com
Cc: Morten.Rasmussen@arm.com
Cc: efault@gmx.de
Cc: nicolas.pitre@linaro.org
Cc: daniel.lezcano@linaro.org
Cc: dietmar.eggemann@arm.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1409051215-16788-8-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
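In effect, the patched test compares capacity-scaled effective loads; a sketch using the names from the hunk below (the concrete values are hypothetical):

	this_eff_load = 100 * capacity_of(prev_cpu)
	prev_eff_load = (100 + (sd->imbalance_pct - 100) / 2) * capacity_of(this_cpu)
	balanced      = this_eff_load <= prev_eff_load

with each side further multiplied by its load term (this_load or load, plus the effective_load() contributions) only when this_load > 0. Assuming capacity_of(this_cpu) = 430 (depressed by irq time), capacity_of(prev_cpu) = 1024 (idle) and imbalance_pct = 117, an idle this_cpu yields 100 * 1024 = 102400 against 108 * 430 = 46440, so the wakeup stays on prev_cpu; the unpatched code returned balanced = true whenever this_load <= 0.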
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/fair.c	19
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index be530e40ceb9..74fa2c210b6d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4284,6 +4284,7 @@ static int wake_wide(struct task_struct *p)
 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 {
 	s64 this_load, load;
+	s64 this_eff_load, prev_eff_load;
 	int idx, this_cpu, prev_cpu;
 	struct task_group *tg;
 	unsigned long weight;
@@ -4327,21 +4328,21 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	 * Otherwise check if either cpus are near enough in load to allow this
 	 * task to be woken on this_cpu.
 	 */
-	if (this_load > 0) {
-		s64 this_eff_load, prev_eff_load;
+	this_eff_load = 100;
+	this_eff_load *= capacity_of(prev_cpu);
+
+	prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
+	prev_eff_load *= capacity_of(this_cpu);
 
-		this_eff_load = 100;
-		this_eff_load *= capacity_of(prev_cpu);
+	if (this_load > 0) {
 		this_eff_load *= this_load +
 			effective_load(tg, this_cpu, weight, weight);
 
-		prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
-		prev_eff_load *= capacity_of(this_cpu);
 		prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
+	}
+
+	balanced = this_eff_load <= prev_eff_load;
 
-		balanced = this_eff_load <= prev_eff_load;
-	} else
-		balanced = true;
 	schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
 
 	if (!balanced)
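For illustration, a minimal user-space sketch of the patched balance test; the capacity values, imbalance_pct and the elided effective_load() terms are assumptions, not kernel code:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Hypothetical inputs: a fully idle CPU has capacity 1024
		 * (SCHED_CAPACITY_SCALE); irq/RT pressure scales it down.
		 * this_cpu is assumed idle (this_load == 0), so the
		 * effective_load() terms of the real code drop out. */
		int64_t capacity_this_cpu = 430;   /* assumed: depressed by irq time */
		int64_t capacity_prev_cpu = 1024;  /* assumed: idle, full capacity */
		int64_t imbalance_pct = 117;       /* assumed sd->imbalance_pct */
		int64_t this_eff_load, prev_eff_load;

		/* Mirrors the rewritten wake_affine(): the capacity factors
		 * are applied unconditionally, so an idle this_cpu can still
		 * lose the comparison when its capacity is reduced. */
		this_eff_load = 100;
		this_eff_load *= capacity_prev_cpu;

		prev_eff_load = 100 + (imbalance_pct - 100) / 2;
		prev_eff_load *= capacity_this_cpu;

		if (this_eff_load <= prev_eff_load)
			printf("balanced -> wake affine on this_cpu\n");
		else
			printf("not balanced -> leave the task on prev_cpu\n");

		return 0;
	}

With these numbers it prints "not balanced -> leave the task on prev_cpu" (102400 vs. 46440); before the patch, this_load == 0 made the wakeup affine unconditionally.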