Diffstat (limited to 'kernel/sched')
 kernel/sched/fair.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index be530e40ceb9..74fa2c210b6d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4284,6 +4284,7 @@ static int wake_wide(struct task_struct *p)
 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 {
 	s64 this_load, load;
+	s64 this_eff_load, prev_eff_load;
 	int idx, this_cpu, prev_cpu;
 	struct task_group *tg;
 	unsigned long weight;
@@ -4327,21 +4328,21 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	 * Otherwise check if either cpus are near enough in load to allow this
 	 * task to be woken on this_cpu.
 	 */
-	if (this_load > 0) {
-		s64 this_eff_load, prev_eff_load;
+	this_eff_load = 100;
+	this_eff_load *= capacity_of(prev_cpu);
+
+	prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
+	prev_eff_load *= capacity_of(this_cpu);
 
-		this_eff_load = 100;
-		this_eff_load *= capacity_of(prev_cpu);
+	if (this_load > 0) {
 		this_eff_load *= this_load +
 			effective_load(tg, this_cpu, weight, weight);
 
-		prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
-		prev_eff_load *= capacity_of(this_cpu);
 		prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
+	}
+
+	balanced = this_eff_load <= prev_eff_load;
 
-		balanced = this_eff_load <= prev_eff_load;
-	} else
-		balanced = true;
 	schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
 
 	if (!balanced)
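
For illustration only, a minimal standalone sketch (not kernel code) of the "balanced" decision this patch changes: before, a non-positive this_load short-circuited to balanced = true; after, CPU capacity and sd->imbalance_pct always enter the comparison, and the load terms are folded in only when this_load is positive. The function names balanced_old/balanced_new, the omission of effective_load(), and the capacity/load/imbalance_pct numbers below are made-up assumptions, not values from the patch.

/*
 * Standalone illustration of the wake_affine() change above -- not kernel
 * code.  effective_load() (the cgroup weight adjustment) is omitted, and
 * the capacities, loads and imbalance_pct values are invented.
 */
#include <stdio.h>
#include <stdint.h>

typedef int64_t s64;

/* Before the patch: this_load <= 0 unconditionally reports "balanced". */
static int balanced_old(s64 this_load, s64 prev_load,
			unsigned long this_capacity, unsigned long prev_capacity,
			unsigned int imbalance_pct)
{
	if (this_load > 0) {
		s64 this_eff_load = 100 * (s64)prev_capacity * this_load;
		s64 prev_eff_load = (100 + (imbalance_pct - 100) / 2) *
				    (s64)this_capacity * prev_load;
		return this_eff_load <= prev_eff_load;
	}
	return 1;
}

/* After the patch: capacity and imbalance_pct always enter the comparison;
 * the load terms are only multiplied in when this_load is positive. */
static int balanced_new(s64 this_load, s64 prev_load,
			unsigned long this_capacity, unsigned long prev_capacity,
			unsigned int imbalance_pct)
{
	s64 this_eff_load = 100 * (s64)prev_capacity;
	s64 prev_eff_load = (100 + (imbalance_pct - 100) / 2) * (s64)this_capacity;

	if (this_load > 0) {
		this_eff_load *= this_load;
		prev_eff_load *= prev_load;
	}

	return this_eff_load <= prev_eff_load;
}

int main(void)
{
	/* Waking CPU idle (this_load == 0), but the task's previous CPU has
	 * more than twice the capacity of the waking CPU: the old code calls
	 * this balanced, the new code does not. */
	printf("old: balanced=%d, new: balanced=%d\n",
	       balanced_old(0, 50, 430, 1024, 125),   /* prints 1 */
	       balanced_new(0, 50, 430, 1024, 125));  /* prints 0 */
	return 0;
}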