 kernel/sched_fair.c | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index d7fda41ddaf0..cc97ea498f24 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1262,7 +1262,17 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
 	tg = task_group(p);
 	weight = p->se.load.weight;
 
-	balanced = 100*(tl + effective_load(tg, this_cpu, weight, weight)) <=
+	/*
+	 * In low-load situations, where prev_cpu is idle and this_cpu is idle
+	 * due to the sync cause above having dropped tl to 0, we'll always have
+	 * an imbalance, but there's really nothing you can do about that, so
+	 * that's good too.
+	 *
+	 * Otherwise check if either cpus are near enough in load to allow this
+	 * task to be woken on this_cpu.
+	 */
+	balanced = !tl ||
+		100*(tl + effective_load(tg, this_cpu, weight, weight)) <=
 		imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
 
 	/*
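
For reference, a minimal standalone sketch (not part of the patch) of the condition this hunk introduces. It replaces effective_load() and the real scheduler state with plain integer parameters, and the values used in main() are made-up examples, not actual load figures:

/*
 * Sketch of the new "balanced" test from wake_affine(), isolated so the
 * two cases can be exercised directly. All inputs are hypothetical.
 */
#include <stdio.h>

/* Returns 1 when waking the task on this_cpu is considered balanced. */
static int is_balanced(unsigned long tl, unsigned long this_eff,
		       unsigned long load, unsigned long prev_eff,
		       unsigned long imbalance)
{
	/* tl == 0: the low-load sync case; always accept the wakeup. */
	if (!tl)
		return 1;

	/*
	 * Otherwise require this_cpu's resulting load to stay within the
	 * allowed imbalance of prev_cpu's load, as in the patched code.
	 */
	return 100 * (tl + this_eff) <= imbalance * (load + prev_eff);
}

int main(void)
{
	/* Sync wakeup with tl dropped to 0: balanced by the shortcut. */
	printf("idle sync wakeup: %d\n", is_balanced(0, 0, 0, 0, 125));

	/* this_cpu would end up noticeably busier: not balanced. */
	printf("loaded wakeup:    %d\n",
	       is_balanced(2048, 1024, 1024, 0, 125));

	return 0;
}

The !tl short-circuit covers the idle/idle sync case described in the patch comment: with tl dropped to 0 the percentage comparison alone would still flag an imbalance, even though there is nothing useful to balance, so the wakeup onto this_cpu is simply allowed.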