Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c | 22 ++++++++++++++++------
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 217e4a9393e4..eed35eded602 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1225,7 +1225,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	unsigned long this_load, load;
 	int idx, this_cpu, prev_cpu;
 	unsigned long tl_per_task;
-	unsigned int imbalance;
 	struct task_group *tg;
 	unsigned long weight;
 	int balanced;
@@ -1252,8 +1251,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	tg = task_group(p);
 	weight = p->se.load.weight;
 
-	imbalance = 100 + (sd->imbalance_pct - 100) / 2;
-
 	/*
 	 * In low-load situations, where prev_cpu is idle and this_cpu is idle
 	 * due to the sync cause above having dropped this_load to 0, we'll
@@ -1263,9 +1260,21 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	 * Otherwise check if either cpus are near enough in load to allow this
 	 * task to be woken on this_cpu.
 	 */
-	balanced = !this_load ||
-		100*(this_load + effective_load(tg, this_cpu, weight, weight)) <=
-		imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
+	if (this_load) {
+		unsigned long this_eff_load, prev_eff_load;
+
+		this_eff_load = 100;
+		this_eff_load *= power_of(prev_cpu);
+		this_eff_load *= this_load +
+			effective_load(tg, this_cpu, weight, weight);
+
+		prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
+		prev_eff_load *= power_of(this_cpu);
+		prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
+
+		balanced = this_eff_load <= prev_eff_load;
+	} else
+		balanced = true;
 
 	/*
 	 * If the currently running task will sleep within
@@ -2298,6 +2307,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
 	if (!power)
 		power = 1;
 
+	cpu_rq(cpu)->cpu_power = power;
 	sdg->cpu_power = power;
 }
 
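The wake_affine() hunks replace the old power-blind check, which compared this_cpu's load directly against prev_cpu's load scaled by imbalance_pct, with a comparison that also weighs each side by the *other* CPU's cpu_power. Cross-multiplying by power_of(prev_cpu) and power_of(this_cpu) keeps the test "this_load/power(this_cpu) <= imb * load/power(prev_cpu)" in integer arithmetic, with no division. Below is a minimal userspace sketch of the new test; the loads, cpu_power values and the imbalance_pct of 125 are made-up stand-ins, and the effective_load() group-scheduling corrections are folded into the load arguments:

#include <stdio.h>

/*
 * Sketch of the new wake_affine() balance test. All numbers are
 * illustrative; in the kernel the loads come from the runqueues,
 * the capacities from power_of() and imbalance_pct from the
 * sched_domain.
 */
static int balanced(unsigned long this_load, unsigned long prev_load,
		    unsigned long this_power, unsigned long prev_power,
		    unsigned int imbalance_pct)
{
	unsigned long this_eff_load, prev_eff_load;

	if (!this_load)
		return 1;	/* this_cpu idle: affine wakeup is fine */

	/* scale each side by the *other* CPU's capacity */
	this_eff_load = 100;
	this_eff_load *= prev_power;
	this_eff_load *= this_load;

	prev_eff_load = 100 + (imbalance_pct - 100) / 2;
	prev_eff_load *= this_power;
	prev_eff_load *= prev_load;

	return this_eff_load <= prev_eff_load;
}

int main(void)
{
	/* Equal capacities, equal load: balanced, as before. */
	printf("full power: %d\n", balanced(1024, 1024, 1024, 1024, 125));
	/*
	 * Half of this_cpu's capacity gone (512 vs 1024), e.g. consumed
	 * by RT tasks: the old test would still have called this
	 * balanced; the new one reports not balanced, so the task stays
	 * on prev_cpu.
	 */
	printf("half power: %d\n", balanced(1024, 1024, 512, 1024, 125));
	return 0;
}

With equal capacities the comparison reduces to the old one (100 * 1024 * 1024 <= 112 * 1024 * 1024), but with this_cpu's capacity halved the right-hand side shrinks to 112 * 512 * 1024 and the affine wakeup is refused.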
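The update_cpu_power() hunk caches the freshly computed capacity in the per-CPU runqueue in addition to the sched_group, which is what lets wake_affine() read a CPU's capacity cheaply via power_of(). A toy model of that split follows; power_of() is written here as a plain rq accessor, which is an assumption, since the helper itself is not part of this diff:

#include <stdio.h>

/* Toy per-CPU runqueue; in the kernel this is struct rq. */
struct rq { unsigned long cpu_power; };

static struct rq runqueues[2];
#define cpu_rq(cpu)	(&runqueues[cpu])

/* Assumed accessor: read back the rq-local copy. */
static unsigned long power_of(int cpu)
{
	return cpu_rq(cpu)->cpu_power;
}

static void update_cpu_power(int cpu, unsigned long power)
{
	if (!power)		/* as in the hunk: capacity is never 0 */
		power = 1;

	cpu_rq(cpu)->cpu_power = power;	/* the newly added rq-local copy */
}

int main(void)
{
	update_cpu_power(0, 1024);
	update_cpu_power(1, 0);		/* clamped to 1 */
	printf("power_of(0)=%lu power_of(1)=%lu\n",
	       power_of(0), power_of(1));
	return 0;
}

Keeping a copy on the runqueue spares the wakeup fast path from chasing sched_domain/sched_group pointers just to look up a capacity that update_cpu_power() has already computed.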