diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2009-09-17 03:01:14 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-09-17 04:40:31 -0400 |
commit | 29cd8bae396583a2ee9a3340db8c5102acf9f6fd (patch) | |
tree | d16fba0c7c7da9047db532fcf5cbb847bd6fea51 | |
parent | de69a80be32445b0a71e8e3b757e584d7beb90f7 (diff) |
sched: Fix SD_POWERSAVING_BALANCE|SD_PREFER_LOCAL vs SD_WAKE_AFFINE
The SD_POWERSAVING_BALANCE|SD_PREFER_LOCAL code can break out of
the domain iteration early, making us miss the SD_WAKE_AFFINE bits.
Fix this by continuing iteration until there is no need for a
larger domain.
This also cleans up the cgroup stuff a bit, by not having two
update_shares() invocations.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r-- | kernel/sched_fair.c | 42 |
1 files changed, 27 insertions, 15 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index ffee827fa22f..10d218ab69f2 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -1333,11 +1333,12 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) | |||
1333 | */ | 1333 | */ |
1334 | static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) | 1334 | static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) |
1335 | { | 1335 | { |
1336 | struct sched_domain *tmp, *shares = NULL, *sd = NULL; | 1336 | struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL; |
1337 | int cpu = smp_processor_id(); | 1337 | int cpu = smp_processor_id(); |
1338 | int prev_cpu = task_cpu(p); | 1338 | int prev_cpu = task_cpu(p); |
1339 | int new_cpu = cpu; | 1339 | int new_cpu = cpu; |
1340 | int want_affine = 0; | 1340 | int want_affine = 0; |
1341 | int want_sd = 1; | ||
1341 | int sync = wake_flags & WF_SYNC; | 1342 | int sync = wake_flags & WF_SYNC; |
1342 | 1343 | ||
1343 | if (sd_flag & SD_BALANCE_WAKE) { | 1344 | if (sd_flag & SD_BALANCE_WAKE) { |
@@ -1369,33 +1370,44 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag | |||
1369 | nr_running /= 2; | 1370 | nr_running /= 2; |
1370 | 1371 | ||
1371 | if (nr_running < capacity) | 1372 | if (nr_running < capacity) |
1372 | break; | 1373 | want_sd = 0; |
1373 | } | 1374 | } |
1374 | 1375 | ||
1375 | if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && | 1376 | if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && |
1376 | cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { | 1377 | cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { |
1377 | 1378 | ||
1378 | if (sched_feat(LB_SHARES_UPDATE)) { | 1379 | affine_sd = tmp; |
1379 | update_shares(tmp); | ||
1380 | shares = tmp; | ||
1381 | } | ||
1382 | |||
1383 | if (wake_affine(tmp, p, sync)) { | ||
1384 | new_cpu = cpu; | ||
1385 | goto out; | ||
1386 | } | ||
1387 | |||
1388 | want_affine = 0; | 1380 | want_affine = 0; |
1389 | } | 1381 | } |
1390 | 1382 | ||
1383 | if (!want_sd && !want_affine) | ||
1384 | break; | ||
1385 | |||
1391 | if (!(tmp->flags & sd_flag)) | 1386 | if (!(tmp->flags & sd_flag)) |
1392 | continue; | 1387 | continue; |
1393 | 1388 | ||
1394 | sd = tmp; | 1389 | if (want_sd) |
1390 | sd = tmp; | ||
1391 | } | ||
1392 | |||
1393 | if (sched_feat(LB_SHARES_UPDATE)) { | ||
1394 | /* | ||
1395 | * Pick the largest domain to update shares over | ||
1396 | */ | ||
1397 | tmp = sd; | ||
1398 | if (affine_sd && (!tmp || | ||
1399 | cpumask_weight(sched_domain_span(affine_sd)) > | ||
1400 | cpumask_weight(sched_domain_span(sd)))) | ||
1401 | tmp = affine_sd; | ||
1402 | |||
1403 | if (tmp) | ||
1404 | update_shares(tmp); | ||
1395 | } | 1405 | } |
1396 | 1406 | ||
1397 | if (sd && sd != shares && sched_feat(LB_SHARES_UPDATE)) | 1407 | if (affine_sd && wake_affine(affine_sd, p, sync)) { |
1398 | update_shares(sd); | 1408 | new_cpu = cpu; |
1409 | goto out; | ||
1410 | } | ||
1399 | 1411 | ||
1400 | while (sd) { | 1412 | while (sd) { |
1401 | int load_idx = sd->forkexec_idx; | 1413 | int load_idx = sd->forkexec_idx; |