diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2009-09-16 07:44:33 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-09-16 10:44:32 -0400 |
commit | 3b6408942206f940dd538e980e9904e48f4b64f8 (patch) | |
tree | 7e0a0dc37d07e0308ef2294a5de1cd1ea42a388c /kernel/sched_fair.c | |
parent | 7c423e98856df9b941223a7e7845b2502ad84b00 (diff) |
sched: Optimize cgroup vs wakeup a bit
We don't need to call update_shares() for each domain we iterate,
just for the largest one.
However, we should call it before wake_affine() as well, so that
it can use up-to-date values too.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r-- | kernel/sched_fair.c | 23 |
1 files changed, 9 insertions, 14 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index acf16a8d934b..722d392b0dac 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -1348,7 +1348,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) | |||
1348 | */ | 1348 | */ |
1349 | static int select_task_rq_fair(struct task_struct *p, int sd_flag, int flags) | 1349 | static int select_task_rq_fair(struct task_struct *p, int sd_flag, int flags) |
1350 | { | 1350 | { |
1351 | struct sched_domain *tmp, *sd = NULL; | 1351 | struct sched_domain *tmp, *shares = NULL, *sd = NULL; |
1352 | int cpu = smp_processor_id(); | 1352 | int cpu = smp_processor_id(); |
1353 | int prev_cpu = task_cpu(p); | 1353 | int prev_cpu = task_cpu(p); |
1354 | int new_cpu = cpu; | 1354 | int new_cpu = cpu; |
@@ -1387,22 +1387,14 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int flags) | |||
1387 | break; | 1387 | break; |
1388 | } | 1388 | } |
1389 | 1389 | ||
1390 | switch (sd_flag) { | ||
1391 | case SD_BALANCE_WAKE: | ||
1392 | if (!sched_feat(LB_WAKEUP_UPDATE)) | ||
1393 | break; | ||
1394 | case SD_BALANCE_FORK: | ||
1395 | case SD_BALANCE_EXEC: | ||
1396 | if (root_task_group_empty()) | ||
1397 | break; | ||
1398 | update_shares(tmp); | ||
1399 | default: | ||
1400 | break; | ||
1401 | } | ||
1402 | |||
1403 | if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && | 1390 | if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && |
1404 | cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { | 1391 | cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { |
1405 | 1392 | ||
1393 | if (sched_feat(LB_SHARES_UPDATE)) { | ||
1394 | update_shares(tmp); | ||
1395 | shares = tmp; | ||
1396 | } | ||
1397 | |||
1406 | if (wake_affine(tmp, p, sync)) { | 1398 | if (wake_affine(tmp, p, sync)) { |
1407 | new_cpu = cpu; | 1399 | new_cpu = cpu; |
1408 | goto out; | 1400 | goto out; |
@@ -1417,6 +1409,9 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int flags) | |||
1417 | sd = tmp; | 1409 | sd = tmp; |
1418 | } | 1410 | } |
1419 | 1411 | ||
1412 | if (sd && sd != shares && sched_feat(LB_SHARES_UPDATE)) | ||
1413 | update_shares(sd); | ||
1414 | |||
1420 | while (sd) { | 1415 | while (sd) { |
1421 | struct sched_group *group; | 1416 | struct sched_group *group; |
1422 | int weight; | 1417 | int weight; |