author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-09-01 04:34:32 -0400
committer	Ingo Molnar <mingo@elte.hu>			2009-09-04 04:09:53 -0400
commit		f93e65c186ab3c05ce2068733ca10e34fd00125e (patch)
tree		0e42dc647e8f24fd5abc0be7ef3d9a4fc58a2069 /kernel/sched.c
parent		9aa55fbd01779a0b476d87cd9b5170fd5bebab1d (diff)
sched: Restore __cpu_power to a straight sum of power
cpu_power is supposed to be a representation of the processing
capacity of the CPU, not a value to tweak arbitrarily in order to
affect task placement.
Remove the placement hacks.
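For illustration only, a minimal user-space C sketch of the leaf-level rule the
patch adopts: a lowest-level group starts at SCHED_LOAD_SCALE and SMT siblings
split a single core's power. The helper name leaf_group_power and the
SCHED_LOAD_SCALE value of 1024 are assumptions made for the sketch; this is not
kernel code.

#include <stdio.h>

#define SCHED_LOAD_SCALE 1024L	/* assumed default scale, 1 << 10 */

/*
 * Hypothetical stand-in for the new leaf-domain rule: a group of `weight`
 * CPUs gets SCHED_LOAD_SCALE, divided among siblings when they share the
 * power of one core (SD_SHARE_CPUPOWER in the real code).
 */
static long leaf_group_power(int weight, int shares_cpupower)
{
	long power = SCHED_LOAD_SCALE;

	if (shares_cpupower && weight > 1)
		power /= weight;	/* SMT siblings share one core */

	return power;
}

int main(void)
{
	/* Two hyperthreads on one core: each sibling contributes 512. */
	printf("SMT sibling: %ld\n", leaf_group_power(2, 1));
	/* A non-SMT core keeps the full 1024. */
	printf("Full core:   %ld\n", leaf_group_power(1, 0));
	return 0;
}

With the placement hacks removed, a parent domain recovers a core's full 1024
simply by summing its siblings' contributions, rather than by special-casing
shared-resource groups.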
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Acked-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Acked-by: Gautham R Shenoy <ego@in.ibm.com>
Cc: Balbir Singh <balbir@in.ibm.com>
LKML-Reference: <20090901083825.810860576@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	| 28 ++++++++++++----------------
1 file changed, 12 insertions(+), 16 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index da1edc8277d0..584a122b553c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8464,15 +8464,13 @@ static void free_sched_groups(const struct cpumask *cpu_map,
  * there are asymmetries in the topology. If there are asymmetries, group
  * having more cpu_power will pickup more load compared to the group having
  * less cpu_power.
- *
- * cpu_power will be a multiple of SCHED_LOAD_SCALE. This multiple represents
- * the maximum number of tasks a group can handle in the presence of other idle
- * or lightly loaded groups in the same sched domain.
  */
 static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 {
 	struct sched_domain *child;
 	struct sched_group *group;
+	long power;
+	int weight;
 
 	WARN_ON(!sd || !sd->groups);
 
@@ -8483,22 +8481,20 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 
 	sd->groups->__cpu_power = 0;
 
-	/*
-	 * For perf policy, if the groups in child domain share resources
-	 * (for example cores sharing some portions of the cache hierarchy
-	 * or SMT), then set this domain groups cpu_power such that each group
-	 * can handle only one task, when there are other idle groups in the
-	 * same sched domain.
-	 */
-	if (!child || (!(sd->flags & SD_POWERSAVINGS_BALANCE) &&
-		       (child->flags &
-			(SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES)))) {
-		sg_inc_cpu_power(sd->groups, SCHED_LOAD_SCALE);
+	if (!child) {
+		power = SCHED_LOAD_SCALE;
+		weight = cpumask_weight(sched_domain_span(sd));
+		/*
+		 * SMT siblings share the power of a single core.
+		 */
+		if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1)
+			power /= weight;
+		sg_inc_cpu_power(sd->groups, power);
 		return;
 	}
 
 	/*
-	 * add cpu_power of each child group to this groups cpu_power
+	 * Add cpu_power of each child group to this groups cpu_power.
 	 */
 	group = child->groups;
 	do {
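The hunk ends where the summation loop begins (its body is truncated above). As
a rough sketch of the straight sum that loop performs, assuming a plain C
stand-in where an array replaces the circular child->groups list and
parent_group_power is a hypothetical helper, not a kernel function:

#include <stdio.h>

/*
 * Sketch of the parent-level rule restored by this patch: a parent group's
 * power is simply the sum of its child groups' power, with no
 * placement-oriented scaling applied on top.
 */
static long parent_group_power(const long *child_power, int nr_child_groups)
{
	long sum = 0;
	int i;

	for (i = 0; i < nr_child_groups; i++)
		sum += child_power[i];

	return sum;
}

int main(void)
{
	/* Two SMT siblings of 512 each sum back to one core's 1024. */
	long siblings[] = { 512, 512 };

	printf("core power: %ld\n", parent_group_power(siblings, 2));
	return 0;
}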