diff options
author:    Peter Zijlstra <a.p.zijlstra@chello.nl>  2011-04-07 08:09:43 -0400
committer: Ingo Molnar <mingo@elte.hu>  2011-04-11 06:58:16 -0400
commit:    d274cb30f4a08045492d3f0c47cdf1a25668b1f5 (patch)
tree:      11e11302a1f708fe6154046026b2e56447366bcd /kernel/sched.c
parent:    c4a8849af939082052d8117f9ea3e170a99ff232 (diff)
sched: Simplify ->cpu_power initialization
The code in update_group_power() does what init_sched_groups_power()
does and more, so remove the special init_ code and call the generic
code instead.
Also move the sd->span_weight initialization because
update_group_power() needs it.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110407122941.875856012@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 44 +----
1 file changed, 5 insertions(+), 39 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index d3e183c85f49..50d5fd33e8d5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6679,9 +6679,6 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	struct sched_domain *tmp;
 
-	for (tmp = sd; tmp; tmp = tmp->parent)
-		tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
-
 	/* Remove the sched domains which do not contribute to scheduling. */
 	for (tmp = sd; tmp; ) {
 		struct sched_domain *parent = tmp->parent;
@@ -7159,11 +7156,6 @@ static void free_sched_groups(const struct cpumask *cpu_map,
  */
 static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 {
-	struct sched_domain *child;
-	struct sched_group *group;
-	long power;
-	int weight;
-
 	WARN_ON(!sd || !sd->groups);
 
 	if (cpu != group_first_cpu(sd->groups))
@@ -7171,36 +7163,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 
 	sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
 
-	child = sd->child;
-
-	sd->groups->cpu_power = 0;
-
-	if (!child) {
-		power = SCHED_LOAD_SCALE;
-		weight = cpumask_weight(sched_domain_span(sd));
-		/*
-		 * SMT siblings share the power of a single core.
-		 * Usually multiple threads get a better yield out of
-		 * that one core than a single thread would have,
-		 * reflect that in sd->smt_gain.
-		 */
-		if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
-			power *= sd->smt_gain;
-			power /= weight;
-			power >>= SCHED_LOAD_SHIFT;
-		}
-		sd->groups->cpu_power += power;
-		return;
-	}
-
-	/*
-	 * Add cpu_power of each child group to this groups cpu_power.
-	 */
-	group = child->groups;
-	do {
-		sd->groups->cpu_power += group->cpu_power;
-		group = group->next;
-	} while (group != child->groups);
+	update_group_power(sd, cpu);
 }
 
 /*
@@ -7507,7 +7470,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 {
 	enum s_alloc alloc_state = sa_none;
 	struct s_data d;
-	struct sched_domain *sd;
+	struct sched_domain *sd, *tmp;
 	int i;
 #ifdef CONFIG_NUMA
 	d.sd_allnodes = 0;
@@ -7530,6 +7493,9 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		sd = __build_book_sched_domain(&d, cpu_map, attr, sd, i);
 		sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
 		sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
+
+		for (tmp = sd; tmp; tmp = tmp->parent)
+			tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
 	}
 
 	for_each_cpu(i, cpu_map) {