path: root/kernel/sched.c
author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-04-07 08:09:49 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-04-11 06:58:19 -0400
commit		a9c9a9b6bff27ac9c746344a9c1a19bf3327002c (patch)
tree		5257413293a39f8a2812961611425db1d8173189 /kernel/sched.c
parent		21d42ccfd6c6c11f96c2acfd32a85cfc33514d3a (diff)
sched: Simplify sched_groups_power initialization
Again, instead of relying on knowing the possible domains and their order,
simply rely on the sched_domain tree and whatever domains are present in
there to initialize the sched_group cpu_power.

Note: we need to iterate the CPU mask backwards because of the
cpumask_first() condition for iterating up the tree. By iterating the mask
backwards we ensure all groups of a domain are set-up before starting on
the parent groups that rely on its children to be completely done.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110407122942.187335414@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
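A minimal standalone C sketch of the ordering argument above, using hypothetical toy types (toy_domain, init_groups_power) rather than the kernel's real sched_domain structures: a level is set up only by the first CPU of its span (the cpumask_first() rule), and the top level aggregates what the leaf levels already produced, so walking the CPUs from the highest id downwards guarantees every child group is finished before the parent that sums it.

#include <stdio.h>

#define NR_CPUS 4

struct toy_domain {
	struct toy_domain *parent;
	const char *name;
	int first_cpu;	/* stand-in for cpumask_first() of this domain's span */
	int power;	/* computed group power */
};

/* Toy two-level topology: one leaf domain per CPU under a single PHYS domain. */
static struct toy_domain phys = { NULL, "PHYS", 0, 0 };
static struct toy_domain leaf[NR_CPUS];

static void init_groups_power(int cpu, struct toy_domain *sd)
{
	if (cpu != sd->first_cpu)
		return;		/* only the first CPU of the span sets up this level */

	if (sd->parent) {
		sd->power = 1;	/* leaf level: trivial per-CPU power */
	} else {
		/* top level: aggregate what the child levels already produced */
		for (int i = 0; i < NR_CPUS; i++)
			sd->power += leaf[i].power;
	}
	printf("cpu%d initialized %s: power=%d\n", cpu, sd->name, sd->power);
}

int main(void)
{
	for (int i = 0; i < NR_CPUS; i++)
		leaf[i] = (struct toy_domain){ &phys, "LEAF", i, 0 };

	/* Iterate the mask backwards, then walk each CPU's domain tree upwards. */
	for (int i = NR_CPUS - 1; i >= 0; i--)
		for (struct toy_domain *sd = &leaf[i]; sd; sd = sd->parent)
			init_groups_power(i, sd);

	return 0;	/* PHYS ends up with power == NR_CPUS */
}

Iterating forwards instead (i = 0 .. NR_CPUS-1) would have cpu0 sum the top-level power before cpu1..cpu3 had set up their leaf groups, which is exactly the hazard the backwards walk avoids.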
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	39
1 file changed, 5 insertions, 34 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index d6992bfa11eb..1cca59ec4a49 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7334,43 +7334,14 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 	}
 
 	/* Calculate CPU power for physical packages and nodes */
-#ifdef CONFIG_SCHED_SMT
-	for_each_cpu(i, cpu_map) {
-		sd = &per_cpu(cpu_domains, i).sd;
-		init_sched_groups_power(i, sd);
-	}
-#endif
-#ifdef CONFIG_SCHED_MC
-	for_each_cpu(i, cpu_map) {
-		sd = &per_cpu(core_domains, i).sd;
-		init_sched_groups_power(i, sd);
-	}
-#endif
-#ifdef CONFIG_SCHED_BOOK
-	for_each_cpu(i, cpu_map) {
-		sd = &per_cpu(book_domains, i).sd;
-		init_sched_groups_power(i, sd);
-	}
-#endif
-
-	for_each_cpu(i, cpu_map) {
-		sd = &per_cpu(phys_domains, i).sd;
-		init_sched_groups_power(i, sd);
-	}
-
-#ifdef CONFIG_NUMA
-	for_each_cpu(i, cpu_map) {
-		sd = &per_cpu(node_domains, i).sd;
-		init_sched_groups_power(i, sd);
-	}
+	for (i = nr_cpumask_bits-1; i >= 0; i--) {
+		if (!cpumask_test_cpu(i, cpu_map))
+			continue;
 
-	if (d.sd_allnodes) {
-		for_each_cpu(i, cpu_map) {
-			sd = &per_cpu(allnodes_domains, i).sd;
+		sd = *per_cpu_ptr(d.sd, i);
+		for (; sd; sd = sd->parent)
 			init_sched_groups_power(i, sd);
-		}
 	}
-#endif
 
 	/* Attach the domains */
 	for_each_cpu(i, cpu_map) {