author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2011-04-07 08:09:47 -0400
committer  Ingo Molnar <mingo@elte.hu>                2011-04-11 06:58:18 -0400
commit     1cf51902546d60b8a7a6aba2dd557bd4ba8840ea
tree       438eee650197f32cfd8cd6e2551c38f2c7f9d620 /kernel/sched.c
parent     3739494e08da50c8a68d65eed5ba3012a54b40d4
sched: Simplify sched_group creation
Instead of calling build_sched_groups() once for every sched_domain level
we might have created, simply iterate the sched_domain tree and call it
for each sched_domain actually present.
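In effect (a condensed sketch of the resulting flow, lifted from the hunks
below rather than a drop-in build): every sched_domain records its level in
sd->level and links to its parent, so walking the ->parent chain from each
CPU's lowest domain visits exactly the levels that were built, and nothing
else:

	/*
	 * Sketch only, condensed from the patch below: the ->parent
	 * walk replaces the explicit SD_LV_* enumeration, and
	 * build_sched_groups() now reads the level from sd->level
	 * instead of taking it as a separate argument.
	 */
	for (tmp = sd; tmp; tmp = tmp->parent) {
		tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
		build_sched_groups(&d, tmp, cpu_map, i);
	}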
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110407122942.077862519@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 24 +++++-------------------
1 file changed, 5 insertions(+), 19 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index fa10cf73c80c..e66d24aaf6d1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7231,15 +7231,12 @@ static struct sched_domain *__build_smt_sched_domain(struct s_data *d,
 	return sd;
 }
 
-static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
+static void build_sched_groups(struct s_data *d, struct sched_domain *sd,
 			       const struct cpumask *cpu_map, int cpu)
 {
-	struct sched_domain *sd;
-
-	switch (l) {
+	switch (sd->level) {
 #ifdef CONFIG_SCHED_SMT
 	case SD_LV_SIBLING: /* set up CPU (sibling) groups */
-		sd = &per_cpu(cpu_domains, cpu).sd;
 		if (cpu == cpumask_first(sched_domain_span(sd)))
 			init_sched_build_groups(sched_domain_span(sd), cpu_map,
 						&cpu_to_cpu_group,
@@ -7248,7 +7245,6 @@ static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
 #endif
 #ifdef CONFIG_SCHED_MC
 	case SD_LV_MC: /* set up multi-core groups */
-		sd = &per_cpu(core_domains, cpu).sd;
 		if (cpu == cpumask_first(sched_domain_span(sd)))
 			init_sched_build_groups(sched_domain_span(sd), cpu_map,
 						&cpu_to_core_group,
@@ -7257,7 +7253,6 @@ static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
 #endif
 #ifdef CONFIG_SCHED_BOOK
 	case SD_LV_BOOK: /* set up book groups */
-		sd = &per_cpu(book_domains, cpu).sd;
 		if (cpu == cpumask_first(sched_domain_span(sd)))
 			init_sched_build_groups(sched_domain_span(sd), cpu_map,
 						&cpu_to_book_group,
@@ -7265,7 +7260,6 @@ static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
 		break;
 #endif
 	case SD_LV_CPU: /* set up physical groups */
-		sd = &per_cpu(phys_domains, cpu).sd;
 		if (cpu == cpumask_first(sched_domain_span(sd)))
 			init_sched_build_groups(sched_domain_span(sd), cpu_map,
 						&cpu_to_phys_group,
@@ -7273,7 +7267,6 @@ static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
 		break;
 #ifdef CONFIG_NUMA
 	case SD_LV_NODE:
-		sd = &per_cpu(node_domains, cpu).sd;
 		if (cpu == cpumask_first(sched_domain_span(sd)))
 			init_sched_build_groups(sched_domain_span(sd), cpu_map,
 						&cpu_to_node_group,
@@ -7323,17 +7316,10 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
 		sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
 
-		for (tmp = sd; tmp; tmp = tmp->parent)
+		for (tmp = sd; tmp; tmp = tmp->parent) {
 			tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
-	}
-
-	for_each_cpu(i, cpu_map) {
-		build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
-		build_sched_groups(&d, SD_LV_BOOK, cpu_map, i);
-		build_sched_groups(&d, SD_LV_MC, cpu_map, i);
-		build_sched_groups(&d, SD_LV_CPU, cpu_map, i);
-		build_sched_groups(&d, SD_LV_NODE, cpu_map, i);
-		build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, i);
+			build_sched_groups(&d, tmp, cpu_map, i);
+		}
 	}
 
 	/* Calculate CPU power for physical packages and nodes */