author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2011-04-07 08:09:44 -0400
committer  Ingo Molnar <mingo@elte.hu>               2011-04-11 06:58:17 -0400
commit     a06dadbec5c5df0bf3a35f33616f67d10ca9ba28 (patch)
tree       bd30d8b4e54f49567baf286bb42c63d30a4c9662 /kernel/sched.c
parent     d274cb30f4a08045492d3f0c47cdf1a25668b1f5 (diff)
sched: Simplify build_sched_groups()
Notice that the mask being computed is the same as the domain span we
just computed. By using the domain_span we can avoid some mask
allocations and computations.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110407122941.925028189@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
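The claim in the changelog can be checked against the domain setup path: the per-CPU setup helpers (e.g. __build_smt_sched_domain() in the same file) already store cpu_map & topology_thread_cpumask(cpu) into sched_domain_span(sd), so the cpumask_and() calls removed below recompute a mask that is guaranteed to equal the stored span. The following self-contained toy model illustrates that invariant and the first-CPU dedup test the patch relies on (editor's sketch, not kernel code: uint64_t stands in for struct cpumask, the topology is invented, and the helper names merely mirror the kernel's).

/*
 * Toy model of the span-reuse argument: the "domain span" is stored
 * once at setup time, and the mask the old build_sched_groups()
 * recomputed is provably identical to it; only the first CPU of each
 * span builds the groups, so every group is built exactly once.
 *
 * Build with: gcc -Wall span_demo.c (uses a GCC/Clang builtin)
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 8

typedef uint64_t cpumask_t;			/* one bit per CPU */

static int cpumask_first(cpumask_t m)		/* lowest set bit */
{
	return __builtin_ctzll(m);
}

/* invented topology: CPUs {0,1}, {2,3}, ... are SMT siblings */
static cpumask_t topology_thread_cpumask(int cpu)
{
	return 3ull << (cpu & ~1);
}

int main(void)
{
	cpumask_t cpu_map = 0xb7;		/* CPUs 3 and 6 offline */
	cpumask_t span[NR_CPUS];		/* sched_domain_span(sd) */
	int built = 0;

	/* domain setup: store each CPU's span once */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		span[cpu] = cpu_map & topology_thread_cpumask(cpu);

	/* group building: visit every online CPU */
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!(cpu_map & (1ull << cpu)))
			continue;
		/* what the old code recomputed ... */
		cpumask_t this_sibling_map =
			cpu_map & topology_thread_cpumask(cpu);
		/* ... always equals the span stored at setup */
		assert(this_sibling_map == span[cpu]);
		/* dedup: only the span's first CPU builds its groups */
		if (cpu == cpumask_first(span[cpu]))
			built++;
	}
	printf("groups built: %d\n", built);
	return 0;
}

For this cpu_map the program prints "groups built: 4": one per non-empty sibling span, each built exactly once even though every online CPU runs the loop body.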
Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 52 ++++++++++++++++------------------------------------
 1 file changed, 16 insertions(+), 36 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 50d5fd33e8d5..e3818f1b98fe 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6866,9 +6866,6 @@ struct s_data {
 	cpumask_var_t		notcovered;
 #endif
 	cpumask_var_t		nodemask;
-	cpumask_var_t		this_sibling_map;
-	cpumask_var_t		this_core_map;
-	cpumask_var_t		this_book_map;
 	cpumask_var_t		send_covered;
 	cpumask_var_t		tmpmask;
 	struct sched_group	**sched_group_nodes;
@@ -6880,9 +6877,6 @@ enum s_alloc {
 	sa_rootdomain,
 	sa_tmpmask,
 	sa_send_covered,
-	sa_this_book_map,
-	sa_this_core_map,
-	sa_this_sibling_map,
 	sa_nodemask,
 	sa_sched_group_nodes,
 #ifdef CONFIG_NUMA
@@ -7251,12 +7245,6 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
 		free_cpumask_var(d->tmpmask); /* fall through */
 	case sa_send_covered:
 		free_cpumask_var(d->send_covered); /* fall through */
-	case sa_this_book_map:
-		free_cpumask_var(d->this_book_map); /* fall through */
-	case sa_this_core_map:
-		free_cpumask_var(d->this_core_map); /* fall through */
-	case sa_this_sibling_map:
-		free_cpumask_var(d->this_sibling_map); /* fall through */
 	case sa_nodemask:
 		free_cpumask_var(d->nodemask); /* fall through */
 	case sa_sched_group_nodes:
@@ -7295,14 +7283,8 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 #endif
 	if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL))
 		return sa_sched_group_nodes;
-	if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL))
-		return sa_nodemask;
-	if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
-		return sa_this_sibling_map;
-	if (!alloc_cpumask_var(&d->this_book_map, GFP_KERNEL))
-		return sa_this_core_map;
 	if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
-		return sa_this_book_map;
+		return sa_nodemask;
 	if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
 		return sa_send_covered;
 	d->rd = alloc_rootdomain();
@@ -7414,39 +7396,40 @@ static struct sched_domain *__build_smt_sched_domain(struct s_data *d,
 static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
 			       const struct cpumask *cpu_map, int cpu)
 {
+	struct sched_domain *sd;
+
 	switch (l) {
 #ifdef CONFIG_SCHED_SMT
 	case SD_LV_SIBLING: /* set up CPU (sibling) groups */
-		cpumask_and(d->this_sibling_map, cpu_map,
-			    topology_thread_cpumask(cpu));
-		if (cpu == cpumask_first(d->this_sibling_map))
-			init_sched_build_groups(d->this_sibling_map, cpu_map,
+		sd = &per_cpu(cpu_domains, cpu).sd;
+		if (cpu == cpumask_first(sched_domain_span(sd)))
+			init_sched_build_groups(sched_domain_span(sd), cpu_map,
 						&cpu_to_cpu_group,
 						d->send_covered, d->tmpmask);
 		break;
 #endif
 #ifdef CONFIG_SCHED_MC
 	case SD_LV_MC: /* set up multi-core groups */
-		cpumask_and(d->this_core_map, cpu_map, cpu_coregroup_mask(cpu));
-		if (cpu == cpumask_first(d->this_core_map))
-			init_sched_build_groups(d->this_core_map, cpu_map,
+		sd = &per_cpu(core_domains, cpu).sd;
+		if (cpu == cpumask_first(sched_domain_span(sd)))
+			init_sched_build_groups(sched_domain_span(sd), cpu_map,
 						&cpu_to_core_group,
 						d->send_covered, d->tmpmask);
 		break;
 #endif
 #ifdef CONFIG_SCHED_BOOK
 	case SD_LV_BOOK: /* set up book groups */
-		cpumask_and(d->this_book_map, cpu_map, cpu_book_mask(cpu));
-		if (cpu == cpumask_first(d->this_book_map))
-			init_sched_build_groups(d->this_book_map, cpu_map,
+		sd = &per_cpu(book_domains, cpu).sd;
+		if (cpu == cpumask_first(sched_domain_span(sd)))
+			init_sched_build_groups(sched_domain_span(sd), cpu_map,
 						&cpu_to_book_group,
 						d->send_covered, d->tmpmask);
 		break;
 #endif
 	case SD_LV_CPU: /* set up physical groups */
-		cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map);
-		if (!cpumask_empty(d->nodemask))
-			init_sched_build_groups(d->nodemask, cpu_map,
+		sd = &per_cpu(phys_domains, cpu).sd;
+		if (cpu == cpumask_first(sched_domain_span(sd)))
+			init_sched_build_groups(sched_domain_span(sd), cpu_map,
 						&cpu_to_phys_group,
 						d->send_covered, d->tmpmask);
 		break;
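Worth noting in the hunk above: the three topology levels are a mechanical substitution (the recomputed mask becomes the stored span), but the SD_LV_CPU case also changes its guard, from "build if the node mask is non-empty" to the same first-CPU-of-the-span test used by the other levels, with the span taken from per_cpu(phys_domains, cpu). That makes the call safe to issue once per CPU rather than once per node, which the final hunk relies on.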
@@ -7502,11 +7485,8 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
 		build_sched_groups(&d, SD_LV_BOOK, cpu_map, i);
 		build_sched_groups(&d, SD_LV_MC, cpu_map, i);
-	}
-
-	/* Set up physical groups */
-	for (i = 0; i < nr_node_ids; i++)
-		build_sched_groups(&d, SD_LV_CPU, cpu_map, i);
+		build_sched_groups(&d, SD_LV_CPU, cpu_map, i);
+	}
 
 #ifdef CONFIG_NUMA
 	/* Set up node groups */
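With the per-CPU guard in place, calling build_sched_groups(&d, SD_LV_CPU, cpu_map, i) from the existing per-CPU loop still builds each physical group exactly once: only the first CPU of each phys domain span passes the test, as in the toy model above. The separate nr_node_ids iteration and its "Set up physical groups" comment therefore become unnecessary, along with the three per-level scratch masks and their allocation, teardown, and enum bookkeeping removed in the earlier hunks.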