aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorAndreas Herrmann <andreas.herrmann3@amd.com>2009-08-18 06:58:38 -0400
committerIngo Molnar <mingo@elte.hu>2009-08-18 12:35:43 -0400
commita2af04cdbb748158043e31799b28c48272081600 (patch)
tree3a8e758141439c867ebe289b0731545977edf9b2 /kernel
parent0e8e85c941d8f1b43bcc2e3b8b7026cdae476c53 (diff)
sched: Separate out build of MC sched groups from __build_sched_domains
... to further strip down __build_sched_domains(). Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com> Cc: Peter Zijlstra <peterz@infradead.org> LKML-Reference: <20090818105838.GI29515@alberich.amd.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched.c23
1 file changed, 10 insertions, 13 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 43cfc6e54e96..f2c202f66297 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8577,6 +8577,15 @@ static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
 					    d->send_covered, d->tmpmask);
 		break;
 #endif
+#ifdef CONFIG_SCHED_MC
+	case SD_LV_MC: /* set up multi-core groups */
+		cpumask_and(d->this_core_map, cpu_map, cpu_coregroup_mask(cpu));
+		if (cpu == cpumask_first(d->this_core_map))
+			init_sched_build_groups(d->this_core_map, cpu_map,
+						&cpu_to_core_group,
+						d->send_covered, d->tmpmask);
+		break;
+#endif
 	default:
 		break;
 	}
@@ -8618,21 +8627,9 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 
 	for_each_cpu(i, cpu_map) {
 		build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
+		build_sched_groups(&d, SD_LV_MC, cpu_map, i);
 	}
 
-#ifdef CONFIG_SCHED_MC
-	/* Set up multi-core groups */
-	for_each_cpu(i, cpu_map) {
-		cpumask_and(d.this_core_map, cpu_coregroup_mask(i), cpu_map);
-		if (i != cpumask_first(d.this_core_map))
-			continue;
-
-		init_sched_build_groups(d.this_core_map, cpu_map,
-					&cpu_to_core_group,
-					d.send_covered, d.tmpmask);
-	}
-#endif
-
 	/* Set up physical groups */
 	for (i = 0; i < nr_node_ids; i++) {
 		cpumask_and(d.nodemask, cpumask_of_node(i), cpu_map);