author		Andreas Herrmann <andreas.herrmann3@amd.com>	2009-08-18 06:57:51 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-08-18 12:35:42 -0400
commit		0e8e85c941d8f1b43bcc2e3b8b7026cdae476c53 (patch)
tree		2b9f50df17aeb9ed346a5b0c7f6ed98d38992623 /kernel/sched.c
parent		d81735355533cd4b2bce9508d86fcad24a38cf47 (diff)
sched: Separate out build of SMT sched groups from __build_sched_domains
... to further strip down __build_sched_domains().

Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20090818105751.GH29515@alberich.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	31
1 file changed, 20 insertions(+), 11 deletions(-)
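The refactoring pattern behind this change is simple: per-level sched-group construction is hoisted out of the main builder into a helper that switches on the domain level, so the per-CPU loop in __build_sched_domains() shrinks to a single call. Below is a minimal userspace sketch of that pattern, not kernel code; every identifier in it (toy_domain_level, build_groups, NR_TOY_CPUS, build_domains) is hypothetical and only stands in for the kernel's s_data/SD_LV_* machinery.

/*
 * Minimal userspace sketch (not kernel code) of the pattern used in this
 * commit: group construction for one topology level moves out of the main
 * builder into a helper that switches on the level.  All identifiers here
 * are hypothetical.
 */
#include <stdio.h>

#define NR_TOY_CPUS 4

enum toy_domain_level {
	TOY_LV_SIBLING,
	TOY_LV_MC,
};

/* One switch per level keeps level-specific setup out of the builder. */
static void build_groups(enum toy_domain_level l, int cpu)
{
	switch (l) {
	case TOY_LV_SIBLING:
		printf("cpu %d: build sibling (SMT) group\n", cpu);
		break;
	default:
		break;
	}
}

static void build_domains(void)
{
	int i;

	/* The per-CPU loop collapses to a single helper call per level. */
	for (i = 0; i < NR_TOY_CPUS; i++)
		build_groups(TOY_LV_SIBLING, i);
}

int main(void)
{
	build_domains();
	return 0;
}

The payoff of the switch-based helper is that it can grow additional case labels for other topology levels without the builder itself changing shape, which is what "further strip down __build_sched_domains()" refers to.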
diff --git a/kernel/sched.c b/kernel/sched.c
index 2ecec06e3f0c..43cfc6e54e96 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8563,6 +8563,25 @@ static struct sched_domain *__build_smt_sched_domain(struct s_data *d,
 	return sd;
 }
 
+static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
+			       const struct cpumask *cpu_map, int cpu)
+{
+	switch (l) {
+#ifdef CONFIG_SCHED_SMT
+	case SD_LV_SIBLING: /* set up CPU (sibling) groups */
+		cpumask_and(d->this_sibling_map, cpu_map,
+			    topology_thread_cpumask(cpu));
+		if (cpu == cpumask_first(d->this_sibling_map))
+			init_sched_build_groups(d->this_sibling_map, cpu_map,
+						&cpu_to_cpu_group,
+						d->send_covered, d->tmpmask);
+		break;
+#endif
+	default:
+		break;
+	}
+}
+
 /*
  * Build sched domains for a given set of cpus and attach the sched domains
  * to the individual cpus
@@ -8597,19 +8616,9 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
 	}
 
-#ifdef CONFIG_SCHED_SMT
-	/* Set up CPU (sibling) groups */
 	for_each_cpu(i, cpu_map) {
-		cpumask_and(d.this_sibling_map,
-			    topology_thread_cpumask(i), cpu_map);
-		if (i != cpumask_first(d.this_sibling_map))
-			continue;
-
-		init_sched_build_groups(d.this_sibling_map, cpu_map,
-					&cpu_to_cpu_group,
-					d.send_covered, d.tmpmask);
+		build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
 	}
-#endif
 
 #ifdef CONFIG_SCHED_MC
 	/* Set up multi-core groups */