author		Andreas Herrmann <andreas.herrmann3@amd.com>	2009-08-18 06:54:06 -0400
committer	Ingo Molnar <mingo@elte.hu>			2009-08-18 12:35:40 -0400
commit		7f4588f3aa395632fec9ba2e15a1920f0682fda0 (patch)
tree		b827a407ef4f509e80aa60bbc0b0eb0dcf8cf402
parent		2109b99ee192764b407dc7f52babb74740eea6f9 (diff)
sched: Separate out build of NUMA sched domain from __build_sched_domains
... to further strip down __build_sched_domains().

Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20090818105406.GD29515@alberich.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	kernel/sched.c	57
1 file changed, 32 insertions(+), 25 deletions(-)
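In short, the #ifdef CONFIG_NUMA block that used to live inside __build_sched_domains() now sits in its own helper, __build_numa_sched_domains(), which returns the lowest NUMA-level domain it built, or NULL when CONFIG_NUMA is not set; the caller then parents the physical-CPU level to whatever it gets back. A minimal, self-contained sketch of that shape, using stand-in types and names rather than the kernel's:

/*
 * Illustration only (stand-in types/names, not kernel code): an optional
 * block is moved into a helper that returns the lowest level it built, or
 * NULL when the feature is compiled out, so the caller can chain the next
 * level without carrying its own conditional.
 */
#include <stdio.h>

struct level { struct level *parent, *child; const char *name; };

static struct level *build_optional_levels(struct level *slots, int enabled)
{
	struct level *sd = NULL, *parent;

	if (!enabled)			/* plays the role of !CONFIG_NUMA */
		return sd;
	sd = &slots[0];			/* "allnodes"-like top level */
	sd->name = "allnodes";
	parent = sd;
	sd = &slots[1];			/* "node"-like level below it */
	sd->name = "node";
	sd->parent = parent;
	if (parent)
		parent->child = sd;
	return sd;			/* lowest level built so far */
}

int main(void)
{
	struct level slots[3] = {{0}};
	struct level *p = build_optional_levels(slots, 1);
	struct level *sd = &slots[2];	/* caller's next ("phys"-like) level */

	sd->name = "phys";
	sd->parent = p;			/* mirrors: sd->parent = p; if (p) p->child = sd; */
	if (p)
		p->child = sd;
	for (; sd; sd = sd->parent)	/* print the chain bottom-up */
		printf("%s\n", sd->name);
	return 0;
}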
diff --git a/kernel/sched.c b/kernel/sched.c
index c5d1fee42360..dd95a4708370 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8482,6 +8482,37 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 	return sa_rootdomain;
 }
 
+static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
+	const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i)
+{
+	struct sched_domain *sd = NULL;
+#ifdef CONFIG_NUMA
+	struct sched_domain *parent;
+
+	d->sd_allnodes = 0;
+	if (cpumask_weight(cpu_map) >
+	    SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) {
+		sd = &per_cpu(allnodes_domains, i).sd;
+		SD_INIT(sd, ALLNODES);
+		set_domain_attribute(sd, attr);
+		cpumask_copy(sched_domain_span(sd), cpu_map);
+		cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask);
+		d->sd_allnodes = 1;
+	}
+	parent = sd;
+
+	sd = &per_cpu(node_domains, i).sd;
+	SD_INIT(sd, NODE);
+	set_domain_attribute(sd, attr);
+	sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
+	sd->parent = parent;
+	if (parent)
+		parent->child = sd;
+	cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map);
+#endif
+	return sd;
+}
+
 /*
  * Build sched domains for a given set of cpus and attach the sched domains
  * to the individual cpus
@@ -8510,31 +8541,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
 			    cpu_map);
 
-#ifdef CONFIG_NUMA
-		if (cpumask_weight(cpu_map) >
-		    SD_NODES_PER_DOMAIN*cpumask_weight(d.nodemask)) {
-			sd = &per_cpu(allnodes_domains, i).sd;
-			SD_INIT(sd, ALLNODES);
-			set_domain_attribute(sd, attr);
-			cpumask_copy(sched_domain_span(sd), cpu_map);
-			cpu_to_allnodes_group(i, cpu_map, &sd->groups,
-					      d.tmpmask);
-			p = sd;
-			d.sd_allnodes = 1;
-		} else
-			p = NULL;
-
-		sd = &per_cpu(node_domains, i).sd;
-		SD_INIT(sd, NODE);
-		set_domain_attribute(sd, attr);
-		sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
-		sd->parent = p;
-		if (p)
-			p->child = sd;
-		cpumask_and(sched_domain_span(sd),
-			    sched_domain_span(sd), cpu_map);
-#endif
-
+		sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
 		p = sd;
 		sd = &per_cpu(phys_domains, i).sd;
 		SD_INIT(sd, CPU);