 kernel/sched.c | 57 ++++++++++++++++++++++++++++++++-------------------------
 1 file changed, 32 insertions(+), 25 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index c5d1fee42360..dd95a4708370 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8482,6 +8482,37 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 	return sa_rootdomain;
 }
 
+static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
+	const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i)
+{
+	struct sched_domain *sd = NULL;
+#ifdef CONFIG_NUMA
+	struct sched_domain *parent;
+
+	d->sd_allnodes = 0;
+	if (cpumask_weight(cpu_map) >
+	    SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) {
+		sd = &per_cpu(allnodes_domains, i).sd;
+		SD_INIT(sd, ALLNODES);
+		set_domain_attribute(sd, attr);
+		cpumask_copy(sched_domain_span(sd), cpu_map);
+		cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask);
+		d->sd_allnodes = 1;
+	}
+	parent = sd;
+
+	sd = &per_cpu(node_domains, i).sd;
+	SD_INIT(sd, NODE);
+	set_domain_attribute(sd, attr);
+	sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
+	sd->parent = parent;
+	if (parent)
+		parent->child = sd;
+	cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map);
+#endif
+	return sd;
+}
+
 /*
  * Build sched domains for a given set of cpus and attach the sched domains
  * to the individual cpus
@@ -8510,31 +8541,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
 			    cpu_map);
 
-#ifdef CONFIG_NUMA
-		if (cpumask_weight(cpu_map) >
-				SD_NODES_PER_DOMAIN*cpumask_weight(d.nodemask)) {
-			sd = &per_cpu(allnodes_domains, i).sd;
-			SD_INIT(sd, ALLNODES);
-			set_domain_attribute(sd, attr);
-			cpumask_copy(sched_domain_span(sd), cpu_map);
-			cpu_to_allnodes_group(i, cpu_map, &sd->groups,
-					      d.tmpmask);
-			p = sd;
-			d.sd_allnodes = 1;
-		} else
-			p = NULL;
-
-		sd = &per_cpu(node_domains, i).sd;
-		SD_INIT(sd, NODE);
-		set_domain_attribute(sd, attr);
-		sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
-		sd->parent = p;
-		if (p)
-			p->child = sd;
-		cpumask_and(sched_domain_span(sd),
-			    sched_domain_span(sd), cpu_map);
-#endif
-
+		sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
 		p = sd;
 		sd = &per_cpu(phys_domains, i).sd;
 		SD_INIT(sd, CPU);
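
One detail the extracted helper makes explicit: on !CONFIG_NUMA builds its body reduces to `return NULL`, so the caller's `p = sd` carries NULL into the next level and the `sd->parent = parent; if (parent) parent->child = sd;` linkage simply skips the missing NUMA level. A minimal standalone sketch of that parent-linking pattern follows; the `struct domain` type and `build_numa_level()` are toy stand-ins for illustration, not kernel code.

```c
#include <stdio.h>

/* Toy stand-in for struct sched_domain: only the linkage matters here. */
struct domain {
	const char *name;
	struct domain *parent;
	struct domain *child;
};

/*
 * Mirrors the shape of __build_numa_sched_domains(): when NUMA support
 * is compiled out, the helper degenerates to "return NULL" and the
 * caller transparently builds its level with no NUMA parent.
 */
static struct domain *build_numa_level(struct domain *node_dom)
{
	struct domain *sd = NULL;
#ifdef CONFIG_NUMA
	sd = node_dom;		/* pretend we initialized a NODE domain */
	sd->name = "NODE";
#else
	(void)node_dom;
#endif
	return sd;
}

int main(void)
{
	static struct domain node, phys;	/* zero-initialized */
	struct domain *p, *sd;

	phys.name = "CPU";

	/* Same call pattern as the patched __build_sched_domains(). */
	sd = build_numa_level(&node);
	p = sd;			/* NULL when !CONFIG_NUMA */

	sd = &phys;
	sd->parent = p;		/* CPU level hangs off the NUMA level, if any */
	if (p)
		p->child = sd;

	printf("CPU domain parent: %s\n",
	       sd->parent ? sd->parent->name : "(none)");
	return 0;
}
```

Compiled with `-DCONFIG_NUMA` the sketch prints `NODE`; without it, `(none)`. This is why the refactoring needs no `#ifdef` at the call site: the conditional compilation is fully contained in the helper.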