Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 565
1 file changed, 320 insertions(+), 245 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index 38d05a89e0f..da1edc8277d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8191,6 +8191,39 @@ struct static_sched_domain {
         DECLARE_BITMAP(span, CONFIG_NR_CPUS);
 };
 
+struct s_data {
+#ifdef CONFIG_NUMA
+        int                     sd_allnodes;
+        cpumask_var_t           domainspan;
+        cpumask_var_t           covered;
+        cpumask_var_t           notcovered;
+#endif
+        cpumask_var_t           nodemask;
+        cpumask_var_t           this_sibling_map;
+        cpumask_var_t           this_core_map;
+        cpumask_var_t           send_covered;
+        cpumask_var_t           tmpmask;
+        struct sched_group      **sched_group_nodes;
+        struct root_domain      *rd;
+};
+
+enum s_alloc {
+        sa_sched_groups = 0,
+        sa_rootdomain,
+        sa_tmpmask,
+        sa_send_covered,
+        sa_this_core_map,
+        sa_this_sibling_map,
+        sa_nodemask,
+        sa_sched_group_nodes,
+#ifdef CONFIG_NUMA
+        sa_notcovered,
+        sa_covered,
+        sa_domainspan,
+#endif
+        sa_none,
+};
+
 /*
  * SMT sched-domains:
  */
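Note on the hunk above: the new enum s_alloc names the allocation stages of the domain-setup path in teardown order, so a single switch that falls through from the failed stage can free exactly what has been allocated so far. The following is a minimal, self-contained userspace sketch of that idiom; the names are hypothetical and plain malloc/free stand in for the kernel allocators, so treat it as an illustration, not kernel code.

#include <stdlib.h>

/* Hypothetical illustration of the s_alloc idiom: the returned enumerator
 * names the point to start unwinding from, and the teardown switch falls
 * through so only what was actually allocated gets freed. */
enum demo_alloc { demo_all, demo_b, demo_a, demo_none };

struct demo_data { void *a, *b, *c; };

static void demo_free(struct demo_data *d, enum demo_alloc what)
{
        switch (what) {
        case demo_all:
                free(d->c);             /* fall through */
        case demo_b:
                free(d->b);             /* fall through */
        case demo_a:
                free(d->a);             /* fall through */
        case demo_none:
                break;
        }
}

static enum demo_alloc demo_alloc_all(struct demo_data *d)
{
        if (!(d->a = malloc(16)))
                return demo_none;       /* nothing to undo */
        if (!(d->b = malloc(16)))
                return demo_a;          /* undo a only */
        if (!(d->c = malloc(16)))
                return demo_b;          /* undo b, then a */
        return demo_all;
}

int main(void)
{
        struct demo_data d = { 0 };
        enum demo_alloc state = demo_alloc_all(&d);

        if (state != demo_all) {
                demo_free(&d, state);   /* partial rollback on failure */
                return 1;
        }
        /* ... use d ... */
        demo_free(&d, demo_all);        /* full teardown on success */
        return 0;
}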
@@ -8313,6 +8346,71 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
                 sg = sg->next;
         } while (sg != group_head);
 }
+
+static int build_numa_sched_groups(struct s_data *d,
+                                   const struct cpumask *cpu_map, int num)
+{
+        struct sched_domain *sd;
+        struct sched_group *sg, *prev;
+        int n, j;
+
+        cpumask_clear(d->covered);
+        cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map);
+        if (cpumask_empty(d->nodemask)) {
+                d->sched_group_nodes[num] = NULL;
+                goto out;
+        }
+
+        sched_domain_node_span(num, d->domainspan);
+        cpumask_and(d->domainspan, d->domainspan, cpu_map);
+
+        sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
+                          GFP_KERNEL, num);
+        if (!sg) {
+                printk(KERN_WARNING "Can not alloc domain group for node %d\n",
+                       num);
+                return -ENOMEM;
+        }
+        d->sched_group_nodes[num] = sg;
+
+        for_each_cpu(j, d->nodemask) {
+                sd = &per_cpu(node_domains, j).sd;
+                sd->groups = sg;
+        }
+
+        sg->__cpu_power = 0;
+        cpumask_copy(sched_group_cpus(sg), d->nodemask);
+        sg->next = sg;
+        cpumask_or(d->covered, d->covered, d->nodemask);
+
+        prev = sg;
+        for (j = 0; j < nr_node_ids; j++) {
+                n = (num + j) % nr_node_ids;
+                cpumask_complement(d->notcovered, d->covered);
+                cpumask_and(d->tmpmask, d->notcovered, cpu_map);
+                cpumask_and(d->tmpmask, d->tmpmask, d->domainspan);
+                if (cpumask_empty(d->tmpmask))
+                        break;
+                cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n));
+                if (cpumask_empty(d->tmpmask))
+                        continue;
+                sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
+                                  GFP_KERNEL, num);
+                if (!sg) {
+                        printk(KERN_WARNING
+                               "Can not alloc domain group for node %d\n", j);
+                        return -ENOMEM;
+                }
+                sg->__cpu_power = 0;
+                cpumask_copy(sched_group_cpus(sg), d->tmpmask);
+                sg->next = prev->next;
+                cpumask_or(d->covered, d->covered, d->tmpmask);
+                prev->next = sg;
+                prev = sg;
+        }
+out:
+        return 0;
+}
 #endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_NUMA
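Note on build_numa_sched_groups() above: the groups of a node are kept on a circular, singly linked list. The first group is made a ring of one (sg->next = sg), and every later group is spliced in behind the previous one with sg->next = prev->next; prev->next = sg;. Below is a small userspace sketch of that splice and of the do/while walk used elsewhere in sched.c; the toy types are illustrative only, not the kernel's struct sched_group.

#include <stdio.h>

struct toy_group { int id; struct toy_group *next; };

int main(void)
{
        static struct toy_group g[4];
        struct toy_group *head = &g[0], *prev = head, *sg;
        int i;

        head->id = 0;
        head->next = head;              /* ring of one: sg->next = sg */

        for (i = 1; i < 4; i++) {
                sg = &g[i];
                sg->id = i;
                sg->next = prev->next;  /* splice in after prev ... */
                prev->next = sg;        /* ... keeping the list circular */
                prev = sg;
        }

        /* Walk the ring exactly once, the way init_numa_sched_groups_power()
         * iterates with a do/while until it returns to the head. */
        sg = head;
        do {
                printf("group %d\n", sg->id);
                sg = sg->next;
        } while (sg != head);
        return 0;
}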
@@ -8478,280 +8576,285 @@ static void set_domain_attribute(struct sched_domain *sd,
         }
 }
 
-/*
- * Build sched domains for a given set of cpus and attach the sched domains
- * to the individual cpus
- */
-static int __build_sched_domains(const struct cpumask *cpu_map,
-                                 struct sched_domain_attr *attr)
-{
-        int i, err = -ENOMEM;
-        struct root_domain *rd;
-        cpumask_var_t nodemask, this_sibling_map, this_core_map, send_covered,
-                tmpmask;
-#ifdef CONFIG_NUMA
-        cpumask_var_t domainspan, covered, notcovered;
-        struct sched_group **sched_group_nodes = NULL;
-        int sd_allnodes = 0;
-
-        if (!alloc_cpumask_var(&domainspan, GFP_KERNEL))
-                goto out;
-        if (!alloc_cpumask_var(&covered, GFP_KERNEL))
-                goto free_domainspan;
-        if (!alloc_cpumask_var(&notcovered, GFP_KERNEL))
-                goto free_covered;
-#endif
-
-        if (!alloc_cpumask_var(&nodemask, GFP_KERNEL))
-                goto free_notcovered;
-        if (!alloc_cpumask_var(&this_sibling_map, GFP_KERNEL))
-                goto free_nodemask;
-        if (!alloc_cpumask_var(&this_core_map, GFP_KERNEL))
-                goto free_this_sibling_map;
-        if (!alloc_cpumask_var(&send_covered, GFP_KERNEL))
-                goto free_this_core_map;
-        if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
-                goto free_send_covered;
+static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
+                                 const struct cpumask *cpu_map)
+{
+        switch (what) {
+        case sa_sched_groups:
+                free_sched_groups(cpu_map, d->tmpmask); /* fall through */
+                d->sched_group_nodes = NULL;
+        case sa_rootdomain:
+                free_rootdomain(d->rd); /* fall through */
+        case sa_tmpmask:
+                free_cpumask_var(d->tmpmask); /* fall through */
+        case sa_send_covered:
+                free_cpumask_var(d->send_covered); /* fall through */
+        case sa_this_core_map:
+                free_cpumask_var(d->this_core_map); /* fall through */
+        case sa_this_sibling_map:
+                free_cpumask_var(d->this_sibling_map); /* fall through */
+        case sa_nodemask:
+                free_cpumask_var(d->nodemask); /* fall through */
+        case sa_sched_group_nodes:
+#ifdef CONFIG_NUMA
+                kfree(d->sched_group_nodes); /* fall through */
+        case sa_notcovered:
+                free_cpumask_var(d->notcovered); /* fall through */
+        case sa_covered:
+                free_cpumask_var(d->covered); /* fall through */
+        case sa_domainspan:
+                free_cpumask_var(d->domainspan); /* fall through */
+#endif
+        case sa_none:
+                break;
+        }
+}
 
+static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
+                                                   const struct cpumask *cpu_map)
+{
 #ifdef CONFIG_NUMA
-        /*
-         * Allocate the per-node list of sched groups
-         */
-        sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
-                                    GFP_KERNEL);
-        if (!sched_group_nodes) {
+        if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL))
+                return sa_none;
+        if (!alloc_cpumask_var(&d->covered, GFP_KERNEL))
+                return sa_domainspan;
+        if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL))
+                return sa_covered;
+        /* Allocate the per-node list of sched groups */
+        d->sched_group_nodes = kcalloc(nr_node_ids,
+                                       sizeof(struct sched_group *), GFP_KERNEL);
+        if (!d->sched_group_nodes) {
                 printk(KERN_WARNING "Can not alloc sched group node list\n");
-                goto free_tmpmask;
+                return sa_notcovered;
         }
-#endif
-
-        rd = alloc_rootdomain();
-        if (!rd) {
+        sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
+#endif
+        if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL))
+                return sa_sched_group_nodes;
+        if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL))
+                return sa_nodemask;
+        if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
+                return sa_this_sibling_map;
+        if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
+                return sa_this_core_map;
+        if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
+                return sa_send_covered;
+        d->rd = alloc_rootdomain();
+        if (!d->rd) {
                 printk(KERN_WARNING "Cannot alloc root domain\n");
-                goto free_sched_groups;
+                return sa_tmpmask;
         }
+        return sa_rootdomain;
+}
 
+static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
+        const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i)
+{
+        struct sched_domain *sd = NULL;
 #ifdef CONFIG_NUMA
-        sched_group_nodes_bycpu[cpumask_first(cpu_map)] = sched_group_nodes;
-#endif
-
-        /*
-         * Set up domains for cpus specified by the cpu_map.
-         */
-        for_each_cpu(i, cpu_map) {
-                struct sched_domain *sd = NULL, *p;
-
-                cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map);
-
-#ifdef CONFIG_NUMA
-                if (cpumask_weight(cpu_map) >
-                                SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
-                        sd = &per_cpu(allnodes_domains, i).sd;
-                        SD_INIT(sd, ALLNODES);
-                        set_domain_attribute(sd, attr);
-                        cpumask_copy(sched_domain_span(sd), cpu_map);
-                        cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
-                        p = sd;
-                        sd_allnodes = 1;
-                } else
-                        p = NULL;
-
-                sd = &per_cpu(node_domains, i).sd;
-                SD_INIT(sd, NODE);
-                set_domain_attribute(sd, attr);
-                sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
-                sd->parent = p;
-                if (p)
-                        p->child = sd;
-                cpumask_and(sched_domain_span(sd),
-                            sched_domain_span(sd), cpu_map);
+        struct sched_domain *parent;
+
+        d->sd_allnodes = 0;
+        if (cpumask_weight(cpu_map) >
+            SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) {
+                sd = &per_cpu(allnodes_domains, i).sd;
+                SD_INIT(sd, ALLNODES);
+                set_domain_attribute(sd, attr);
+                cpumask_copy(sched_domain_span(sd), cpu_map);
+                cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask);
+                d->sd_allnodes = 1;
+        }
+        parent = sd;
+
+        sd = &per_cpu(node_domains, i).sd;
+        SD_INIT(sd, NODE);
+        set_domain_attribute(sd, attr);
+        sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
+        sd->parent = parent;
+        if (parent)
+                parent->child = sd;
+        cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map);
 #endif
+        return sd;
+}
 
-                p = sd;
-                sd = &per_cpu(phys_domains, i).sd;
-                SD_INIT(sd, CPU);
-                set_domain_attribute(sd, attr);
-                cpumask_copy(sched_domain_span(sd), nodemask);
-                sd->parent = p;
-                if (p)
-                        p->child = sd;
-                cpu_to_phys_group(i, cpu_map, &sd->groups, tmpmask);
-
-#ifdef CONFIG_SCHED_MC
-                p = sd;
-                sd = &per_cpu(core_domains, i).sd;
-                SD_INIT(sd, MC);
-                set_domain_attribute(sd, attr);
-                cpumask_and(sched_domain_span(sd), cpu_map,
-                                                   cpu_coregroup_mask(i));
-                sd->parent = p;
-                p->child = sd;
-                cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
+static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
+        const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+        struct sched_domain *parent, int i)
+{
+        struct sched_domain *sd;
+        sd = &per_cpu(phys_domains, i).sd;
+        SD_INIT(sd, CPU);
+        set_domain_attribute(sd, attr);
+        cpumask_copy(sched_domain_span(sd), d->nodemask);
+        sd->parent = parent;
+        if (parent)
+                parent->child = sd;
+        cpu_to_phys_group(i, cpu_map, &sd->groups, d->tmpmask);
+        return sd;
+}
+
+static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
+        const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+        struct sched_domain *parent, int i)
+{
+        struct sched_domain *sd = parent;
+#ifdef CONFIG_SCHED_MC
+        sd = &per_cpu(core_domains, i).sd;
+        SD_INIT(sd, MC);
+        set_domain_attribute(sd, attr);
+        cpumask_and(sched_domain_span(sd), cpu_map, cpu_coregroup_mask(i));
+        sd->parent = parent;
+        parent->child = sd;
+        cpu_to_core_group(i, cpu_map, &sd->groups, d->tmpmask);
 #endif
+        return sd;
+}
 
+static struct sched_domain *__build_smt_sched_domain(struct s_data *d,
+        const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+        struct sched_domain *parent, int i)
+{
+        struct sched_domain *sd = parent;
 #ifdef CONFIG_SCHED_SMT
-                p = sd;
-                sd = &per_cpu(cpu_domains, i).sd;
-                SD_INIT(sd, SIBLING);
-                set_domain_attribute(sd, attr);
-                cpumask_and(sched_domain_span(sd),
-                            topology_thread_cpumask(i), cpu_map);
-                sd->parent = p;
-                p->child = sd;
-                cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
+        sd = &per_cpu(cpu_domains, i).sd;
+        SD_INIT(sd, SIBLING);
+        set_domain_attribute(sd, attr);
+        cpumask_and(sched_domain_span(sd), cpu_map, topology_thread_cpumask(i));
+        sd->parent = parent;
+        parent->child = sd;
+        cpu_to_cpu_group(i, cpu_map, &sd->groups, d->tmpmask);
 #endif
-        }
-
-#ifdef CONFIG_SCHED_SMT
-        /* Set up CPU (sibling) groups */
-        for_each_cpu(i, cpu_map) {
-                cpumask_and(this_sibling_map,
-                            topology_thread_cpumask(i), cpu_map);
-                if (i != cpumask_first(this_sibling_map))
-                        continue;
-
-                init_sched_build_groups(this_sibling_map, cpu_map,
-                                        &cpu_to_cpu_group,
-                                        send_covered, tmpmask);
-        }
-#endif
-
-#ifdef CONFIG_SCHED_MC
-        /* Set up multi-core groups */
-        for_each_cpu(i, cpu_map) {
-                cpumask_and(this_core_map, cpu_coregroup_mask(i), cpu_map);
-                if (i != cpumask_first(this_core_map))
-                        continue;
-
-                init_sched_build_groups(this_core_map, cpu_map,
-                                        &cpu_to_core_group,
-                                        send_covered, tmpmask);
-        }
-#endif
-
-        /* Set up physical groups */
-        for (i = 0; i < nr_node_ids; i++) {
-                cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
-                if (cpumask_empty(nodemask))
-                        continue;
-
-                init_sched_build_groups(nodemask, cpu_map,
-                                        &cpu_to_phys_group,
-                                        send_covered, tmpmask);
-        }
-
+        return sd;
+}
+
+static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
+                               const struct cpumask *cpu_map, int cpu)
+{
+        switch (l) {
+#ifdef CONFIG_SCHED_SMT
+        case SD_LV_SIBLING: /* set up CPU (sibling) groups */
+                cpumask_and(d->this_sibling_map, cpu_map,
+                            topology_thread_cpumask(cpu));
+                if (cpu == cpumask_first(d->this_sibling_map))
+                        init_sched_build_groups(d->this_sibling_map, cpu_map,
+                                                &cpu_to_cpu_group,
+                                                d->send_covered, d->tmpmask);
+                break;
+#endif
+#ifdef CONFIG_SCHED_MC
+        case SD_LV_MC: /* set up multi-core groups */
+                cpumask_and(d->this_core_map, cpu_map, cpu_coregroup_mask(cpu));
+                if (cpu == cpumask_first(d->this_core_map))
+                        init_sched_build_groups(d->this_core_map, cpu_map,
+                                                &cpu_to_core_group,
+                                                d->send_covered, d->tmpmask);
+                break;
+#endif
+        case SD_LV_CPU: /* set up physical groups */
+                cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map);
+                if (!cpumask_empty(d->nodemask))
+                        init_sched_build_groups(d->nodemask, cpu_map,
+                                                &cpu_to_phys_group,
+                                                d->send_covered, d->tmpmask);
+                break;
 #ifdef CONFIG_NUMA
-        /* Set up node groups */
-        if (sd_allnodes) {
-                init_sched_build_groups(cpu_map, cpu_map,
-                                        &cpu_to_allnodes_group,
-                                        send_covered, tmpmask);
-        }
-
-        for (i = 0; i < nr_node_ids; i++) {
-                /* Set up node groups */
-                struct sched_group *sg, *prev;
-                int j;
-
-                cpumask_clear(covered);
-                cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
-                if (cpumask_empty(nodemask)) {
-                        sched_group_nodes[i] = NULL;
-                        continue;
-                }
-
-                sched_domain_node_span(i, domainspan);
-                cpumask_and(domainspan, domainspan, cpu_map);
-
-                sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
-                                  GFP_KERNEL, i);
-                if (!sg) {
-                        printk(KERN_WARNING "Can not alloc domain group for "
-                                "node %d\n", i);
-                        goto error;
-                }
-                sched_group_nodes[i] = sg;
-                for_each_cpu(j, nodemask) {
-                        struct sched_domain *sd;
-
-                        sd = &per_cpu(node_domains, j).sd;
-                        sd->groups = sg;
-                }
-                sg->__cpu_power = 0;
-                cpumask_copy(sched_group_cpus(sg), nodemask);
-                sg->next = sg;
-                cpumask_or(covered, covered, nodemask);
-                prev = sg;
-
-                for (j = 0; j < nr_node_ids; j++) {
-                        int n = (i + j) % nr_node_ids;
-
-                        cpumask_complement(notcovered, covered);
-                        cpumask_and(tmpmask, notcovered, cpu_map);
-                        cpumask_and(tmpmask, tmpmask, domainspan);
-                        if (cpumask_empty(tmpmask))
-                                break;
-
-                        cpumask_and(tmpmask, tmpmask, cpumask_of_node(n));
-                        if (cpumask_empty(tmpmask))
-                                continue;
-
-                        sg = kmalloc_node(sizeof(struct sched_group) +
-                                          cpumask_size(),
-                                          GFP_KERNEL, i);
-                        if (!sg) {
-                                printk(KERN_WARNING
-                                "Can not alloc domain group for node %d\n", j);
-                                goto error;
-                        }
-                        sg->__cpu_power = 0;
-                        cpumask_copy(sched_group_cpus(sg), tmpmask);
-                        sg->next = prev->next;
-                        cpumask_or(covered, covered, tmpmask);
-                        prev->next = sg;
-                        prev = sg;
-                }
-        }
+        case SD_LV_ALLNODES:
+                init_sched_build_groups(cpu_map, cpu_map, &cpu_to_allnodes_group,
+                                        d->send_covered, d->tmpmask);
+                break;
+#endif
+        default:
+                break;
+        }
+}
+
+/*
+ * Build sched domains for a given set of cpus and attach the sched domains
+ * to the individual cpus
+ */
+static int __build_sched_domains(const struct cpumask *cpu_map,
+                                 struct sched_domain_attr *attr)
+{
+        enum s_alloc alloc_state = sa_none;
+        struct s_data d;
+        struct sched_domain *sd;
+        int i;
+#ifdef CONFIG_NUMA
+        d.sd_allnodes = 0;
+#endif
+
+        alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
+        if (alloc_state != sa_rootdomain)
+                goto error;
+        alloc_state = sa_sched_groups;
+
+        /*
+         * Set up domains for cpus specified by the cpu_map.
+         */
+        for_each_cpu(i, cpu_map) {
+                cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
+                            cpu_map);
+
+                sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
+                sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
+                sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
+                sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
+        }
+
+        for_each_cpu(i, cpu_map) {
+                build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
+                build_sched_groups(&d, SD_LV_MC, cpu_map, i);
+        }
+
+        /* Set up physical groups */
+        for (i = 0; i < nr_node_ids; i++)
+                build_sched_groups(&d, SD_LV_CPU, cpu_map, i);
+
+#ifdef CONFIG_NUMA
+        /* Set up node groups */
+        if (d.sd_allnodes)
+                build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0);
+
+        for (i = 0; i < nr_node_ids; i++)
+                if (build_numa_sched_groups(&d, cpu_map, i))
+                        goto error;
 #endif
 
         /* Calculate CPU power for physical packages and nodes */
 #ifdef CONFIG_SCHED_SMT
         for_each_cpu(i, cpu_map) {
-                struct sched_domain *sd = &per_cpu(cpu_domains, i).sd;
-
+                sd = &per_cpu(cpu_domains, i).sd;
                 init_sched_groups_power(i, sd);
         }
 #endif
 #ifdef CONFIG_SCHED_MC
         for_each_cpu(i, cpu_map) {
-                struct sched_domain *sd = &per_cpu(core_domains, i).sd;
-
+                sd = &per_cpu(core_domains, i).sd;
                 init_sched_groups_power(i, sd);
         }
 #endif
 
         for_each_cpu(i, cpu_map) {
-                struct sched_domain *sd = &per_cpu(phys_domains, i).sd;
-
+                sd = &per_cpu(phys_domains, i).sd;
                 init_sched_groups_power(i, sd);
         }
 
 #ifdef CONFIG_NUMA
         for (i = 0; i < nr_node_ids; i++)
-                init_numa_sched_groups_power(sched_group_nodes[i]);
+                init_numa_sched_groups_power(d.sched_group_nodes[i]);
 
-        if (sd_allnodes) {
+        if (d.sd_allnodes) {
                 struct sched_group *sg;
 
                 cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg,
-                                                                tmpmask);
+                                                                d.tmpmask);
                 init_numa_sched_groups_power(sg);
         }
 #endif
 
         /* Attach the domains */
         for_each_cpu(i, cpu_map) {
-                struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
                 sd = &per_cpu(cpu_domains, i).sd;
 #elif defined(CONFIG_SCHED_MC)
@@ -8759,44 +8862,16 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 #else
                 sd = &per_cpu(phys_domains, i).sd;
 #endif
-                cpu_attach_domain(sd, rd, i);
+                cpu_attach_domain(sd, d.rd, i);
         }
 
-        err = 0;
-
-free_tmpmask:
-        free_cpumask_var(tmpmask);
-free_send_covered:
-        free_cpumask_var(send_covered);
-free_this_core_map:
-        free_cpumask_var(this_core_map);
-free_this_sibling_map:
-        free_cpumask_var(this_sibling_map);
-free_nodemask:
-        free_cpumask_var(nodemask);
-free_notcovered:
-#ifdef CONFIG_NUMA
-        free_cpumask_var(notcovered);
-free_covered:
-        free_cpumask_var(covered);
-free_domainspan:
-        free_cpumask_var(domainspan);
-out:
-#endif
-        return err;
-
-free_sched_groups:
-#ifdef CONFIG_NUMA
-        kfree(sched_group_nodes);
-#endif
-        goto free_tmpmask;
+        d.sched_group_nodes = NULL; /* don't free this we still need it */
+        __free_domain_allocs(&d, sa_tmpmask, cpu_map);
+        return 0;
 
-#ifdef CONFIG_NUMA
 error:
-        free_sched_groups(cpu_map, tmpmask);
-        free_rootdomain(rd);
-        goto free_tmpmask;
-#endif
+        __free_domain_allocs(&d, alloc_state, cpu_map);
+        return -ENOMEM;
 }
 
 static int build_sched_domains(const struct cpumask *cpu_map)
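Note on the rewritten __build_sched_domains() above: the per-CPU domain hierarchy is now produced by chaining four helpers, each receiving the domain built by the previous one as parent, wiring sd->parent and parent->child, and returning the new bottom level (NUMA node, then physical CPU, then MC, then SMT). The sketch below shows only that chaining pattern with hypothetical toy names; it is not the kernel API.

#include <stdio.h>

/* Hypothetical miniature of the parent/child chaining used by the
 * __build_*_sched_domain() helpers. */
struct toy_domain {
        const char *name;
        struct toy_domain *parent;
        struct toy_domain *child;
};

static struct toy_domain *toy_build_level(struct toy_domain *sd,
                                          struct toy_domain *parent,
                                          const char *name)
{
        sd->name = name;
        sd->parent = parent;
        sd->child = NULL;
        if (parent)
                parent->child = sd;     /* link downward, like p->child = sd */
        return sd;                      /* becomes the parent of the next level */
}

int main(void)
{
        static struct toy_domain node, cpu, mc, smt;
        struct toy_domain *sd = NULL;

        /* Same shape as: numa -> cpu -> mc -> smt in __build_sched_domains() */
        sd = toy_build_level(&node, sd, "NODE");
        sd = toy_build_level(&cpu,  sd, "CPU");
        sd = toy_build_level(&mc,   sd, "MC");
        sd = toy_build_level(&smt,  sd, "SIBLING");

        /* Walk upward from the lowest level, as the scheduler does via
         * sd->parent during load balancing. */
        for (; sd; sd = sd->parent)
                printf("%s\n", sd->name);
        return 0;
}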