path: root/kernel/sched.c
author    Andreas Herrmann <andreas.herrmann3@amd.com>	2009-08-18 07:01:11 -0400
committer Ingo Molnar <mingo@elte.hu>	2009-08-18 12:35:44 -0400
commit    0601a88d8fa4508eaa49a6d96c6685e1dece38e3 (patch)
tree      986cc76c135f45817a56291b8148bafac304bf09 /kernel/sched.c
parent    de616e36c700dc312d9021dd75f769c463f85122 (diff)
sched: Separate out build of NUMA sched groups from __build_sched_domains
... to further strip down __build_sched_domains().

Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20090818110111.GL29515@alberich.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
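The patch is a straightforward extract-function refactoring: the per-node group-building loop body moves out of __build_sched_domains() into the new build_numa_sched_groups(), which reports allocation failure through its return value (-ENOMEM) instead of jumping to the caller's error label, and turns the old "empty node" continue into a goto out / return 0. A minimal, self-contained sketch of that calling pattern, using made-up names (build_one(), setup_all()) rather than kernel code:

#include <stdio.h>
#include <stdlib.h>

/*
 * Hypothetical helper in the role of build_numa_sched_groups():
 * returns 0 on success (including the "nothing to do" case) and a
 * negative value on allocation failure, mirroring -ENOMEM.
 */
static int build_one(int item, char **slot)
{
	char *buf;

	if (item % 2) {			/* "empty node": record NULL, succeed */
		*slot = NULL;
		return 0;
	}
	buf = malloc(32);		/* stands in for kmalloc_node() */
	if (!buf)
		return -1;
	snprintf(buf, 32, "group for item %d", item);
	*slot = buf;
	return 0;
}

/* Caller shaped like the new loop in __build_sched_domains(). */
static int setup_all(char **slots, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (build_one(i, &slots[i]))
			goto error;	/* helper's error code replaces the old inline goto */
	return 0;
error:
	while (i--)			/* unwind what was built so far */
		free(slots[i]);
	return -1;
}

int main(void)
{
	char *slots[4] = { NULL };
	int i;

	if (setup_all(slots, 4))
		return 1;
	for (i = 0; i < 4; i++) {
		printf("%d: %s\n", i, slots[i] ? slots[i] : "(empty)");
		free(slots[i]);
	}
	return 0;
}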
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 130
1 file changed, 67 insertions(+), 63 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 52c1953bc41d..c1ce884a0163 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8246,6 +8246,71 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
 		sg = sg->next;
 	} while (sg != group_head);
 }
+
+static int build_numa_sched_groups(struct s_data *d,
+				   const struct cpumask *cpu_map, int num)
+{
+	struct sched_domain *sd;
+	struct sched_group *sg, *prev;
+	int n, j;
+
+	cpumask_clear(d->covered);
+	cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map);
+	if (cpumask_empty(d->nodemask)) {
+		d->sched_group_nodes[num] = NULL;
+		goto out;
+	}
+
+	sched_domain_node_span(num, d->domainspan);
+	cpumask_and(d->domainspan, d->domainspan, cpu_map);
+
+	sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
+			  GFP_KERNEL, num);
+	if (!sg) {
+		printk(KERN_WARNING "Can not alloc domain group for node %d\n",
+		       num);
+		return -ENOMEM;
+	}
+	d->sched_group_nodes[num] = sg;
+
+	for_each_cpu(j, d->nodemask) {
+		sd = &per_cpu(node_domains, j).sd;
+		sd->groups = sg;
+	}
+
+	sg->__cpu_power = 0;
+	cpumask_copy(sched_group_cpus(sg), d->nodemask);
+	sg->next = sg;
+	cpumask_or(d->covered, d->covered, d->nodemask);
+
+	prev = sg;
+	for (j = 0; j < nr_node_ids; j++) {
+		n = (num + j) % nr_node_ids;
+		cpumask_complement(d->notcovered, d->covered);
+		cpumask_and(d->tmpmask, d->notcovered, cpu_map);
+		cpumask_and(d->tmpmask, d->tmpmask, d->domainspan);
+		if (cpumask_empty(d->tmpmask))
+			break;
+		cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n));
+		if (cpumask_empty(d->tmpmask))
+			continue;
+		sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
+				  GFP_KERNEL, num);
+		if (!sg) {
+			printk(KERN_WARNING
+			       "Can not alloc domain group for node %d\n", j);
+			return -ENOMEM;
+		}
+		sg->__cpu_power = 0;
+		cpumask_copy(sched_group_cpus(sg), d->tmpmask);
+		sg->next = prev->next;
+		cpumask_or(d->covered, d->covered, d->tmpmask);
+		prev->next = sg;
+		prev = sg;
+	}
+out:
+	return 0;
+}
 #endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_NUMA
@@ -8652,70 +8717,9 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 	if (d.sd_allnodes)
 		build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0);
 
-	for (i = 0; i < nr_node_ids; i++) {
-		/* Set up node groups */
-		struct sched_group *sg, *prev;
-		int j;
-
-		cpumask_clear(d.covered);
-		cpumask_and(d.nodemask, cpumask_of_node(i), cpu_map);
-		if (cpumask_empty(d.nodemask)) {
-			d.sched_group_nodes[i] = NULL;
-			continue;
-		}
-
-		sched_domain_node_span(i, d.domainspan);
-		cpumask_and(d.domainspan, d.domainspan, cpu_map);
-
-		sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
-				  GFP_KERNEL, i);
-		if (!sg) {
-			printk(KERN_WARNING "Can not alloc domain group for "
-				"node %d\n", i);
+	for (i = 0; i < nr_node_ids; i++)
+		if (build_numa_sched_groups(&d, cpu_map, i))
 			goto error;
-		}
-		d.sched_group_nodes[i] = sg;
-		for_each_cpu(j, d.nodemask) {
-			struct sched_domain *sd;
-
-			sd = &per_cpu(node_domains, j).sd;
-			sd->groups = sg;
-		}
-		sg->__cpu_power = 0;
-		cpumask_copy(sched_group_cpus(sg), d.nodemask);
-		sg->next = sg;
-		cpumask_or(d.covered, d.covered, d.nodemask);
-		prev = sg;
-
-		for (j = 0; j < nr_node_ids; j++) {
-			int n = (i + j) % nr_node_ids;
-
-			cpumask_complement(d.notcovered, d.covered);
-			cpumask_and(d.tmpmask, d.notcovered, cpu_map);
-			cpumask_and(d.tmpmask, d.tmpmask, d.domainspan);
-			if (cpumask_empty(d.tmpmask))
-				break;
-
-			cpumask_and(d.tmpmask, d.tmpmask, cpumask_of_node(n));
-			if (cpumask_empty(d.tmpmask))
-				continue;
-
-			sg = kmalloc_node(sizeof(struct sched_group) +
-					  cpumask_size(),
-					  GFP_KERNEL, i);
-			if (!sg) {
-				printk(KERN_WARNING
-				"Can not alloc domain group for node %d\n", j);
-				goto error;
-			}
-			sg->__cpu_power = 0;
-			cpumask_copy(sched_group_cpus(sg), d.tmpmask);
-			sg->next = prev->next;
-			cpumask_or(d.covered, d.covered, d.tmpmask);
-			prev->next = sg;
-			prev = sg;
-		}
-	}
 #endif
 
 	/* Calculate CPU power for physical packages and nodes */