author		Andreas Herrmann <andreas.herrmann3@amd.com>	2009-08-18 06:51:52 -0400
committer	Ingo Molnar <mingo@elte.hu>			2009-08-18 12:35:39 -0400
commit		49a02c514d967921a908ac64e9c0ec0f0fc17fd8 (patch)
tree		2ec8a1719585c30ca37d8c0ec05cdec8f9d55f26 /kernel/sched.c
parent		df4ecf1524c7793de3121b2d4e5fc6bcc0da3bfb (diff)
sched: Use structure to store local data in __build_sched_domains
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20090818105152.GB29515@alberich.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	165
1 file changed, 89 insertions(+), 76 deletions(-)
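The patch itself is mechanical: the many local variables of __build_sched_domains() are gathered into a single struct s_data ("d"), so every reference goes through one object and follow-up cleanups can split the function into helpers that take a single pointer instead of a long argument list. Below is a minimal user-space sketch of that pattern, for illustration only; the names s_data_example, alloc_state, build_step and free_state are invented here and are not kernel code.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Illustration only (not kernel code): bundle what used to be separate
 * locals into one structure so each build step can take a single pointer
 * instead of many cpumask arguments. Field names mirror the patch; the
 * helpers alloc_state(), build_step() and free_state() are hypothetical.
 */
struct s_data_example {
	int sd_allnodes;
	unsigned long *nodemask;	/* stand-in for cpumask_var_t */
	unsigned long *tmpmask;
};

static bool alloc_state(struct s_data_example *d)
{
	d->sd_allnodes = 0;
	d->nodemask = calloc(1, sizeof(*d->nodemask));
	d->tmpmask  = calloc(1, sizeof(*d->tmpmask));
	return d->nodemask && d->tmpmask;
}

static void free_state(struct s_data_example *d)
{
	free(d->nodemask);
	free(d->tmpmask);
}

/* One "phase" of the build now receives the whole state as one argument. */
static void build_step(struct s_data_example *d, int cpu)
{
	*d->nodemask |= 1UL << cpu;
}

int main(void)
{
	struct s_data_example d;
	int cpu;

	if (!alloc_state(&d)) {
		free_state(&d);	/* free(NULL) is safe */
		return 1;
	}
	for (cpu = 0; cpu < 4; cpu++)
		build_step(&d, cpu);
	printf("nodemask: %#lx\n", *d.nodemask);
	free_state(&d);
	return 0;
}

Threading one state object through allocation, group building and error unwinding is what the diff below starts doing for the scheduler's domain-building code.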
diff --git a/kernel/sched.c b/kernel/sched.c
index 1b59e265273b..565ff775fcda 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8091,6 +8091,22 @@ struct static_sched_domain {
 	DECLARE_BITMAP(span, CONFIG_NR_CPUS);
 };
 
+struct s_data {
+#ifdef CONFIG_NUMA
+	int			sd_allnodes;
+	cpumask_var_t		domainspan;
+	cpumask_var_t		covered;
+	cpumask_var_t		notcovered;
+#endif
+	cpumask_var_t		nodemask;
+	cpumask_var_t		this_sibling_map;
+	cpumask_var_t		this_core_map;
+	cpumask_var_t		send_covered;
+	cpumask_var_t		tmpmask;
+	struct sched_group	**sched_group_nodes;
+	struct root_domain	*rd;
+};
+
 /*
  * SMT sched-domains:
  */
@@ -8385,54 +8401,49 @@ static void set_domain_attribute(struct sched_domain *sd,
 static int __build_sched_domains(const struct cpumask *cpu_map,
 				 struct sched_domain_attr *attr)
 {
+	struct s_data d;
 	int i, err = -ENOMEM;
-	struct root_domain *rd;
-	cpumask_var_t nodemask, this_sibling_map, this_core_map, send_covered,
-		tmpmask;
 #ifdef CONFIG_NUMA
-	cpumask_var_t domainspan, covered, notcovered;
-	struct sched_group **sched_group_nodes = NULL;
-	int sd_allnodes = 0;
-
-	if (!alloc_cpumask_var(&domainspan, GFP_KERNEL))
+	d.sd_allnodes = 0;
+	if (!alloc_cpumask_var(&d.domainspan, GFP_KERNEL))
 		goto out;
-	if (!alloc_cpumask_var(&covered, GFP_KERNEL))
+	if (!alloc_cpumask_var(&d.covered, GFP_KERNEL))
 		goto free_domainspan;
-	if (!alloc_cpumask_var(&notcovered, GFP_KERNEL))
+	if (!alloc_cpumask_var(&d.notcovered, GFP_KERNEL))
 		goto free_covered;
 #endif
 
-	if (!alloc_cpumask_var(&nodemask, GFP_KERNEL))
+	if (!alloc_cpumask_var(&d.nodemask, GFP_KERNEL))
 		goto free_notcovered;
-	if (!alloc_cpumask_var(&this_sibling_map, GFP_KERNEL))
+	if (!alloc_cpumask_var(&d.this_sibling_map, GFP_KERNEL))
 		goto free_nodemask;
-	if (!alloc_cpumask_var(&this_core_map, GFP_KERNEL))
+	if (!alloc_cpumask_var(&d.this_core_map, GFP_KERNEL))
 		goto free_this_sibling_map;
-	if (!alloc_cpumask_var(&send_covered, GFP_KERNEL))
+	if (!alloc_cpumask_var(&d.send_covered, GFP_KERNEL))
 		goto free_this_core_map;
-	if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
+	if (!alloc_cpumask_var(&d.tmpmask, GFP_KERNEL))
 		goto free_send_covered;
 
 #ifdef CONFIG_NUMA
 	/*
 	 * Allocate the per-node list of sched groups
 	 */
-	sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
+	d.sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
 				    GFP_KERNEL);
-	if (!sched_group_nodes) {
+	if (!d.sched_group_nodes) {
 		printk(KERN_WARNING "Can not alloc sched group node list\n");
 		goto free_tmpmask;
 	}
 #endif
 
-	rd = alloc_rootdomain();
-	if (!rd) {
+	d.rd = alloc_rootdomain();
+	if (!d.rd) {
 		printk(KERN_WARNING "Cannot alloc root domain\n");
 		goto free_sched_groups;
 	}
 
 #ifdef CONFIG_NUMA
-	sched_group_nodes_bycpu[cpumask_first(cpu_map)] = sched_group_nodes;
+	sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d.sched_group_nodes;
 #endif
 
 	/*
@@ -8441,18 +8452,20 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 	for_each_cpu(i, cpu_map) {
 		struct sched_domain *sd = NULL, *p;
 
-		cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map);
+		cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
+			    cpu_map);
 
 #ifdef CONFIG_NUMA
 		if (cpumask_weight(cpu_map) >
-		    SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
+		    SD_NODES_PER_DOMAIN*cpumask_weight(d.nodemask)) {
 			sd = &per_cpu(allnodes_domains, i).sd;
 			SD_INIT(sd, ALLNODES);
 			set_domain_attribute(sd, attr);
 			cpumask_copy(sched_domain_span(sd), cpu_map);
-			cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
+			cpu_to_allnodes_group(i, cpu_map, &sd->groups,
+					      d.tmpmask);
 			p = sd;
-			sd_allnodes = 1;
+			d.sd_allnodes = 1;
 		} else
 			p = NULL;
 
@@ -8471,11 +8484,11 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		sd = &per_cpu(phys_domains, i).sd;
 		SD_INIT(sd, CPU);
 		set_domain_attribute(sd, attr);
-		cpumask_copy(sched_domain_span(sd), nodemask);
+		cpumask_copy(sched_domain_span(sd), d.nodemask);
 		sd->parent = p;
 		if (p)
 			p->child = sd;
-		cpu_to_phys_group(i, cpu_map, &sd->groups, tmpmask);
+		cpu_to_phys_group(i, cpu_map, &sd->groups, d.tmpmask);
 
 #ifdef CONFIG_SCHED_MC
 		p = sd;
@@ -8486,7 +8499,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 					    cpu_coregroup_mask(i));
 		sd->parent = p;
 		p->child = sd;
-		cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
+		cpu_to_core_group(i, cpu_map, &sd->groups, d.tmpmask);
 #endif
 
 #ifdef CONFIG_SCHED_SMT
@@ -8498,54 +8511,54 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 			    topology_thread_cpumask(i), cpu_map);
 		sd->parent = p;
 		p->child = sd;
-		cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
+		cpu_to_cpu_group(i, cpu_map, &sd->groups, d.tmpmask);
 #endif
 	}
 
 #ifdef CONFIG_SCHED_SMT
 	/* Set up CPU (sibling) groups */
 	for_each_cpu(i, cpu_map) {
-		cpumask_and(this_sibling_map,
+		cpumask_and(d.this_sibling_map,
 			    topology_thread_cpumask(i), cpu_map);
-		if (i != cpumask_first(this_sibling_map))
+		if (i != cpumask_first(d.this_sibling_map))
 			continue;
 
-		init_sched_build_groups(this_sibling_map, cpu_map,
+		init_sched_build_groups(d.this_sibling_map, cpu_map,
 					&cpu_to_cpu_group,
-					send_covered, tmpmask);
+					d.send_covered, d.tmpmask);
 	}
 #endif
 
 #ifdef CONFIG_SCHED_MC
 	/* Set up multi-core groups */
 	for_each_cpu(i, cpu_map) {
-		cpumask_and(this_core_map, cpu_coregroup_mask(i), cpu_map);
-		if (i != cpumask_first(this_core_map))
+		cpumask_and(d.this_core_map, cpu_coregroup_mask(i), cpu_map);
+		if (i != cpumask_first(d.this_core_map))
 			continue;
 
-		init_sched_build_groups(this_core_map, cpu_map,
+		init_sched_build_groups(d.this_core_map, cpu_map,
 					&cpu_to_core_group,
-					send_covered, tmpmask);
+					d.send_covered, d.tmpmask);
 	}
 #endif
 
 	/* Set up physical groups */
 	for (i = 0; i < nr_node_ids; i++) {
-		cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
-		if (cpumask_empty(nodemask))
+		cpumask_and(d.nodemask, cpumask_of_node(i), cpu_map);
+		if (cpumask_empty(d.nodemask))
 			continue;
 
-		init_sched_build_groups(nodemask, cpu_map,
+		init_sched_build_groups(d.nodemask, cpu_map,
 					&cpu_to_phys_group,
-					send_covered, tmpmask);
+					d.send_covered, d.tmpmask);
 	}
 
 #ifdef CONFIG_NUMA
 	/* Set up node groups */
-	if (sd_allnodes) {
+	if (d.sd_allnodes) {
 		init_sched_build_groups(cpu_map, cpu_map,
 					&cpu_to_allnodes_group,
-					send_covered, tmpmask);
+					d.send_covered, d.tmpmask);
 	}
 
 	for (i = 0; i < nr_node_ids; i++) {
@@ -8553,15 +8566,15 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		struct sched_group *sg, *prev;
 		int j;
 
-		cpumask_clear(covered);
-		cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
-		if (cpumask_empty(nodemask)) {
-			sched_group_nodes[i] = NULL;
+		cpumask_clear(d.covered);
+		cpumask_and(d.nodemask, cpumask_of_node(i), cpu_map);
+		if (cpumask_empty(d.nodemask)) {
+			d.sched_group_nodes[i] = NULL;
 			continue;
 		}
 
-		sched_domain_node_span(i, domainspan);
-		cpumask_and(domainspan, domainspan, cpu_map);
+		sched_domain_node_span(i, d.domainspan);
+		cpumask_and(d.domainspan, d.domainspan, cpu_map);
 
 		sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
 				  GFP_KERNEL, i);
@@ -8570,30 +8583,30 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
8570 "node %d\n", i); 8583 "node %d\n", i);
8571 goto error; 8584 goto error;
8572 } 8585 }
8573 sched_group_nodes[i] = sg; 8586 d.sched_group_nodes[i] = sg;
8574 for_each_cpu(j, nodemask) { 8587 for_each_cpu(j, d.nodemask) {
8575 struct sched_domain *sd; 8588 struct sched_domain *sd;
8576 8589
8577 sd = &per_cpu(node_domains, j).sd; 8590 sd = &per_cpu(node_domains, j).sd;
8578 sd->groups = sg; 8591 sd->groups = sg;
8579 } 8592 }
8580 sg->__cpu_power = 0; 8593 sg->__cpu_power = 0;
8581 cpumask_copy(sched_group_cpus(sg), nodemask); 8594 cpumask_copy(sched_group_cpus(sg), d.nodemask);
8582 sg->next = sg; 8595 sg->next = sg;
8583 cpumask_or(covered, covered, nodemask); 8596 cpumask_or(d.covered, d.covered, d.nodemask);
8584 prev = sg; 8597 prev = sg;
8585 8598
8586 for (j = 0; j < nr_node_ids; j++) { 8599 for (j = 0; j < nr_node_ids; j++) {
8587 int n = (i + j) % nr_node_ids; 8600 int n = (i + j) % nr_node_ids;
8588 8601
8589 cpumask_complement(notcovered, covered); 8602 cpumask_complement(d.notcovered, d.covered);
8590 cpumask_and(tmpmask, notcovered, cpu_map); 8603 cpumask_and(d.tmpmask, d.notcovered, cpu_map);
8591 cpumask_and(tmpmask, tmpmask, domainspan); 8604 cpumask_and(d.tmpmask, d.tmpmask, d.domainspan);
8592 if (cpumask_empty(tmpmask)) 8605 if (cpumask_empty(d.tmpmask))
8593 break; 8606 break;
8594 8607
8595 cpumask_and(tmpmask, tmpmask, cpumask_of_node(n)); 8608 cpumask_and(d.tmpmask, d.tmpmask, cpumask_of_node(n));
8596 if (cpumask_empty(tmpmask)) 8609 if (cpumask_empty(d.tmpmask))
8597 continue; 8610 continue;
8598 8611
8599 sg = kmalloc_node(sizeof(struct sched_group) + 8612 sg = kmalloc_node(sizeof(struct sched_group) +
@@ -8605,9 +8618,9 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 				goto error;
 			}
 			sg->__cpu_power = 0;
-			cpumask_copy(sched_group_cpus(sg), tmpmask);
+			cpumask_copy(sched_group_cpus(sg), d.tmpmask);
 			sg->next = prev->next;
-			cpumask_or(covered, covered, tmpmask);
+			cpumask_or(d.covered, d.covered, d.tmpmask);
 			prev->next = sg;
 			prev = sg;
 		}
@@ -8638,13 +8651,13 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 
 #ifdef CONFIG_NUMA
 	for (i = 0; i < nr_node_ids; i++)
-		init_numa_sched_groups_power(sched_group_nodes[i]);
+		init_numa_sched_groups_power(d.sched_group_nodes[i]);
 
-	if (sd_allnodes) {
+	if (d.sd_allnodes) {
 		struct sched_group *sg;
 
 		cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg,
-				      tmpmask);
+				      d.tmpmask);
 		init_numa_sched_groups_power(sg);
 	}
 #endif
@@ -8659,42 +8672,42 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 #else
 		sd = &per_cpu(phys_domains, i).sd;
 #endif
-		cpu_attach_domain(sd, rd, i);
+		cpu_attach_domain(sd, d.rd, i);
 	}
 
 	err = 0;
 
 free_tmpmask:
-	free_cpumask_var(tmpmask);
+	free_cpumask_var(d.tmpmask);
 free_send_covered:
-	free_cpumask_var(send_covered);
+	free_cpumask_var(d.send_covered);
 free_this_core_map:
-	free_cpumask_var(this_core_map);
+	free_cpumask_var(d.this_core_map);
 free_this_sibling_map:
-	free_cpumask_var(this_sibling_map);
+	free_cpumask_var(d.this_sibling_map);
 free_nodemask:
-	free_cpumask_var(nodemask);
+	free_cpumask_var(d.nodemask);
 free_notcovered:
 #ifdef CONFIG_NUMA
-	free_cpumask_var(notcovered);
+	free_cpumask_var(d.notcovered);
 free_covered:
-	free_cpumask_var(covered);
+	free_cpumask_var(d.covered);
 free_domainspan:
-	free_cpumask_var(domainspan);
+	free_cpumask_var(d.domainspan);
 out:
 #endif
 	return err;
 
 free_sched_groups:
 #ifdef CONFIG_NUMA
-	kfree(sched_group_nodes);
+	kfree(d.sched_group_nodes);
 #endif
 	goto free_tmpmask;
 
 #ifdef CONFIG_NUMA
 error:
-	free_sched_groups(cpu_map, tmpmask);
-	free_rootdomain(rd);
+	free_sched_groups(cpu_map, d.tmpmask);
+	free_rootdomain(d.rd);
 	goto free_tmpmask;
 #endif
 }