author    Andreas Herrmann <andreas.herrmann3@amd.com>  2009-08-18 06:53:00 -0400
committer Ingo Molnar <mingo@elte.hu>                   2009-08-18 12:35:39 -0400
commit    2109b99ee192764b407dc7f52babb74740eea6f9 (patch)
tree      a18bfe802e36669c2bd0150387d78d107c3071bb /kernel/sched.c
parent    49a02c514d967921a908ac64e9c0ec0f0fc17fd8 (diff)
sched: Separate out allocation/free/goto-hell from __build_sched_domains
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20090818105300.GC29515@alberich.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
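
Note: the pattern this patch introduces — an enum that records how far the allocation chain got, one setup helper that returns the stage it reached, and one teardown switch whose fall-through cases unwind everything from that stage downward — is a general C idiom. The following is a minimal, self-contained sketch of that idiom only; the names (my_data, my_alloc_all, my_free) are illustrative and do not appear in the patch below.

#include <stdlib.h>

/* Stages, ordered from "everything allocated" down to "nothing allocated".
 * Each case label frees the resource it is named after and falls through to
 * free every earlier one, mirroring __free_domain_allocs() in the patch below.
 */
enum my_alloc { my_b = 0, my_a, my_none };

struct my_data { char *a, *b; };

static void my_free(struct my_data *d, enum my_alloc what)
{
        switch (what) {
        case my_b:
                free(d->b);     /* fall through */
        case my_a:
                free(d->a);     /* fall through */
        case my_none:
                break;
        }
}

/* Returns the last stage successfully reached, so the caller can hand the
 * value straight to my_free() on failure.
 */
static enum my_alloc my_alloc_all(struct my_data *d)
{
        d->a = malloc(64);
        if (!d->a)
                return my_none;         /* nothing to undo */
        d->b = malloc(64);
        if (!d->b)
                return my_a;            /* only 'a' needs freeing */
        return my_b;                    /* fully set up */
}

int main(void)
{
        struct my_data d = { 0, 0 };
        enum my_alloc state = my_alloc_all(&d);

        if (state != my_b) {
                my_free(&d, state);     /* unwind the partial setup */
                return 1;
        }
        /* ... use d.a and d.b ... */
        my_free(&d, my_b);              /* full teardown on success */
        return 0;
}

The ordering of the enum (and of the switch cases) is what lets a plain fall-through switch replace a ladder of goto labels: each failure point must release exactly the resources acquired before it, so the cases are laid out in reverse allocation order.
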
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  171
1 file changed, 99 insertions(+), 72 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 565ff775fcda..c5d1fee42360 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8107,6 +8107,23 @@ struct s_data {
         struct root_domain      *rd;
 };

+enum s_alloc {
+        sa_sched_groups = 0,
+        sa_rootdomain,
+        sa_tmpmask,
+        sa_send_covered,
+        sa_this_core_map,
+        sa_this_sibling_map,
+        sa_nodemask,
+        sa_sched_group_nodes,
+#ifdef CONFIG_NUMA
+        sa_notcovered,
+        sa_covered,
+        sa_domainspan,
+#endif
+        sa_none,
+};
+
 /*
  * SMT sched-domains:
  */
@@ -8394,6 +8411,77 @@ static void set_domain_attribute(struct sched_domain *sd,
         }
 }

+static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
+                                 const struct cpumask *cpu_map)
+{
+        switch (what) {
+        case sa_sched_groups:
+                free_sched_groups(cpu_map, d->tmpmask); /* fall through */
+                d->sched_group_nodes = NULL;
+        case sa_rootdomain:
+                free_rootdomain(d->rd); /* fall through */
+        case sa_tmpmask:
+                free_cpumask_var(d->tmpmask); /* fall through */
+        case sa_send_covered:
+                free_cpumask_var(d->send_covered); /* fall through */
+        case sa_this_core_map:
+                free_cpumask_var(d->this_core_map); /* fall through */
+        case sa_this_sibling_map:
+                free_cpumask_var(d->this_sibling_map); /* fall through */
+        case sa_nodemask:
+                free_cpumask_var(d->nodemask); /* fall through */
+        case sa_sched_group_nodes:
+#ifdef CONFIG_NUMA
+                kfree(d->sched_group_nodes); /* fall through */
+        case sa_notcovered:
+                free_cpumask_var(d->notcovered); /* fall through */
+        case sa_covered:
+                free_cpumask_var(d->covered); /* fall through */
+        case sa_domainspan:
+                free_cpumask_var(d->domainspan); /* fall through */
+#endif
+        case sa_none:
+                break;
+        }
+}
+
+static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
+                                                   const struct cpumask *cpu_map)
+{
+#ifdef CONFIG_NUMA
+        if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL))
+                return sa_none;
+        if (!alloc_cpumask_var(&d->covered, GFP_KERNEL))
+                return sa_domainspan;
+        if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL))
+                return sa_covered;
+        /* Allocate the per-node list of sched groups */
+        d->sched_group_nodes = kcalloc(nr_node_ids,
+                                       sizeof(struct sched_group *), GFP_KERNEL);
+        if (!d->sched_group_nodes) {
+                printk(KERN_WARNING "Can not alloc sched group node list\n");
+                return sa_notcovered;
+        }
+        sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
+#endif
+        if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL))
+                return sa_sched_group_nodes;
+        if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL))
+                return sa_nodemask;
+        if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
+                return sa_this_sibling_map;
+        if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
+                return sa_this_core_map;
+        if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
+                return sa_send_covered;
+        d->rd = alloc_rootdomain();
+        if (!d->rd) {
+                printk(KERN_WARNING "Cannot alloc root domain\n");
+                return sa_tmpmask;
+        }
+        return sa_rootdomain;
+}
+
 /*
  * Build sched domains for a given set of cpus and attach the sched domains
  * to the individual cpus
@@ -8401,50 +8489,17 @@ static void set_domain_attribute(struct sched_domain *sd,
 static int __build_sched_domains(const struct cpumask *cpu_map,
                                  struct sched_domain_attr *attr)
 {
+        enum s_alloc alloc_state = sa_none;
         struct s_data d;
-        int i, err = -ENOMEM;
+        int i;
 #ifdef CONFIG_NUMA
         d.sd_allnodes = 0;
-        if (!alloc_cpumask_var(&d.domainspan, GFP_KERNEL))
-                goto out;
-        if (!alloc_cpumask_var(&d.covered, GFP_KERNEL))
-                goto free_domainspan;
-        if (!alloc_cpumask_var(&d.notcovered, GFP_KERNEL))
-                goto free_covered;
-#endif
-
-        if (!alloc_cpumask_var(&d.nodemask, GFP_KERNEL))
-                goto free_notcovered;
-        if (!alloc_cpumask_var(&d.this_sibling_map, GFP_KERNEL))
-                goto free_nodemask;
-        if (!alloc_cpumask_var(&d.this_core_map, GFP_KERNEL))
-                goto free_this_sibling_map;
-        if (!alloc_cpumask_var(&d.send_covered, GFP_KERNEL))
-                goto free_this_core_map;
-        if (!alloc_cpumask_var(&d.tmpmask, GFP_KERNEL))
-                goto free_send_covered;
-
-#ifdef CONFIG_NUMA
-        /*
-         * Allocate the per-node list of sched groups
-         */
-        d.sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
-                                      GFP_KERNEL);
-        if (!d.sched_group_nodes) {
-                printk(KERN_WARNING "Can not alloc sched group node list\n");
-                goto free_tmpmask;
-        }
 #endif

-        d.rd = alloc_rootdomain();
-        if (!d.rd) {
-                printk(KERN_WARNING "Cannot alloc root domain\n");
-                goto free_sched_groups;
-        }
-
-#ifdef CONFIG_NUMA
-        sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d.sched_group_nodes;
-#endif
+        alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
+        if (alloc_state != sa_rootdomain)
+                goto error;
+        alloc_state = sa_sched_groups;

         /*
          * Set up domains for cpus specified by the cpu_map.
@@ -8675,41 +8730,13 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
                 cpu_attach_domain(sd, d.rd, i);
         }

-        err = 0;
-
-free_tmpmask:
-        free_cpumask_var(d.tmpmask);
-free_send_covered:
-        free_cpumask_var(d.send_covered);
-free_this_core_map:
-        free_cpumask_var(d.this_core_map);
-free_this_sibling_map:
-        free_cpumask_var(d.this_sibling_map);
-free_nodemask:
-        free_cpumask_var(d.nodemask);
-free_notcovered:
-#ifdef CONFIG_NUMA
-        free_cpumask_var(d.notcovered);
-free_covered:
-        free_cpumask_var(d.covered);
-free_domainspan:
-        free_cpumask_var(d.domainspan);
-out:
-#endif
-        return err;
-
-free_sched_groups:
-#ifdef CONFIG_NUMA
-        kfree(d.sched_group_nodes);
-#endif
-        goto free_tmpmask;
+        d.sched_group_nodes = NULL; /* don't free this we still need it */
+        __free_domain_allocs(&d, sa_tmpmask, cpu_map);
+        return 0;

-#ifdef CONFIG_NUMA
 error:
-        free_sched_groups(cpu_map, d.tmpmask);
-        free_rootdomain(d.rd);
-        goto free_tmpmask;
-#endif
+        __free_domain_allocs(&d, alloc_state, cpu_map);
+        return -ENOMEM;
 }

 static int build_sched_domains(const struct cpumask *cpu_map)