path: root/kernel
author     Rusty Russell <rusty@rustcorp.com.au>  2008-11-24 11:05:03 -0500
committer  Ingo Molnar <mingo@elte.hu>            2008-11-24 11:50:04 -0500
commit     3404c8d97c2d3eb87b1bf4aadad957bfb5235b14 (patch)
tree       1ffe3b53bf7382405dc303b9d0f2721733ff2017 /kernel
parent     abcd083a1a658d2bc1f7fced02632bfe03918002 (diff)
sched: get rid of boutique sched.c allocations, use cpumask_var_t.
Impact: use new general API

Using lots of allocs rather than one big alloc is less efficient, but who
cares for this setup function?

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c  139
1 file changed, 55 insertions(+), 84 deletions(-)
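For readers unfamiliar with the API this commit switches to, here is a minimal kernel-style sketch of the cpumask_var_t alloc/use/free pattern (not part of the patch; the function name is hypothetical, and <linux/cpumask.h> plus <linux/slab.h> are assumed). With CONFIG_CPUMASK_OFFSTACK=y the mask is kmalloc'ed and allocation can fail; otherwise it lives on the stack and alloc/free are no-ops.

/*
 * Hedged illustration only: the basic cpumask_var_t pattern that
 * __build_sched_domains() adopts below.  "example_scratch_mask" is a
 * made-up name, not a function in sched.c.
 */
static int example_scratch_mask(const struct cpumask *cpu_map)
{
	cpumask_var_t tmpmask;

	/* May kmalloc (CONFIG_CPUMASK_OFFSTACK=y) or be a no-op for an on-stack mask. */
	if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(tmpmask, cpu_map);	/* use it as scratch space */

	free_cpumask_var(tmpmask);	/* no-op in the on-stack configuration */
	return 0;
}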
diff --git a/kernel/sched.c b/kernel/sched.c
index e59978eead17..0dc9d5752d68 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7263,48 +7263,6 @@ SD_INIT_FUNC(CPU)
 SD_INIT_FUNC(MC)
 #endif
 
-/*
- * To minimize stack usage kmalloc room for cpumasks and share the
- * space as the usage in build_sched_domains() dictates.  Used only
- * if the amount of space is significant.
- */
-struct allmasks {
-	cpumask_t tmpmask;			/* make this one first */
-	union {
-		cpumask_t nodemask;
-		cpumask_t this_sibling_map;
-		cpumask_t this_core_map;
-	};
-	cpumask_t send_covered;
-
-#ifdef CONFIG_NUMA
-	cpumask_t domainspan;
-	cpumask_t covered;
-	cpumask_t notcovered;
-#endif
-};
-
-#if NR_CPUS > 128
-#define SCHED_CPUMASK_DECLARE(v)	struct allmasks *v
-static inline void sched_cpumask_alloc(struct allmasks **masks)
-{
-	*masks = kmalloc(sizeof(**masks), GFP_KERNEL);
-}
-static inline void sched_cpumask_free(struct allmasks *masks)
-{
-	kfree(masks);
-}
-#else
-#define SCHED_CPUMASK_DECLARE(v)	struct allmasks _v, *v = &_v
-static inline void sched_cpumask_alloc(struct allmasks **masks)
-{ }
-static inline void sched_cpumask_free(struct allmasks *masks)
-{ }
-#endif
-
-#define	SCHED_CPUMASK_VAR(v, a)		cpumask_t *v = (cpumask_t *) \
-			((unsigned long)(a) + offsetof(struct allmasks, v))
-
 static int default_relax_domain_level = -1;
 
 static int __init setup_relax_domain_level(char *str)
@@ -7347,14 +7305,35 @@ static void set_domain_attribute(struct sched_domain *sd,
 static int __build_sched_domains(const cpumask_t *cpu_map,
 				 struct sched_domain_attr *attr)
 {
-	int i;
+	int i, err = -ENOMEM;
 	struct root_domain *rd;
-	SCHED_CPUMASK_DECLARE(allmasks);
-	cpumask_t *tmpmask;
+	cpumask_var_t nodemask, this_sibling_map, this_core_map, send_covered,
+		tmpmask;
 #ifdef CONFIG_NUMA
+	cpumask_var_t domainspan, covered, notcovered;
 	struct sched_group **sched_group_nodes = NULL;
 	int sd_allnodes = 0;
 
+	if (!alloc_cpumask_var(&domainspan, GFP_KERNEL))
+		goto out;
+	if (!alloc_cpumask_var(&covered, GFP_KERNEL))
+		goto free_domainspan;
+	if (!alloc_cpumask_var(&notcovered, GFP_KERNEL))
+		goto free_covered;
+#endif
+
+	if (!alloc_cpumask_var(&nodemask, GFP_KERNEL))
+		goto free_notcovered;
+	if (!alloc_cpumask_var(&this_sibling_map, GFP_KERNEL))
+		goto free_nodemask;
+	if (!alloc_cpumask_var(&this_core_map, GFP_KERNEL))
+		goto free_this_sibling_map;
+	if (!alloc_cpumask_var(&send_covered, GFP_KERNEL))
+		goto free_this_core_map;
+	if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
+		goto free_send_covered;
+
+#ifdef CONFIG_NUMA
 	/*
 	 * Allocate the per-node list of sched groups
 	 */
@@ -7362,33 +7341,16 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 					    GFP_KERNEL);
 	if (!sched_group_nodes) {
 		printk(KERN_WARNING "Can not alloc sched group node list\n");
-		return -ENOMEM;
+		goto free_tmpmask;
 	}
 #endif
 
 	rd = alloc_rootdomain();
 	if (!rd) {
 		printk(KERN_WARNING "Cannot alloc root domain\n");
-#ifdef CONFIG_NUMA
-		kfree(sched_group_nodes);
-#endif
-		return -ENOMEM;
+		goto free_sched_groups;
 	}
 
-	/* get space for all scratch cpumask variables */
-	sched_cpumask_alloc(&allmasks);
-	if (!allmasks) {
-		printk(KERN_WARNING "Cannot alloc cpumask array\n");
-		kfree(rd);
-#ifdef CONFIG_NUMA
-		kfree(sched_group_nodes);
-#endif
-		return -ENOMEM;
-	}
-
-	tmpmask = (cpumask_t *)allmasks;
-
-
 #ifdef CONFIG_NUMA
 	sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
 #endif
@@ -7398,7 +7360,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	 */
 	for_each_cpu(i, cpu_map) {
 		struct sched_domain *sd = NULL, *p;
-		SCHED_CPUMASK_VAR(nodemask, allmasks);
 
 		*nodemask = node_to_cpumask(cpu_to_node(i));
 		cpus_and(*nodemask, *nodemask, *cpu_map);
@@ -7464,9 +7425,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #ifdef CONFIG_SCHED_SMT
 	/* Set up CPU (sibling) groups */
 	for_each_cpu(i, cpu_map) {
-		SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
-		SCHED_CPUMASK_VAR(send_covered, allmasks);
-
 		*this_sibling_map = per_cpu(cpu_sibling_map, i);
 		cpus_and(*this_sibling_map, *this_sibling_map, *cpu_map);
 		if (i != first_cpu(*this_sibling_map))
@@ -7481,9 +7439,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #ifdef CONFIG_SCHED_MC
 	/* Set up multi-core groups */
 	for_each_cpu(i, cpu_map) {
-		SCHED_CPUMASK_VAR(this_core_map, allmasks);
-		SCHED_CPUMASK_VAR(send_covered, allmasks);
-
 		*this_core_map = cpu_coregroup_map(i);
 		cpus_and(*this_core_map, *this_core_map, *cpu_map);
 		if (i != first_cpu(*this_core_map))
@@ -7497,9 +7452,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 	/* Set up physical groups */
 	for (i = 0; i < nr_node_ids; i++) {
-		SCHED_CPUMASK_VAR(nodemask, allmasks);
-		SCHED_CPUMASK_VAR(send_covered, allmasks);
-
 		*nodemask = node_to_cpumask(i);
 		cpus_and(*nodemask, *nodemask, *cpu_map);
 		if (cpus_empty(*nodemask))
@@ -7513,8 +7465,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #ifdef CONFIG_NUMA
 	/* Set up node groups */
 	if (sd_allnodes) {
-		SCHED_CPUMASK_VAR(send_covered, allmasks);
-
 		init_sched_build_groups(cpu_map, cpu_map,
 					&cpu_to_allnodes_group,
 					send_covered, tmpmask);
@@ -7523,9 +7473,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	for (i = 0; i < nr_node_ids; i++) {
 		/* Set up node groups */
 		struct sched_group *sg, *prev;
-		SCHED_CPUMASK_VAR(nodemask, allmasks);
-		SCHED_CPUMASK_VAR(domainspan, allmasks);
-		SCHED_CPUMASK_VAR(covered, allmasks);
 		int j;
 
 		*nodemask = node_to_cpumask(i);
@@ -7560,7 +7507,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		prev = sg;
 
 		for (j = 0; j < nr_node_ids; j++) {
-			SCHED_CPUMASK_VAR(notcovered, allmasks);
 			int n = (i + j) % nr_node_ids;
 			node_to_cpumask_ptr(pnodemask, n);
 
@@ -7639,15 +7585,40 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		cpu_attach_domain(sd, rd, i);
 	}
 
-	sched_cpumask_free(allmasks);
-	return 0;
+	err = 0;
+
+free_tmpmask:
+	free_cpumask_var(tmpmask);
+free_send_covered:
+	free_cpumask_var(send_covered);
+free_this_core_map:
+	free_cpumask_var(this_core_map);
+free_this_sibling_map:
+	free_cpumask_var(this_sibling_map);
+free_nodemask:
+	free_cpumask_var(nodemask);
+free_notcovered:
+#ifdef CONFIG_NUMA
+	free_cpumask_var(notcovered);
+free_covered:
+	free_cpumask_var(covered);
+free_domainspan:
+	free_cpumask_var(domainspan);
+out:
+#endif
+	return err;
+
+free_sched_groups:
+#ifdef CONFIG_NUMA
+	kfree(sched_group_nodes);
+#endif
+	goto free_tmpmask;
 
 #ifdef CONFIG_NUMA
 error:
 	free_sched_groups(cpu_map, tmpmask);
-	sched_cpumask_free(allmasks);
 	kfree(rd);
-	return -ENOMEM;
+	goto free_tmpmask;
 #endif
 }
 
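The new error handling above uses the common goto-unwind idiom: each failed allocation jumps to a label that releases only what was allocated before it, and the success path reuses the same cleanup chain by setting err = 0 and falling through. A standalone userspace sketch of the same shape (plain malloc/free, names invented for illustration, not the kernel code itself):

#include <errno.h>
#include <stdlib.h>

/* Sketch only: mirrors the goto-unwind pattern, assuming three allocations. */
static int build_something(void)
{
	int err = -ENOMEM;
	char *a, *b, *c;

	a = malloc(64);
	if (!a)
		goto out;
	b = malloc(64);
	if (!b)
		goto free_a;
	c = malloc(64);
	if (!c)
		goto free_b;

	/* ... the real work would happen here ... */
	err = 0;

	free(c);		/* success falls through the same cleanup chain */
free_b:
	free(b);
free_a:
	free(a);
out:
	return err;
}

int main(void)
{
	return build_something() ? EXIT_FAILURE : EXIT_SUCCESS;
}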