author     Rusty Russell <rusty@rustcorp.com.au>    2009-11-02 23:23:40 -0500
committer  Ingo Molnar <mingo@elte.hu>              2009-11-04 07:16:40 -0500
commit     acc3f5d7cabbfd6cec71f0c1f9900621fa2d6ae7
tree       672ed45f6df2f52e8f3cced2ee11ad29a1533890 /kernel/cpuset.c
parent     e2c880630438f80b474378d5487b511b07665051
cpumask: Partition_sched_domains takes array of cpumask_var_t
Currently partition_sched_domains() takes a 'struct cpumask
*doms_new' which is a kmalloc'ed array of cpumask_t. You can't
have such an array if 'struct cpumask' is undefined, as we plan
for CONFIG_CPUMASK_OFFSTACK=y.
So, we make this an array of cpumask_var_t instead: this is the
same for the CONFIG_CPUMASK_OFFSTACK=n case, but requires
multiple allocations for the CONFIG_CPUMASK_OFFSTACK=y case.
Hence we add alloc_sched_domains() and free_sched_domains()
functions.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <200911031453.40668.rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
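For context (not part of the patch text above): a minimal sketch of how a caller builds and hands off the domain array with the new helpers. The alloc_sched_domains()/free_sched_domains() signatures and the ownership note are assumptions inferred from this series, not shown in the hunks below.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/sched.h>

/* Hypothetical caller, mirroring cpuset's single-domain fast path. */
static int example_single_domain(void)
{
	cpumask_var_t *doms;
	int ndoms = 1;

	/* One kmalloc for the array, plus one cpumask allocation per
	 * entry when CONFIG_CPUMASK_OFFSTACK=y. */
	doms = alloc_sched_domains(ndoms);
	if (!doms)
		return -ENOMEM;

	/* Entries are now indexed (doms[i]) rather than pointer-stepped. */
	cpumask_copy(doms[0], cpu_online_mask);

	/* The scheduler keeps the array as its current partition, so the
	 * caller does not free it here; free_sched_domains() is for error
	 * paths and for retiring a previous partition. */
	partition_sched_domains(ndoms, doms, NULL);
	return 0;
}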
Diffstat (limited to 'kernel/cpuset.c')
 kernel/cpuset.c | 19
 1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index d247381e7371..3cf2183b472d 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -537,8 +537,7 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
  * element of the partition (one sched domain) to be passed to
  * partition_sched_domains().
  */
-/* FIXME: see the FIXME in partition_sched_domains() */
-static int generate_sched_domains(struct cpumask **domains,
+static int generate_sched_domains(cpumask_var_t **domains,
 			struct sched_domain_attr **attributes)
 {
 	LIST_HEAD(q);		/* queue of cpusets to be scanned */
@@ -546,7 +545,7 @@ static int generate_sched_domains(struct cpumask **domains,
 	struct cpuset **csa;	/* array of all cpuset ptrs */
 	int csn;		/* how many cpuset ptrs in csa so far */
 	int i, j, k;		/* indices for partition finding loops */
-	struct cpumask *doms;	/* resulting partition; i.e. sched domains */
+	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
 	struct sched_domain_attr *dattr;  /* attributes for custom domains */
 	int ndoms = 0;		/* number of sched domains in result */
 	int nslot;		/* next empty doms[] struct cpumask slot */
@@ -557,7 +556,8 @@ static int generate_sched_domains(struct cpumask **domains,
 
 	/* Special case for the 99% of systems with one, full, sched domain */
 	if (is_sched_load_balance(&top_cpuset)) {
-		doms = kmalloc(cpumask_size(), GFP_KERNEL);
+		ndoms = 1;
+		doms = alloc_sched_domains(ndoms);
 		if (!doms)
 			goto done;
 
@@ -566,9 +566,8 @@ static int generate_sched_domains(struct cpumask **domains,
 			*dattr = SD_ATTR_INIT;
 			update_domain_attr_tree(dattr, &top_cpuset);
 		}
-		cpumask_copy(doms, top_cpuset.cpus_allowed);
+		cpumask_copy(doms[0], top_cpuset.cpus_allowed);
 
-		ndoms = 1;
 		goto done;
 	}
 
@@ -636,7 +635,7 @@ restart:
 	 * Now we know how many domains to create.
 	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
 	 */
-	doms = kmalloc(ndoms * cpumask_size(), GFP_KERNEL);
+	doms = alloc_sched_domains(ndoms);
 	if (!doms)
 		goto done;
 
@@ -656,7 +655,7 @@ restart:
 			continue;
 		}
 
-		dp = doms + nslot;
+		dp = doms[nslot];
 
 		if (nslot == ndoms) {
 			static int warnings = 10;
@@ -718,7 +717,7 @@ done:
 static void do_rebuild_sched_domains(struct work_struct *unused)
 {
 	struct sched_domain_attr *attr;
-	struct cpumask *doms;
+	cpumask_var_t *doms;
 	int ndoms;
 
 	get_online_cpus();
@@ -2052,7 +2051,7 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
 				unsigned long phase, void *unused_cpu)
 {
 	struct sched_domain_attr *attr;
-	struct cpumask *doms;
+	cpumask_var_t *doms;
 	int ndoms;
 
 	switch (phase) {
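The new helpers themselves live outside kernel/cpuset.c, so their definitions do not appear in this diffstat-limited view. Based only on the commit message ("multiple allocations for the CONFIG_CPUMASK_OFFSTACK=y case"), they plausibly look something like the sketch below; treat it as an illustration of the allocation pattern, not the patch's actual hunk elsewhere in the tree.

#include <linux/cpumask.h>
#include <linux/slab.h>

/* Illustrative only: release each per-slot mask, then the array itself. */
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
{
	unsigned int i;

	for (i = 0; i < ndoms; i++)
		free_cpumask_var(doms[i]);
	kfree(doms);
}

/* Illustrative only: one kmalloc for the array, then one cpumask per slot.
 * With CONFIG_CPUMASK_OFFSTACK=n, cpumask_var_t embeds the struct cpumask
 * and alloc_cpumask_var() succeeds without allocating, so this collapses
 * to the single kmalloc() the old code did. */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
{
	unsigned int i;
	cpumask_var_t *doms;

	doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
	if (!doms)
		return NULL;
	for (i = 0; i < ndoms; i++) {
		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
			free_sched_domains(doms, i);
			return NULL;
		}
	}
	return doms;
}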