author    | Li Zefan <lizf@cn.fujitsu.com>                 | 2009-01-07 21:08:45 -0500
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-01-08 11:31:11 -0500
commit    | 6af866af34a96fed24a55979a78b6f73bd4e8e87 (patch)
tree      | e0c4b27ce3b684ebb2f6fa3685051e01a86d7354 /kernel/cpuset.c
parent    | 300ed6cbb70718872cb4936d1d22ef295f9ba44d (diff)
cpuset: remove remaining pointers to cpumask_t
Impact: cleanups, use new cpumask API
Final trivial cleanups: mainly s/cpumask_t/struct cpumask
Note there is a FIXME in generate_sched_domains(). A future patch will
change struct cpumask *doms to struct cpumask *doms[].
(I suppose Rusty will do this.)
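
Not part of the patch itself: a minimal sketch, in kernel-style C, of the old-versus-new cpumask usage this conversion targets. The function names example_old_style() and example_new_style() are made up for illustration; the helpers shown (cpus_clear, cpumask_clear, alloc_cpumask_var, and friends) are the real interfaces of this era, assuming a tree where both the legacy and the new API are still present.

```c
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>

/* Old style: a full NR_CPUS-sized bitmap lives on the stack and the
 * legacy helpers take it by value/lvalue.  This is what the patch
 * series is moving away from. */
static int example_old_style(void)
{
	cpumask_t mask;

	cpus_clear(mask);
	return cpus_intersects(mask, cpu_online_map);
}

/* New style: work through struct cpumask pointers.  cpumask_var_t is
 * pointer-sized when CONFIG_CPUMASK_OFFSTACK=y, so large masks can be
 * allocated off-stack. */
static int example_new_style(void)
{
	cpumask_var_t mask;
	int ret;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(mask);			/* new helpers take struct cpumask * */
	ret = cpumask_intersects(mask, cpu_online_mask);
	free_cpumask_var(mask);
	return ret;
}
```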
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Mike Travis <travis@sgi.com>
Cc: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r-- | kernel/cpuset.c | 28
1 file changed, 15 insertions, 13 deletions
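
An illustrative aside on the allocation change in the hunks below: the patch swaps kmalloc(sizeof(cpumask_t), ...) for kmalloc(cpumask_size(), ...), since cpumask_size() reports the bytes a struct cpumask allocation needs without declaring a cpumask_t by value. The wrapper alloc_doms() here is hypothetical, shown only to make the pattern concrete.

```c
#include <linux/cpumask.h>
#include <linux/slab.h>

/* Hypothetical wrapper, for illustration only: allocate 'ndoms'
 * contiguous struct cpumask slots the way the hunks below do. */
static struct cpumask *alloc_doms(int ndoms)
{
	return kmalloc(ndoms * cpumask_size(), GFP_KERNEL);
}
```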
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index fc294aa9a97a..647c77a88fcb 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -289,7 +289,8 @@ static struct file_system_type cpuset_fs_type = {
  * Call with callback_mutex held.
  */
 
-static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
+static void guarantee_online_cpus(const struct cpuset *cs,
+				  struct cpumask *pmask)
 {
 	while (cs && !cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
 		cs = cs->parent;
@@ -610,7 +611,8 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
  * element of the partition (one sched domain) to be passed to
  * partition_sched_domains().
  */
-static int generate_sched_domains(cpumask_t **domains,
+/* FIXME: see the FIXME in partition_sched_domains() */
+static int generate_sched_domains(struct cpumask **domains,
 			struct sched_domain_attr **attributes)
 {
 	LIST_HEAD(q);		/* queue of cpusets to be scanned */
@@ -618,10 +620,10 @@ static int generate_sched_domains(cpumask_t **domains,
 	struct cpuset **csa;	/* array of all cpuset ptrs */
 	int csn;		/* how many cpuset ptrs in csa so far */
 	int i, j, k;		/* indices for partition finding loops */
-	cpumask_t *doms;	/* resulting partition; i.e. sched domains */
+	struct cpumask *doms;	/* resulting partition; i.e. sched domains */
 	struct sched_domain_attr *dattr;  /* attributes for custom domains */
 	int ndoms = 0;		/* number of sched domains in result */
-	int nslot;		/* next empty doms[] cpumask_t slot */
+	int nslot;		/* next empty doms[] struct cpumask slot */
 
 	doms = NULL;
 	dattr = NULL;
@@ -629,7 +631,7 @@ static int generate_sched_domains(cpumask_t **domains,
 
 	/* Special case for the 99% of systems with one, full, sched domain */
 	if (is_sched_load_balance(&top_cpuset)) {
-		doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
+		doms = kmalloc(cpumask_size(), GFP_KERNEL);
 		if (!doms)
 			goto done;
 
@@ -708,7 +710,7 @@ restart:
 	 * Now we know how many domains to create.
 	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
 	 */
-	doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL);
+	doms = kmalloc(ndoms * cpumask_size(), GFP_KERNEL);
 	if (!doms)
 		goto done;
 
@@ -720,7 +722,7 @@ restart:
 
 	for (nslot = 0, i = 0; i < csn; i++) {
 		struct cpuset *a = csa[i];
-		cpumask_t *dp;
+		struct cpumask *dp;
 		int apn = a->pn;
 
 		if (apn < 0) {
@@ -743,7 +745,7 @@ restart:
 			continue;
 		}
 
-		cpus_clear(*dp);
+		cpumask_clear(dp);
 		if (dattr)
 			*(dattr + nslot) = SD_ATTR_INIT;
 		for (j = i; j < csn; j++) {
@@ -790,7 +792,7 @@ done:
 static void do_rebuild_sched_domains(struct work_struct *unused)
 {
 	struct sched_domain_attr *attr;
-	cpumask_t *doms;
+	struct cpumask *doms;
 	int ndoms;
 
 	get_online_cpus();
@@ -2044,7 +2046,7 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
 				unsigned long phase, void *unused_cpu)
 {
 	struct sched_domain_attr *attr;
-	cpumask_t *doms;
+	struct cpumask *doms;
 	int ndoms;
 
 	switch (phase) {
@@ -2114,7 +2116,7 @@ void __init cpuset_init_smp(void)
 /**
  * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
  * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
- * @pmask: pointer to cpumask_t variable to receive cpus_allowed set.
+ * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
  *
  * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
  * attached to the specified @tsk.  Guaranteed to return some non-empty
@@ -2122,7 +2124,7 @@ void __init cpuset_init_smp(void)
  * tasks cpuset.
  **/
 
-void cpuset_cpus_allowed(struct task_struct *tsk, cpumask_t *pmask)
+void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 {
 	mutex_lock(&callback_mutex);
 	cpuset_cpus_allowed_locked(tsk, pmask);
@@ -2133,7 +2135,7 @@ void cpuset_cpus_allowed(struct task_struct *tsk, cpumask_t *pmask)
  * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset.
  * Must be called with callback_mutex held.
  **/
-void cpuset_cpus_allowed_locked(struct task_struct *tsk, cpumask_t *pmask)
+void cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
 {
 	task_lock(tsk);
 	guarantee_online_cpus(task_cs(tsk), pmask);
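
A usage sketch, not from the patch: how a caller might use the converted cpuset_cpus_allowed() signature. The function example_query_allowed() and its body are hypothetical; the helpers it relies on (alloc_cpumask_var, cpumask_weight, free_cpumask_var) are the standard new-API routines, assuming CONFIG_CPUSETS is enabled.

```c
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/slab.h>

/* Hypothetical caller: count how many CPUs tsk's cpuset allows. */
static int example_query_allowed(struct task_struct *tsk)
{
	cpumask_var_t mask;
	int weight;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* Fills mask from tsk's cpuset; documented to be non-empty. */
	cpuset_cpus_allowed(tsk, mask);
	weight = cpumask_weight(mask);

	free_cpumask_var(mask);
	return weight;
}
```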