author		Rusty Russell <rusty@rustcorp.com.au>	2009-03-31 00:05:15 -0400
committer	Rusty Russell <rusty@rustcorp.com.au>	2009-03-30 07:35:16 -0400
commit		aa85ea5b89c36c51200d795dd788139bd9b8cf50 (patch)
tree		0b68a35b691417d927127376beb0541d96c9cc64
parent		1a8a51004a18b627ea81444201f7867875212f46 (diff)
cpumask: use new cpumask_ functions in core code.
Impact: cleanup

Time to clean up remaining laggards using the old cpu_ functions.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Greg Kroah-Hartman <gregkh@suse.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Trond.Myklebust@netapp.com
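The whole series is one mechanical conversion: the old API handled cpumask_t as a plain struct, the new one works through const struct cpumask pointers. A minimal before/after sketch of the idioms this commit touches (illustrative only, not code from the patch; do_work() is a placeholder):

	/* Sketch only: the conversion pattern, not code from this patch. */
	static void example(struct cpumask *mask)
	{
		int cpu;

		/* Old idioms: cpumask_t handled as a plain struct. */
		*mask = cpu_possible_map;		/* whole-struct assignment */
		for_each_cpu_mask_nr(cpu, *mask)	/* iterator took the mask itself */
			do_work(cpu);

		/* New idioms: everything goes through const struct cpumask *. */
		cpumask_copy(mask, cpu_possible_mask);	/* explicit copy helper */
		for_each_cpu(cpu, mask)			/* iterator takes a pointer */
			do_work(cpu);
	}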
-rw-r--r--	drivers/base/cpu.c	2
-rw-r--r--	include/linux/cpuset.h	4
-rw-r--r--	kernel/workqueue.c	6
-rw-r--r--	mm/allocpercpu.c	2
-rw-r--r--	mm/vmstat.c	2
-rw-r--r--	net/sunrpc/svc.c	2
6 files changed, 9 insertions, 9 deletions
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 5b257a57bc57..e62a4ccea54d 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -119,7 +119,7 @@ static ssize_t print_cpus_map(char *buf, const struct cpumask *map)
 #define print_cpus_func(type) \
 static ssize_t print_cpus_##type(struct sysdev_class *class, char *buf) \
 { \
-	return print_cpus_map(buf, &cpu_##type##_map); \
+	return print_cpus_map(buf, cpu_##type##_mask); \
 } \
 static struct sysdev_class_attribute attr_##type##_map = \
 	_SYSDEV_CLASS_ATTR(type, 0444, print_cpus_##type, NULL)
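As a reading aid, print_cpus_func(online) now expands roughly to the following (sketch of the macro expansion, not part of the patch):

	static ssize_t print_cpus_online(struct sysdev_class *class, char *buf)
	{
		/* type token pasted in: cpu_##type##_mask -> cpu_online_mask */
		return print_cpus_map(buf, cpu_online_mask);
	}
	static struct sysdev_class_attribute attr_online_map =
		_SYSDEV_CLASS_ATTR(online, 0444, print_cpus_online, NULL);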
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 90c6074a36ca..2e0d79678deb 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -90,12 +90,12 @@ static inline void cpuset_init_smp(void) {}
 static inline void cpuset_cpus_allowed(struct task_struct *p,
 				       struct cpumask *mask)
 {
-	*mask = cpu_possible_map;
+	cpumask_copy(mask, cpu_possible_mask);
 }
 static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
 					      struct cpumask *mask)
 {
-	*mask = cpu_possible_map;
+	cpumask_copy(mask, cpu_possible_mask);
 }
 
 static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
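The reason cpumask_copy() replaces struct assignment here: once CONFIG_CPUMASK_OFFSTACK allows cpumasks to live off-stack and be sized to nr_cpumask_bits, whole-struct assignment is no longer a valid way to copy one. A hedged sketch of the allocation pattern the helper supports (illustrative, not from this patch):

	cpumask_var_t mask;	/* a pointer when CONFIG_CPUMASK_OFFSTACK=y */

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_copy(mask, cpu_possible_mask);	/* copies exactly nr_cpumask_bits bits */
	/* ... use mask ... */
	free_cpumask_var(mask);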
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1f0c509b40d3..9aedd9fd825b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -416,7 +416,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 	might_sleep();
 	lock_map_acquire(&wq->lockdep_map);
 	lock_map_release(&wq->lockdep_map);
-	for_each_cpu_mask_nr(cpu, *cpu_map)
+	for_each_cpu(cpu, cpu_map)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -547,7 +547,7 @@ static void wait_on_work(struct work_struct *work)
 	wq = cwq->wq;
 	cpu_map = wq_cpu_map(wq);
 
-	for_each_cpu_mask_nr(cpu, *cpu_map)
+	for_each_cpu(cpu, cpu_map)
 		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 
@@ -911,7 +911,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);
 
-	for_each_cpu_mask_nr(cpu, *cpu_map)
+	for_each_cpu(cpu, cpu_map)
 		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
 	cpu_maps_update_done();
 
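All three workqueue hunks are the same change: for_each_cpu_mask_nr() took the mask itself, so callers dereferenced cpu_map, while for_each_cpu() takes the const struct cpumask * directly. Side by side (sketch only; handle_cpu() is a placeholder):

	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	for_each_cpu_mask_nr(cpu, *cpu_map)	/* old: dereference required */
		handle_cpu(cpu);

	for_each_cpu(cpu, cpu_map)		/* new: pointer passed straight through */
		handle_cpu(cpu);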
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index 1882923bc706..139d5b7b6621 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -143,7 +143,7 @@ void free_percpu(void *__pdata)
 {
 	if (unlikely(!__pdata))
 		return;
-	__percpu_depopulate_mask(__pdata, &cpu_possible_map);
+	__percpu_depopulate_mask(__pdata, cpu_possible_mask);
 	kfree(__percpu_disguise(__pdata));
 }
 EXPORT_SYMBOL_GPL(free_percpu);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 91149746bb8d..8cd81ea1ddc1 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -27,7 +27,7 @@ static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
 
 	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
 
-	for_each_cpu_mask_nr(cpu, *cpumask) {
+	for_each_cpu(cpu, cpumask) {
 		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
 
 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index c51fed4d1af1..bb507e2bb94d 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -312,7 +312,7 @@ svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
 	switch (m->mode) {
 	case SVC_POOL_PERCPU:
 	{
-		set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
+		set_cpus_allowed_ptr(task, cpumask_of(node));
 		break;
 	}
 	case SVC_POOL_PERNODE:
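The sunrpc change follows the same logic: cpumask_of_cpu(cpu) built a full cpumask_t value, forcing callers to take its address, while cpumask_of(cpu) returns a const struct cpumask * directly. Sketch of the two call styles (illustrative only):

	/* Old: temporary cpumask_t value, address taken at the call site. */
	set_cpus_allowed_ptr(task, &cpumask_of_cpu(cpu));

	/* New: no temporary, no '&'; cpumask_of() already yields a pointer. */
	set_cpus_allowed_ptr(task, cpumask_of(cpu));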