author    KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>  2011-05-16 16:38:07 -0400
committer David S. Miller <davem@davemloft.net>  2011-05-16 16:38:07 -0400
commit    fb1fece5da027d3c7e69cf44ca8e58aaf0faf520 (patch)
tree      e2c50029304ea0eebef9ca40e8e33888900b7b72
parent    55dd23eca666876e6028aa35d5e391cfced54871 (diff)
sparc: convert old cpumask API into new one
Adapt to the new cpumask API. Most of the changes are trivial renames; the
most important ones remove by-value cpumask assignments such as:

	cpumask_t cpu_mask = *mm_cpumask(mm);
	cpus_allowed = current->cpus_allowed;

because cpumask_var_t is not safe to copy with the = operator (under
CONFIG_CPUMASK_OFFSTACK it is a pointer, not a struct). Such usage would
stand in the way of kernel core improvements. No functional change.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
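The conversion pattern is easiest to see side by side. Below is a minimal
sketch, not part of the patch itself: example_flush_others() is a
hypothetical helper, but the cpumask calls are the real replacements this
commit applies throughout.

	#include <linux/cpumask.h>
	#include <linux/mm_types.h>
	#include <linux/smp.h>

	/* Hypothetical helper for illustration only -- not from the patch.
	 *
	 * Old idiom (removed by this commit):
	 *
	 *	cpumask_t cpu_mask = *mm_cpumask(mm);	// struct copy via '='
	 *	cpu_clear(smp_processor_id(), cpu_mask);
	 *	if (!cpus_empty(cpu_mask))
	 *		...
	 *
	 * The '=' copy silently breaks once cpumask_var_t becomes a pointer
	 * (CONFIG_CPUMASK_OFFSTACK), so the new API copies explicitly.
	 */
	static void example_flush_others(struct mm_struct *mm)
	{
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));	/* explicit copy */
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			; /* cross-call the remaining CPUs here */
	}

The same mechanical substitutions recur throughout the diff below:
cpu_isset() becomes cpumask_test_cpu(); cpus_and(), cpus_clear() and
cpus_weight() become cpumask_and(), cpumask_clear() and cpumask_weight();
first_cpu() becomes cpumask_first(); for_each_cpu_mask() becomes
for_each_cpu(); and direct uses of cpu_online_map/cpu_present_map go through
the cpu_online_mask/cpu_present_mask pointers or the set_cpu_online() and
set_cpu_present() helpers.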
-rw-r--r--  arch/sparc/include/asm/smp_32.h  |  10
-rw-r--r--  arch/sparc/kernel/cpumap.c       |   4
-rw-r--r--  arch/sparc/kernel/ds.c           |  14
-rw-r--r--  arch/sparc/kernel/irq_64.c       |   6
-rw-r--r--  arch/sparc/kernel/leon_smp.c     |  20
-rw-r--r--  arch/sparc/kernel/mdesc.c        |   2
-rw-r--r--  arch/sparc/kernel/of_device_64.c |   3
-rw-r--r--  arch/sparc/kernel/pci_msi.c      |   3
-rw-r--r--  arch/sparc/kernel/smp_32.c       |  51
-rw-r--r--  arch/sparc/kernel/smp_64.c       |  58
-rw-r--r--  arch/sparc/kernel/sun4d_smp.c    |  12
-rw-r--r--  arch/sparc/kernel/sun4m_smp.c    |  12
-rw-r--r--  arch/sparc/kernel/sysfs.c        |   3
-rw-r--r--  arch/sparc/kernel/us2e_cpufreq.c |   4
-rw-r--r--  arch/sparc/kernel/us3_cpufreq.c  |   4
-rw-r--r--  arch/sparc/mm/init_64.c          |  14
16 files changed, 115 insertions(+), 105 deletions(-)
diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h
index 7a8e6cbd640c..093f10843ff2 100644
--- a/arch/sparc/include/asm/smp_32.h
+++ b/arch/sparc/include/asm/smp_32.h
@@ -68,17 +68,17 @@ BTFIXUPDEF_BLACKBOX(load_current)
 
 #define smp_cross_call(func,mask,arg1,arg2,arg3,arg4) BTFIXUP_CALL(smp_cross_call)(func,mask,arg1,arg2,arg3,arg4)
 
-static inline void xc0(smpfunc_t func) { smp_cross_call(func, cpu_online_map, 0, 0, 0, 0); }
+static inline void xc0(smpfunc_t func) { smp_cross_call(func, *cpu_online_mask, 0, 0, 0, 0); }
 static inline void xc1(smpfunc_t func, unsigned long arg1)
-{ smp_cross_call(func, cpu_online_map, arg1, 0, 0, 0); }
+{ smp_cross_call(func, *cpu_online_mask, arg1, 0, 0, 0); }
 static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
-{ smp_cross_call(func, cpu_online_map, arg1, arg2, 0, 0); }
+{ smp_cross_call(func, *cpu_online_mask, arg1, arg2, 0, 0); }
 static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
		       unsigned long arg3)
-{ smp_cross_call(func, cpu_online_map, arg1, arg2, arg3, 0); }
+{ smp_cross_call(func, *cpu_online_mask, arg1, arg2, arg3, 0); }
 static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
		       unsigned long arg3, unsigned long arg4)
-{ smp_cross_call(func, cpu_online_map, arg1, arg2, arg3, arg4); }
+{ smp_cross_call(func, *cpu_online_mask, arg1, arg2, arg3, arg4); }
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
index 8de64c8126bc..d91fd782743a 100644
--- a/arch/sparc/kernel/cpumap.c
+++ b/arch/sparc/kernel/cpumap.c
@@ -202,7 +202,7 @@ static struct cpuinfo_tree *build_cpuinfo_tree(void)
	new_tree->total_nodes = n;
	memcpy(&new_tree->level, tmp_level, sizeof(tmp_level));
 
-	prev_cpu = cpu = first_cpu(cpu_online_map);
+	prev_cpu = cpu = cpumask_first(cpu_online_mask);
 
	/* Initialize all levels in the tree with the first CPU */
	for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT; level--) {
@@ -381,7 +381,7 @@ static int simple_map_to_cpu(unsigned int index)
	}
 
	/* Impossible, since num_online_cpus() <= num_possible_cpus() */
-	return first_cpu(cpu_online_map);
+	return cpumask_first(cpu_online_mask);
 }
 
 static int _map_to_cpu(unsigned int index)
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 3add4de8a1a9..dd1342c0a3be 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -497,7 +497,7 @@ static void dr_cpu_init_response(struct ds_data *resp, u64 req_num,
	tag->num_records = ncpus;
 
	i = 0;
-	for_each_cpu_mask(cpu, *mask) {
+	for_each_cpu(cpu, mask) {
		ent[i].cpu = cpu;
		ent[i].result = DR_CPU_RES_OK;
		ent[i].stat = default_stat;
@@ -534,7 +534,7 @@ static int __cpuinit dr_cpu_configure(struct ds_info *dp,
	int resp_len, ncpus, cpu;
	unsigned long flags;
 
-	ncpus = cpus_weight(*mask);
+	ncpus = cpumask_weight(mask);
	resp_len = dr_cpu_size_response(ncpus);
	resp = kzalloc(resp_len, GFP_KERNEL);
	if (!resp)
@@ -547,7 +547,7 @@ static int __cpuinit dr_cpu_configure(struct ds_info *dp,
	mdesc_populate_present_mask(mask);
	mdesc_fill_in_cpu_data(mask);
 
-	for_each_cpu_mask(cpu, *mask) {
+	for_each_cpu(cpu, mask) {
		int err;
 
		printk(KERN_INFO "ds-%llu: Starting cpu %d...\n",
@@ -593,7 +593,7 @@ static int dr_cpu_unconfigure(struct ds_info *dp,
	int resp_len, ncpus, cpu;
	unsigned long flags;
 
-	ncpus = cpus_weight(*mask);
+	ncpus = cpumask_weight(mask);
	resp_len = dr_cpu_size_response(ncpus);
	resp = kzalloc(resp_len, GFP_KERNEL);
	if (!resp)
@@ -603,7 +603,7 @@ static int dr_cpu_unconfigure(struct ds_info *dp,
			     resp_len, ncpus, mask,
			     DR_CPU_STAT_UNCONFIGURED);
 
-	for_each_cpu_mask(cpu, *mask) {
+	for_each_cpu(cpu, mask) {
		int err;
 
		printk(KERN_INFO "ds-%llu: Shutting down cpu %d...\n",
@@ -649,13 +649,13 @@ static void __cpuinit dr_cpu_data(struct ds_info *dp,
 
	purge_dups(cpu_list, tag->num_records);
 
-	cpus_clear(mask);
+	cpumask_clear(&mask);
	for (i = 0; i < tag->num_records; i++) {
		if (cpu_list[i] == CPU_SENTINEL)
			continue;
 
		if (cpu_list[i] < nr_cpu_ids)
-			cpu_set(cpu_list[i], mask);
+			cpumask_set_cpu(cpu_list[i], &mask);
	}
 
	if (tag->type == DR_CPU_CONFIGURE)
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index b1d275ce3435..4e78862d12fd 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -224,13 +224,13 @@ static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
	int cpuid;
 
	cpumask_copy(&mask, affinity);
-	if (cpus_equal(mask, cpu_online_map)) {
+	if (cpumask_equal(&mask, cpu_online_mask)) {
		cpuid = map_to_cpu(irq);
	} else {
		cpumask_t tmp;
 
-		cpus_and(tmp, cpu_online_map, mask);
-		cpuid = cpus_empty(tmp) ? map_to_cpu(irq) : first_cpu(tmp);
+		cpumask_and(&tmp, cpu_online_mask, &mask);
+		cpuid = cpumask_empty(&tmp) ? map_to_cpu(irq) : cpumask_first(&tmp);
	}
 
	return cpuid;
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index d95e456a04b6..fe8fb44c609c 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -107,11 +107,11 @@ void __cpuinit leon_callin(void)
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
 
-	while (!cpu_isset(cpuid, smp_commenced_mask))
+	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		mb();
 
	local_irq_enable();
-	cpu_set(cpuid, cpu_online_map);
+	set_cpu_online(cpuid, true);
 }
 
 /*
@@ -272,21 +272,21 @@ void __init leon_smp_done(void)
	local_flush_cache_all();
 
	/* Free unneeded trap tables */
-	if (!cpu_isset(1, cpu_present_map)) {
+	if (!cpu_present(1)) {
		ClearPageReserved(virt_to_page(&trapbase_cpu1));
		init_page_count(virt_to_page(&trapbase_cpu1));
		free_page((unsigned long)&trapbase_cpu1);
		totalram_pages++;
		num_physpages++;
	}
-	if (!cpu_isset(2, cpu_present_map)) {
+	if (!cpu_present(2)) {
		ClearPageReserved(virt_to_page(&trapbase_cpu2));
		init_page_count(virt_to_page(&trapbase_cpu2));
		free_page((unsigned long)&trapbase_cpu2);
		totalram_pages++;
		num_physpages++;
	}
-	if (!cpu_isset(3, cpu_present_map)) {
+	if (!cpu_present(3)) {
		ClearPageReserved(virt_to_page(&trapbase_cpu3));
		init_page_count(virt_to_page(&trapbase_cpu3));
		free_page((unsigned long)&trapbase_cpu3);
@@ -440,10 +440,10 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
 {
	register int i;
 
-	cpu_clear(smp_processor_id(), mask);
-	cpus_and(mask, cpu_online_map, mask);
+	cpumask_clear_cpu(smp_processor_id(), &mask);
+	cpumask_and(&mask, cpu_online_mask, &mask);
	for (i = 0; i <= high; i++) {
-		if (cpu_isset(i, mask)) {
+		if (cpumask_test_cpu(i, &mask)) {
			ccall_info.processors_in[i] = 0;
			ccall_info.processors_out[i] = 0;
			set_cpu_int(i, LEON3_IRQ_CROSS_CALL);
@@ -457,7 +457,7 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
 
		i = 0;
		do {
-			if (!cpu_isset(i, mask))
+			if (!cpumask_test_cpu(i, &mask))
				continue;
 
			while (!ccall_info.processors_in[i])
@@ -466,7 +466,7 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
 
		i = 0;
		do {
-			if (!cpu_isset(i, mask))
+			if (!cpumask_test_cpu(i, &mask))
				continue;
 
			while (!ccall_info.processors_out[i])
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 56db06432ce9..42f28c7420e1 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -768,7 +768,7 @@ static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handl
			       cpuid, NR_CPUS);
			continue;
		}
-		if (!cpu_isset(cpuid, *mask))
+		if (!cpumask_test_cpu(cpuid, mask))
			continue;
 #endif
 
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
index 5c149689bb20..3bb2eace58cf 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
@@ -622,8 +622,9 @@ static unsigned int __init build_one_device_irq(struct platform_device *op,
 out:
	nid = of_node_to_nid(dp);
	if (nid != -1) {
-		cpumask_t numa_mask = *cpumask_of_node(nid);
+		cpumask_t numa_mask;
 
+		cpumask_copy(&numa_mask, cpumask_of_node(nid));
		irq_set_affinity(irq, &numa_mask);
	}
 
diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c
index 30982e9ab626..580651af73f2 100644
--- a/arch/sparc/kernel/pci_msi.c
+++ b/arch/sparc/kernel/pci_msi.c
@@ -284,8 +284,9 @@ static int bringup_one_msi_queue(struct pci_pbm_info *pbm,
 
	nid = pbm->numa_node;
	if (nid != -1) {
-		cpumask_t numa_mask = *cpumask_of_node(nid);
+		cpumask_t numa_mask;
 
+		cpumask_copy(&numa_mask, cpumask_of_node(nid));
		irq_set_affinity(irq, &numa_mask);
	}
	err = request_irq(irq, sparc64_msiq_interrupt, 0,
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index 705a94e1b8a5..139c312a41f7 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -190,9 +190,10 @@ void smp_flush_tlb_all(void)
 void smp_flush_cache_mm(struct mm_struct *mm)
 {
	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = *mm_cpumask(mm);
-		cpu_clear(smp_processor_id(), cpu_mask);
-		if (!cpus_empty(cpu_mask))
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask))
			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
		local_flush_cache_mm(mm);
	}
@@ -201,9 +202,10 @@ void smp_flush_cache_mm(struct mm_struct *mm)
 void smp_flush_tlb_mm(struct mm_struct *mm)
 {
	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = *mm_cpumask(mm);
-		cpu_clear(smp_processor_id(), cpu_mask);
-		if (!cpus_empty(cpu_mask)) {
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask)) {
			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
			if(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
				cpumask_copy(mm_cpumask(mm),
@@ -219,9 +221,10 @@ void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
	struct mm_struct *mm = vma->vm_mm;
 
	if (mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = *mm_cpumask(mm);
-		cpu_clear(smp_processor_id(), cpu_mask);
-		if (!cpus_empty(cpu_mask))
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask))
			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
		local_flush_cache_range(vma, start, end);
	}
@@ -233,9 +236,10 @@ void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	struct mm_struct *mm = vma->vm_mm;
 
	if (mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = *mm_cpumask(mm);
-		cpu_clear(smp_processor_id(), cpu_mask);
-		if (!cpus_empty(cpu_mask))
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask))
			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
		local_flush_tlb_range(vma, start, end);
	}
@@ -246,9 +250,10 @@ void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
	struct mm_struct *mm = vma->vm_mm;
 
	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = *mm_cpumask(mm);
-		cpu_clear(smp_processor_id(), cpu_mask);
-		if (!cpus_empty(cpu_mask))
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask))
			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
		local_flush_cache_page(vma, page);
	}
@@ -259,9 +264,10 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
	struct mm_struct *mm = vma->vm_mm;
 
	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = *mm_cpumask(mm);
-		cpu_clear(smp_processor_id(), cpu_mask);
-		if (!cpus_empty(cpu_mask))
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask))
			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
		local_flush_tlb_page(vma, page);
	}
@@ -283,9 +289,10 @@ void smp_flush_page_to_ram(unsigned long page)
 
 void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
 {
-	cpumask_t cpu_mask = *mm_cpumask(mm);
-	cpu_clear(smp_processor_id(), cpu_mask);
-	if (!cpus_empty(cpu_mask))
+	cpumask_t cpu_mask;
+	cpumask_copy(&cpu_mask, mm_cpumask(mm));
+	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+	if (!cpumask_empty(&cpu_mask))
		xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
	local_flush_sig_insns(mm, insn_addr);
 }
@@ -439,7 +446,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
	};
 
	if (!ret) {
-		cpu_set(cpu, smp_commenced_mask);
+		cpumask_set_cpu(cpu, &smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
	}
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 3e94a8c23238..c274a30c3cbf 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -121,11 +121,11 @@ void __cpuinit smp_callin(void)
	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);
 
-	while (!cpu_isset(cpuid, smp_commenced_mask))
+	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		rmb();
 
	ipi_call_lock_irq();
-	cpu_set(cpuid, cpu_online_map);
+	set_cpu_online(cpuid, true);
	ipi_call_unlock_irq();
 
	/* idle thread is expected to have preempt disabled */
@@ -785,7 +785,7 @@ static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask
 
 /* Send cross call to all processors mentioned in MASK_P
  * except self.  Really, there are only two cases currently,
- * "&cpu_online_map" and "&mm->cpu_vm_mask".
+ * "cpu_online_mask" and "mm_cpumask(mm)".
  */
 static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
 {
@@ -797,7 +797,7 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d
 /* Send cross call to all processors except self. */
 static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
 {
-	smp_cross_call_masked(func, ctx, data1, data2, &cpu_online_map);
+	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
 }
 
 extern unsigned long xcall_sync_tick;
@@ -805,7 +805,7 @@ extern unsigned long xcall_sync_tick;
 static void smp_start_sync_tick_client(int cpu)
 {
	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
-		      &cpumask_of_cpu(cpu));
+		      cpumask_of(cpu));
 }
 
 extern unsigned long xcall_call_function;
@@ -820,7 +820,7 @@ extern unsigned long xcall_call_function_single;
 void arch_send_call_function_single_ipi(int cpu)
 {
	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
-		      &cpumask_of_cpu(cpu));
+		      cpumask_of(cpu));
 }
 
 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
@@ -918,7 +918,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
	}
	if (data0) {
		xcall_deliver(data0, __pa(pg_addr),
-			      (u64) pg_addr, &cpumask_of_cpu(cpu));
+			      (u64) pg_addr, cpumask_of(cpu));
 #ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
 #endif
@@ -954,7 +954,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
	}
	if (data0) {
		xcall_deliver(data0, __pa(pg_addr),
-			      (u64) pg_addr, &cpu_online_map);
+			      (u64) pg_addr, cpu_online_mask);
 #ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
 #endif
@@ -1197,32 +1197,32 @@ void __devinit smp_fill_in_sib_core_maps(void)
	for_each_present_cpu(i) {
		unsigned int j;
 
-		cpus_clear(cpu_core_map[i]);
+		cpumask_clear(&cpu_core_map[i]);
		if (cpu_data(i).core_id == 0) {
-			cpu_set(i, cpu_core_map[i]);
+			cpumask_set_cpu(i, &cpu_core_map[i]);
			continue;
		}
 
		for_each_present_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
-				cpu_set(j, cpu_core_map[i]);
+				cpumask_set_cpu(j, &cpu_core_map[i]);
		}
	}
 
	for_each_present_cpu(i) {
		unsigned int j;
 
-		cpus_clear(per_cpu(cpu_sibling_map, i));
+		cpumask_clear(&per_cpu(cpu_sibling_map, i));
		if (cpu_data(i).proc_id == -1) {
-			cpu_set(i, per_cpu(cpu_sibling_map, i));
+			cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
			continue;
		}
 
		for_each_present_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
-				cpu_set(j, per_cpu(cpu_sibling_map, i));
+				cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
		}
	}
 }
@@ -1232,10 +1232,10 @@ int __cpuinit __cpu_up(unsigned int cpu)
	int ret = smp_boot_one_cpu(cpu);
 
	if (!ret) {
-		cpu_set(cpu, smp_commenced_mask);
-		while (!cpu_isset(cpu, cpu_online_map))
+		cpumask_set_cpu(cpu, &smp_commenced_mask);
+		while (!cpu_online(cpu))
			mb();
-		if (!cpu_isset(cpu, cpu_online_map)) {
+		if (!cpu_online(cpu)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
@@ -1269,7 +1269,7 @@ void cpu_play_dead(void)
				tb->nonresum_mondo_pa, 0);
	}
 
-	cpu_clear(cpu, smp_commenced_mask);
+	cpumask_clear_cpu(cpu, &smp_commenced_mask);
	membar_safe("#Sync");
 
	local_irq_disable();
@@ -1290,13 +1290,13 @@ int __cpu_disable(void)
	cpuinfo_sparc *c;
	int i;
 
-	for_each_cpu_mask(i, cpu_core_map[cpu])
-		cpu_clear(cpu, cpu_core_map[i]);
-	cpus_clear(cpu_core_map[cpu]);
+	for_each_cpu(i, &cpu_core_map[cpu])
+		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
+	cpumask_clear(&cpu_core_map[cpu]);
 
-	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
-		cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
-	cpus_clear(per_cpu(cpu_sibling_map, cpu));
+	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
+		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
+	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
 
	c = &cpu_data(cpu);
 
@@ -1313,7 +1313,7 @@ int __cpu_disable(void)
	local_irq_disable();
 
	ipi_call_lock();
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
	ipi_call_unlock();
 
	cpu_map_rebuild();
@@ -1327,11 +1327,11 @@ void __cpu_die(unsigned int cpu)
 
	for (i = 0; i < 100; i++) {
		smp_rmb();
-		if (!cpu_isset(cpu, smp_commenced_mask))
+		if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
			break;
		msleep(100);
	}
-	if (cpu_isset(cpu, smp_commenced_mask)) {
+	if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	} else {
 #if defined(CONFIG_SUN_LDOMS)
@@ -1341,7 +1341,7 @@ void __cpu_die(unsigned int cpu)
		do {
			hv_err = sun4v_cpu_stop(cpu);
			if (hv_err == HV_EOK) {
-				cpu_clear(cpu, cpu_present_map);
+				set_cpu_present(cpu, false);
				break;
			}
		} while (--limit > 0);
@@ -1362,7 +1362,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 void smp_send_reschedule(int cpu)
 {
	xcall_deliver((u64) &xcall_receive_signal, 0, 0,
-		      &cpumask_of_cpu(cpu));
+		      cpumask_of(cpu));
 }
 
 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 30ca6245692f..133387980b56 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -104,7 +104,7 @@ void __cpuinit smp4d_callin(void)
 
	local_irq_enable();	/* We don't allow PIL 14 yet */
 
-	while (!cpu_isset(cpuid, smp_commenced_mask))
+	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		barrier();
 
	spin_lock_irqsave(&sun4d_imsk_lock, flags);
@@ -313,10 +313,10 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
 {
	register int i;
 
-	cpu_clear(smp_processor_id(), mask);
-	cpus_and(mask, cpu_online_map, mask);
+	cpumask_clear_cpu(smp_processor_id(), &mask);
+	cpumask_and(&mask, cpu_online_mask, &mask);
	for (i = 0; i <= high; i++) {
-		if (cpu_isset(i, mask)) {
+		if (cpumask_test_cpu(i, &mask)) {
			ccall_info.processors_in[i] = 0;
			ccall_info.processors_out[i] = 0;
			sun4d_send_ipi(i, IRQ_CROSS_CALL);
@@ -329,7 +329,7 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
 
	i = 0;
	do {
-		if (!cpu_isset(i, mask))
+		if (!cpumask_test_cpu(i, &mask))
			continue;
		while (!ccall_info.processors_in[i])
			barrier();
@@ -337,7 +337,7 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
 
	i = 0;
	do {
-		if (!cpu_isset(i, mask))
+		if (!cpumask_test_cpu(i, &mask))
			continue;
		while (!ccall_info.processors_out[i])
			barrier();
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index e2e687312e2e..594768686525 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -72,7 +72,7 @@ void __cpuinit smp4m_callin(void)
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
 
-	while (!cpu_isset(cpuid, smp_commenced_mask))
+	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		mb();
 
	local_irq_enable();
@@ -209,10 +209,10 @@ static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
 {
	register int i;
 
-	cpu_clear(smp_processor_id(), mask);
-	cpus_and(mask, cpu_online_map, mask);
+	cpumask_clear_cpu(smp_processor_id(), &mask);
+	cpumask_and(&mask, cpu_online_mask, &mask);
	for (i = 0; i < ncpus; i++) {
-		if (cpu_isset(i, mask)) {
+		if (cpumask_test_cpu(i, &mask)) {
			ccall_info.processors_in[i] = 0;
			ccall_info.processors_out[i] = 0;
			set_cpu_int(i, IRQ_CROSS_CALL);
@@ -228,7 +228,7 @@ static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
 
	i = 0;
	do {
-		if (!cpu_isset(i, mask))
+		if (!cpumask_test_cpu(i, &mask))
			continue;
		while (!ccall_info.processors_in[i])
			barrier();
@@ -236,7 +236,7 @@ static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
 
	i = 0;
	do {
-		if (!cpu_isset(i, mask))
+		if (!cpumask_test_cpu(i, &mask))
			continue;
		while (!ccall_info.processors_out[i])
			barrier();
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index 1eb8b00aed75..7408201d7efb 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -103,9 +103,10 @@ static unsigned long run_on_cpu(unsigned long cpu,
			       unsigned long (*func)(unsigned long),
			       unsigned long arg)
 {
-	cpumask_t old_affinity = current->cpus_allowed;
+	cpumask_t old_affinity;
	unsigned long ret;
 
+	cpumask_copy(&old_affinity, tsk_cpus_allowed(current));
	/* should return -EINVAL to userspace */
	if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
		return 0;
diff --git a/arch/sparc/kernel/us2e_cpufreq.c b/arch/sparc/kernel/us2e_cpufreq.c
index 8f982b76c712..531d54fc9829 100644
--- a/arch/sparc/kernel/us2e_cpufreq.c
+++ b/arch/sparc/kernel/us2e_cpufreq.c
@@ -237,7 +237,7 @@ static unsigned int us2e_freq_get(unsigned int cpu)
	if (!cpu_online(cpu))
		return 0;
 
-	cpus_allowed = current->cpus_allowed;
+	cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
	clock_tick = sparc64_get_clock_tick(cpu) / 1000;
@@ -258,7 +258,7 @@ static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index)
	if (!cpu_online(cpu))
		return;
 
-	cpus_allowed = current->cpus_allowed;
+	cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
	new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c
index f35d1e794548..9a8ceb700833 100644
--- a/arch/sparc/kernel/us3_cpufreq.c
+++ b/arch/sparc/kernel/us3_cpufreq.c
@@ -85,7 +85,7 @@ static unsigned int us3_freq_get(unsigned int cpu)
	if (!cpu_online(cpu))
		return 0;
 
-	cpus_allowed = current->cpus_allowed;
+	cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
	reg = read_safari_cfg();
@@ -105,7 +105,7 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
	if (!cpu_online(cpu))
		return;
 
-	cpus_allowed = current->cpus_allowed;
+	cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
	new_freq = sparc64_get_clock_tick(cpu) / 1000;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 2f6ae1d1fb6b..e10cd03fab80 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -862,7 +862,7 @@ static void init_node_masks_nonnuma(void)
	for (i = 0; i < NR_CPUS; i++)
		numa_cpu_lookup_table[i] = 0;
 
-	numa_cpumask_lookup_table[0] = CPU_MASK_ALL;
+	cpumask_setall(&numa_cpumask_lookup_table[0]);
 }
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
@@ -1080,7 +1080,7 @@ static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
 {
	u64 arc;
 
-	cpus_clear(*mask);
+	cpumask_clear(mask);
 
	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(md, arc);
@@ -1091,7 +1091,7 @@ static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
			continue;
		id = mdesc_get_property(md, target, "id", NULL);
		if (*id < nr_cpu_ids)
-			cpu_set(*id, *mask);
+			cpumask_set_cpu(*id, mask);
	}
 }
 
@@ -1153,13 +1153,13 @@ static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
 
	numa_parse_mdesc_group_cpus(md, grp, &mask);
 
-	for_each_cpu_mask(cpu, mask)
+	for_each_cpu(cpu, &mask)
		numa_cpu_lookup_table[cpu] = index;
-	numa_cpumask_lookup_table[index] = mask;
+	cpumask_copy(&numa_cpumask_lookup_table[index], &mask);
 
	if (numa_debug) {
		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
-		for_each_cpu_mask(cpu, mask)
+		for_each_cpu(cpu, &mask)
			printk("%d ", cpu);
		printk("]\n");
	}
@@ -1218,7 +1218,7 @@ static int __init numa_parse_jbus(void)
	index = 0;
	for_each_present_cpu(cpu) {
		numa_cpu_lookup_table[cpu] = index;
-		numa_cpumask_lookup_table[index] = cpumask_of_cpu(cpu);
+		cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
		node_masks[index].val = cpu << 36UL;
 