Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/include/asm/smp.h |  2
-rw-r--r--  arch/alpha/include/asm/topology.h | 18
-rw-r--r--  arch/alpha/kernel/smp.c | 14
-rw-r--r--  arch/arm/include/asm/cacheflush.h |  8
-rw-r--r--  arch/arm/include/asm/mmu_context.h |  7
-rw-r--r--  arch/arm/include/asm/smp.h |  1
-rw-r--r--  arch/arm/include/asm/tlbflush.h |  4
-rw-r--r--  arch/arm/kernel/smp.c | 10
-rw-r--r--  arch/arm/mm/context.c |  2
-rw-r--r--  arch/arm/mm/flush.c | 10
-rw-r--r--  arch/ia64/include/asm/smp.h |  1
-rw-r--r--  arch/ia64/include/asm/topology.h |  3
-rw-r--r--  arch/ia64/kernel/smp.c |  2
-rw-r--r--  arch/m32r/include/asm/mmu_context.h |  4
-rw-r--r--  arch/m32r/include/asm/smp.h |  2
-rw-r--r--  arch/m32r/kernel/smp.c | 30
-rw-r--r--  arch/m32r/kernel/smpboot.c |  2
-rw-r--r--  arch/mips/alchemy/common/time.c |  2
-rw-r--r--  arch/mips/include/asm/mach-ip27/topology.h |  2
-rw-r--r--  arch/mips/include/asm/mmu_context.h | 10
-rw-r--r--  arch/mips/include/asm/smp-ops.h |  2
-rw-r--r--  arch/mips/include/asm/smp.h |  2
-rw-r--r--  arch/mips/kernel/smp-cmp.c |  6
-rw-r--r--  arch/mips/kernel/smp-mt.c |  6
-rw-r--r--  arch/mips/kernel/smp-up.c |  3
-rw-r--r--  arch/mips/kernel/smp.c |  8
-rw-r--r--  arch/mips/kernel/smtc.c |  6
-rw-r--r--  arch/mips/mipssim/sim_smtc.c |  5
-rw-r--r--  arch/mips/mm/c-octeon.c |  2
-rw-r--r--  arch/mips/mti-malta/malta-smtc.c |  4
-rw-r--r--  arch/mips/pmc-sierra/yosemite/smp.c |  4
-rw-r--r--  arch/mips/sgi-ip27/ip27-memory.c |  2
-rw-r--r--  arch/mips/sgi-ip27/ip27-smp.c |  4
-rw-r--r--  arch/mips/sibyte/bcm1480/smp.c |  5
-rw-r--r--  arch/mips/sibyte/sb1250/smp.c |  5
-rw-r--r--  arch/mn10300/include/asm/mmu_context.h | 12
-rw-r--r--  arch/parisc/include/asm/smp.h |  1
-rw-r--r--  arch/powerpc/include/asm/smp.h |  2
-rw-r--r--  arch/powerpc/include/asm/topology.h | 12
-rw-r--r--  arch/powerpc/kernel/setup-common.c |  6
-rw-r--r--  arch/powerpc/kernel/smp.c | 12
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c |  6
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-cpu.c |  6
-rw-r--r--  arch/s390/include/asm/smp.h |  2
-rw-r--r--  arch/s390/include/asm/topology.h |  1
-rw-r--r--  arch/s390/kernel/smp.c |  4
-rw-r--r--  arch/sh/include/asm/smp.h |  1
-rw-r--r--  arch/sh/include/asm/topology.h |  1
-rw-r--r--  arch/sparc/include/asm/smp_64.h |  1
-rw-r--r--  arch/sparc/include/asm/topology_64.h | 16
-rw-r--r--  arch/um/include/asm/mmu_context.h |  4
-rw-r--r--  arch/um/kernel/smp.c |  2
-rw-r--r--  arch/x86/include/asm/mmu_context.h |  6
-rw-r--r--  arch/x86/include/asm/smp.h |  1
-rw-r--r--  arch/x86/kernel/apic/io_apic.c |  7
-rw-r--r--  arch/x86/kernel/ldt.c |  4
-rw-r--r--  arch/x86/kernel/process.c |  6
-rw-r--r--  arch/x86/kernel/smpboot.c |  9
-rw-r--r--  arch/x86/kernel/time.c |  1
-rw-r--r--  arch/x86/mm/tlb.c | 15
-rw-r--r--  arch/x86/xen/mmu.c |  4
61 files changed, 139 insertions(+), 201 deletions(-)
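The per-file diffs below all apply one pattern. As a minimal sketch of that
pattern (illustrative only, not part of the patch; send_one_ipi() stands in
for whatever per-arch delivery routine is used): by-value cpumask_t
parameters become const struct cpumask pointers, for_each_cpu_mask()
becomes for_each_cpu(), and direct mm->cpu_vm_mask accesses go through the
mm_cpumask() accessor.

	/* Old style: the full NR_CPUS-bit mask is copied at every call. */
	void arch_send_call_function_ipi(cpumask_t mask)
	{
		int cpu;

		for_each_cpu_mask(cpu, mask)
			send_one_ipi(cpu);	/* hypothetical helper */
	}

	/* New style: only a pointer is passed, so with large NR_CPUS the
	 * mask no longer has to be duplicated on the caller's stack. */
	void arch_send_call_function_ipi_mask(const struct cpumask *mask)
	{
		int cpu;

		for_each_cpu(cpu, mask)
			send_one_ipi(cpu);	/* hypothetical helper */
	}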
diff --git a/arch/alpha/include/asm/smp.h b/arch/alpha/include/asm/smp.h
index 547e90951cec..3f390e8cc0b3 100644
--- a/arch/alpha/include/asm/smp.h
+++ b/arch/alpha/include/asm/smp.h
@@ -47,7 +47,7 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS];
 extern int smp_num_cpus;
 
 extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 #else /* CONFIG_SMP */
 
diff --git a/arch/alpha/include/asm/topology.h b/arch/alpha/include/asm/topology.h
index b4f284c72ff3..36b3a30ba0e5 100644
--- a/arch/alpha/include/asm/topology.h
+++ b/arch/alpha/include/asm/topology.h
@@ -22,23 +22,6 @@ static inline int cpu_to_node(int cpu)
 	return node;
 }
 
-static inline cpumask_t node_to_cpumask(int node)
-{
-	cpumask_t node_cpu_mask = CPU_MASK_NONE;
-	int cpu;
-
-	for_each_online_cpu(cpu) {
-		if (cpu_to_node(cpu) == node)
-			cpu_set(cpu, node_cpu_mask);
-	}
-
-#ifdef DEBUG_NUMA
-	printk("node %d: cpu_mask: %016lx\n", node, node_cpu_mask);
-#endif
-
-	return node_cpu_mask;
-}
-
 extern struct cpumask node_to_cpumask_map[];
 /* FIXME: This is dumb, recalculating every time. But simple. */
 static const struct cpumask *cpumask_of_node(int node)
@@ -55,7 +38,6 @@ static const struct cpumask *cpumask_of_node(int node)
 	return &node_to_cpumask_map[node];
 }
 
-#define pcibus_to_cpumask(bus)	(cpu_online_map)
 #define cpumask_of_pcibus(bus)	(cpu_online_mask)
 
 #endif /* !CONFIG_NUMA */
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index b1fe5674c3a1..42aa078a5e4d 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -548,16 +548,16 @@ setup_profiling_timer(unsigned int multiplier)
 
 
 static void
-send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
+send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
 {
 	int i;
 
 	mb();
-	for_each_cpu_mask(i, to_whom)
+	for_each_cpu(i, to_whom)
 		set_bit(operation, &ipi_data[i].bits);
 
 	mb();
-	for_each_cpu_mask(i, to_whom)
+	for_each_cpu(i, to_whom)
 		wripir(i);
 }
 
@@ -624,7 +624,7 @@ smp_send_reschedule(int cpu)
 	printk(KERN_WARNING
 	       "smp_send_reschedule: Sending IPI to self.\n");
 #endif
-	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
+	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
 void
@@ -636,17 +636,17 @@ smp_send_stop(void)
 	if (hard_smp_processor_id() != boot_cpu_id)
 		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
 #endif
-	send_ipi_message(to_whom, IPI_CPU_STOP);
+	send_ipi_message(&to_whom, IPI_CPU_STOP);
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	send_ipi_message(mask, IPI_CALL_FUNC);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
+	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
 }
 
 static void
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 1a711ea8418b..fd03fb63a332 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -334,14 +334,14 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
 #ifndef CONFIG_CPU_CACHE_VIPT
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
-	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
 		__cpuc_flush_user_all();
 }
 
 static inline void
 flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
 		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
 					vma->vm_flags);
 }
@@ -349,7 +349,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long
 static inline void
 flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
-	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
 		unsigned long addr = user_addr & PAGE_MASK;
 		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
 	}
@@ -360,7 +360,7 @@ flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 			 unsigned long uaddr, void *kaddr,
 			 unsigned long len, int write)
 {
-	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
 		unsigned long addr = (unsigned long)kaddr;
 		__cpuc_coherent_kern_range(addr, addr + len);
 	}
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index bcdb9291ef0c..de6cefb329dd 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -103,14 +103,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
 #ifdef CONFIG_SMP
 	/* check for possible thread migration */
-	if (!cpus_empty(next->cpu_vm_mask) && !cpu_isset(cpu, next->cpu_vm_mask))
+	if (!cpumask_empty(mm_cpumask(next)) &&
+	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
 		__flush_icache_all();
 #endif
-	if (!cpu_test_and_set(cpu, next->cpu_vm_mask) || prev != next) {
+	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
 		check_context(next);
 		cpu_switch_mm(next->pgd, next);
 		if (cache_is_vivt())
-			cpu_clear(cpu, prev->cpu_vm_mask);
+			cpumask_clear_cpu(cpu, mm_cpumask(prev));
 	}
 #endif
 }
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index a06e735b262a..e0d763be1846 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -93,7 +93,6 @@ extern void platform_cpu_enable(unsigned int cpu);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
 
 /*
  * show local interrupt info
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index c964f3fc3bc5..a45ab5dd8255 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -350,7 +350,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
 	if (tlb_flag(TLB_WB))
 		dsb();
 
-	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) {
+	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
 		if (tlb_flag(TLB_V3_FULL))
 			asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
 		if (tlb_flag(TLB_V4_U_FULL))
@@ -388,7 +388,7 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 	if (tlb_flag(TLB_WB))
 		dsb();
 
-	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
 		if (tlb_flag(TLB_V3_PAGE))
 			asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc");
 		if (tlb_flag(TLB_V4_U_PAGE))
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index de885fd256c5..e0d32770bb3d 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -189,7 +189,7 @@ int __cpuexit __cpu_disable(void)
 	read_lock(&tasklist_lock);
 	for_each_process(p) {
 		if (p->mm)
-			cpu_clear(cpu, p->mm->cpu_vm_mask);
+			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
 	}
 	read_unlock(&tasklist_lock);
 
@@ -257,7 +257,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	atomic_inc(&mm->mm_users);
 	atomic_inc(&mm->mm_count);
 	current->active_mm = mm;
-	cpu_set(cpu, mm->cpu_vm_mask);
+	cpumask_set_cpu(cpu, mm_cpumask(mm));
 	cpu_switch_mm(mm->pgd, mm);
 	enter_lazy_tlb(mm, current);
 	local_flush_tlb_all();
@@ -643,7 +643,7 @@ void flush_tlb_all(void)
 void flush_tlb_mm(struct mm_struct *mm)
 {
 	if (tlb_ops_need_broadcast())
-		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
+		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
 	else
 		local_flush_tlb_mm(mm);
 }
@@ -654,7 +654,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 		struct tlb_args ta;
 		ta.ta_vma = vma;
 		ta.ta_start = uaddr;
-		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
 	} else
 		local_flush_tlb_page(vma, uaddr);
 }
@@ -677,7 +677,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
 		ta.ta_vma = vma;
 		ta.ta_start = start;
 		ta.ta_end = end;
-		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
 	} else
 		local_flush_tlb_range(vma, start, end);
 }
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index fc84fcc74380..6bda76a43199 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -59,6 +59,6 @@ void __new_context(struct mm_struct *mm)
 	}
 	spin_unlock(&cpu_asid_lock);
 
-	mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
+	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
 	mm->context.id = asid;
 }
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 575f3ad722e7..b27942909b23 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -50,7 +50,7 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 void flush_cache_mm(struct mm_struct *mm)
 {
 	if (cache_is_vivt()) {
-		if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
 			__cpuc_flush_user_all();
 		return;
 	}
@@ -73,7 +73,7 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 	if (cache_is_vivt()) {
-		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
 			__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
 						vma->vm_flags);
 		return;
@@ -97,7 +97,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
 	if (cache_is_vivt()) {
-		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
 			unsigned long addr = user_addr & PAGE_MASK;
 			__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
 		}
@@ -113,7 +113,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 			 unsigned long len, int write)
 {
 	if (cache_is_vivt()) {
-		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
 			unsigned long addr = (unsigned long)kaddr;
 			__cpuc_coherent_kern_range(addr, addr + len);
 		}
@@ -126,7 +126,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 	}
 
 	/* VIPT non-aliasing cache */
-	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) &&
+	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) &&
 	    vma->vm_flags & VM_EXEC) {
 		unsigned long addr = (unsigned long)kaddr;
 		/* only flushing the kernel mapping on non-aliasing VIPT */
diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h
index d217d1d4e051..0b3b3997decd 100644
--- a/arch/ia64/include/asm/smp.h
+++ b/arch/ia64/include/asm/smp.h
@@ -127,7 +127,6 @@ extern int is_multithreading_enabled(void);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
 
 #else /* CONFIG_SMP */
 
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index d0141fbf51d0..3ddb4e709dba 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -33,7 +33,6 @@
 /*
  * Returns a bitmask of CPUs on Node 'node'.
  */
-#define node_to_cpumask(node) (node_to_cpu_mask[node])
 #define cpumask_of_node(node) (&node_to_cpu_mask[node])
 
 /*
@@ -104,8 +103,6 @@ void build_cpu_to_node_map(void);
 #ifdef CONFIG_SMP
 #define topology_physical_package_id(cpu)	(cpu_data(cpu)->socket_id)
 #define topology_core_id(cpu)			(cpu_data(cpu)->core_id)
-#define topology_core_siblings(cpu)		(cpu_core_map[cpu])
-#define topology_thread_siblings(cpu)		(per_cpu(cpu_sibling_map, cpu))
 #define topology_core_cpumask(cpu)		(&cpu_core_map[cpu])
 #define topology_thread_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
 #define smt_capable()				(smp_num_siblings > 1)
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 93ebfea43c6c..dabeefe21134 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -302,7 +302,7 @@ smp_flush_tlb_mm (struct mm_struct *mm)
 		return;
 	}
 
-	smp_call_function_mask(mm->cpu_vm_mask,
+	smp_call_function_many(mm_cpumask(mm),
 		(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
 	local_irq_disable();
 	local_finish_flush_tlb_mm(mm);
diff --git a/arch/m32r/include/asm/mmu_context.h b/arch/m32r/include/asm/mmu_context.h
index 91909e5dd9d0..a70a3df33635 100644
--- a/arch/m32r/include/asm/mmu_context.h
+++ b/arch/m32r/include/asm/mmu_context.h
@@ -127,7 +127,7 @@ static inline void switch_mm(struct mm_struct *prev,
 
 	if (prev != next) {
 #ifdef CONFIG_SMP
-		cpu_set(cpu, next->cpu_vm_mask);
+		cpumask_set_cpu(cpu, mm_cpumask(next));
 #endif /* CONFIG_SMP */
 		/* Set MPTB = next->pgd */
 		*(volatile unsigned long *)MPTB = (unsigned long)next->pgd;
@@ -135,7 +135,7 @@ static inline void switch_mm(struct mm_struct *prev,
 	}
 #ifdef CONFIG_SMP
 	else
-		if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
+		if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)))
 			activate_context(next);
 #endif /* CONFIG_SMP */
 }
diff --git a/arch/m32r/include/asm/smp.h b/arch/m32r/include/asm/smp.h
index b96a6d2ffbc3..e67ded1aab91 100644
--- a/arch/m32r/include/asm/smp.h
+++ b/arch/m32r/include/asm/smp.h
@@ -88,7 +88,7 @@ extern void smp_send_timer(void);
 extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 
 extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 #endif /* not __ASSEMBLY__ */
 
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index 929e5c9d3ad9..1b7598e6f6e8 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -85,7 +85,7 @@ void smp_ipi_timer_interrupt(struct pt_regs *);
 void smp_local_timer_interrupt(void);
 
 static void send_IPI_allbutself(int, int);
-static void send_IPI_mask(cpumask_t, int, int);
+static void send_IPI_mask(const struct cpumask *, int, int);
 unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
@@ -113,7 +113,7 @@ unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 void smp_send_reschedule(int cpu_id)
 {
 	WARN_ON(cpu_is_offline(cpu_id));
-	send_IPI_mask(cpumask_of_cpu(cpu_id), RESCHEDULE_IPI, 1);
+	send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1);
 }
 
 /*==========================================================================*
@@ -168,7 +168,7 @@ void smp_flush_cache_all(void)
 	spin_lock(&flushcache_lock);
 	mask=cpus_addr(cpumask);
 	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
-	send_IPI_mask(cpumask, INVALIDATE_CACHE_IPI, 0);
+	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
 	_flush_cache_copyback_all();
 	while (flushcache_cpumask)
 		mb();
@@ -264,7 +264,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 	preempt_disable();
 	cpu_id = smp_processor_id();
 	mmc = &mm->context[cpu_id];
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm_cpumask(mm);
 	cpu_clear(cpu_id, cpu_mask);
 
 	if (*mmc != NO_CONTEXT) {
@@ -273,7 +273,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 		if (mm == current->mm)
 			activate_context(mm);
 		else
-			cpu_clear(cpu_id, mm->cpu_vm_mask);
+			cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
 		local_irq_restore(flags);
 	}
 	if (!cpus_empty(cpu_mask))
@@ -334,7 +334,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 	preempt_disable();
 	cpu_id = smp_processor_id();
 	mmc = &mm->context[cpu_id];
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm_cpumask(mm);
 	cpu_clear(cpu_id, cpu_mask);
 
 #ifdef DEBUG_SMP
@@ -424,7 +424,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	 * We have to send the IPI only to
 	 * CPUs affected.
 	 */
-	send_IPI_mask(cpumask, INVALIDATE_TLB_IPI, 0);
+	send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
 
 	while (!cpus_empty(flush_cpumask)) {
 		/* nothing. lockup detection does not belong here */
@@ -469,7 +469,7 @@ void smp_invalidate_interrupt(void)
 		if (flush_mm == current->active_mm)
 			activate_context(flush_mm);
 		else
-			cpu_clear(cpu_id, flush_mm->cpu_vm_mask);
+			cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
 	} else {
 		unsigned long va = flush_va;
 
@@ -546,14 +546,14 @@ static void stop_this_cpu(void *dummy)
 	for ( ; ; );
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0);
+	send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0);
 }
 
 /*==========================================================================*
@@ -729,7 +729,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
 	cpumask = cpu_online_map;
 	cpu_clear(smp_processor_id(), cpumask);
 
-	send_IPI_mask(cpumask, ipi_num, try);
+	send_IPI_mask(&cpumask, ipi_num, try);
 }
 
 /*==========================================================================*
@@ -752,7 +752,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
  * ---------- --- --------------------------------------------------------
  *
  *==========================================================================*/
-static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
+static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
 {
 	cpumask_t physid_mask, tmp;
 	int cpu_id, phys_id;
@@ -761,11 +761,11 @@ static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
 	if (num_cpus <= 1)	/* NO MP */
 		return;
 
-	cpus_and(tmp, cpumask, cpu_online_map);
-	BUG_ON(!cpus_equal(cpumask, tmp));
+	cpumask_and(&tmp, cpumask, cpu_online_mask);
+	BUG_ON(!cpumask_equal(cpumask, &tmp));
 
 	physid_mask = CPU_MASK_NONE;
-	for_each_cpu_mask(cpu_id, cpumask){
+	for_each_cpu(cpu_id, cpumask) {
 		if ((phys_id = cpu_to_physid(cpu_id)) != -1)
 			cpu_set(phys_id, physid_mask);
 	}
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 655ea1c47a0f..e034844cfc0d 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -178,7 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
 		physid_set(phys_id, phys_cpu_present_map);
 #ifndef CONFIG_HOTPLUG_CPU
-	cpu_present_map = cpu_possible_map;
+	init_cpu_present(&cpu_possible_map);
 #endif
 
 	show_mp_info(nr_cpu);
diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c
index f34ff8601942..379a664809b0 100644
--- a/arch/mips/alchemy/common/time.c
+++ b/arch/mips/alchemy/common/time.c
@@ -88,7 +88,7 @@ static struct clock_event_device au1x_rtcmatch2_clockdev = {
 	.irq		= AU1000_RTC_MATCH2_INT,
 	.set_next_event	= au1x_rtcmatch2_set_next_event,
 	.set_mode	= au1x_rtcmatch2_set_mode,
-	.cpumask	= CPU_MASK_ALL_PTR,
+	.cpumask	= cpu_all_mask,
 };
 
 static struct irqaction au1x_rtcmatch2_irqaction = {
diff --git a/arch/mips/include/asm/mach-ip27/topology.h b/arch/mips/include/asm/mach-ip27/topology.h
index 230591707005..f6837422fe65 100644
--- a/arch/mips/include/asm/mach-ip27/topology.h
+++ b/arch/mips/include/asm/mach-ip27/topology.h
@@ -24,12 +24,10 @@ extern struct cpuinfo_ip27 sn_cpu_info[NR_CPUS];
 
 #define cpu_to_node(cpu)	(sn_cpu_info[(cpu)].p_nodeid)
 #define parent_node(node)	(node)
-#define node_to_cpumask(node)	(hub_data(node)->h_cpus)
 #define cpumask_of_node(node)	(&hub_data(node)->h_cpus)
 struct pci_bus;
 extern int pcibus_to_node(struct pci_bus *);
 
-#define pcibus_to_cpumask(bus)	(cpu_online_map)
 #define cpumask_of_pcibus(bus)	(cpu_online_mask)
 
 extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index d3bea88d8744..d9743536a621 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -178,8 +178,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	 * Mark current->active_mm as not "active" anymore.
 	 * We don't want to mislead possible IPI tlb flush routines.
 	 */
-	cpu_clear(cpu, prev->cpu_vm_mask);
-	cpu_set(cpu, next->cpu_vm_mask);
+	cpumask_clear_cpu(cpu, mm_cpumask(prev));
+	cpumask_set_cpu(cpu, mm_cpumask(next));
 
 	local_irq_restore(flags);
 }
@@ -235,8 +235,8 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
 	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
 
 	/* mark mmu ownership change */
-	cpu_clear(cpu, prev->cpu_vm_mask);
-	cpu_set(cpu, next->cpu_vm_mask);
+	cpumask_clear_cpu(cpu, mm_cpumask(prev));
+	cpumask_set_cpu(cpu, mm_cpumask(next));
 
 	local_irq_restore(flags);
 }
@@ -258,7 +258,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
 
 	local_irq_save(flags);
 
-	if (cpu_isset(cpu, mm->cpu_vm_mask)) {
+	if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
 		get_new_mmu_context(mm, cpu);
 #ifdef CONFIG_MIPS_MT_SMTC
 		/* See comments for similar code above */
diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h
index fd545547b8aa..9e09af34c8a8 100644
--- a/arch/mips/include/asm/smp-ops.h
+++ b/arch/mips/include/asm/smp-ops.h
@@ -19,7 +19,7 @@ struct task_struct;
 
 struct plat_smp_ops {
 	void (*send_ipi_single)(int cpu, unsigned int action);
-	void (*send_ipi_mask)(cpumask_t mask, unsigned int action);
+	void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action);
 	void (*init_secondary)(void);
 	void (*smp_finish)(void);
 	void (*cpus_done)(void);
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index aaa2d4ab26dc..e15f11a09311 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -78,6 +78,6 @@ extern void play_dead(void);
 extern asmlinkage void smp_call_function_interrupt(void);
 
 extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 #endif /* __ASM_SMP_H */
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index ad0ff5dc4d59..cc81771b882c 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -80,11 +80,11 @@ void cmp_send_ipi_single(int cpu, unsigned int action)
 	local_irq_restore(flags);
 }
 
-static void cmp_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void cmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 {
 	unsigned int i;
 
-	for_each_cpu_mask(i, mask)
+	for_each_cpu(i, mask)
 		cmp_send_ipi_single(i, action);
 }
 
@@ -171,7 +171,7 @@ void __init cmp_smp_setup(void)
 
 	for (i = 1; i < NR_CPUS; i++) {
 		if (amon_cpu_avail(i)) {
-			cpu_set(i, cpu_possible_map);
+			set_cpu_possible(i, true);
 			__cpu_number_map[i]	= ++ncpu;
 			__cpu_logical_map[ncpu]	= i;
 		}
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 6f7ee5ac46ee..43e7cdc5ded2 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -70,7 +70,7 @@ static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
 		write_vpe_c0_vpeconf0(tmp);
 
 		/* Record this as available CPU */
-		cpu_set(tc, cpu_possible_map);
+		set_cpu_possible(tc, true);
 		__cpu_number_map[tc]	= ++ncpu;
 		__cpu_logical_map[ncpu]	= tc;
 	}
@@ -141,11 +141,11 @@ static void vsmp_send_ipi_single(int cpu, unsigned int action)
 	local_irq_restore(flags);
 }
 
-static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 {
 	unsigned int i;
 
-	for_each_cpu_mask(i, mask)
+	for_each_cpu(i, mask)
 		vsmp_send_ipi_single(i, action);
 }
 
diff --git a/arch/mips/kernel/smp-up.c b/arch/mips/kernel/smp-up.c
index 2508d55d68fd..00500fea2750 100644
--- a/arch/mips/kernel/smp-up.c
+++ b/arch/mips/kernel/smp-up.c
@@ -18,7 +18,8 @@ static void up_send_ipi_single(int cpu, unsigned int action)
 	panic(KERN_ERR "%s called", __func__);
 }
 
-static inline void up_send_ipi_mask(cpumask_t mask, unsigned int action)
+static inline void up_send_ipi_mask(const struct cpumask *mask,
+				    unsigned int action)
 {
 	panic(KERN_ERR "%s called", __func__);
 }
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 64668a93248b..4eb106c6a3ec 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -128,7 +128,7 @@ asmlinkage __cpuinit void start_secondary(void)
 	cpu_idle();
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
 }
@@ -183,15 +183,15 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	mp_ops->prepare_cpus(max_cpus);
 	set_cpu_sibling_map(0);
 #ifndef CONFIG_HOTPLUG_CPU
-	cpu_present_map = cpu_possible_map;
+	init_cpu_present(&cpu_possible_map);
 #endif
 }
 
 /* preload SMP state for boot cpu */
 void __devinit smp_prepare_boot_cpu(void)
 {
-	cpu_set(0, cpu_possible_map);
-	cpu_set(0, cpu_online_map);
+	set_cpu_possible(0, true);
+	set_cpu_online(0, true);
 	cpu_set(0, cpu_callin_map);
 }
 
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 1a466baf0edf..67153a0dc267 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -305,7 +305,7 @@ int __init smtc_build_cpu_map(int start_cpu_slot)
 	 */
 	ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
 	for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
-		cpu_set(i, cpu_possible_map);
+		set_cpu_possible(i, true);
 		__cpu_number_map[i] = i;
 		__cpu_logical_map[i] = i;
 	}
@@ -525,8 +525,8 @@ void smtc_prepare_cpus(int cpus)
 	 * Pull any physically present but unused TCs out of circulation.
 	 */
 	while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
-		cpu_clear(tc, cpu_possible_map);
-		cpu_clear(tc, cpu_present_map);
+		set_cpu_possible(tc, false);
+		set_cpu_present(tc, false);
 		tc++;
 	}
 
diff --git a/arch/mips/mipssim/sim_smtc.c b/arch/mips/mipssim/sim_smtc.c
index d6e4f656ad14..5da30b6a65b7 100644
--- a/arch/mips/mipssim/sim_smtc.c
+++ b/arch/mips/mipssim/sim_smtc.c
@@ -43,11 +43,12 @@ static void ssmtc_send_ipi_single(int cpu, unsigned int action)
43 /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ 43 /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
44} 44}
45 45
46static inline void ssmtc_send_ipi_mask(cpumask_t mask, unsigned int action) 46static inline void ssmtc_send_ipi_mask(const struct cpumask *mask,
47 unsigned int action)
47{ 48{
48 unsigned int i; 49 unsigned int i;
49 50
50 for_each_cpu_mask(i, mask) 51 for_each_cpu(i, mask)
51 ssmtc_send_ipi_single(i, action); 52 ssmtc_send_ipi_single(i, action);
52} 53}
53 54
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 10ab69f7183f..94e05e5733c1 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -79,7 +79,7 @@ static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
 	 * cores it has been used on
 	 */
 	if (vma)
-		mask = vma->vm_mm->cpu_vm_mask;
+		mask = *mm_cpumask(vma->vm_mm);
 	else
 		mask = cpu_online_map;
 	cpu_clear(cpu, mask);
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index 499ffe5475df..192cfd2a539c 100644
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -21,11 +21,11 @@ static void msmtc_send_ipi_single(int cpu, unsigned int action)
 	smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
 }
 
-static void msmtc_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void msmtc_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 {
 	unsigned int i;
 
-	for_each_cpu_mask(i, mask)
+	for_each_cpu(i, mask)
 		msmtc_send_ipi_single(i, action);
 }
 
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c
index 8ace27716232..326fe7a392e8 100644
--- a/arch/mips/pmc-sierra/yosemite/smp.c
+++ b/arch/mips/pmc-sierra/yosemite/smp.c
@@ -97,11 +97,11 @@ static void yos_send_ipi_single(int cpu, unsigned int action)
 	}
 }
 
-static void yos_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void yos_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 {
 	unsigned int i;
 
-	for_each_cpu_mask(i, mask)
+	for_each_cpu(i, mask)
 		yos_send_ipi_single(i, action);
 }
 
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 060d853d7b35..f61c164d1e67 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -421,7 +421,7 @@ static void __init node_mem_init(cnodeid_t node)
 
 /*
  * A node with nothing.  We use it to avoid any special casing in
- * node_to_cpumask
+ * cpumask_of_node
  */
 static struct node_data null_node = {
 	.hub = {
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index cbcd7eb83bd1..9aa8f2951df6 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -165,11 +165,11 @@ static void ip27_send_ipi_single(int destid, unsigned int action)
 	REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq);
 }
 
-static void ip27_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void ip27_send_ipi(const struct cpumask *mask, unsigned int action)
 {
 	unsigned int i;
 
-	for_each_cpu_mask(i, mask)
+	for_each_cpu(i, mask)
 		ip27_send_ipi_single(i, action);
 }
 
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index 314691648c97..47b347c992ea 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -82,11 +82,12 @@ static void bcm1480_send_ipi_single(int cpu, unsigned int action)
 	__raw_writeq((((u64)action)<< 48), mailbox_0_set_regs[cpu]);
 }
 
-static void bcm1480_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void bcm1480_send_ipi_mask(const struct cpumask *mask,
+				  unsigned int action)
 {
 	unsigned int i;
 
-	for_each_cpu_mask(i, mask)
+	for_each_cpu(i, mask)
 		bcm1480_send_ipi_single(i, action);
 }
 
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index cad14003b84f..c00a5cb1128d 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -70,11 +70,12 @@ static void sb1250_send_ipi_single(int cpu, unsigned int action)
 	__raw_writeq((((u64)action) << 48), mailbox_set_regs[cpu]);
 }
 
-static inline void sb1250_send_ipi_mask(cpumask_t mask, unsigned int action)
+static inline void sb1250_send_ipi_mask(const struct cpumask *mask,
+					unsigned int action)
 {
 	unsigned int i;
 
-	for_each_cpu_mask(i, mask)
+	for_each_cpu(i, mask)
 		sb1250_send_ipi_single(i, action);
 }
 
diff --git a/arch/mn10300/include/asm/mmu_context.h b/arch/mn10300/include/asm/mmu_context.h
index a9e2e34f69b0..cb294c244de3 100644
--- a/arch/mn10300/include/asm/mmu_context.h
+++ b/arch/mn10300/include/asm/mmu_context.h
@@ -38,13 +38,13 @@ extern unsigned long mmu_context_cache[NR_CPUS];
 #define enter_lazy_tlb(mm, tsk)	do {} while (0)
 
 #ifdef CONFIG_SMP
-#define cpu_ran_vm(cpu, task) \
-	cpu_set((cpu), (task)->cpu_vm_mask)
-#define cpu_maybe_ran_vm(cpu, task) \
-	cpu_test_and_set((cpu), (task)->cpu_vm_mask)
+#define cpu_ran_vm(cpu, mm) \
+	cpumask_set_cpu((cpu), mm_cpumask(mm))
+#define cpu_maybe_ran_vm(cpu, mm) \
+	cpumask_test_and_set_cpu((cpu), mm_cpumask(mm))
 #else
-#define cpu_ran_vm(cpu, task) do {} while (0)
-#define cpu_maybe_ran_vm(cpu, task) true
+#define cpu_ran_vm(cpu, mm) do {} while (0)
+#define cpu_maybe_ran_vm(cpu, mm) true
 #endif /* CONFIG_SMP */
 
 /*
diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h
index 21eb45a52629..2e73623feb6b 100644
--- a/arch/parisc/include/asm/smp.h
+++ b/arch/parisc/include/asm/smp.h
@@ -30,7 +30,6 @@ extern void smp_send_all_nop(void);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
 
 #endif /* !ASSEMBLY */
 
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index c0d3b8af9319..d9ea8d39c342 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -146,7 +146,7 @@ extern void smp_generic_take_timebase(void);
 extern struct smp_ops_t *smp_ops;
 
 extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 /* Definitions relative to the secondary CPU spin loop
  * and entry point. Not all of them exist on both 32 and
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 394edcbcce71..22f738d12ad9 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -17,11 +17,6 @@ static inline int cpu_to_node(int cpu)
 
 #define parent_node(node)	(node)
 
-static inline cpumask_t node_to_cpumask(int node)
-{
-	return numa_cpumask_lookup_table[node];
-}
-
 #define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])
 
 int of_node_to_nid(struct device_node *device);
@@ -36,11 +31,6 @@ static inline int pcibus_to_node(struct pci_bus *bus)
 }
 #endif
 
-#define pcibus_to_cpumask(bus)	(pcibus_to_node(bus) == -1 ? \
-				 CPU_MASK_ALL : \
-				 node_to_cpumask(pcibus_to_node(bus)) \
-				)
-
 #define cpumask_of_pcibus(bus)	(pcibus_to_node(bus) == -1 ? \
 				 cpu_all_mask : \
 				 cpumask_of_node(pcibus_to_node(bus)))
@@ -104,8 +94,6 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
 #ifdef CONFIG_PPC64
 #include <asm/smp.h>
 
-#define topology_thread_siblings(cpu)	(per_cpu(cpu_sibling_map, cpu))
-#define topology_core_siblings(cpu)	(per_cpu(cpu_core_map, cpu))
 #define topology_thread_cpumask(cpu)	(&per_cpu(cpu_sibling_map, cpu))
 #define topology_core_cpumask(cpu)	(&per_cpu(cpu_core_map, cpu))
 #define topology_core_id(cpu)		(cpu_to_core_id(cpu))
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 74cd1a7d0d4b..4271f7a655a3 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -431,9 +431,9 @@ void __init smp_setup_cpu_maps(void)
 		for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
 			DBG("    thread %d -> cpu %d (hard id %d)\n",
 			    j, cpu, intserv[j]);
-			cpu_set(cpu, cpu_present_map);
+			set_cpu_present(cpu, true);
 			set_hard_smp_processor_id(cpu, intserv[j]);
-			cpu_set(cpu, cpu_possible_map);
+			set_cpu_possible(cpu, true);
 			cpu++;
 		}
 	}
@@ -479,7 +479,7 @@ void __init smp_setup_cpu_maps(void)
 			maxcpus);
 
 		for (cpu = 0; cpu < maxcpus; cpu++)
-			cpu_set(cpu, cpu_possible_map);
+			set_cpu_possible(cpu, true);
 	out:
 		of_node_put(dn);
 	}
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index d387b3937ccc..9b86a74d2815 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -189,11 +189,11 @@ void arch_send_call_function_single_ipi(int cpu)
 	smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	unsigned int cpu;
 
-	for_each_cpu_mask(cpu, mask)
+	for_each_cpu(cpu, mask)
 		smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
 }
 
@@ -287,7 +287,7 @@ void __devinit smp_prepare_boot_cpu(void)
 {
 	BUG_ON(smp_processor_id() != boot_cpuid);
 
-	cpu_set(boot_cpuid, cpu_online_map);
+	set_cpu_online(boot_cpuid, true);
 	cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
 	cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
 #ifdef CONFIG_PPC64
@@ -307,7 +307,7 @@ int generic_cpu_disable(void)
 	if (cpu == boot_cpuid)
 		return -EBUSY;
 
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 #ifdef CONFIG_PPC64
 	vdso_data->processorCount--;
 	fixup_irqs(cpu_online_map);
@@ -361,7 +361,7 @@ void generic_mach_cpu_die(void)
 	smp_wmb();
 	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
 		cpu_relax();
-	cpu_set(cpu, cpu_online_map);
+	set_cpu_online(cpu, true);
 	local_irq_enable();
 }
 #endif
@@ -508,7 +508,7 @@ int __devinit start_secondary(void *unused)
 
 	ipi_call_lock();
 	notify_cpu_starting(cpu);
-	cpu_set(cpu, cpu_online_map);
+	set_cpu_online(cpu, true);
 	/* Update sibling maps */
 	base = cpu_first_thread_in_core(cpu);
 	for (i = 0; i < threads_per_core; i++) {
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 937a38e73178..b40c22d697f0 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -320,7 +320,7 @@ static int __init smp_psurge_probe(void)
 	if (ncpus > NR_CPUS)
 		ncpus = NR_CPUS;
 	for (i = 1; i < ncpus ; ++i)
-		cpu_set(i, cpu_present_map);
+		set_cpu_present(i, true);
 
 	if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
 
@@ -867,7 +867,7 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
 
 int smp_core99_cpu_disable(void)
 {
-	cpu_clear(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), false);
 
 	/* XXX reset cpu affinity here */
 	mpic_cpu_set_priority(0xf);
@@ -952,7 +952,7 @@ void __init pmac_setup_smp(void)
 		int cpu;
 
 		for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu)
-			cpu_set(cpu, cpu_possible_map);
+			set_cpu_possible(cpu, true);
 		smp_ops = &psurge_smp_ops;
 	}
 #endif /* CONFIG_PPC32 */
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index a20ead87153d..ebff6d9a4e39 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -94,7 +94,7 @@ static int pseries_cpu_disable(void)
 {
 	int cpu = smp_processor_id();
 
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 	vdso_data->processorCount--;
 
 	/*fix boot_cpuid here*/
@@ -185,7 +185,7 @@ static int pseries_add_processor(struct device_node *np)
 
 	for_each_cpu_mask(cpu, tmp) {
 		BUG_ON(cpu_isset(cpu, cpu_present_map));
-		cpu_set(cpu, cpu_present_map);
+		set_cpu_present(cpu, true);
 		set_hard_smp_processor_id(cpu, *intserv++);
 	}
 	err = 0;
@@ -217,7 +217,7 @@ static void pseries_remove_processor(struct device_node *np)
 		if (get_hard_smp_processor_id(cpu) != intserv[i])
 			continue;
 		BUG_ON(cpu_online(cpu));
-		cpu_clear(cpu, cpu_present_map);
+		set_cpu_present(cpu, false);
 		set_hard_smp_processor_id(cpu, -1);
 		break;
 	}
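
[Note: set_cpu_online()/set_cpu_present() are the accessors these hunks substitute for direct cpu_set()/cpu_clear() on cpu_online_map/cpu_present_map. A sketch of what the helper does — an approximation, not the kernel's exact definition:

	/* Approximate shape of the helper in kernel/cpu.c. */
	void set_cpu_online(unsigned int cpu, bool online)
	{
		if (online)
			cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		else
			cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}

set_cpu_present() and set_cpu_possible() follow the same pattern over their respective bitmaps, keeping all writers behind one interface.]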
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index c991fe6473c9..a868b272c257 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -62,7 +62,7 @@ extern struct mutex smp_cpu_state_mutex;
 extern int smp_cpu_polarization[];
 
 extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 #endif
 
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 5e0ad618dc45..6e7211abd950 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -9,7 +9,6 @@ const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
 
 extern cpumask_t cpu_core_map[NR_CPUS];
 
-#define topology_core_siblings(cpu)	(cpu_core_map[cpu])
 #define topology_core_cpumask(cpu)	(&cpu_core_map[cpu])
 
 int topology_set_cpu_management(int fc);
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index b4b6396e6cf0..c932caa5e850 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -147,11 +147,11 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
 		udelay(10);
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	int cpu;
 
-	for_each_cpu_mask(cpu, mask)
+	for_each_cpu(cpu, mask)
 		smp_ext_bitcall(cpu, ec_call_function);
 }
 
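
[Note: the s390 conversion shows the IPI pattern used throughout this series: arch_send_call_function_ipi_mask() takes a const struct cpumask * instead of a cpumask_t by value, so no NR_CPUS-bit copy lands on the stack, and for_each_cpu() walks the mask through the pointer. A minimal caller sketch; send_one_ipi() is hypothetical:

	static void example_send_ipis(const struct cpumask *mask)
	{
		int cpu;

		/* Walk set bits through the pointer; nothing is copied. */
		for_each_cpu(cpu, mask)
			send_one_ipi(cpu);	/* hypothetical per-cpu send */
	}

Callers pass pointers directly, e.g. example_send_ipis(cpu_online_mask).]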
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h
index ca64f43abe67..53ef26ced75f 100644
--- a/arch/sh/include/asm/smp.h
+++ b/arch/sh/include/asm/smp.h
@@ -44,7 +44,6 @@ void plat_send_ipi(unsigned int cpu, unsigned int message);
 
 void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
 
 #else
 
diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h
index f8c40cc65054..65e7bd2f2240 100644
--- a/arch/sh/include/asm/topology.h
+++ b/arch/sh/include/asm/topology.h
@@ -31,7 +31,6 @@
 #define cpu_to_node(cpu)	((void)(cpu),0)
 #define parent_node(node)	((void)(node),0)
 
-#define node_to_cpumask(node)	((void)node, cpu_online_map)
 #define cpumask_of_node(node)	((void)node, cpu_online_mask)
 
 #define pcibus_to_node(bus)	((void)(bus), -1)
diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h
index becb6bf353a9..f49e11cd4ded 100644
--- a/arch/sparc/include/asm/smp_64.h
+++ b/arch/sparc/include/asm/smp_64.h
@@ -36,7 +36,6 @@ extern int sparc64_multi_core;
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
 
 /*
  * General functions that each host system must provide.
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index 26cd25c08399..600a79035fa1 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -12,22 +12,8 @@ static inline int cpu_to_node(int cpu)
 
 #define parent_node(node)	(node)
 
-static inline cpumask_t node_to_cpumask(int node)
-{
-	return numa_cpumask_lookup_table[node];
-}
 #define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])
 
-/*
- * Returns a pointer to the cpumask of CPUs on Node 'node'.
- * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
- */
-#define node_to_cpumask_ptr(v, node)		\
-		cpumask_t *v = &(numa_cpumask_lookup_table[node])
-
-#define node_to_cpumask_ptr_next(v, node)	\
-		v = &(numa_cpumask_lookup_table[node])
-
 struct pci_bus;
 #ifdef CONFIG_PCI
 extern int pcibus_to_node(struct pci_bus *pbus);
@@ -71,8 +57,6 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
 #ifdef CONFIG_SMP
 #define topology_physical_package_id(cpu)	(cpu_data(cpu).proc_id)
 #define topology_core_id(cpu)			(cpu_data(cpu).core_id)
-#define topology_core_siblings(cpu)		(cpu_core_map[cpu])
-#define topology_thread_siblings(cpu)		(per_cpu(cpu_sibling_map, cpu))
 #define topology_core_cpumask(cpu)		(&cpu_core_map[cpu])
 #define topology_thread_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
 #define mc_capable()				(sparc64_multi_core)
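
[Note: node_to_cpumask() handed back a whole cpumask_t by value; cpumask_of_node() returns a pointer into the existing table, which is cheaper and stays viable as NR_CPUS grows. Illustrative contrast, assuming the sparc64 numa_cpumask_lookup_table above:

	/* Old style: copies sizeof(cpumask_t) bytes onto the stack. */
	cpumask_t mask = node_to_cpumask(node);

	/* New style: read-only view of the table entry, no copy. */
	const struct cpumask *maskp = cpumask_of_node(node);

	if (cpumask_test_cpu(cpu, maskp))
		/* cpu belongs to this node */;
]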
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index 54f42e8b0105..34d813011b7a 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -35,8 +35,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	unsigned cpu = smp_processor_id();
 
 	if(prev != next){
-		cpu_clear(cpu, prev->cpu_vm_mask);
-		cpu_set(cpu, next->cpu_vm_mask);
+		cpumask_clear_cpu(cpu, mm_cpumask(prev));
+		cpumask_set_cpu(cpu, mm_cpumask(next));
 		if(next != &init_mm)
 			__switch_mm(&next->context.id);
 	}
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
index 98351c78bc81..106bf27e2a9a 100644
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -111,7 +111,7 @@ void smp_prepare_cpus(unsigned int maxcpus)
 	int i;
 
 	for (i = 0; i < ncpus; ++i)
-		cpu_set(i, cpu_possible_map);
+		set_cpu_possible(i, true);
 
 	cpu_clear(me, cpu_online_map);
 	cpu_set(me, cpu_online_map);
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index f923203dc39a..4a2d4e0c18d9 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -37,12 +37,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
 	if (likely(prev != next)) {
 		/* stop flush ipis for the previous mm */
-		cpu_clear(cpu, prev->cpu_vm_mask);
+		cpumask_clear_cpu(cpu, mm_cpumask(prev));
 #ifdef CONFIG_SMP
 		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		percpu_write(cpu_tlbstate.active_mm, next);
 #endif
-		cpu_set(cpu, next->cpu_vm_mask);
+		cpumask_set_cpu(cpu, mm_cpumask(next));
 
 		/* Re-load page tables */
 		load_cr3(next->pgd);
@@ -58,7 +58,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
 
-		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
+		if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
 			/* We were in lazy tlb mode and leave_mm disabled
 			 * tlb flush IPI delivery. We must reload CR3
 			 * to make sure to use no freed page tables.
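
[Note: mm_cpumask() is the accessor substituted for direct mm->cpu_vm_mask references throughout these x86 hunks; it yields a struct cpumask * that the cpumask_*() operators accept. Roughly — an approximate definition, not quoted from the tree:

	/* Approximate accessor; cpu_vm_mask was still a cpumask_t here. */
	static inline struct cpumask *mm_cpumask(struct mm_struct *mm)
	{
		return (struct cpumask *)&mm->cpu_vm_mask;
	}

Keeping callers behind the accessor lets the representation of cpu_vm_mask change later without touching every user.]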
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 6a84ed166aec..1e796782cd7b 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -121,7 +121,6 @@ static inline void arch_send_call_function_single_ipi(int cpu)
 	smp_ops.send_call_func_single_ipi(cpu);
 }
 
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
 static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	smp_ops.send_call_func_ipi(mask);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 64970b9885f2..dc69f28489f5 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -227,17 +227,14 @@ static struct irq_cfg *get_one_free_irq_cfg(int node)
 
 	cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
 	if (cfg) {
-		if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
+		if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
 			kfree(cfg);
 			cfg = NULL;
-		} else if (!alloc_cpumask_var_node(&cfg->old_domain,
+		} else if (!zalloc_cpumask_var_node(&cfg->old_domain,
 							  GFP_ATOMIC, node)) {
 			free_cpumask_var(cfg->domain);
 			kfree(cfg);
 			cfg = NULL;
-		} else {
-			cpumask_clear(cfg->domain);
-			cpumask_clear(cfg->old_domain);
 		}
 	}
 
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 71f1d99a635d..ec6ef60cbd17 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -67,8 +67,8 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 #ifdef CONFIG_SMP
 	preempt_disable();
 	load_LDT(pc);
-	if (!cpus_equal(current->mm->cpu_vm_mask,
-			cpumask_of_cpu(smp_processor_id())))
+	if (!cpumask_equal(mm_cpumask(current->mm),
+			   cpumask_of(smp_processor_id())))
 		smp_call_function(flush_ldt, current->mm, 1);
 	preempt_enable();
 #else
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 847ab4160315..5284cd2b5776 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -555,10 +555,8 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 void __init init_c1e_mask(void)
 {
 	/* If we're using c1e_idle, we need to allocate c1e_mask. */
-	if (pm_idle == c1e_idle) {
-		alloc_cpumask_var(&c1e_mask, GFP_KERNEL);
-		cpumask_clear(c1e_mask);
-	}
+	if (pm_idle == c1e_idle)
+		zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
 }
 
 static int __init idle_setup(char *str)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 09c5e077dff7..565ebc65920e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1059,12 +1059,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 #endif
 	current_thread_info()->cpu = 0;  /* needed? */
 	for_each_possible_cpu(i) {
-		alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
-		alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
-		alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
-		cpumask_clear(per_cpu(cpu_core_map, i));
-		cpumask_clear(per_cpu(cpu_sibling_map, i));
-		cpumask_clear(cpu_data(i).llc_shared_map);
+		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
 	}
 	set_cpu_sibling_map(0);
 
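
[Note: zalloc_cpumask_var() and zalloc_cpumask_var_node() are the allocate-and-zero variants used above: each folds the old alloc_cpumask_var() + cpumask_clear() pair into one call, and behaves the same whether CONFIG_CPUMASK_OFFSTACK makes cpumask_var_t a real allocation or a plain array. Usage sketch; scratch_mask and example_init() are hypothetical:

	static cpumask_var_t scratch_mask;

	static int __init example_init(void)
	{
		/* Allocates (when off-stack) and zeroes in one step. */
		if (!zalloc_cpumask_var(&scratch_mask, GFP_KERNEL))
			return -ENOMEM;
		return 0;
	}
]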
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index e293ac56c723..dcb00d278512 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -93,7 +93,6 @@ static struct irqaction irq0 = {
 
 void __init setup_default_timer_irq(void)
 {
-	irq0.mask = cpumask_of_cpu(0);
 	setup_irq(0, &irq0);
 }
 
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index c814e144a3f0..36fe08eeb5c3 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -59,7 +59,8 @@ void leave_mm(int cpu)
 {
 	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
 		BUG();
-	cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask);
+	cpumask_clear_cpu(cpu,
+			  mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
 	load_cr3(swapper_pg_dir);
 }
 EXPORT_SYMBOL_GPL(leave_mm);
@@ -234,8 +235,8 @@ void flush_tlb_current_task(void)
 	preempt_disable();
 
 	local_flush_tlb();
-	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
+	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
 	preempt_enable();
 }
 
@@ -249,8 +250,8 @@ void flush_tlb_mm(struct mm_struct *mm)
 		else
 			leave_mm(smp_processor_id());
 	}
-	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
+	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
 
 	preempt_enable();
 }
@@ -268,8 +269,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 			leave_mm(smp_processor_id());
 	}
 
-	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(&mm->cpu_vm_mask, mm, va);
+	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(mm_cpumask(mm), mm, va);
 
 	preempt_enable();
 }
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 093dd59b5385..3bf7b1d250ce 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1165,14 +1165,14 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
 	/* Get the "official" set of cpus referring to our pagetable. */
 	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
 		for_each_online_cpu(cpu) {
-			if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask)
+			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
 			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
 				continue;
 			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
 		}
 		return;
 	}
-	cpumask_copy(mask, &mm->cpu_vm_mask);
+	cpumask_copy(mask, mm_cpumask(mm));
 
 	/* It's possible that a vcpu may have a stale reference to our
 	   cr3, because its in lazy mode, and it hasn't yet flushed