author    Linus Torvalds <torvalds@linux-foundation.org>  2009-09-23 21:14:11 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-09-23 21:14:11 -0400
commit    94a8d5caba74211ec76dac80fc6e2d5c391530df (patch)
tree      21d17d214a354ae00ae27217d82b67bfc5bff3a3
parent    2bcd57ab61e7cabed626226a3771617981c11ce1 (diff)
parent    6ba2ef7baac23a5d9bb85e28b882d16b439a2293 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus
* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus: (39 commits)
  cpumask: Move deprecated functions to end of header.
  cpumask: remove unused deprecated functions, avoid accusations of insanity
  cpumask: use new-style cpumask ops in mm/quicklist.
  cpumask: use mm_cpumask() wrapper: x86
  cpumask: use mm_cpumask() wrapper: um
  cpumask: use mm_cpumask() wrapper: mips
  cpumask: use mm_cpumask() wrapper: mn10300
  cpumask: use mm_cpumask() wrapper: m32r
  cpumask: use mm_cpumask() wrapper: arm
  cpumask: Use accessors for cpu_*_mask: um
  cpumask: Use accessors for cpu_*_mask: powerpc
  cpumask: Use accessors for cpu_*_mask: mips
  cpumask: Use accessors for cpu_*_mask: m32r
  cpumask: remove arch_send_call_function_ipi
  cpumask: arch_send_call_function_ipi_mask: s390
  cpumask: arch_send_call_function_ipi_mask: powerpc
  cpumask: arch_send_call_function_ipi_mask: mips
  cpumask: arch_send_call_function_ipi_mask: m32r
  cpumask: arch_send_call_function_ipi_mask: alpha
  cpumask: remove obsolete topology_core_siblings and topology_thread_siblings: ia64
  ...
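The pattern repeated throughout the diff below is the move from the old value-based cpumask API to the new pointer-based accessors. A rough before/after sketch of that pattern, not lifted from any single file (do_something() is a placeholder helper, and the includes assume an in-tree SMP build of this era):

#include <linux/cpumask.h>
#include <linux/mm_types.h>
#include <linux/sched.h>

static void do_something(int cpu) { }   /* placeholder for per-cpu work */

/* Old style: cpumask_t copied by value, mm->cpu_vm_mask poked directly. */
static void old_style(struct mm_struct *mm, int cpu)
{
    cpumask_t mask = mm->cpu_vm_mask;   /* whole NR_CPUS-bit copy on the stack */
    int i;

    if (cpu_isset(cpu, mask))
        cpu_clear(cpu, mm->cpu_vm_mask);
    for_each_cpu_mask(i, mask)
        do_something(i);
}

/* New style: const struct cpumask * plus the mm_cpumask() accessor, so the
 * mask can later be allocated at its real size instead of NR_CPUS bits. */
static void new_style(struct mm_struct *mm, int cpu)
{
    const struct cpumask *mask = mm_cpumask(mm);
    int i;

    if (cpumask_test_cpu(cpu, mask))
        cpumask_clear_cpu(cpu, mm_cpumask(mm));
    for_each_cpu(i, mask)
        do_something(i);
}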
-rw-r--r--  arch/alpha/include/asm/smp.h | 2
-rw-r--r--  arch/alpha/include/asm/topology.h | 18
-rw-r--r--  arch/alpha/kernel/smp.c | 14
-rw-r--r--  arch/arm/include/asm/cacheflush.h | 8
-rw-r--r--  arch/arm/include/asm/mmu_context.h | 7
-rw-r--r--  arch/arm/include/asm/smp.h | 1
-rw-r--r--  arch/arm/include/asm/tlbflush.h | 4
-rw-r--r--  arch/arm/kernel/smp.c | 10
-rw-r--r--  arch/arm/mm/context.c | 2
-rw-r--r--  arch/arm/mm/flush.c | 10
-rw-r--r--  arch/ia64/include/asm/smp.h | 1
-rw-r--r--  arch/ia64/include/asm/topology.h | 3
-rw-r--r--  arch/ia64/kernel/smp.c | 2
-rw-r--r--  arch/m32r/include/asm/mmu_context.h | 4
-rw-r--r--  arch/m32r/include/asm/smp.h | 2
-rw-r--r--  arch/m32r/kernel/smp.c | 30
-rw-r--r--  arch/m32r/kernel/smpboot.c | 2
-rw-r--r--  arch/mips/alchemy/common/time.c | 2
-rw-r--r--  arch/mips/include/asm/mach-ip27/topology.h | 2
-rw-r--r--  arch/mips/include/asm/mmu_context.h | 10
-rw-r--r--  arch/mips/include/asm/smp-ops.h | 2
-rw-r--r--  arch/mips/include/asm/smp.h | 2
-rw-r--r--  arch/mips/kernel/smp-cmp.c | 6
-rw-r--r--  arch/mips/kernel/smp-mt.c | 6
-rw-r--r--  arch/mips/kernel/smp-up.c | 3
-rw-r--r--  arch/mips/kernel/smp.c | 8
-rw-r--r--  arch/mips/kernel/smtc.c | 6
-rw-r--r--  arch/mips/mipssim/sim_smtc.c | 5
-rw-r--r--  arch/mips/mm/c-octeon.c | 2
-rw-r--r--  arch/mips/mti-malta/malta-smtc.c | 4
-rw-r--r--  arch/mips/pmc-sierra/yosemite/smp.c | 4
-rw-r--r--  arch/mips/sgi-ip27/ip27-memory.c | 2
-rw-r--r--  arch/mips/sgi-ip27/ip27-smp.c | 4
-rw-r--r--  arch/mips/sibyte/bcm1480/smp.c | 5
-rw-r--r--  arch/mips/sibyte/sb1250/smp.c | 5
-rw-r--r--  arch/mn10300/include/asm/mmu_context.h | 12
-rw-r--r--  arch/parisc/include/asm/smp.h | 1
-rw-r--r--  arch/powerpc/include/asm/smp.h | 2
-rw-r--r--  arch/powerpc/include/asm/topology.h | 12
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 6
-rw-r--r--  arch/powerpc/kernel/smp.c | 12
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c | 6
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-cpu.c | 6
-rw-r--r--  arch/s390/include/asm/smp.h | 2
-rw-r--r--  arch/s390/include/asm/topology.h | 1
-rw-r--r--  arch/s390/kernel/smp.c | 4
-rw-r--r--  arch/sh/include/asm/smp.h | 1
-rw-r--r--  arch/sh/include/asm/topology.h | 1
-rw-r--r--  arch/sparc/include/asm/smp_64.h | 1
-rw-r--r--  arch/sparc/include/asm/topology_64.h | 16
-rw-r--r--  arch/um/include/asm/mmu_context.h | 4
-rw-r--r--  arch/um/kernel/smp.c | 2
-rw-r--r--  arch/x86/include/asm/mmu_context.h | 6
-rw-r--r--  arch/x86/include/asm/smp.h | 1
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 7
-rw-r--r--  arch/x86/kernel/ldt.c | 4
-rw-r--r--  arch/x86/kernel/process.c | 6
-rw-r--r--  arch/x86/kernel/smpboot.c | 9
-rw-r--r--  arch/x86/kernel/time.c | 1
-rw-r--r--  arch/x86/mm/tlb.c | 15
-rw-r--r--  arch/x86/xen/mmu.c | 4
-rw-r--r--  drivers/acpi/osl.c | 2
-rw-r--r--  drivers/acpi/processor_perflib.c | 3
-rw-r--r--  drivers/acpi/processor_throttling.c | 3
-rw-r--r--  drivers/net/sfc/efx.c | 3
-rw-r--r--  drivers/oprofile/buffer_sync.c | 3
-rw-r--r--  include/asm-generic/topology.h | 17
-rw-r--r--  include/linux/cpumask.h | 709
-rw-r--r--  include/linux/interrupt.h | 2
-rw-r--r--  include/linux/sched.h | 3
-rw-r--r--  include/linux/smp.h | 11
-rw-r--r--  include/linux/topology.h | 6
-rw-r--r--  init/main.c | 5
-rw-r--r--  kernel/smp.c | 7
-rw-r--r--  kernel/trace/trace.c | 7
-rw-r--r--  mm/quicklist.c | 3
-rw-r--r--  virt/kvm/kvm_main.c | 3
77 files changed, 403 insertions, 724 deletions
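Most of the per-architecture IPI hunks below share one shape: arch_send_call_function_ipi_mask() now takes const struct cpumask * and walks it with for_each_cpu(). A minimal sketch of that shape, with send_one_ipi() standing in for whichever doorbell write the architecture actually uses:

#include <linux/cpumask.h>

static void send_one_ipi(int cpu) { }   /* placeholder for the arch doorbell */

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
    int cpu;

    for_each_cpu(cpu, mask)
        send_one_ipi(cpu);
}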
diff --git a/arch/alpha/include/asm/smp.h b/arch/alpha/include/asm/smp.h
index 547e90951cec..3f390e8cc0b3 100644
--- a/arch/alpha/include/asm/smp.h
+++ b/arch/alpha/include/asm/smp.h
@@ -47,7 +47,7 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS];
 extern int smp_num_cpus;
 
 extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 #else /* CONFIG_SMP */
 
diff --git a/arch/alpha/include/asm/topology.h b/arch/alpha/include/asm/topology.h
index b4f284c72ff3..36b3a30ba0e5 100644
--- a/arch/alpha/include/asm/topology.h
+++ b/arch/alpha/include/asm/topology.h
@@ -22,23 +22,6 @@ static inline int cpu_to_node(int cpu)
     return node;
 }
 
-static inline cpumask_t node_to_cpumask(int node)
-{
-    cpumask_t node_cpu_mask = CPU_MASK_NONE;
-    int cpu;
-
-    for_each_online_cpu(cpu) {
-        if (cpu_to_node(cpu) == node)
-            cpu_set(cpu, node_cpu_mask);
-    }
-
-#ifdef DEBUG_NUMA
-    printk("node %d: cpu_mask: %016lx\n", node, node_cpu_mask);
-#endif
-
-    return node_cpu_mask;
-}
-
 extern struct cpumask node_to_cpumask_map[];
 /* FIXME: This is dumb, recalculating every time. But simple. */
 static const struct cpumask *cpumask_of_node(int node)
@@ -55,7 +38,6 @@ static const struct cpumask *cpumask_of_node(int node)
     return &node_to_cpumask_map[node];
 }
 
-#define pcibus_to_cpumask(bus) (cpu_online_map)
 #define cpumask_of_pcibus(bus) (cpu_online_mask)
 
 #endif /* !CONFIG_NUMA */
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index b1fe5674c3a1..42aa078a5e4d 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -548,16 +548,16 @@ setup_profiling_timer(unsigned int multiplier)
 
 
 static void
-send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
+send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
 {
     int i;
 
     mb();
-    for_each_cpu_mask(i, to_whom)
+    for_each_cpu(i, to_whom)
         set_bit(operation, &ipi_data[i].bits);
 
     mb();
-    for_each_cpu_mask(i, to_whom)
+    for_each_cpu(i, to_whom)
         wripir(i);
 }
 
@@ -624,7 +624,7 @@ smp_send_reschedule(int cpu)
     printk(KERN_WARNING
            "smp_send_reschedule: Sending IPI to self.\n");
 #endif
-    send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
+    send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
 void
@@ -636,17 +636,17 @@ smp_send_stop(void)
     if (hard_smp_processor_id() != boot_cpu_id)
         printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
 #endif
-    send_ipi_message(to_whom, IPI_CPU_STOP);
+    send_ipi_message(&to_whom, IPI_CPU_STOP);
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
     send_ipi_message(mask, IPI_CALL_FUNC);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-    send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
+    send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
 }
 
 static void
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 1a711ea8418b..fd03fb63a332 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -334,14 +334,14 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
 #ifndef CONFIG_CPU_CACHE_VIPT
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
-    if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+    if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
         __cpuc_flush_user_all();
 }
 
 static inline void
 flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-    if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+    if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
         __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
             vma->vm_flags);
 }
@@ -349,7 +349,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long
 static inline void
 flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
-    if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+    if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
         unsigned long addr = user_addr & PAGE_MASK;
         __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
     }
@@ -360,7 +360,7 @@ flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
              unsigned long uaddr, void *kaddr,
              unsigned long len, int write)
 {
-    if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+    if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
         unsigned long addr = (unsigned long)kaddr;
         __cpuc_coherent_kern_range(addr, addr + len);
     }
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index bcdb9291ef0c..de6cefb329dd 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -103,14 +103,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
 #ifdef CONFIG_SMP
     /* check for possible thread migration */
-    if (!cpus_empty(next->cpu_vm_mask) && !cpu_isset(cpu, next->cpu_vm_mask))
+    if (!cpumask_empty(mm_cpumask(next)) &&
+        !cpumask_test_cpu(cpu, mm_cpumask(next)))
         __flush_icache_all();
 #endif
-    if (!cpu_test_and_set(cpu, next->cpu_vm_mask) || prev != next) {
+    if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
         check_context(next);
         cpu_switch_mm(next->pgd, next);
         if (cache_is_vivt())
-            cpu_clear(cpu, prev->cpu_vm_mask);
+            cpumask_clear_cpu(cpu, mm_cpumask(prev));
     }
 #endif
 }
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index a06e735b262a..e0d763be1846 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -93,7 +93,6 @@ extern void platform_cpu_enable(unsigned int cpu);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
 
 /*
  * show local interrupt info
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index c964f3fc3bc5..a45ab5dd8255 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -350,7 +350,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
     if (tlb_flag(TLB_WB))
         dsb();
 
-    if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) {
+    if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
         if (tlb_flag(TLB_V3_FULL))
             asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
         if (tlb_flag(TLB_V4_U_FULL))
@@ -388,7 +388,7 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
     if (tlb_flag(TLB_WB))
         dsb();
 
-    if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+    if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
         if (tlb_flag(TLB_V3_PAGE))
             asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc");
         if (tlb_flag(TLB_V4_U_PAGE))
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index de885fd256c5..e0d32770bb3d 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -189,7 +189,7 @@ int __cpuexit __cpu_disable(void)
     read_lock(&tasklist_lock);
     for_each_process(p) {
         if (p->mm)
-            cpu_clear(cpu, p->mm->cpu_vm_mask);
+            cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
     }
     read_unlock(&tasklist_lock);
 
@@ -257,7 +257,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
     atomic_inc(&mm->mm_users);
     atomic_inc(&mm->mm_count);
     current->active_mm = mm;
-    cpu_set(cpu, mm->cpu_vm_mask);
+    cpumask_set_cpu(cpu, mm_cpumask(mm));
     cpu_switch_mm(mm->pgd, mm);
     enter_lazy_tlb(mm, current);
     local_flush_tlb_all();
@@ -643,7 +643,7 @@ void flush_tlb_all(void)
 void flush_tlb_mm(struct mm_struct *mm)
 {
     if (tlb_ops_need_broadcast())
-        on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
+        on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
     else
         local_flush_tlb_mm(mm);
 }
@@ -654,7 +654,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
         struct tlb_args ta;
         ta.ta_vma = vma;
         ta.ta_start = uaddr;
-        on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+        on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
     } else
         local_flush_tlb_page(vma, uaddr);
 }
@@ -677,7 +677,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
         ta.ta_vma = vma;
         ta.ta_start = start;
         ta.ta_end = end;
-        on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+        on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
     } else
         local_flush_tlb_range(vma, start, end);
 }
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index fc84fcc74380..6bda76a43199 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -59,6 +59,6 @@ void __new_context(struct mm_struct *mm)
     }
     spin_unlock(&cpu_asid_lock);
 
-    mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
+    cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
     mm->context.id = asid;
 }
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 575f3ad722e7..b27942909b23 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -50,7 +50,7 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 void flush_cache_mm(struct mm_struct *mm)
 {
     if (cache_is_vivt()) {
-        if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
             __cpuc_flush_user_all();
         return;
     }
@@ -73,7 +73,7 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
     if (cache_is_vivt()) {
-        if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
             __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
                 vma->vm_flags);
         return;
@@ -97,7 +97,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
     if (cache_is_vivt()) {
-        if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
             unsigned long addr = user_addr & PAGE_MASK;
             __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
         }
@@ -113,7 +113,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
              unsigned long len, int write)
 {
     if (cache_is_vivt()) {
-        if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
             unsigned long addr = (unsigned long)kaddr;
             __cpuc_coherent_kern_range(addr, addr + len);
         }
@@ -126,7 +126,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
     }
 
     /* VIPT non-aliasing cache */
-    if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) &&
+    if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) &&
         vma->vm_flags & VM_EXEC) {
         unsigned long addr = (unsigned long)kaddr;
         /* only flushing the kernel mapping on non-aliasing VIPT */
diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h
index d217d1d4e051..0b3b3997decd 100644
--- a/arch/ia64/include/asm/smp.h
+++ b/arch/ia64/include/asm/smp.h
@@ -127,7 +127,6 @@ extern int is_multithreading_enabled(void);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
 
 #else /* CONFIG_SMP */
 
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index d0141fbf51d0..3ddb4e709dba 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -33,7 +33,6 @@
 /*
  * Returns a bitmask of CPUs on Node 'node'.
  */
-#define node_to_cpumask(node) (node_to_cpu_mask[node])
 #define cpumask_of_node(node) (&node_to_cpu_mask[node])
 
 /*
@@ -104,8 +103,6 @@ void build_cpu_to_node_map(void);
 #ifdef CONFIG_SMP
 #define topology_physical_package_id(cpu) (cpu_data(cpu)->socket_id)
 #define topology_core_id(cpu) (cpu_data(cpu)->core_id)
-#define topology_core_siblings(cpu) (cpu_core_map[cpu])
-#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
 #define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
 #define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
 #define smt_capable() (smp_num_siblings > 1)
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 93ebfea43c6c..dabeefe21134 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -302,7 +302,7 @@ smp_flush_tlb_mm (struct mm_struct *mm)
         return;
     }
 
-    smp_call_function_mask(mm->cpu_vm_mask,
+    smp_call_function_many(mm_cpumask(mm),
         (void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
     local_irq_disable();
     local_finish_flush_tlb_mm(mm);
diff --git a/arch/m32r/include/asm/mmu_context.h b/arch/m32r/include/asm/mmu_context.h
index 91909e5dd9d0..a70a3df33635 100644
--- a/arch/m32r/include/asm/mmu_context.h
+++ b/arch/m32r/include/asm/mmu_context.h
@@ -127,7 +127,7 @@ static inline void switch_mm(struct mm_struct *prev,
 
     if (prev != next) {
 #ifdef CONFIG_SMP
-        cpu_set(cpu, next->cpu_vm_mask);
+        cpumask_set_cpu(cpu, mm_cpumask(next));
 #endif /* CONFIG_SMP */
         /* Set MPTB = next->pgd */
         *(volatile unsigned long *)MPTB = (unsigned long)next->pgd;
@@ -135,7 +135,7 @@ static inline void switch_mm(struct mm_struct *prev,
     }
 #ifdef CONFIG_SMP
     else
-        if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
+        if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)))
             activate_context(next);
 #endif /* CONFIG_SMP */
 }
diff --git a/arch/m32r/include/asm/smp.h b/arch/m32r/include/asm/smp.h
index b96a6d2ffbc3..e67ded1aab91 100644
--- a/arch/m32r/include/asm/smp.h
+++ b/arch/m32r/include/asm/smp.h
@@ -88,7 +88,7 @@ extern void smp_send_timer(void);
 extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 
 extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 #endif /* not __ASSEMBLY__ */
 
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index 929e5c9d3ad9..1b7598e6f6e8 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -85,7 +85,7 @@ void smp_ipi_timer_interrupt(struct pt_regs *);
 void smp_local_timer_interrupt(void);
 
 static void send_IPI_allbutself(int, int);
-static void send_IPI_mask(cpumask_t, int, int);
+static void send_IPI_mask(const struct cpumask *, int, int);
 unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
@@ -113,7 +113,7 @@ unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 void smp_send_reschedule(int cpu_id)
 {
     WARN_ON(cpu_is_offline(cpu_id));
-    send_IPI_mask(cpumask_of_cpu(cpu_id), RESCHEDULE_IPI, 1);
+    send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1);
 }
 
 /*==========================================================================*
@@ -168,7 +168,7 @@ void smp_flush_cache_all(void)
     spin_lock(&flushcache_lock);
     mask=cpus_addr(cpumask);
     atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
-    send_IPI_mask(cpumask, INVALIDATE_CACHE_IPI, 0);
+    send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
     _flush_cache_copyback_all();
     while (flushcache_cpumask)
         mb();
@@ -264,7 +264,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
     preempt_disable();
     cpu_id = smp_processor_id();
     mmc = &mm->context[cpu_id];
-    cpu_mask = mm->cpu_vm_mask;
+    cpu_mask = *mm_cpumask(mm);
     cpu_clear(cpu_id, cpu_mask);
 
     if (*mmc != NO_CONTEXT) {
@@ -273,7 +273,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
         if (mm == current->mm)
             activate_context(mm);
         else
-            cpu_clear(cpu_id, mm->cpu_vm_mask);
+            cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
         local_irq_restore(flags);
     }
     if (!cpus_empty(cpu_mask))
@@ -334,7 +334,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
     preempt_disable();
     cpu_id = smp_processor_id();
     mmc = &mm->context[cpu_id];
-    cpu_mask = mm->cpu_vm_mask;
+    cpu_mask = *mm_cpumask(mm);
     cpu_clear(cpu_id, cpu_mask);
 
 #ifdef DEBUG_SMP
@@ -424,7 +424,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
      * We have to send the IPI only to
      * CPUs affected.
      */
-    send_IPI_mask(cpumask, INVALIDATE_TLB_IPI, 0);
+    send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
 
     while (!cpus_empty(flush_cpumask)) {
         /* nothing. lockup detection does not belong here */
@@ -469,7 +469,7 @@ void smp_invalidate_interrupt(void)
         if (flush_mm == current->active_mm)
             activate_context(flush_mm);
         else
-            cpu_clear(cpu_id, flush_mm->cpu_vm_mask);
+            cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
     } else {
         unsigned long va = flush_va;
 
@@ -546,14 +546,14 @@ static void stop_this_cpu(void *dummy)
     for ( ; ; );
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
     send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-    send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0);
+    send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0);
 }
 
 /*==========================================================================*
@@ -729,7 +729,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
     cpumask = cpu_online_map;
     cpu_clear(smp_processor_id(), cpumask);
 
-    send_IPI_mask(cpumask, ipi_num, try);
+    send_IPI_mask(&cpumask, ipi_num, try);
 }
 
 /*==========================================================================*
@@ -752,7 +752,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
  * ---------- --- --------------------------------------------------------
  *
  *==========================================================================*/
-static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
+static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
 {
     cpumask_t physid_mask, tmp;
     int cpu_id, phys_id;
@@ -761,11 +761,11 @@ static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
     if (num_cpus <= 1) /* NO MP */
         return;
 
-    cpus_and(tmp, cpumask, cpu_online_map);
-    BUG_ON(!cpus_equal(cpumask, tmp));
+    cpumask_and(&tmp, cpumask, cpu_online_mask);
+    BUG_ON(!cpumask_equal(cpumask, &tmp));
 
     physid_mask = CPU_MASK_NONE;
-    for_each_cpu_mask(cpu_id, cpumask){
+    for_each_cpu(cpu_id, cpumask) {
         if ((phys_id = cpu_to_physid(cpu_id)) != -1)
             cpu_set(phys_id, physid_mask);
     }
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 655ea1c47a0f..e034844cfc0d 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -178,7 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
     for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
         physid_set(phys_id, phys_cpu_present_map);
 #ifndef CONFIG_HOTPLUG_CPU
-    cpu_present_map = cpu_possible_map;
+    init_cpu_present(&cpu_possible_map);
 #endif
 
     show_mp_info(nr_cpu);
diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c
index f34ff8601942..379a664809b0 100644
--- a/arch/mips/alchemy/common/time.c
+++ b/arch/mips/alchemy/common/time.c
@@ -88,7 +88,7 @@ static struct clock_event_device au1x_rtcmatch2_clockdev = {
     .irq = AU1000_RTC_MATCH2_INT,
     .set_next_event = au1x_rtcmatch2_set_next_event,
     .set_mode = au1x_rtcmatch2_set_mode,
-    .cpumask = CPU_MASK_ALL_PTR,
+    .cpumask = cpu_all_mask,
 };
 
 static struct irqaction au1x_rtcmatch2_irqaction = {
diff --git a/arch/mips/include/asm/mach-ip27/topology.h b/arch/mips/include/asm/mach-ip27/topology.h
index 230591707005..f6837422fe65 100644
--- a/arch/mips/include/asm/mach-ip27/topology.h
+++ b/arch/mips/include/asm/mach-ip27/topology.h
@@ -24,12 +24,10 @@ extern struct cpuinfo_ip27 sn_cpu_info[NR_CPUS];
 
 #define cpu_to_node(cpu) (sn_cpu_info[(cpu)].p_nodeid)
 #define parent_node(node) (node)
-#define node_to_cpumask(node) (hub_data(node)->h_cpus)
 #define cpumask_of_node(node) (&hub_data(node)->h_cpus)
 struct pci_bus;
 extern int pcibus_to_node(struct pci_bus *);
 
-#define pcibus_to_cpumask(bus) (cpu_online_map)
 #define cpumask_of_pcibus(bus) (cpu_online_mask)
 
 extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index d3bea88d8744..d9743536a621 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -178,8 +178,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
      * Mark current->active_mm as not "active" anymore.
      * We don't want to mislead possible IPI tlb flush routines.
      */
-    cpu_clear(cpu, prev->cpu_vm_mask);
-    cpu_set(cpu, next->cpu_vm_mask);
+    cpumask_clear_cpu(cpu, mm_cpumask(prev));
+    cpumask_set_cpu(cpu, mm_cpumask(next));
 
     local_irq_restore(flags);
 }
@@ -235,8 +235,8 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
     TLBMISS_HANDLER_SETUP_PGD(next->pgd);
 
     /* mark mmu ownership change */
-    cpu_clear(cpu, prev->cpu_vm_mask);
-    cpu_set(cpu, next->cpu_vm_mask);
+    cpumask_clear_cpu(cpu, mm_cpumask(prev));
+    cpumask_set_cpu(cpu, mm_cpumask(next));
 
     local_irq_restore(flags);
 }
@@ -258,7 +258,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
 
     local_irq_save(flags);
 
-    if (cpu_isset(cpu, mm->cpu_vm_mask)) {
+    if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
         get_new_mmu_context(mm, cpu);
 #ifdef CONFIG_MIPS_MT_SMTC
         /* See comments for similar code above */
diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h
index fd545547b8aa..9e09af34c8a8 100644
--- a/arch/mips/include/asm/smp-ops.h
+++ b/arch/mips/include/asm/smp-ops.h
@@ -19,7 +19,7 @@ struct task_struct;
 
 struct plat_smp_ops {
     void (*send_ipi_single)(int cpu, unsigned int action);
-    void (*send_ipi_mask)(cpumask_t mask, unsigned int action);
+    void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action);
     void (*init_secondary)(void);
     void (*smp_finish)(void);
     void (*cpus_done)(void);
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index aaa2d4ab26dc..e15f11a09311 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -78,6 +78,6 @@ extern void play_dead(void);
 extern asmlinkage void smp_call_function_interrupt(void);
 
 extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 #endif /* __ASM_SMP_H */
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index ad0ff5dc4d59..cc81771b882c 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -80,11 +80,11 @@ void cmp_send_ipi_single(int cpu, unsigned int action)
     local_irq_restore(flags);
 }
 
-static void cmp_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void cmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 {
     unsigned int i;
 
-    for_each_cpu_mask(i, mask)
+    for_each_cpu(i, mask)
         cmp_send_ipi_single(i, action);
 }
 
@@ -171,7 +171,7 @@ void __init cmp_smp_setup(void)
 
     for (i = 1; i < NR_CPUS; i++) {
         if (amon_cpu_avail(i)) {
-            cpu_set(i, cpu_possible_map);
+            set_cpu_possible(i, true);
             __cpu_number_map[i] = ++ncpu;
             __cpu_logical_map[ncpu] = i;
         }
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 6f7ee5ac46ee..43e7cdc5ded2 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -70,7 +70,7 @@ static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
         write_vpe_c0_vpeconf0(tmp);
 
         /* Record this as available CPU */
-        cpu_set(tc, cpu_possible_map);
+        set_cpu_possible(tc, true);
         __cpu_number_map[tc] = ++ncpu;
         __cpu_logical_map[ncpu] = tc;
     }
@@ -141,11 +141,11 @@ static void vsmp_send_ipi_single(int cpu, unsigned int action)
     local_irq_restore(flags);
 }
 
-static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 {
     unsigned int i;
 
-    for_each_cpu_mask(i, mask)
+    for_each_cpu(i, mask)
         vsmp_send_ipi_single(i, action);
 }
 
diff --git a/arch/mips/kernel/smp-up.c b/arch/mips/kernel/smp-up.c
index 2508d55d68fd..00500fea2750 100644
--- a/arch/mips/kernel/smp-up.c
+++ b/arch/mips/kernel/smp-up.c
@@ -18,7 +18,8 @@ static void up_send_ipi_single(int cpu, unsigned int action)
     panic(KERN_ERR "%s called", __func__);
 }
 
-static inline void up_send_ipi_mask(cpumask_t mask, unsigned int action)
+static inline void up_send_ipi_mask(const struct cpumask *mask,
+                                    unsigned int action)
 {
     panic(KERN_ERR "%s called", __func__);
 }
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 64668a93248b..4eb106c6a3ec 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -128,7 +128,7 @@ asmlinkage __cpuinit void start_secondary(void)
     cpu_idle();
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
     mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
 }
@@ -183,15 +183,15 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
     mp_ops->prepare_cpus(max_cpus);
     set_cpu_sibling_map(0);
 #ifndef CONFIG_HOTPLUG_CPU
-    cpu_present_map = cpu_possible_map;
+    init_cpu_present(&cpu_possible_map);
 #endif
 }
 
 /* preload SMP state for boot cpu */
 void __devinit smp_prepare_boot_cpu(void)
 {
-    cpu_set(0, cpu_possible_map);
-    cpu_set(0, cpu_online_map);
+    set_cpu_possible(0, true);
+    set_cpu_online(0, true);
     cpu_set(0, cpu_callin_map);
 }
 
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 1a466baf0edf..67153a0dc267 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -305,7 +305,7 @@ int __init smtc_build_cpu_map(int start_cpu_slot)
      */
     ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
     for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
-        cpu_set(i, cpu_possible_map);
+        set_cpu_possible(i, true);
         __cpu_number_map[i] = i;
         __cpu_logical_map[i] = i;
     }
@@ -525,8 +525,8 @@ void smtc_prepare_cpus(int cpus)
      * Pull any physically present but unused TCs out of circulation.
      */
     while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
-        cpu_clear(tc, cpu_possible_map);
-        cpu_clear(tc, cpu_present_map);
+        set_cpu_possible(tc, false);
+        set_cpu_present(tc, false);
         tc++;
     }
 
diff --git a/arch/mips/mipssim/sim_smtc.c b/arch/mips/mipssim/sim_smtc.c
index d6e4f656ad14..5da30b6a65b7 100644
--- a/arch/mips/mipssim/sim_smtc.c
+++ b/arch/mips/mipssim/sim_smtc.c
@@ -43,11 +43,12 @@ static void ssmtc_send_ipi_single(int cpu, unsigned int action)
43 /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ 43 /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
44} 44}
45 45
46static inline void ssmtc_send_ipi_mask(cpumask_t mask, unsigned int action) 46static inline void ssmtc_send_ipi_mask(const struct cpumask *mask,
47 unsigned int action)
47{ 48{
48 unsigned int i; 49 unsigned int i;
49 50
50 for_each_cpu_mask(i, mask) 51 for_each_cpu(i, mask)
51 ssmtc_send_ipi_single(i, action); 52 ssmtc_send_ipi_single(i, action);
52} 53}
53 54
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 10ab69f7183f..94e05e5733c1 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -79,7 +79,7 @@ static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
      * cores it has been used on
      */
     if (vma)
-        mask = vma->vm_mm->cpu_vm_mask;
+        mask = *mm_cpumask(vma->vm_mm);
     else
         mask = cpu_online_map;
     cpu_clear(cpu, mask);
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index 499ffe5475df..192cfd2a539c 100644
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -21,11 +21,11 @@ static void msmtc_send_ipi_single(int cpu, unsigned int action)
     smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
 }
 
-static void msmtc_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void msmtc_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 {
     unsigned int i;
 
-    for_each_cpu_mask(i, mask)
+    for_each_cpu(i, mask)
         msmtc_send_ipi_single(i, action);
 }
 
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c
index 8ace27716232..326fe7a392e8 100644
--- a/arch/mips/pmc-sierra/yosemite/smp.c
+++ b/arch/mips/pmc-sierra/yosemite/smp.c
@@ -97,11 +97,11 @@ static void yos_send_ipi_single(int cpu, unsigned int action)
     }
 }
 
-static void yos_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void yos_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 {
     unsigned int i;
 
-    for_each_cpu_mask(i, mask)
+    for_each_cpu(i, mask)
         yos_send_ipi_single(i, action);
 }
 
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 060d853d7b35..f61c164d1e67 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -421,7 +421,7 @@ static void __init node_mem_init(cnodeid_t node)
 
 /*
  * A node with nothing. We use it to avoid any special casing in
- * node_to_cpumask
+ * cpumask_of_node
  */
 static struct node_data null_node = {
     .hub = {
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index cbcd7eb83bd1..9aa8f2951df6 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -165,11 +165,11 @@ static void ip27_send_ipi_single(int destid, unsigned int action)
     REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq);
 }
 
-static void ip27_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void ip27_send_ipi(const struct cpumask *mask, unsigned int action)
 {
     unsigned int i;
 
-    for_each_cpu_mask(i, mask)
+    for_each_cpu(i, mask)
         ip27_send_ipi_single(i, action);
 }
 
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index 314691648c97..47b347c992ea 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -82,11 +82,12 @@ static void bcm1480_send_ipi_single(int cpu, unsigned int action)
     __raw_writeq((((u64)action)<< 48), mailbox_0_set_regs[cpu]);
 }
 
-static void bcm1480_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void bcm1480_send_ipi_mask(const struct cpumask *mask,
+                                  unsigned int action)
 {
     unsigned int i;
 
-    for_each_cpu_mask(i, mask)
+    for_each_cpu(i, mask)
         bcm1480_send_ipi_single(i, action);
 }
 
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index cad14003b84f..c00a5cb1128d 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -70,11 +70,12 @@ static void sb1250_send_ipi_single(int cpu, unsigned int action)
     __raw_writeq((((u64)action) << 48), mailbox_set_regs[cpu]);
 }
 
-static inline void sb1250_send_ipi_mask(cpumask_t mask, unsigned int action)
+static inline void sb1250_send_ipi_mask(const struct cpumask *mask,
+                                        unsigned int action)
 {
     unsigned int i;
 
-    for_each_cpu_mask(i, mask)
+    for_each_cpu(i, mask)
         sb1250_send_ipi_single(i, action);
 }
 
diff --git a/arch/mn10300/include/asm/mmu_context.h b/arch/mn10300/include/asm/mmu_context.h
index a9e2e34f69b0..cb294c244de3 100644
--- a/arch/mn10300/include/asm/mmu_context.h
+++ b/arch/mn10300/include/asm/mmu_context.h
@@ -38,13 +38,13 @@ extern unsigned long mmu_context_cache[NR_CPUS];
 #define enter_lazy_tlb(mm, tsk) do {} while (0)
 
 #ifdef CONFIG_SMP
-#define cpu_ran_vm(cpu, task) \
-    cpu_set((cpu), (task)->cpu_vm_mask)
-#define cpu_maybe_ran_vm(cpu, task) \
-    cpu_test_and_set((cpu), (task)->cpu_vm_mask)
+#define cpu_ran_vm(cpu, mm) \
+    cpumask_set_cpu((cpu), mm_cpumask(mm))
+#define cpu_maybe_ran_vm(cpu, mm) \
+    cpumask_test_and_set_cpu((cpu), mm_cpumask(mm))
 #else
-#define cpu_ran_vm(cpu, task) do {} while (0)
-#define cpu_maybe_ran_vm(cpu, task) true
+#define cpu_ran_vm(cpu, mm) do {} while (0)
+#define cpu_maybe_ran_vm(cpu, mm) true
 #endif /* CONFIG_SMP */
 
 /*
diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h
index 21eb45a52629..2e73623feb6b 100644
--- a/arch/parisc/include/asm/smp.h
+++ b/arch/parisc/include/asm/smp.h
@@ -30,7 +30,6 @@ extern void smp_send_all_nop(void);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
 
 #endif /* !ASSEMBLY */
 
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index c0d3b8af9319..d9ea8d39c342 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -146,7 +146,7 @@ extern void smp_generic_take_timebase(void);
 extern struct smp_ops_t *smp_ops;
 
 extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 /* Definitions relative to the secondary CPU spin loop
  * and entry point. Not all of them exist on both 32 and
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 394edcbcce71..22f738d12ad9 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -17,11 +17,6 @@ static inline int cpu_to_node(int cpu)
 
 #define parent_node(node) (node)
 
-static inline cpumask_t node_to_cpumask(int node)
-{
-    return numa_cpumask_lookup_table[node];
-}
-
 #define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])
 
 int of_node_to_nid(struct device_node *device);
@@ -36,11 +31,6 @@ static inline int pcibus_to_node(struct pci_bus *bus)
 }
 #endif
 
-#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \
-                                CPU_MASK_ALL : \
-                                node_to_cpumask(pcibus_to_node(bus)) \
-                               )
-
 #define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
                                 cpu_all_mask : \
                                 cpumask_of_node(pcibus_to_node(bus)))
@@ -104,8 +94,6 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
 #ifdef CONFIG_PPC64
 #include <asm/smp.h>
 
-#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
-#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu))
 #define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
 #define topology_core_cpumask(cpu) (&per_cpu(cpu_core_map, cpu))
 #define topology_core_id(cpu) (cpu_to_core_id(cpu))
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 74cd1a7d0d4b..4271f7a655a3 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -431,9 +431,9 @@ void __init smp_setup_cpu_maps(void)
         for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
             DBG("    thread %d -> cpu %d (hard id %d)\n",
                 j, cpu, intserv[j]);
-            cpu_set(cpu, cpu_present_map);
+            set_cpu_present(cpu, true);
             set_hard_smp_processor_id(cpu, intserv[j]);
-            cpu_set(cpu, cpu_possible_map);
+            set_cpu_possible(cpu, true);
             cpu++;
         }
     }
@@ -479,7 +479,7 @@ void __init smp_setup_cpu_maps(void)
             maxcpus);
 
         for (cpu = 0; cpu < maxcpus; cpu++)
-            cpu_set(cpu, cpu_possible_map);
+            set_cpu_possible(cpu, true);
     out:
         of_node_put(dn);
     }
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index d387b3937ccc..9b86a74d2815 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -189,11 +189,11 @@ void arch_send_call_function_single_ipi(int cpu)
     smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
     unsigned int cpu;
 
-    for_each_cpu_mask(cpu, mask)
+    for_each_cpu(cpu, mask)
         smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
 }
 
@@ -287,7 +287,7 @@ void __devinit smp_prepare_boot_cpu(void)
 {
     BUG_ON(smp_processor_id() != boot_cpuid);
 
-    cpu_set(boot_cpuid, cpu_online_map);
+    set_cpu_online(boot_cpuid, true);
     cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
     cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
 #ifdef CONFIG_PPC64
@@ -307,7 +307,7 @@ int generic_cpu_disable(void)
     if (cpu == boot_cpuid)
         return -EBUSY;
 
-    cpu_clear(cpu, cpu_online_map);
+    set_cpu_online(cpu, false);
 #ifdef CONFIG_PPC64
     vdso_data->processorCount--;
     fixup_irqs(cpu_online_map);
@@ -361,7 +361,7 @@ void generic_mach_cpu_die(void)
     smp_wmb();
     while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
         cpu_relax();
-    cpu_set(cpu, cpu_online_map);
+    set_cpu_online(cpu, true);
     local_irq_enable();
 }
 #endif
@@ -508,7 +508,7 @@ int __devinit start_secondary(void *unused)
 
     ipi_call_lock();
     notify_cpu_starting(cpu);
-    cpu_set(cpu, cpu_online_map);
+    set_cpu_online(cpu, true);
     /* Update sibling maps */
     base = cpu_first_thread_in_core(cpu);
     for (i = 0; i < threads_per_core; i++) {
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 937a38e73178..b40c22d697f0 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -320,7 +320,7 @@ static int __init smp_psurge_probe(void)
     if (ncpus > NR_CPUS)
         ncpus = NR_CPUS;
     for (i = 1; i < ncpus ; ++i)
-        cpu_set(i, cpu_present_map);
+        set_cpu_present(i, true);
 
     if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
 
@@ -867,7 +867,7 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
867 867
868int smp_core99_cpu_disable(void) 868int smp_core99_cpu_disable(void)
869{ 869{
870 cpu_clear(smp_processor_id(), cpu_online_map); 870 set_cpu_online(smp_processor_id(), false);
871 871
872 /* XXX reset cpu affinity here */ 872 /* XXX reset cpu affinity here */
873 mpic_cpu_set_priority(0xf); 873 mpic_cpu_set_priority(0xf);
@@ -952,7 +952,7 @@ void __init pmac_setup_smp(void)
952 int cpu; 952 int cpu;
953 953
954 for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu) 954 for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu)
955 cpu_set(cpu, cpu_possible_map); 955 set_cpu_possible(cpu, true);
956 smp_ops = &psurge_smp_ops; 956 smp_ops = &psurge_smp_ops;
957 } 957 }
958#endif /* CONFIG_PPC32 */ 958#endif /* CONFIG_PPC32 */
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index a20ead87153d..ebff6d9a4e39 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -94,7 +94,7 @@ static int pseries_cpu_disable(void)
94{ 94{
95 int cpu = smp_processor_id(); 95 int cpu = smp_processor_id();
96 96
97 cpu_clear(cpu, cpu_online_map); 97 set_cpu_online(cpu, false);
98 vdso_data->processorCount--; 98 vdso_data->processorCount--;
99 99
100 /*fix boot_cpuid here*/ 100 /*fix boot_cpuid here*/
@@ -185,7 +185,7 @@ static int pseries_add_processor(struct device_node *np)
185 185
186 for_each_cpu_mask(cpu, tmp) { 186 for_each_cpu_mask(cpu, tmp) {
187 BUG_ON(cpu_isset(cpu, cpu_present_map)); 187 BUG_ON(cpu_isset(cpu, cpu_present_map));
188 cpu_set(cpu, cpu_present_map); 188 set_cpu_present(cpu, true);
189 set_hard_smp_processor_id(cpu, *intserv++); 189 set_hard_smp_processor_id(cpu, *intserv++);
190 } 190 }
191 err = 0; 191 err = 0;
@@ -217,7 +217,7 @@ static void pseries_remove_processor(struct device_node *np)
217 if (get_hard_smp_processor_id(cpu) != intserv[i]) 217 if (get_hard_smp_processor_id(cpu) != intserv[i])
218 continue; 218 continue;
219 BUG_ON(cpu_online(cpu)); 219 BUG_ON(cpu_online(cpu));
220 cpu_clear(cpu, cpu_present_map); 220 set_cpu_present(cpu, false);
221 set_hard_smp_processor_id(cpu, -1); 221 set_hard_smp_processor_id(cpu, -1);
222 break; 222 break;
223 } 223 }
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index c991fe6473c9..a868b272c257 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -62,7 +62,7 @@ extern struct mutex smp_cpu_state_mutex;
62extern int smp_cpu_polarization[]; 62extern int smp_cpu_polarization[];
63 63
64extern void arch_send_call_function_single_ipi(int cpu); 64extern void arch_send_call_function_single_ipi(int cpu);
65extern void arch_send_call_function_ipi(cpumask_t mask); 65extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
66 66
67#endif 67#endif
68 68
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 5e0ad618dc45..6e7211abd950 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -9,7 +9,6 @@ const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
9 9
10extern cpumask_t cpu_core_map[NR_CPUS]; 10extern cpumask_t cpu_core_map[NR_CPUS];
11 11
12#define topology_core_siblings(cpu) (cpu_core_map[cpu])
13#define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) 12#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
14 13
15int topology_set_cpu_management(int fc); 14int topology_set_cpu_management(int fc);
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index b4b6396e6cf0..c932caa5e850 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -147,11 +147,11 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
147 udelay(10); 147 udelay(10);
148} 148}
149 149
150void arch_send_call_function_ipi(cpumask_t mask) 150void arch_send_call_function_ipi_mask(const struct cpumask *mask)
151{ 151{
152 int cpu; 152 int cpu;
153 153
154 for_each_cpu_mask(cpu, mask) 154 for_each_cpu(cpu, mask)
155 smp_ext_bitcall(cpu, ec_call_function); 155 smp_ext_bitcall(cpu, ec_call_function);
156} 156}
157 157
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h
index ca64f43abe67..53ef26ced75f 100644
--- a/arch/sh/include/asm/smp.h
+++ b/arch/sh/include/asm/smp.h
@@ -44,7 +44,6 @@ void plat_send_ipi(unsigned int cpu, unsigned int message);
44 44
45void arch_send_call_function_single_ipi(int cpu); 45void arch_send_call_function_single_ipi(int cpu);
46extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 46extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
47#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
48 47
49#else 48#else
50 49
diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h
index f8c40cc65054..65e7bd2f2240 100644
--- a/arch/sh/include/asm/topology.h
+++ b/arch/sh/include/asm/topology.h
@@ -31,7 +31,6 @@
31#define cpu_to_node(cpu) ((void)(cpu),0) 31#define cpu_to_node(cpu) ((void)(cpu),0)
32#define parent_node(node) ((void)(node),0) 32#define parent_node(node) ((void)(node),0)
33 33
34#define node_to_cpumask(node) ((void)node, cpu_online_map)
35#define cpumask_of_node(node) ((void)node, cpu_online_mask) 34#define cpumask_of_node(node) ((void)node, cpu_online_mask)
36 35
37#define pcibus_to_node(bus) ((void)(bus), -1) 36#define pcibus_to_node(bus) ((void)(bus), -1)
diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h
index becb6bf353a9..f49e11cd4ded 100644
--- a/arch/sparc/include/asm/smp_64.h
+++ b/arch/sparc/include/asm/smp_64.h
@@ -36,7 +36,6 @@ extern int sparc64_multi_core;
36 36
37extern void arch_send_call_function_single_ipi(int cpu); 37extern void arch_send_call_function_single_ipi(int cpu);
38extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 38extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
39#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
40 39
41/* 40/*
42 * General functions that each host system must provide. 41 * General functions that each host system must provide.
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index 26cd25c08399..600a79035fa1 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -12,22 +12,8 @@ static inline int cpu_to_node(int cpu)
12 12
13#define parent_node(node) (node) 13#define parent_node(node) (node)
14 14
15static inline cpumask_t node_to_cpumask(int node)
16{
17 return numa_cpumask_lookup_table[node];
18}
19#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node]) 15#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])
20 16
21/*
22 * Returns a pointer to the cpumask of CPUs on Node 'node'.
23 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
24 */
25#define node_to_cpumask_ptr(v, node) \
26 cpumask_t *v = &(numa_cpumask_lookup_table[node])
27
28#define node_to_cpumask_ptr_next(v, node) \
29 v = &(numa_cpumask_lookup_table[node])
30
31struct pci_bus; 17struct pci_bus;
32#ifdef CONFIG_PCI 18#ifdef CONFIG_PCI
33extern int pcibus_to_node(struct pci_bus *pbus); 19extern int pcibus_to_node(struct pci_bus *pbus);
@@ -71,8 +57,6 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
71#ifdef CONFIG_SMP 57#ifdef CONFIG_SMP
72#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id) 58#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id)
73#define topology_core_id(cpu) (cpu_data(cpu).core_id) 59#define topology_core_id(cpu) (cpu_data(cpu).core_id)
74#define topology_core_siblings(cpu) (cpu_core_map[cpu])
75#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
76#define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) 60#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
77#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) 61#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
78#define mc_capable() (sparc64_multi_core) 62#define mc_capable() (sparc64_multi_core)
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index 54f42e8b0105..34d813011b7a 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -35,8 +35,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
35 unsigned cpu = smp_processor_id(); 35 unsigned cpu = smp_processor_id();
36 36
37 if(prev != next){ 37 if(prev != next){
38 cpu_clear(cpu, prev->cpu_vm_mask); 38 cpumask_clear_cpu(cpu, mm_cpumask(prev));
39 cpu_set(cpu, next->cpu_vm_mask); 39 cpumask_set_cpu(cpu, mm_cpumask(next));
40 if(next != &init_mm) 40 if(next != &init_mm)
41 __switch_mm(&next->context.id); 41 __switch_mm(&next->context.id);
42 } 42 }
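
As in the um switch_mm() hunk above (and the x86 one further down), direct accesses to mm->cpu_vm_mask are replaced by the mm_cpumask() wrapper plus the pointer-based cpumask_set_cpu()/cpumask_clear_cpu() helpers. A simplified sketch of the pattern, not a complete switch_mm(); track_mm_on_cpu() is a made-up name:

#include <linux/mm_types.h>
#include <linux/cpumask.h>

static void track_mm_on_cpu(struct mm_struct *prev, struct mm_struct *next,
			    unsigned int cpu)
{
	if (prev != next) {
		/* stop flush IPIs for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));
		cpumask_set_cpu(cpu, mm_cpumask(next));
	}
}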
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
index 98351c78bc81..106bf27e2a9a 100644
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -111,7 +111,7 @@ void smp_prepare_cpus(unsigned int maxcpus)
111 int i; 111 int i;
112 112
113 for (i = 0; i < ncpus; ++i) 113 for (i = 0; i < ncpus; ++i)
114 cpu_set(i, cpu_possible_map); 114 set_cpu_possible(i, true);
115 115
116 cpu_clear(me, cpu_online_map); 116 cpu_clear(me, cpu_online_map);
117 cpu_set(me, cpu_online_map); 117 cpu_set(me, cpu_online_map);
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index f923203dc39a..4a2d4e0c18d9 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -37,12 +37,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
37 37
38 if (likely(prev != next)) { 38 if (likely(prev != next)) {
39 /* stop flush ipis for the previous mm */ 39 /* stop flush ipis for the previous mm */
40 cpu_clear(cpu, prev->cpu_vm_mask); 40 cpumask_clear_cpu(cpu, mm_cpumask(prev));
41#ifdef CONFIG_SMP 41#ifdef CONFIG_SMP
42 percpu_write(cpu_tlbstate.state, TLBSTATE_OK); 42 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
43 percpu_write(cpu_tlbstate.active_mm, next); 43 percpu_write(cpu_tlbstate.active_mm, next);
44#endif 44#endif
45 cpu_set(cpu, next->cpu_vm_mask); 45 cpumask_set_cpu(cpu, mm_cpumask(next));
46 46
47 /* Re-load page tables */ 47 /* Re-load page tables */
48 load_cr3(next->pgd); 48 load_cr3(next->pgd);
@@ -58,7 +58,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
58 percpu_write(cpu_tlbstate.state, TLBSTATE_OK); 58 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
59 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next); 59 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
60 60
61 if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { 61 if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
62 /* We were in lazy tlb mode and leave_mm disabled 62 /* We were in lazy tlb mode and leave_mm disabled
63 * tlb flush IPI delivery. We must reload CR3 63 * tlb flush IPI delivery. We must reload CR3
64 * to make sure to use no freed page tables. 64 * to make sure to use no freed page tables.
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 6a84ed166aec..1e796782cd7b 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -121,7 +121,6 @@ static inline void arch_send_call_function_single_ipi(int cpu)
121 smp_ops.send_call_func_single_ipi(cpu); 121 smp_ops.send_call_func_single_ipi(cpu);
122} 122}
123 123
124#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
125static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask) 124static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
126{ 125{
127 smp_ops.send_call_func_ipi(mask); 126 smp_ops.send_call_func_ipi(mask);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 64970b9885f2..dc69f28489f5 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -227,17 +227,14 @@ static struct irq_cfg *get_one_free_irq_cfg(int node)
227 227
228 cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); 228 cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
229 if (cfg) { 229 if (cfg) {
230 if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) { 230 if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
231 kfree(cfg); 231 kfree(cfg);
232 cfg = NULL; 232 cfg = NULL;
233 } else if (!alloc_cpumask_var_node(&cfg->old_domain, 233 } else if (!zalloc_cpumask_var_node(&cfg->old_domain,
234 GFP_ATOMIC, node)) { 234 GFP_ATOMIC, node)) {
235 free_cpumask_var(cfg->domain); 235 free_cpumask_var(cfg->domain);
236 kfree(cfg); 236 kfree(cfg);
237 cfg = NULL; 237 cfg = NULL;
238 } else {
239 cpumask_clear(cfg->domain);
240 cpumask_clear(cfg->old_domain);
241 } 238 }
242 } 239 }
243 240
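
Several hunks in this series (io_apic.c here, then process.c, smpboot.c, the ACPI drivers, sfc, oprofile and ftrace) collapse the alloc_cpumask_var()-then-cpumask_clear() pair into zalloc_cpumask_var()/zalloc_cpumask_var_node(), which return an already-zeroed mask on success. Roughly (the covered_init_*() names are illustrative):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static cpumask_var_t covered_cpus;

/* Old idiom: allocate, then zero by hand. */
static int covered_init_old(void)
{
	if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;
	cpumask_clear(covered_cpus);
	return 0;
}

/* New idiom: one call, the mask comes back zeroed. */
static int covered_init_new(void)
{
	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;
	return 0;
}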
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 71f1d99a635d..ec6ef60cbd17 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -67,8 +67,8 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
67#ifdef CONFIG_SMP 67#ifdef CONFIG_SMP
68 preempt_disable(); 68 preempt_disable();
69 load_LDT(pc); 69 load_LDT(pc);
70 if (!cpus_equal(current->mm->cpu_vm_mask, 70 if (!cpumask_equal(mm_cpumask(current->mm),
71 cpumask_of_cpu(smp_processor_id()))) 71 cpumask_of(smp_processor_id())))
72 smp_call_function(flush_ldt, current->mm, 1); 72 smp_call_function(flush_ldt, current->mm, 1);
73 preempt_enable(); 73 preempt_enable();
74#else 74#else
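
The ldt.c hunk above shows the comparison helpers: cpus_equal() on cpumask_t values becomes cpumask_equal() on struct cpumask pointers, and cpumask_of_cpu(cpu) (a cpumask_t lvalue) becomes cpumask_of(cpu) (a const pointer). A sketch of the test it performs; mm_only_local() is an invented wrapper:

#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/mm_types.h>
#include <linux/smp.h>

/* True if @mm is live only on the calling CPU (preemption must be off). */
static bool mm_only_local(struct mm_struct *mm)
{
	return cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()));
}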
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 847ab4160315..5284cd2b5776 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -555,10 +555,8 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
555void __init init_c1e_mask(void) 555void __init init_c1e_mask(void)
556{ 556{
557 /* If we're using c1e_idle, we need to allocate c1e_mask. */ 557 /* If we're using c1e_idle, we need to allocate c1e_mask. */
558 if (pm_idle == c1e_idle) { 558 if (pm_idle == c1e_idle)
559 alloc_cpumask_var(&c1e_mask, GFP_KERNEL); 559 zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
560 cpumask_clear(c1e_mask);
561 }
562} 560}
563 561
564static int __init idle_setup(char *str) 562static int __init idle_setup(char *str)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 09c5e077dff7..565ebc65920e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1059,12 +1059,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1059#endif 1059#endif
1060 current_thread_info()->cpu = 0; /* needed? */ 1060 current_thread_info()->cpu = 0; /* needed? */
1061 for_each_possible_cpu(i) { 1061 for_each_possible_cpu(i) {
1062 alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); 1062 zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
1063 alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL); 1063 zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
1064 alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL); 1064 zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
1065 cpumask_clear(per_cpu(cpu_core_map, i));
1066 cpumask_clear(per_cpu(cpu_sibling_map, i));
1067 cpumask_clear(cpu_data(i).llc_shared_map);
1068 } 1065 }
1069 set_cpu_sibling_map(0); 1066 set_cpu_sibling_map(0);
1070 1067
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index e293ac56c723..dcb00d278512 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -93,7 +93,6 @@ static struct irqaction irq0 = {
93 93
94void __init setup_default_timer_irq(void) 94void __init setup_default_timer_irq(void)
95{ 95{
96 irq0.mask = cpumask_of_cpu(0);
97 setup_irq(0, &irq0); 96 setup_irq(0, &irq0);
98} 97}
99 98
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index c814e144a3f0..36fe08eeb5c3 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -59,7 +59,8 @@ void leave_mm(int cpu)
59{ 59{
60 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) 60 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
61 BUG(); 61 BUG();
62 cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask); 62 cpumask_clear_cpu(cpu,
63 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
63 load_cr3(swapper_pg_dir); 64 load_cr3(swapper_pg_dir);
64} 65}
65EXPORT_SYMBOL_GPL(leave_mm); 66EXPORT_SYMBOL_GPL(leave_mm);
@@ -234,8 +235,8 @@ void flush_tlb_current_task(void)
234 preempt_disable(); 235 preempt_disable();
235 236
236 local_flush_tlb(); 237 local_flush_tlb();
237 if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) 238 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
238 flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL); 239 flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
239 preempt_enable(); 240 preempt_enable();
240} 241}
241 242
@@ -249,8 +250,8 @@ void flush_tlb_mm(struct mm_struct *mm)
249 else 250 else
250 leave_mm(smp_processor_id()); 251 leave_mm(smp_processor_id());
251 } 252 }
252 if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) 253 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
253 flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL); 254 flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
254 255
255 preempt_enable(); 256 preempt_enable();
256} 257}
@@ -268,8 +269,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
268 leave_mm(smp_processor_id()); 269 leave_mm(smp_processor_id());
269 } 270 }
270 271
271 if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) 272 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
272 flush_tlb_others(&mm->cpu_vm_mask, mm, va); 273 flush_tlb_others(mm_cpumask(mm), mm, va);
273 274
274 preempt_enable(); 275 preempt_enable();
275} 276}
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 093dd59b5385..3bf7b1d250ce 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1165,14 +1165,14 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
1165 /* Get the "official" set of cpus referring to our pagetable. */ 1165 /* Get the "official" set of cpus referring to our pagetable. */
1166 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) { 1166 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
1167 for_each_online_cpu(cpu) { 1167 for_each_online_cpu(cpu) {
1168 if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask) 1168 if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
1169 && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd)) 1169 && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
1170 continue; 1170 continue;
1171 smp_call_function_single(cpu, drop_other_mm_ref, mm, 1); 1171 smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
1172 } 1172 }
1173 return; 1173 return;
1174 } 1174 }
1175 cpumask_copy(mask, &mm->cpu_vm_mask); 1175 cpumask_copy(mask, mm_cpumask(mm));
1176 1176
1177 /* It's possible that a vcpu may have a stale reference to our 1177 /* It's possible that a vcpu may have a stale reference to our
1178 cr3, because its in lazy mode, and it hasn't yet flushed 1178 cr3, because its in lazy mode, and it hasn't yet flushed
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 56071b67bed5..5633b86e3ed1 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -193,7 +193,7 @@ acpi_status __init acpi_os_initialize(void)
193 193
194static void bind_to_cpu0(struct work_struct *work) 194static void bind_to_cpu0(struct work_struct *work)
195{ 195{
196 set_cpus_allowed(current, cpumask_of_cpu(0)); 196 set_cpus_allowed_ptr(current, cpumask_of(0));
197 kfree(work); 197 kfree(work);
198} 198}
199 199
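
The drivers/acpi/osl.c hunk is the task-affinity side of the same change: set_cpus_allowed(current, cpumask_of_cpu(0)), which passes a full cpumask_t by value, becomes set_cpus_allowed_ptr(current, cpumask_of(0)). As a standalone sketch (pin_self_to_cpu0() is a made-up helper):

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Pin the calling task to CPU 0, new style: the mask goes by pointer. */
static int pin_self_to_cpu0(void)
{
	return set_cpus_allowed_ptr(current, cpumask_of(0));
}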
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 11088cf10319..8ba0ed0b9ddb 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -511,7 +511,7 @@ int acpi_processor_preregister_performance(
511 struct acpi_processor *match_pr; 511 struct acpi_processor *match_pr;
512 struct acpi_psd_package *match_pdomain; 512 struct acpi_psd_package *match_pdomain;
513 513
514 if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL)) 514 if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
515 return -ENOMEM; 515 return -ENOMEM;
516 516
517 mutex_lock(&performance_mutex); 517 mutex_lock(&performance_mutex);
@@ -558,7 +558,6 @@ int acpi_processor_preregister_performance(
558 * Now that we have _PSD data from all CPUs, lets setup P-state 558 * Now that we have _PSD data from all CPUs, lets setup P-state
559 * domain info. 559 * domain info.
560 */ 560 */
561 cpumask_clear(covered_cpus);
562 for_each_possible_cpu(i) { 561 for_each_possible_cpu(i) {
563 pr = per_cpu(processors, i); 562 pr = per_cpu(processors, i);
564 if (!pr) 563 if (!pr)
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index ce7cf3bc5101..4c6c14c1e307 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -77,7 +77,7 @@ static int acpi_processor_update_tsd_coord(void)
77 struct acpi_tsd_package *pdomain, *match_pdomain; 77 struct acpi_tsd_package *pdomain, *match_pdomain;
78 struct acpi_processor_throttling *pthrottling, *match_pthrottling; 78 struct acpi_processor_throttling *pthrottling, *match_pthrottling;
79 79
80 if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL)) 80 if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
81 return -ENOMEM; 81 return -ENOMEM;
82 82
83 /* 83 /*
@@ -105,7 +105,6 @@ static int acpi_processor_update_tsd_coord(void)
105 if (retval) 105 if (retval)
106 goto err_ret; 106 goto err_ret;
107 107
108 cpumask_clear(covered_cpus);
109 for_each_possible_cpu(i) { 108 for_each_possible_cpu(i) {
110 pr = per_cpu(processors, i); 109 pr = per_cpu(processors, i);
111 if (!pr) 110 if (!pr)
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 07a7e4b8f8fc..cc4b2f99989d 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -884,13 +884,12 @@ static int efx_wanted_rx_queues(void)
884 int count; 884 int count;
885 int cpu; 885 int cpu;
886 886
887 if (unlikely(!alloc_cpumask_var(&core_mask, GFP_KERNEL))) { 887 if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
888 printk(KERN_WARNING 888 printk(KERN_WARNING
889 "sfc: RSS disabled due to allocation failure\n"); 889 "sfc: RSS disabled due to allocation failure\n");
890 return 1; 890 return 1;
891 } 891 }
892 892
893 cpumask_clear(core_mask);
894 count = 0; 893 count = 0;
895 for_each_online_cpu(cpu) { 894 for_each_online_cpu(cpu) {
896 if (!cpumask_test_cpu(cpu, core_mask)) { 895 if (!cpumask_test_cpu(cpu, core_mask)) {
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 8574622e36a5..c9e2ae90f195 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -154,9 +154,8 @@ int sync_start(void)
154{ 154{
155 int err; 155 int err;
156 156
157 if (!alloc_cpumask_var(&marked_cpus, GFP_KERNEL)) 157 if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
158 return -ENOMEM; 158 return -ENOMEM;
159 cpumask_clear(marked_cpus);
160 159
161 start_cpu_work(); 160 start_cpu_work();
162 161
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 88bada2ebc4b..510df36dd5d4 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -37,9 +37,6 @@
37#ifndef parent_node 37#ifndef parent_node
38#define parent_node(node) ((void)(node),0) 38#define parent_node(node) ((void)(node),0)
39#endif 39#endif
40#ifndef node_to_cpumask
41#define node_to_cpumask(node) ((void)node, cpu_online_map)
42#endif
43#ifndef cpumask_of_node 40#ifndef cpumask_of_node
44#define cpumask_of_node(node) ((void)node, cpu_online_mask) 41#define cpumask_of_node(node) ((void)node, cpu_online_mask)
45#endif 42#endif
@@ -55,18 +52,4 @@
55 52
56#endif /* CONFIG_NUMA */ 53#endif /* CONFIG_NUMA */
57 54
58/*
59 * returns pointer to cpumask for specified node
60 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
61 */
62#ifndef node_to_cpumask_ptr
63
64#define node_to_cpumask_ptr(v, node) \
65 cpumask_t _##v = node_to_cpumask(node); \
66 const cpumask_t *v = &_##v
67
68#define node_to_cpumask_ptr_next(v, node) \
69 _##v = node_to_cpumask(node)
70#endif
71
72#endif /* _ASM_GENERIC_TOPOLOGY_H */ 55#endif /* _ASM_GENERIC_TOPOLOGY_H */
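
The topology headers (sparc64 and asm-generic above, sh and s390 before them) retire node_to_cpumask(), which returned a cpumask_t by value, and the node_to_cpumask_ptr() macros that worked around it, in favour of cpumask_of_node(), which returns a const pointer and never copies NR_CPUS bits onto the stack. The usual replacement pattern, sketched with an invented count_node_cpus() helper:

#include <linux/cpumask.h>
#include <linux/topology.h>

static unsigned int count_node_cpus(int node)
{
	const struct cpumask *mask = cpumask_of_node(node);
	unsigned int cpu, count = 0;

	for_each_cpu(cpu, mask)		/* iterate the node's CPUs in place */
		count++;
	return count;
}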
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 9b1d458aac6e..789cf5f920ce 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -3,444 +3,37 @@
3 3
4/* 4/*
5 * Cpumasks provide a bitmap suitable for representing the 5 * Cpumasks provide a bitmap suitable for representing the
6 * set of CPU's in a system, one bit position per CPU number. 6 * set of CPU's in a system, one bit position per CPU number. In general,
7 * 7 * only nr_cpu_ids (<= NR_CPUS) bits are valid.
8 * The new cpumask_ ops take a "struct cpumask *"; the old ones
9 * use cpumask_t.
10 *
11 * See detailed comments in the file linux/bitmap.h describing the
12 * data type on which these cpumasks are based.
13 *
14 * For details of cpumask_scnprintf() and cpumask_parse_user(),
15 * see bitmap_scnprintf() and bitmap_parse_user() in lib/bitmap.c.
16 * For details of cpulist_scnprintf() and cpulist_parse(), see
17 * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
18 * For details of cpu_remap(), see bitmap_bitremap in lib/bitmap.c
19 * For details of cpus_remap(), see bitmap_remap in lib/bitmap.c.
20 * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c.
21 * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c.
22 *
23 * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
24 * Note: The alternate operations with the suffix "_nr" are used
25 * to limit the range of the loop to nr_cpu_ids instead of
26 * NR_CPUS when NR_CPUS > 64 for performance reasons.
27 * If NR_CPUS is <= 64 then most assembler bitmask
28 * operators execute faster with a constant range, so
29 * the operator will continue to use NR_CPUS.
30 *
31 * Another consideration is that nr_cpu_ids is initialized
32 * to NR_CPUS and isn't lowered until the possible cpus are
33 * discovered (including any disabled cpus). So early uses
34 * will span the entire range of NR_CPUS.
35 * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
36 *
37 * The obsolescent cpumask operations are:
38 *
39 * void cpu_set(cpu, mask) turn on bit 'cpu' in mask
40 * void cpu_clear(cpu, mask) turn off bit 'cpu' in mask
41 * void cpus_setall(mask) set all bits
42 * void cpus_clear(mask) clear all bits
43 * int cpu_isset(cpu, mask) true iff bit 'cpu' set in mask
44 * int cpu_test_and_set(cpu, mask) test and set bit 'cpu' in mask
45 *
46 * int cpus_and(dst, src1, src2) dst = src1 & src2 [intersection]
47 * void cpus_or(dst, src1, src2) dst = src1 | src2 [union]
48 * void cpus_xor(dst, src1, src2) dst = src1 ^ src2
49 * int cpus_andnot(dst, src1, src2) dst = src1 & ~src2
50 * void cpus_complement(dst, src) dst = ~src
51 *
52 * int cpus_equal(mask1, mask2) Does mask1 == mask2?
53 * int cpus_intersects(mask1, mask2) Do mask1 and mask2 intersect?
54 * int cpus_subset(mask1, mask2) Is mask1 a subset of mask2?
55 * int cpus_empty(mask) Is mask empty (no bits sets)?
56 * int cpus_full(mask) Is mask full (all bits sets)?
57 * int cpus_weight(mask) Hamming weigh - number of set bits
58 * int cpus_weight_nr(mask) Same using nr_cpu_ids instead of NR_CPUS
59 *
60 * void cpus_shift_right(dst, src, n) Shift right
61 * void cpus_shift_left(dst, src, n) Shift left
62 *
63 * int first_cpu(mask) Number lowest set bit, or NR_CPUS
64 * int next_cpu(cpu, mask) Next cpu past 'cpu', or NR_CPUS
65 * int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids
66 *
67 * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set
68 * (can be used as an lvalue)
69 * CPU_MASK_ALL Initializer - all bits set
70 * CPU_MASK_NONE Initializer - no bits set
71 * unsigned long *cpus_addr(mask) Array of unsigned long's in mask
72 *
73 * CPUMASK_ALLOC kmalloc's a structure that is a composite of many cpumask_t
74 * variables, and CPUMASK_PTR provides pointers to each field.
75 *
76 * The structure should be defined something like this:
77 * struct my_cpumasks {
78 * cpumask_t mask1;
79 * cpumask_t mask2;
80 * };
81 *
82 * Usage is then:
83 * CPUMASK_ALLOC(my_cpumasks);
84 * CPUMASK_PTR(mask1, my_cpumasks);
85 * CPUMASK_PTR(mask2, my_cpumasks);
86 *
87 * --- DO NOT reference cpumask_t pointers until this check ---
88 * if (my_cpumasks == NULL)
89 * "kmalloc failed"...
90 *
91 * References are now pointers to the cpumask_t variables (*mask1, ...)
92 *
93 *if NR_CPUS > BITS_PER_LONG
94 * CPUMASK_ALLOC(m) Declares and allocates struct m *m =
95 * kmalloc(sizeof(*m), GFP_KERNEL)
96 * CPUMASK_FREE(m) Macro for kfree(m)
97 *else
98 * CPUMASK_ALLOC(m) Declares struct m _m, *m = &_m
99 * CPUMASK_FREE(m) Nop
100 *endif
101 * CPUMASK_PTR(v, m) Declares cpumask_t *v = &(m->v)
102 * ------------------------------------------------------------------------
103 *
104 * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
105 * int cpumask_parse_user(ubuf, ulen, mask) Parse ascii string as cpumask
106 * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
107 * int cpulist_parse(buf, map) Parse ascii string as cpulist
108 * int cpu_remap(oldbit, old, new) newbit = map(old, new)(oldbit)
109 * void cpus_remap(dst, src, old, new) *dst = map(old, new)(src)
110 * void cpus_onto(dst, orig, relmap) *dst = orig relative to relmap
111 * void cpus_fold(dst, orig, sz) dst bits = orig bits mod sz
112 *
113 * for_each_cpu_mask(cpu, mask) for-loop cpu over mask using NR_CPUS
114 * for_each_cpu_mask_nr(cpu, mask) for-loop cpu over mask using nr_cpu_ids
115 *
116 * int num_online_cpus() Number of online CPUs
117 * int num_possible_cpus() Number of all possible CPUs
118 * int num_present_cpus() Number of present CPUs
119 *
120 * int cpu_online(cpu) Is some cpu online?
121 * int cpu_possible(cpu) Is some cpu possible?
122 * int cpu_present(cpu) Is some cpu present (can schedule)?
123 *
124 * int any_online_cpu(mask) First online cpu in mask
125 *
126 * for_each_possible_cpu(cpu) for-loop cpu over cpu_possible_map
127 * for_each_online_cpu(cpu) for-loop cpu over cpu_online_map
128 * for_each_present_cpu(cpu) for-loop cpu over cpu_present_map
129 *
130 * Subtlety:
131 * 1) The 'type-checked' form of cpu_isset() causes gcc (3.3.2, anyway)
132 * to generate slightly worse code. Note for example the additional
133 * 40 lines of assembly code compiling the "for each possible cpu"
134 * loops buried in the disk_stat_read() macros calls when compiling
135 * drivers/block/genhd.c (arch i386, CONFIG_SMP=y). So use a simple
136 * one-line #define for cpu_isset(), instead of wrapping an inline
137 * inside a macro, the way we do the other calls.
138 */ 8 */
139
140#include <linux/kernel.h> 9#include <linux/kernel.h>
141#include <linux/threads.h> 10#include <linux/threads.h>
142#include <linux/bitmap.h> 11#include <linux/bitmap.h>
143 12
144typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; 13typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
145extern cpumask_t _unused_cpumask_arg_;
146
147#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
148#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
149static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
150{
151 set_bit(cpu, dstp->bits);
152}
153
154#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
155static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
156{
157 clear_bit(cpu, dstp->bits);
158}
159
160#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
161static inline void __cpus_setall(cpumask_t *dstp, int nbits)
162{
163 bitmap_fill(dstp->bits, nbits);
164}
165
166#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
167static inline void __cpus_clear(cpumask_t *dstp, int nbits)
168{
169 bitmap_zero(dstp->bits, nbits);
170}
171
172/* No static inline type checking - see Subtlety (1) above. */
173#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
174
175#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
176static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
177{
178 return test_and_set_bit(cpu, addr->bits);
179}
180
181#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
182static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
183 const cpumask_t *src2p, int nbits)
184{
185 return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
186}
187
188#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
189static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
190 const cpumask_t *src2p, int nbits)
191{
192 bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
193}
194
195#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
196static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
197 const cpumask_t *src2p, int nbits)
198{
199 bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
200}
201
202#define cpus_andnot(dst, src1, src2) \
203 __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
204static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
205 const cpumask_t *src2p, int nbits)
206{
207 return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
208}
209
210#define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
211static inline void __cpus_complement(cpumask_t *dstp,
212 const cpumask_t *srcp, int nbits)
213{
214 bitmap_complement(dstp->bits, srcp->bits, nbits);
215}
216
217#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
218static inline int __cpus_equal(const cpumask_t *src1p,
219 const cpumask_t *src2p, int nbits)
220{
221 return bitmap_equal(src1p->bits, src2p->bits, nbits);
222}
223
224#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
225static inline int __cpus_intersects(const cpumask_t *src1p,
226 const cpumask_t *src2p, int nbits)
227{
228 return bitmap_intersects(src1p->bits, src2p->bits, nbits);
229}
230
231#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
232static inline int __cpus_subset(const cpumask_t *src1p,
233 const cpumask_t *src2p, int nbits)
234{
235 return bitmap_subset(src1p->bits, src2p->bits, nbits);
236}
237
238#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
239static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
240{
241 return bitmap_empty(srcp->bits, nbits);
242}
243
244#define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS)
245static inline int __cpus_full(const cpumask_t *srcp, int nbits)
246{
247 return bitmap_full(srcp->bits, nbits);
248}
249
250#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
251static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
252{
253 return bitmap_weight(srcp->bits, nbits);
254}
255
256#define cpus_shift_right(dst, src, n) \
257 __cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
258static inline void __cpus_shift_right(cpumask_t *dstp,
259 const cpumask_t *srcp, int n, int nbits)
260{
261 bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
262}
263
264#define cpus_shift_left(dst, src, n) \
265 __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
266static inline void __cpus_shift_left(cpumask_t *dstp,
267 const cpumask_t *srcp, int n, int nbits)
268{
269 bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
270}
271#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
272 14
273/** 15/**
274 * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * 16 * cpumask_bits - get the bits in a cpumask
275 * @bitmap: the bitmap 17 * @maskp: the struct cpumask *
276 *
277 * There are a few places where cpumask_var_t isn't appropriate and
278 * static cpumasks must be used (eg. very early boot), yet we don't
279 * expose the definition of 'struct cpumask'.
280 *
281 * This does the conversion, and can be used as a constant initializer.
282 */
283#define to_cpumask(bitmap) \
284 ((struct cpumask *)(1 ? (bitmap) \
285 : (void *)sizeof(__check_is_bitmap(bitmap))))
286
287static inline int __check_is_bitmap(const unsigned long *bitmap)
288{
289 return 1;
290}
291
292/*
293 * Special-case data structure for "single bit set only" constant CPU masks.
294 * 18 *
295 * We pre-generate all the 64 (or 32) possible bit positions, with enough 19 * You should only assume nr_cpu_ids bits of this mask are valid. This is
296 * padding to the left and the right, and return the constant pointer 20 * a macro so it's const-correct.
297 * appropriately offset.
298 */
299extern const unsigned long
300 cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
301
302static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
303{
304 const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
305 p -= cpu / BITS_PER_LONG;
306 return to_cpumask(p);
307}
308
309#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
310/*
311 * In cases where we take the address of the cpumask immediately,
312 * gcc optimizes it out (it's a constant) and there's no huge stack
313 * variable created:
314 */ 21 */
315#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu)) 22#define cpumask_bits(maskp) ((maskp)->bits)
316
317
318#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
319
320#if NR_CPUS <= BITS_PER_LONG
321
322#define CPU_MASK_ALL \
323(cpumask_t) { { \
324 [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
325} }
326
327#define CPU_MASK_ALL_PTR (&CPU_MASK_ALL)
328
329#else
330
331#define CPU_MASK_ALL \
332(cpumask_t) { { \
333 [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
334 [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
335} }
336
337/* cpu_mask_all is in init/main.c */
338extern cpumask_t cpu_mask_all;
339#define CPU_MASK_ALL_PTR (&cpu_mask_all)
340
341#endif
342
343#define CPU_MASK_NONE \
344(cpumask_t) { { \
345 [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
346} }
347
348#define CPU_MASK_CPU0 \
349(cpumask_t) { { \
350 [0] = 1UL \
351} }
352
353#define cpus_addr(src) ((src).bits)
354
355#if NR_CPUS > BITS_PER_LONG
356#define CPUMASK_ALLOC(m) struct m *m = kmalloc(sizeof(*m), GFP_KERNEL)
357#define CPUMASK_FREE(m) kfree(m)
358#else
359#define CPUMASK_ALLOC(m) struct m _m, *m = &_m
360#define CPUMASK_FREE(m)
361#endif
362#define CPUMASK_PTR(v, m) cpumask_t *v = &(m->v)
363
364#define cpu_remap(oldbit, old, new) \
365 __cpu_remap((oldbit), &(old), &(new), NR_CPUS)
366static inline int __cpu_remap(int oldbit,
367 const cpumask_t *oldp, const cpumask_t *newp, int nbits)
368{
369 return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
370}
371
372#define cpus_remap(dst, src, old, new) \
373 __cpus_remap(&(dst), &(src), &(old), &(new), NR_CPUS)
374static inline void __cpus_remap(cpumask_t *dstp, const cpumask_t *srcp,
375 const cpumask_t *oldp, const cpumask_t *newp, int nbits)
376{
377 bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
378}
379
380#define cpus_onto(dst, orig, relmap) \
381 __cpus_onto(&(dst), &(orig), &(relmap), NR_CPUS)
382static inline void __cpus_onto(cpumask_t *dstp, const cpumask_t *origp,
383 const cpumask_t *relmapp, int nbits)
384{
385 bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
386}
387
388#define cpus_fold(dst, orig, sz) \
389 __cpus_fold(&(dst), &(orig), sz, NR_CPUS)
390static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp,
391 int sz, int nbits)
392{
393 bitmap_fold(dstp->bits, origp->bits, sz, nbits);
394}
395#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
396 23
397#if NR_CPUS == 1 24#if NR_CPUS == 1
398
399#define nr_cpu_ids 1 25#define nr_cpu_ids 1
400#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS 26#else
401#define first_cpu(src) ({ (void)(src); 0; })
402#define next_cpu(n, src) ({ (void)(src); 1; })
403#define any_online_cpu(mask) 0
404#define for_each_cpu_mask(cpu, mask) \
405 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
406#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
407#else /* NR_CPUS > 1 */
408
409extern int nr_cpu_ids; 27extern int nr_cpu_ids;
410#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
411int __first_cpu(const cpumask_t *srcp);
412int __next_cpu(int n, const cpumask_t *srcp);
413int __any_online_cpu(const cpumask_t *mask);
414
415#define first_cpu(src) __first_cpu(&(src))
416#define next_cpu(n, src) __next_cpu((n), &(src))
417#define any_online_cpu(mask) __any_online_cpu(&(mask))
418#define for_each_cpu_mask(cpu, mask) \
419 for ((cpu) = -1; \
420 (cpu) = next_cpu((cpu), (mask)), \
421 (cpu) < NR_CPUS; )
422#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
423#endif 28#endif
424 29
425#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS 30#ifdef CONFIG_CPUMASK_OFFSTACK
426#if NR_CPUS <= 64 31/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
427 32 * not all bits may be allocated. */
428#define next_cpu_nr(n, src) next_cpu(n, src) 33#define nr_cpumask_bits nr_cpu_ids
429#define cpus_weight_nr(cpumask) cpus_weight(cpumask) 34#else
430#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask) 35#define nr_cpumask_bits NR_CPUS
431 36#endif
432#else /* NR_CPUS > 64 */
433
434int __next_cpu_nr(int n, const cpumask_t *srcp);
435#define next_cpu_nr(n, src) __next_cpu_nr((n), &(src))
436#define cpus_weight_nr(cpumask) __cpus_weight(&(cpumask), nr_cpu_ids)
437#define for_each_cpu_mask_nr(cpu, mask) \
438 for ((cpu) = -1; \
439 (cpu) = next_cpu_nr((cpu), (mask)), \
440 (cpu) < nr_cpu_ids; )
441
442#endif /* NR_CPUS > 64 */
443#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
444 37
445/* 38/*
446 * The following particular system cpumasks and operations manage 39 * The following particular system cpumasks and operations manage
@@ -487,12 +80,6 @@ extern const struct cpumask *const cpu_online_mask;
487extern const struct cpumask *const cpu_present_mask; 80extern const struct cpumask *const cpu_present_mask;
488extern const struct cpumask *const cpu_active_mask; 81extern const struct cpumask *const cpu_active_mask;
489 82
490/* These strip const, as traditionally they weren't const. */
491#define cpu_possible_map (*(cpumask_t *)cpu_possible_mask)
492#define cpu_online_map (*(cpumask_t *)cpu_online_mask)
493#define cpu_present_map (*(cpumask_t *)cpu_present_mask)
494#define cpu_active_map (*(cpumask_t *)cpu_active_mask)
495
496#if NR_CPUS > 1 83#if NR_CPUS > 1
497#define num_online_cpus() cpumask_weight(cpu_online_mask) 84#define num_online_cpus() cpumask_weight(cpu_online_mask)
498#define num_possible_cpus() cpumask_weight(cpu_possible_mask) 85#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
@@ -511,35 +98,6 @@ extern const struct cpumask *const cpu_active_mask;
511#define cpu_active(cpu) ((cpu) == 0) 98#define cpu_active(cpu) ((cpu) == 0)
512#endif 99#endif
513 100
514#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
515
516/* These are the new versions of the cpumask operators: passed by pointer.
517 * The older versions will be implemented in terms of these, then deleted. */
518#define cpumask_bits(maskp) ((maskp)->bits)
519
520#if NR_CPUS <= BITS_PER_LONG
521#define CPU_BITS_ALL \
522{ \
523 [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
524}
525
526#else /* NR_CPUS > BITS_PER_LONG */
527
528#define CPU_BITS_ALL \
529{ \
530 [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
531 [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
532}
533#endif /* NR_CPUS > BITS_PER_LONG */
534
535#ifdef CONFIG_CPUMASK_OFFSTACK
536/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
537 * not all bits may be allocated. */
538#define nr_cpumask_bits nr_cpu_ids
539#else
540#define nr_cpumask_bits NR_CPUS
541#endif
542
543/* verify cpu argument to cpumask_* operators */ 101/* verify cpu argument to cpumask_* operators */
544static inline unsigned int cpumask_check(unsigned int cpu) 102static inline unsigned int cpumask_check(unsigned int cpu)
545{ 103{
@@ -1100,4 +658,241 @@ void set_cpu_active(unsigned int cpu, bool active);
1100void init_cpu_present(const struct cpumask *src); 658void init_cpu_present(const struct cpumask *src);
1101void init_cpu_possible(const struct cpumask *src); 659void init_cpu_possible(const struct cpumask *src);
1102void init_cpu_online(const struct cpumask *src); 660void init_cpu_online(const struct cpumask *src);
661
662/**
663 * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
664 * @bitmap: the bitmap
665 *
666 * There are a few places where cpumask_var_t isn't appropriate and
667 * static cpumasks must be used (eg. very early boot), yet we don't
668 * expose the definition of 'struct cpumask'.
669 *
670 * This does the conversion, and can be used as a constant initializer.
671 */
672#define to_cpumask(bitmap) \
673 ((struct cpumask *)(1 ? (bitmap) \
674 : (void *)sizeof(__check_is_bitmap(bitmap))))
675
676static inline int __check_is_bitmap(const unsigned long *bitmap)
677{
678 return 1;
679}
680
681/*
682 * Special-case data structure for "single bit set only" constant CPU masks.
683 *
684 * We pre-generate all the 64 (or 32) possible bit positions, with enough
685 * padding to the left and the right, and return the constant pointer
686 * appropriately offset.
687 */
688extern const unsigned long
689 cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
690
691static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
692{
693 const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
694 p -= cpu / BITS_PER_LONG;
695 return to_cpumask(p);
696}
697
698#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
699
700#if NR_CPUS <= BITS_PER_LONG
701#define CPU_BITS_ALL \
702{ \
703 [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
704}
705
706#else /* NR_CPUS > BITS_PER_LONG */
707
708#define CPU_BITS_ALL \
709{ \
710 [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
711 [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
712}
713#endif /* NR_CPUS > BITS_PER_LONG */
714
715/*
716 *
717 * From here down, all obsolete. Use cpumask_ variants!
718 *
719 */
720#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
721/* These strip const, as traditionally they weren't const. */
722#define cpu_possible_map (*(cpumask_t *)cpu_possible_mask)
723#define cpu_online_map (*(cpumask_t *)cpu_online_mask)
724#define cpu_present_map (*(cpumask_t *)cpu_present_mask)
725#define cpu_active_map (*(cpumask_t *)cpu_active_mask)
726
727#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))
728
729#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
730
731#if NR_CPUS <= BITS_PER_LONG
732
733#define CPU_MASK_ALL \
734(cpumask_t) { { \
735 [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
736} }
737
738#else
739
740#define CPU_MASK_ALL \
741(cpumask_t) { { \
742 [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
743 [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
744} }
745
746#endif
747
748#define CPU_MASK_NONE \
749(cpumask_t) { { \
750 [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
751} }
752
753#define CPU_MASK_CPU0 \
754(cpumask_t) { { \
755 [0] = 1UL \
756} }
757
758#if NR_CPUS == 1
759#define first_cpu(src) ({ (void)(src); 0; })
760#define next_cpu(n, src) ({ (void)(src); 1; })
761#define any_online_cpu(mask) 0
762#define for_each_cpu_mask(cpu, mask) \
763 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
764#else /* NR_CPUS > 1 */
765int __first_cpu(const cpumask_t *srcp);
766int __next_cpu(int n, const cpumask_t *srcp);
767int __any_online_cpu(const cpumask_t *mask);
768
769#define first_cpu(src) __first_cpu(&(src))
770#define next_cpu(n, src) __next_cpu((n), &(src))
771#define any_online_cpu(mask) __any_online_cpu(&(mask))
772#define for_each_cpu_mask(cpu, mask) \
773 for ((cpu) = -1; \
774 (cpu) = next_cpu((cpu), (mask)), \
775 (cpu) < NR_CPUS; )
776#endif /* SMP */
777
778#if NR_CPUS <= 64
779
780#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask)
781
782#else /* NR_CPUS > 64 */
783
784int __next_cpu_nr(int n, const cpumask_t *srcp);
785#define for_each_cpu_mask_nr(cpu, mask) \
786 for ((cpu) = -1; \
787 (cpu) = __next_cpu_nr((cpu), &(mask)), \
788 (cpu) < nr_cpu_ids; )
789
790#endif /* NR_CPUS > 64 */
791
792#define cpus_addr(src) ((src).bits)
793
794#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
795static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
796{
797 set_bit(cpu, dstp->bits);
798}
799
800#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
801static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
802{
803 clear_bit(cpu, dstp->bits);
804}
805
806#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
807static inline void __cpus_setall(cpumask_t *dstp, int nbits)
808{
809 bitmap_fill(dstp->bits, nbits);
810}
811
812#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
813static inline void __cpus_clear(cpumask_t *dstp, int nbits)
814{
815 bitmap_zero(dstp->bits, nbits);
816}
817
818/* No static inline type checking - see Subtlety (1) above. */
819#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
820
821#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
822static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
823{
824 return test_and_set_bit(cpu, addr->bits);
825}
826
827#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
828static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
829 const cpumask_t *src2p, int nbits)
830{
831 return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
832}
833
834#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
835static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
836 const cpumask_t *src2p, int nbits)
837{
838 bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
839}
840
841#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
842static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
843 const cpumask_t *src2p, int nbits)
844{
845 bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
846}
847
848#define cpus_andnot(dst, src1, src2) \
849 __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
850static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
851 const cpumask_t *src2p, int nbits)
852{
853 return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
854}
855
856#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
857static inline int __cpus_equal(const cpumask_t *src1p,
858 const cpumask_t *src2p, int nbits)
859{
860 return bitmap_equal(src1p->bits, src2p->bits, nbits);
861}
862
863#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
864static inline int __cpus_intersects(const cpumask_t *src1p,
865 const cpumask_t *src2p, int nbits)
866{
867 return bitmap_intersects(src1p->bits, src2p->bits, nbits);
868}
869
870#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
871static inline int __cpus_subset(const cpumask_t *src1p,
872 const cpumask_t *src2p, int nbits)
873{
874 return bitmap_subset(src1p->bits, src2p->bits, nbits);
875}
876
877#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
878static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
879{
880 return bitmap_empty(srcp->bits, nbits);
881}
882
883#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
884static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
885{
886 return bitmap_weight(srcp->bits, nbits);
887}
888
889#define cpus_shift_left(dst, src, n) \
890 __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
891static inline void __cpus_shift_left(cpumask_t *dstp,
892 const cpumask_t *srcp, int n, int nbits)
893{
894 bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
895}
896#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
897
1103#endif /* __LINUX_CPUMASK_H */ 898#endif /* __LINUX_CPUMASK_H */
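
The cpumask.h rewrite above does not delete the old cpumask_t operations outright; it moves them behind the "From here down, all obsolete" marker, guarded by CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS, and makes the pointer-based cpumask_*() operations the primary interface, bounded by nr_cpumask_bits (nr_cpu_ids when CONFIG_CPUMASK_OFFSTACK is set) rather than NR_CPUS. A rough flavour of the two styles side by side (both helpers invented for illustration):

#include <linux/cpumask.h>

/* Obsolete style: cpumask_t values, NR_CPUS-sized operations. */
static void intersect_old(cpumask_t a, cpumask_t b)
{
	cpumask_t dst;

	cpus_and(dst, a, b);
	if (!cpus_empty(dst))
		cpu_clear(0, dst);
}

/* New style: struct cpumask pointers, nr_cpumask_bits-sized operations. */
static void intersect_new(const struct cpumask *a, const struct cpumask *b,
			  struct cpumask *dst)
{
	cpumask_and(dst, a, b);
	if (!cpumask_empty(dst))
		cpumask_clear_cpu(0, dst);
}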
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 8e9e151f811e..b78cf8194957 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -84,7 +84,6 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
84 * struct irqaction - per interrupt action descriptor 84 * struct irqaction - per interrupt action descriptor
85 * @handler: interrupt handler function 85 * @handler: interrupt handler function
86 * @flags: flags (see IRQF_* above) 86 * @flags: flags (see IRQF_* above)
87 * @mask: no comment as it is useless and about to be removed
88 * @name: name of the device 87 * @name: name of the device
89 * @dev_id: cookie to identify the device 88 * @dev_id: cookie to identify the device
90 * @next: pointer to the next irqaction for shared interrupts 89 * @next: pointer to the next irqaction for shared interrupts
@@ -97,7 +96,6 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
97struct irqaction { 96struct irqaction {
98 irq_handler_t handler; 97 irq_handler_t handler;
99 unsigned long flags; 98 unsigned long flags;
100 cpumask_t mask;
101 const char *name; 99 const char *name;
102 void *dev_id; 100 void *dev_id;
103 struct irqaction *next; 101 struct irqaction *next;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index cbf2a3b46280..848d1f20086e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1817,10 +1817,13 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
1817 return 0; 1817 return 0;
1818} 1818}
1819#endif 1819#endif
1820
1821#ifndef CONFIG_CPUMASK_OFFSTACK
1820static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) 1822static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1821{ 1823{
1822 return set_cpus_allowed_ptr(p, &new_mask); 1824 return set_cpus_allowed_ptr(p, &new_mask);
1823} 1825}
1826#endif
1824 1827
1825/* 1828/*
1826 * Architectures can set this to 1 if they have specified 1829 * Architectures can set this to 1 if they have specified
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 9e3d8af09207..39c64bae776d 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -73,15 +73,6 @@ int smp_call_function(void(*func)(void *info), void *info, int wait);
73void smp_call_function_many(const struct cpumask *mask, 73void smp_call_function_many(const struct cpumask *mask,
74 void (*func)(void *info), void *info, bool wait); 74 void (*func)(void *info), void *info, bool wait);
75 75
76/* Deprecated: Use smp_call_function_many which takes a pointer to the mask. */
77static inline int
78smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
79 int wait)
80{
81 smp_call_function_many(&mask, func, info, wait);
82 return 0;
83}
84
85void __smp_call_function_single(int cpuid, struct call_single_data *data, 76void __smp_call_function_single(int cpuid, struct call_single_data *data,
86 int wait); 77 int wait);
87 78
@@ -144,8 +135,6 @@ static inline int up_smp_call_function(void (*func)(void *), void *info)
144static inline void smp_send_reschedule(int cpu) { } 135static inline void smp_send_reschedule(int cpu) { }
145#define num_booting_cpus() 1 136#define num_booting_cpus() 1
146#define smp_prepare_boot_cpu() do {} while (0) 137#define smp_prepare_boot_cpu() do {} while (0)
147#define smp_call_function_mask(mask, func, info, wait) \
148 (up_smp_call_function(func, info))
149#define smp_call_function_many(mask, func, info, wait) \ 138#define smp_call_function_many(mask, func, info, wait) \
150 (up_smp_call_function(func, info)) 139 (up_smp_call_function(func, info))
151static inline void init_call_single_data(void) 140static inline void init_call_single_data(void)
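
With the include/linux/smp.h hunk the by-value smp_call_function_mask() shim is gone; callers keep a cpumask (typically a cpumask_var_t or one of the global masks) and hand it to smp_call_function_many() by pointer. Something along these lines, with do_work() and kick_cpus() invented for the example:

#include <linux/cpumask.h>
#include <linux/smp.h>

static void do_work(void *info)
{
	/* runs on each selected CPU other than the caller */
}

static void kick_cpus(const struct cpumask *mask)
{
	/* caller holds preemption off; the local CPU is not included */
	smp_call_function_many(mask, do_work, NULL, true);
}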
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 809b26c07090..fc0bf3edeb67 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -211,12 +211,6 @@ int arch_update_cpu_topology(void);
211#ifndef topology_core_id 211#ifndef topology_core_id
212#define topology_core_id(cpu) ((void)(cpu), 0) 212#define topology_core_id(cpu) ((void)(cpu), 0)
213#endif 213#endif
214#ifndef topology_thread_siblings
215#define topology_thread_siblings(cpu) cpumask_of_cpu(cpu)
216#endif
217#ifndef topology_core_siblings
218#define topology_core_siblings(cpu) cpumask_of_cpu(cpu)
219#endif
220#ifndef topology_thread_cpumask 214#ifndef topology_thread_cpumask
221#define topology_thread_cpumask(cpu) cpumask_of(cpu) 215#define topology_thread_cpumask(cpu) cpumask_of(cpu)
222#endif 216#endif
diff --git a/init/main.c b/init/main.c
index 76961db298f0..7449819a4805 100644
--- a/init/main.c
+++ b/init/main.c
@@ -359,11 +359,6 @@ static inline void smp_prepare_cpus(unsigned int maxcpus) { }
359 359
360#else 360#else
361 361
362#if NR_CPUS > BITS_PER_LONG
363cpumask_t cpu_mask_all __read_mostly = CPU_MASK_ALL;
364EXPORT_SYMBOL(cpu_mask_all);
365#endif
366
367/* Setup number of possible processor ids */ 362/* Setup number of possible processor ids */
368int nr_cpu_ids __read_mostly = NR_CPUS; 363int nr_cpu_ids __read_mostly = NR_CPUS;
369EXPORT_SYMBOL(nr_cpu_ids); 364EXPORT_SYMBOL(nr_cpu_ids);
diff --git a/kernel/smp.c b/kernel/smp.c
index fd47a256a24e..c9d1c7835c2f 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -347,13 +347,6 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
 	generic_exec_single(cpu, data, wait);
 }
 
-/* Deprecated: shim for archs using old arch_send_call_function_ipi API. */
-
-#ifndef arch_send_call_function_ipi_mask
-# define arch_send_call_function_ipi_mask(maskp) \
-	 arch_send_call_function_ipi(*(maskp))
-#endif
-
 /**
  * smp_call_function_many(): Run a function on a set of other CPUs.
  * @mask: The set of cpus to run on (only runs on online subset).
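
With every SMP architecture now providing arch_send_call_function_ipi_mask(), the generic shim that dereferenced the mask for the old by-value arch_send_call_function_ipi() hook is gone. A sketch of what an architecture-side implementation can look like under the new signature; the low-level helper and vector constant are hypothetical stand-ins, not any real architecture's names:

#include <linux/cpumask.h>
#include <linux/smp.h>

/* Hypothetical low-level helper and vector number; real names differ per arch. */
#define IPI_CALL_FUNC	2
static void send_ipi(int cpu, int vector) { }

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	/* Old hook: arch_send_call_function_ipi(cpumask_t mask), mask passed by value. */
	for_each_cpu(cpu, mask)
		send_ipi(cpu, IPI_CALL_FUNC);
}
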
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 6c0f6a8a22eb..411af37f4be4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1984,11 +1984,9 @@ __tracing_open(struct inode *inode, struct file *file)
 	if (current_trace)
 		*iter->trace = *current_trace;
 
-	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
 		goto fail;
 
-	cpumask_clear(iter->started);
-
 	if (current_trace && current_trace->print_max)
 		iter->tr = &max_tr;
 	else
@@ -4389,7 +4387,7 @@ __init static int tracer_alloc_buffers(void)
 	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
 		goto out_free_buffer_mask;
 
-	if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
 		goto out_free_tracing_cpumask;
 
 	/* To save memory, keep the ring buffer size to its minimum */
@@ -4400,7 +4398,6 @@ __init static int tracer_alloc_buffers(void)
 
 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
 	cpumask_copy(tracing_cpumask, cpu_all_mask);
-	cpumask_clear(tracing_reader_cpumask);
 
 	/* TODO: make the number of buffers hot pluggable with CPUS */
 	global_trace.buffer = ring_buffer_alloc(ring_buf_size,
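
Both trace.c conversions fold an alloc_cpumask_var()/cpumask_clear() pair into a single zalloc_cpumask_var(), which hands back the mask already zeroed. A minimal sketch of the pattern, with a hypothetical variable and init function:

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static cpumask_var_t seen_cpus;	/* hypothetical tracking mask */

static int init_seen_cpus(void)
{
	/*
	 * Old shape:
	 *	if (!alloc_cpumask_var(&seen_cpus, GFP_KERNEL))
	 *		return -ENOMEM;
	 *	cpumask_clear(seen_cpus);
	 */
	if (!zalloc_cpumask_var(&seen_cpus, GFP_KERNEL))
		return -ENOMEM;
	return 0;
}
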
diff --git a/mm/quicklist.c b/mm/quicklist.c
index 6eedf7e473d1..6633965bb27b 100644
--- a/mm/quicklist.c
+++ b/mm/quicklist.c
@@ -29,7 +29,6 @@ static unsigned long max_pages(unsigned long min_pages)
 	int node = numa_node_id();
 	struct zone *zones = NODE_DATA(node)->node_zones;
 	int num_cpus_on_node;
-	const struct cpumask *cpumask_on_node = cpumask_of_node(node);
 
 	node_free_pages =
 #ifdef CONFIG_ZONE_DMA
@@ -42,7 +41,7 @@ static unsigned long max_pages(unsigned long min_pages)
 
 	max = node_free_pages / FRACTION_OF_NODE_MEM;
 
-	num_cpus_on_node = cpus_weight_nr(*cpumask_on_node);
+	num_cpus_on_node = cpumask_weight(cpumask_of_node(node));
 	max /= num_cpus_on_node;
 
 	return max(max, min_pages);
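
The old code held a local pointer from cpumask_of_node() only to dereference it for cpus_weight_nr(), which expected a cpumask_t; cpumask_weight() takes the const struct cpumask pointer directly, so the local variable disappears. A hedged sketch (the helper name is hypothetical):

#include <linux/cpumask.h>
#include <linux/topology.h>

/* Hypothetical helper: number of CPUs attached to a NUMA node. */
static unsigned int cpus_on_node(int node)
{
	/* Old: cpus_weight_nr(*cpumask_of_node(node)) on the dereferenced mask. */
	return cpumask_weight(cpumask_of_node(node));
}
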
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 897bff3b7df9..034a798b0431 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -738,8 +738,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 	bool called = true;
 	struct kvm_vcpu *vcpu;
 
-	if (alloc_cpumask_var(&cpus, GFP_ATOMIC))
-		cpumask_clear(cpus);
+	zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 
 	spin_lock(&kvm->requests_lock);
 	me = smp_processor_id();
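
Here zalloc_cpumask_var() replaces the conditional alloc-then-clear and its return value is ignored: with CONFIG_CPUMASK_OFFSTACK=n the mask is an on-stack array and the call cannot fail, while with CONFIG_CPUMASK_OFFSTACK=y a failed allocation leaves cpus NULL, so a caller that ignores the return value needs a NULL-tolerant fallback path. A hedged sketch of that batching pattern; the callback, predicate, and helper are hypothetical, not this function's code:

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/gfp.h>
#include <linux/preempt.h>
#include <linux/types.h>

static void poke(void *info) { }			/* hypothetical IPI callback */
static bool needs_poke(int cpu) { return true; }	/* hypothetical predicate */

static void poke_where_needed(void)
{
	cpumask_var_t cpus;
	int cpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);	/* may leave cpus == NULL */

	preempt_disable();
	for_each_online_cpu(cpu)
		if (cpus != NULL && needs_poke(cpu))
			cpumask_set_cpu(cpu, cpus);

	if (cpus != NULL)
		smp_call_function_many(cpus, poke, NULL, true);
	else
		smp_call_function_many(cpu_online_mask, poke, NULL, true);
	preempt_enable();

	free_cpumask_var(cpus);	/* safe even if the allocation failed */
}
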