-rw-r--r--  Documentation/cgroups/cpusets.txt        2
-rw-r--r--  Documentation/cpu-hotplug.txt           22
-rw-r--r--  arch/alpha/kernel/smp.c                  2
-rw-r--r--  arch/arm/kernel/kprobes.c                2
-rw-r--r--  arch/arm/kernel/smp.c                    7
-rw-r--r--  arch/hexagon/kernel/smp.c                8
-rw-r--r--  arch/ia64/kernel/acpi.c                  2
-rw-r--r--  arch/mips/cavium-octeon/smp.c            4
-rw-r--r--  arch/mips/kernel/mips-mt-fpaff.c         2
-rw-r--r--  arch/mips/kernel/proc.c                  2
-rw-r--r--  arch/mips/kernel/smp-bmips.c             2
-rw-r--r--  arch/mips/kernel/smp.c                  27
-rw-r--r--  arch/mips/kernel/smtc.c                  2
-rw-r--r--  arch/mips/mm/c-octeon.c                  6
-rw-r--r--  arch/mips/netlogic/common/smp.c          6
-rw-r--r--  arch/mips/pmc-sierra/yosemite/smp.c      8
-rw-r--r--  arch/mips/sgi-ip27/ip27-smp.c            2
-rw-r--r--  arch/mips/sibyte/bcm1480/smp.c           7
-rw-r--r--  arch/mips/sibyte/sb1250/smp.c            7
-rw-r--r--  arch/sparc/kernel/leon_kernel.c          6
-rw-r--r--  arch/tile/kernel/setup.c                 8
-rw-r--r--  arch/um/kernel/skas/process.c            2
-rw-r--r--  arch/um/kernel/smp.c                     9
-rw-r--r--  arch/x86/xen/enlighten.c                 2
-rw-r--r--  drivers/cpufreq/db8500-cpufreq.c         2
-rw-r--r--  include/linux/cpumask.h                  6
-rw-r--r--  init/Kconfig                             4
-rw-r--r--  kernel/cpuset.c                         10
28 files changed, 79 insertions, 90 deletions
diff --git a/Documentation/cgroups/cpusets.txt b/Documentation/cgroups/cpusets.txt
index 5c51ed406d1d..cefd3d8bbd11 100644
--- a/Documentation/cgroups/cpusets.txt
+++ b/Documentation/cgroups/cpusets.txt
@@ -217,7 +217,7 @@ and name space for cpusets, with a minimum of additional kernel code.
 
 The cpus and mems files in the root (top_cpuset) cpuset are
 read-only. The cpus file automatically tracks the value of
-cpu_online_map using a CPU hotplug notifier, and the mems file
+cpu_online_mask using a CPU hotplug notifier, and the mems file
 automatically tracks the value of node_states[N_HIGH_MEMORY]--i.e.,
 nodes with memory--using the cpuset_track_online_nodes() hook.
 
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index a20bfd415e41..66ef8f35613d 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -47,7 +47,7 @@ maxcpus=n Restrict boot time cpus to n. Say if you have 4 cpus, using
 			other cpus later online, read FAQ's for more info.
 
 additional_cpus=n (*)	Use this to limit hotpluggable cpus. This option sets
-			cpu_possible_map = cpu_present_map + additional_cpus
+			cpu_possible_mask = cpu_present_mask + additional_cpus
 
 cede_offline={"off","on"}  Use this option to disable/enable putting offlined
 			    processors to an extended H_CEDE state on
@@ -64,11 +64,11 @@ should only rely on this to count the # of cpus, but *MUST* not rely
 on the apicid values in those tables for disabled apics. In the event
 BIOS doesn't mark such hot-pluggable cpus as disabled entries, one could
 use this parameter "additional_cpus=x" to represent those cpus in the
-cpu_possible_map.
+cpu_possible_mask.
 
 possible_cpus=n		[s390,x86_64] use this to set hotpluggable cpus.
 			This option sets possible_cpus bits in
-			cpu_possible_map. Thus keeping the numbers of bits set
+			cpu_possible_mask. Thus keeping the numbers of bits set
 			constant even if the machine gets rebooted.
 
 CPU maps and such
@@ -76,7 +76,7 @@ CPU maps and such
 [More on cpumaps and primitive to manipulate, please check
 include/linux/cpumask.h that has more descriptive text.]
 
-cpu_possible_map: Bitmap of possible CPUs that can ever be available in the
+cpu_possible_mask: Bitmap of possible CPUs that can ever be available in the
 system. This is used to allocate some boot time memory for per_cpu variables
 that aren't designed to grow/shrink as CPUs are made available or removed.
 Once set during boot time discovery phase, the map is static, i.e no bits
@@ -84,13 +84,13 @@ are added or removed anytime. Trimming it accurately for your system needs
 upfront can save some boot time memory. See below for how we use heuristics
 in x86_64 case to keep this under check.
 
-cpu_online_map: Bitmap of all CPUs currently online. Its set in __cpu_up()
+cpu_online_mask: Bitmap of all CPUs currently online. Its set in __cpu_up()
 after a cpu is available for kernel scheduling and ready to receive
 interrupts from devices. Its cleared when a cpu is brought down using
 __cpu_disable(), before which all OS services including interrupts are
 migrated to another target CPU.
 
-cpu_present_map: Bitmap of CPUs currently present in the system. Not all
+cpu_present_mask: Bitmap of CPUs currently present in the system. Not all
 of them may be online. When physical hotplug is processed by the relevant
 subsystem (e.g ACPI) can change and new bit either be added or removed
 from the map depending on the event is hot-add/hot-remove. There are currently
@@ -99,22 +99,22 @@ at which time hotplug is disabled.
 
 You really dont need to manipulate any of the system cpu maps. They should
 be read-only for most use. When setting up per-cpu resources almost always use
-cpu_possible_map/for_each_possible_cpu() to iterate.
+cpu_possible_mask/for_each_possible_cpu() to iterate.
 
 Never use anything other than cpumask_t to represent bitmap of CPUs.
 
 	#include <linux/cpumask.h>
 
-	for_each_possible_cpu     - Iterate over cpu_possible_map
-	for_each_online_cpu       - Iterate over cpu_online_map
-	for_each_present_cpu      - Iterate over cpu_present_map
+	for_each_possible_cpu     - Iterate over cpu_possible_mask
+	for_each_online_cpu       - Iterate over cpu_online_mask
+	for_each_present_cpu      - Iterate over cpu_present_mask
 	for_each_cpu_mask(x,mask) - Iterate over some random collection of cpu mask.
 
 	#include <linux/cpu.h>
 	get_online_cpus() and put_online_cpus():
 
 The above calls are used to inhibit cpu hotplug operations. While the
-cpu_hotplug.refcount is non zero, the cpu_online_map will not change.
+cpu_hotplug.refcount is non zero, the cpu_online_mask will not change.
 If you merely need to avoid cpus going away, you could also use
 preempt_disable() and preempt_enable() for those sections.
 Just remember the critical section cannot call any
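
To make the documented interfaces concrete, here is a minimal sketch
(illustrative only, not part of this patch; report_online_cpus() is a
hypothetical example function) of walking cpu_online_mask under the
hotplug guard described above:

	#include <linux/cpu.h>
	#include <linux/cpumask.h>
	#include <linux/printk.h>

	/* Walk the online CPUs while hotplug is held off, so that
	 * cpu_online_mask cannot change underneath the loop.
	 */
	static void report_online_cpus(void)
	{
		unsigned int cpu;

		get_online_cpus();
		for_each_online_cpu(cpu)
			pr_info("CPU%u is online\n", cpu);
		put_online_cpus();
	}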
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 4087a569b43b..50d438db1f6b 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -450,7 +450,7 @@ setup_smp(void)
 		smp_num_probed = 1;
 	}
 
-	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
+	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
 	       smp_num_probed, cpumask_bits(cpu_present_mask)[0]);
 }
 
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index ab1869dac97a..4dd41fc9e235 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -152,7 +152,7 @@ int __kprobes __arch_disarm_kprobe(void *p)
 
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
-	stop_machine(__arch_disarm_kprobe, p, &cpu_online_map);
+	stop_machine(__arch_disarm_kprobe, p, cpu_online_mask);
 }
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 2cee7d1eb958..addbbe8028c2 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -349,7 +349,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	 * re-initialize the map in platform_smp_prepare_cpus() if
 	 * present != possible (e.g. physical hotplug).
 	 */
-	init_cpu_present(&cpu_possible_map);
+	init_cpu_present(cpu_possible_mask);
 
 	/*
 	 * Initialise the SCU if there are more than one CPU
@@ -581,8 +581,9 @@ void smp_send_stop(void)
 	unsigned long timeout;
 
 	if (num_online_cpus() > 1) {
-		cpumask_t mask = cpu_online_map;
-		cpu_clear(smp_processor_id(), mask);
+		struct cpumask mask;
+		cpumask_copy(&mask, cpu_online_mask);
+		cpumask_clear_cpu(smp_processor_id(), &mask);
 
 		smp_cross_call(&mask, IPI_CPU_STOP);
 	}
diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c
index 15d1fd22bbc5..9b44a9e2d05a 100644
--- a/arch/hexagon/kernel/smp.c
+++ b/arch/hexagon/kernel/smp.c
@@ -35,7 +35,7 @@
 #define BASE_IPI_IRQ 26
 
 /*
- * cpu_possible_map needs to be filled out prior to setup_per_cpu_areas
+ * cpu_possible_mask needs to be filled out prior to setup_per_cpu_areas
  * (which is prior to any of our smp_prepare_cpu crap), in order to set
  * up the... per_cpu areas.
  */
@@ -208,7 +208,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	stack_start = ((void *) thread) + THREAD_SIZE;
 	__vmstart(start_secondary, stack_start);
 
-	while (!cpu_isset(cpu, cpu_online_map))
+	while (!cpu_online(cpu))
 		barrier();
 
 	return 0;
@@ -229,7 +229,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 
 	/* Right now, let's just fake it. */
 	for (i = 0; i < max_cpus; i++)
-		cpu_set(i, cpu_present_map);
+		set_cpu_present(i, true);
 
 	/* Also need to register the interrupts for IPI */
 	if (max_cpus > 1)
@@ -269,5 +269,5 @@ void smp_start_cpus(void)
 	int i;
 
 	for (i = 0; i < NR_CPUS; i++)
-		cpu_set(i, cpu_possible_map);
+		set_cpu_possible(i, true);
 }
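
The hexagon hunks above show the pattern repeated throughout this series:
open-coded cpu_set()/cpu_clear()/cpu_isset() on the global maps become
calls to the accessor helpers. A minimal sketch of the correspondence
(hypothetical example functions, not part of this patch):

	#include <linux/cpumask.h>
	#include <linux/kernel.h>

	static void __init mark_cpu_example(unsigned int cpu)
	{
		set_cpu_possible(cpu, true);	/* was cpu_set(cpu, cpu_possible_map) */
		set_cpu_present(cpu, true);	/* was cpu_set(cpu, cpu_present_map)  */
	}

	static void offline_cpu_example(unsigned int cpu)
	{
		set_cpu_online(cpu, false);	/* was cpu_clear(cpu, cpu_online_map) */
		WARN_ON(cpu_online(cpu));	/* was cpu_isset(cpu, cpu_online_map) */
	}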
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index ac795d311f44..6f38b6120d96 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -839,7 +839,7 @@ static __init int setup_additional_cpus(char *s)
 early_param("additional_cpus", setup_additional_cpus);
 
 /*
- * cpu_possible_map should be static, it cannot change as CPUs
+ * cpu_possible_mask should be static, it cannot change as CPUs
  * are onlined, or offlined. The reason is per-cpu data-structures
  * are allocated by some modules at init time, and dont expect to
  * do this dynamically on cpu arrival/departure.
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index c3e2b85c3b02..97e7ce9b50ed 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -78,7 +78,7 @@ static inline void octeon_send_ipi_mask(const struct cpumask *mask,
 }
 
 /**
- * Detect available CPUs, populate cpu_possible_map
+ * Detect available CPUs, populate cpu_possible_mask
  */
 static void octeon_smp_hotplug_setup(void)
 {
@@ -268,7 +268,7 @@ static int octeon_cpu_disable(void)
 
 	spin_lock(&smp_reserve_lock);
 
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 	cpu_clear(cpu, cpu_callin_map);
 	local_irq_disable();
 	fixup_irqs();
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 802e6160f37e..33f63bab478a 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -173,7 +173,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
 	if (retval)
 		goto out_unlock;
 
-	cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
+	cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
 
 out_unlock:
 	read_unlock(&tasklist_lock);
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index e309665b6c81..f8b2c592514d 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -25,7 +25,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	int i;
 
 #ifdef CONFIG_SMP
-	if (!cpu_isset(n, cpu_online_map))
+	if (!cpu_online(n))
 		return 0;
 #endif
 
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index ca673569fd24..3046e2986006 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -317,7 +317,7 @@ static int bmips_cpu_disable(void)
 
 	pr_info("SMP: CPU%d is offline\n", cpu);
 
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 	cpu_clear(cpu, cpu_callin_map);
 
 	local_flush_tlb_all();
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 9c1cce9de35f..ba9376bf52a1 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -148,7 +148,7 @@ static void stop_this_cpu(void *dummy)
 	/*
 	 * Remove this CPU:
 	 */
-	cpu_clear(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), false);
 	for (;;) {
 		if (cpu_wait)
 			(*cpu_wait)();	/* Wait if available. */
@@ -174,7 +174,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	mp_ops->prepare_cpus(max_cpus);
 	set_cpu_sibling_map(0);
 #ifndef CONFIG_HOTPLUG_CPU
-	init_cpu_present(&cpu_possible_map);
+	init_cpu_present(cpu_possible_mask);
 #endif
 }
 
@@ -248,7 +248,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	while (!cpu_isset(cpu, cpu_callin_map))
 		udelay(100);
 
-	cpu_set(cpu, cpu_online_map);
+	set_cpu_online(cpu, true);
 
 	return 0;
 }
@@ -320,13 +320,12 @@ void flush_tlb_mm(struct mm_struct *mm)
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
 		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
 	} else {
-		cpumask_t mask = cpu_online_map;
 		unsigned int cpu;
 
-		cpu_clear(smp_processor_id(), mask);
-		for_each_cpu_mask(cpu, mask)
-			if (cpu_context(cpu, mm))
+		for_each_online_cpu(cpu) {
+			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
 				cpu_context(cpu, mm) = 0;
+		}
 	}
 	local_flush_tlb_mm(mm);
 
@@ -360,13 +359,12 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 
 		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
 	} else {
-		cpumask_t mask = cpu_online_map;
 		unsigned int cpu;
 
-		cpu_clear(smp_processor_id(), mask);
-		for_each_cpu_mask(cpu, mask)
-			if (cpu_context(cpu, mm))
+		for_each_online_cpu(cpu) {
+			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
 				cpu_context(cpu, mm) = 0;
+		}
 	}
 	local_flush_tlb_range(vma, start, end);
 	preempt_enable();
@@ -407,13 +405,12 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 
 		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
 	} else {
-		cpumask_t mask = cpu_online_map;
 		unsigned int cpu;
 
-		cpu_clear(smp_processor_id(), mask);
-		for_each_cpu_mask(cpu, mask)
-			if (cpu_context(cpu, vma->vm_mm))
+		for_each_online_cpu(cpu) {
+			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
 				cpu_context(cpu, vma->vm_mm) = 0;
+		}
 	}
 	local_flush_tlb_page(vma, page);
 	preempt_enable();
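
The three flush_tlb_* rewrites above are instances of one pattern: instead
of copying cpu_online_map into an on-stack cpumask_t (NR_CPUS bits of
stack) and clearing the local CPU, the new code walks cpu_online_mask in
place and skips the local CPU inside the loop. A schematic of the
transformation (clear_remote_contexts() is a hypothetical helper; it
assumes the MIPS cpu_context() macro from asm/mmu_context.h and a caller
that has already disabled preemption, as these paths do):

	#include <linux/cpumask.h>
	#include <linux/mm_types.h>
	#include <linux/smp.h>
	#include <asm/mmu_context.h>

	static void clear_remote_contexts(struct mm_struct *mm)
	{
		unsigned int cpu;

		/* No private mask copy needed: iterate the global mask
		 * directly and filter out the executing CPU.
		 */
		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
	}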
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index c4f75bbc0bd6..f5dd38f1d015 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -291,7 +291,7 @@ static void smtc_configure_tlb(void)
  * possibly leave some TCs/VPEs as "slave" processors.
  *
  * Use c0_MVPConf0 to find out how many TCs are available, setting up
- * cpu_possible_map and the logical/physical mappings.
+ * cpu_possible_mask and the logical/physical mappings.
  */
 
 int __init smtc_build_cpu_map(int start_cpu_slot)
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 1f9ca07f53c8..47037ec5589b 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -80,9 +80,9 @@ static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
 	if (vma)
 		mask = *mm_cpumask(vma->vm_mm);
 	else
-		mask = cpu_online_map;
-	cpu_clear(cpu, mask);
-	for_each_cpu_mask(cpu, mask)
+		mask = *cpu_online_mask;
+	cpumask_clear_cpu(cpu, &mask);
+	for_each_cpu(cpu, &mask)
 		octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);
 
 	preempt_enable();
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index db17f49886c2..fab316de57e9 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -165,7 +165,7 @@ void __init nlm_smp_setup(void)
 	cpu_set(boot_cpu, phys_cpu_present_map);
 	__cpu_number_map[boot_cpu] = 0;
 	__cpu_logical_map[0] = boot_cpu;
-	cpu_set(0, cpu_possible_map);
+	set_cpu_possible(0, true);
 
 	num_cpus = 1;
 	for (i = 0; i < NR_CPUS; i++) {
@@ -177,14 +177,14 @@ void __init nlm_smp_setup(void)
 			cpu_set(i, phys_cpu_present_map);
 			__cpu_number_map[i] = num_cpus;
 			__cpu_logical_map[num_cpus] = i;
-			cpu_set(num_cpus, cpu_possible_map);
+			set_cpu_possible(num_cpus, true);
 			++num_cpus;
 		}
 	}
 
 	pr_info("Phys CPU present map: %lx, possible map %lx\n",
 		(unsigned long)phys_cpu_present_map.bits[0],
-		(unsigned long)cpu_possible_map.bits[0]);
+		(unsigned long)cpumask_bits(cpu_possible_mask)[0]);
 
 	pr_info("Detected %i Slave CPU(s)\n", num_cpus);
 	nlm_set_nmi_handler(nlm_boot_secondary_cpus);
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c
index 2608752898c0..b71fae231049 100644
--- a/arch/mips/pmc-sierra/yosemite/smp.c
+++ b/arch/mips/pmc-sierra/yosemite/smp.c
@@ -146,7 +146,7 @@ static void __cpuinit yos_boot_secondary(int cpu, struct task_struct *idle)
 }
 
 /*
- * Detect available CPUs, populate cpu_possible_map before smp_init
+ * Detect available CPUs, populate cpu_possible_mask before smp_init
  *
  * We don't want to start the secondary CPU yet nor do we have a nice probing
  * feature in PMON so we just assume presence of the secondary core.
@@ -155,10 +155,10 @@ static void __init yos_smp_setup(void)
 {
 	int i;
 
-	cpus_clear(cpu_possible_map);
+	init_cpu_possible(cpu_none_mask);
 
 	for (i = 0; i < 2; i++) {
-		cpu_set(i, cpu_possible_map);
+		set_cpu_possible(i, true);
 		__cpu_number_map[i] = i;
 		__cpu_logical_map[i] = i;
 	}
@@ -169,7 +169,7 @@ static void __init yos_prepare_cpus(unsigned int max_cpus)
 	/*
 	 * Be paranoid. Enable the IPI only if we're really about to go SMP.
 	 */
-	if (cpus_weight(cpu_possible_map))
+	if (num_possible_cpus())
 		set_c0_status(STATUSF_IP5);
 }
 
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index c6851df9ab74..735b43bf8f82 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -76,7 +76,7 @@ static int do_cpumask(cnodeid_t cnode, nasid_t nasid, int highest)
 		/* Only let it join in if it's marked enabled */
 		if ((acpu->cpu_info.flags & KLINFO_ENABLE) &&
 		    (tot_cpus_found != NR_CPUS)) {
-			cpu_set(cpuid, cpu_possible_map);
+			set_cpu_possible(cpuid, true);
 			alloc_cpupda(cpuid, tot_cpus_found);
 			cpus_found++;
 			tot_cpus_found++;
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index d667875be564..de88e22694a0 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -138,7 +138,7 @@ static void __cpuinit bcm1480_boot_secondary(int cpu, struct task_struct *idle)
 
 /*
  * Use CFE to find out how many CPUs are available, setting up
- * cpu_possible_map and the logical/physical mappings.
+ * cpu_possible_mask and the logical/physical mappings.
  * XXXKW will the boot CPU ever not be physical 0?
  *
  * Common setup before any secondaries are started
@@ -147,14 +147,13 @@ static void __init bcm1480_smp_setup(void)
 {
 	int i, num;
 
-	cpus_clear(cpu_possible_map);
-	cpu_set(0, cpu_possible_map);
+	init_cpu_possible(cpumask_of(0));
 	__cpu_number_map[0] = 0;
 	__cpu_logical_map[0] = 0;
 
 	for (i = 1, num = 0; i < NR_CPUS; i++) {
 		if (cfe_cpu_stop(i) == 0) {
-			cpu_set(i, cpu_possible_map);
+			set_cpu_possible(i, true);
 			__cpu_number_map[i] = ++num;
 			__cpu_logical_map[num] = i;
 		}
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index 38e7f6bd7922..285cfef4ebc0 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -126,7 +126,7 @@ static void __cpuinit sb1250_boot_secondary(int cpu, struct task_struct *idle)
 
 /*
  * Use CFE to find out how many CPUs are available, setting up
- * cpu_possible_map and the logical/physical mappings.
+ * cpu_possible_mask and the logical/physical mappings.
  * XXXKW will the boot CPU ever not be physical 0?
  *
  * Common setup before any secondaries are started
@@ -135,14 +135,13 @@ static void __init sb1250_smp_setup(void)
 {
 	int i, num;
 
-	cpus_clear(cpu_possible_map);
-	cpu_set(0, cpu_possible_map);
+	init_cpu_possible(cpumask_of(0));
 	__cpu_number_map[0] = 0;
 	__cpu_logical_map[0] = 0;
 
 	for (i = 1, num = 0; i < NR_CPUS; i++) {
 		if (cfe_cpu_stop(i) == 0) {
-			cpu_set(i, cpu_possible_map);
+			set_cpu_possible(i, true);
 			__cpu_number_map[i] = ++num;
 			__cpu_logical_map[num] = i;
 		}
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index a19c8a063683..35e43673c453 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -104,11 +104,11 @@ static int irq_choose_cpu(const struct cpumask *affinity)
 {
 	cpumask_t mask;
 
-	cpus_and(mask, cpu_online_map, *affinity);
-	if (cpus_equal(mask, cpu_online_map) || cpus_empty(mask))
+	cpumask_and(&mask, cpu_online_mask, affinity);
+	if (cpumask_equal(&mask, cpu_online_mask) || cpumask_empty(&mask))
 		return boot_cpu_id;
 	else
-		return first_cpu(mask);
+		return cpumask_first(&mask);
 }
 #else
 #define irq_choose_cpu(affinity) boot_cpu_id
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 5f85d8b34dbb..92a94f4920ad 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1100,7 +1100,7 @@ EXPORT_SYMBOL(hash_for_home_map);
 
 /*
  * cpu_cacheable_map lists all the cpus whose caches the hypervisor can
- * flush on our behalf. It is set to cpu_possible_map OR'ed with
+ * flush on our behalf. It is set to cpu_possible_mask OR'ed with
  * hash_for_home_map, and it is what should be passed to
  * hv_flush_remote() to flush all caches. Note that if there are
  * dedicated hypervisor driver tiles that have authorized use of their
@@ -1186,7 +1186,7 @@ static void __init setup_cpu_maps(void)
 			      sizeof(cpu_lotar_map));
 	if (rc < 0) {
 		pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n");
-		cpu_lotar_map = cpu_possible_map;
+		cpu_lotar_map = *cpu_possible_mask;
 	}
 
 #if CHIP_HAS_CBOX_HOME_MAP()
@@ -1196,9 +1196,9 @@ static void __init setup_cpu_maps(void)
 			      sizeof(hash_for_home_map));
 	if (rc < 0)
 		early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
-	cpumask_or(&cpu_cacheable_map, &cpu_possible_map, &hash_for_home_map);
+	cpumask_or(&cpu_cacheable_map, cpu_possible_mask, &hash_for_home_map);
 #else
-	cpu_cacheable_map = cpu_possible_map;
+	cpu_cacheable_map = *cpu_possible_mask;
 #endif
 }
 
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index 2e9852c0d487..0a9e57e7446b 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -41,7 +41,7 @@ static int __init start_kernel_proc(void *unused)
 	cpu_tasks[0].pid = pid;
 	cpu_tasks[0].task = current;
 #ifdef CONFIG_SMP
-	cpu_online_map = cpumask_of_cpu(0);
+	init_cpu_online(get_cpu_mask(0));
 #endif
 	start_kernel();
 	return 0;
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
index 155206a66908..6f588e160fb0 100644
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -76,7 +76,7 @@ static int idle_proc(void *cpup)
 		cpu_relax();
 
 	notify_cpu_starting(cpu);
-	cpu_set(cpu, cpu_online_map);
+	set_cpu_online(cpu, true);
 	default_idle();
 	return 0;
 }
@@ -110,8 +110,7 @@ void smp_prepare_cpus(unsigned int maxcpus)
 	for (i = 0; i < ncpus; ++i)
 		set_cpu_possible(i, true);
 
-	cpu_clear(me, cpu_online_map);
-	cpu_set(me, cpu_online_map);
+	set_cpu_online(me, true);
 	cpu_set(me, cpu_callin_map);
 
 	err = os_pipe(cpu_data[me].ipi_pipe, 1, 1);
@@ -138,13 +137,13 @@ void smp_prepare_cpus(unsigned int maxcpus)
 
 void smp_prepare_boot_cpu(void)
 {
-	cpu_set(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), true);
 }
 
 int __cpu_up(unsigned int cpu)
 {
 	cpu_set(cpu, smp_commenced_mask);
-	while (!cpu_isset(cpu, cpu_online_map))
+	while (!cpu_online(cpu))
 		mb();
 	return 0;
 }
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index b132ade26f77..4f51bebac02c 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -967,7 +967,7 @@ void xen_setup_shared_info(void)
 	xen_setup_mfn_list_list();
 }
 
-/* This is called once we have the cpu_possible_map */
+/* This is called once we have the cpu_possible_mask */
 void xen_setup_vcpu_info_placement(void)
 {
 	int cpu;
diff --git a/drivers/cpufreq/db8500-cpufreq.c b/drivers/cpufreq/db8500-cpufreq.c
index a22ffa5bff9f..0bf1b8910eeb 100644
--- a/drivers/cpufreq/db8500-cpufreq.c
+++ b/drivers/cpufreq/db8500-cpufreq.c
@@ -142,7 +142,7 @@ static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
 	policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */
 
 	/* policy sharing between dual CPUs */
-	cpumask_copy(policy->cpus, &cpu_present_map);
+	cpumask_copy(policy->cpus, cpu_present_mask);
 
 	policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
 
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 1ffdb9856bb9..a2c819d3c96e 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -764,12 +764,6 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
  *
  */
 #ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-/* These strip const, as traditionally they weren't const. */
-#define cpu_possible_map	(*(cpumask_t *)cpu_possible_mask)
-#define cpu_online_map		(*(cpumask_t *)cpu_online_mask)
-#define cpu_present_map	(*(cpumask_t *)cpu_present_mask)
-#define cpu_active_map		(*(cpumask_t *)cpu_active_mask)
-
 #define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))
 
 #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
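
The macros removed here existed solely to let legacy code write through the
global masks by casting away const. With them gone, readers hold the const
pointer and any mutation happens in a private copy. A minimal sketch of the
intended style (build_peer_mask() is a hypothetical example, not from this
patch):

	#include <linux/cpumask.h>
	#include <linux/errno.h>
	#include <linux/gfp.h>

	/* Compute "all online CPUs except @self" without ever writing
	 * to the read-only global mask.
	 */
	static int build_peer_mask(unsigned int self, cpumask_var_t *out)
	{
		if (!zalloc_cpumask_var(out, GFP_KERNEL))
			return -ENOMEM;

		cpumask_copy(*out, cpu_online_mask);	/* const source */
		cpumask_clear_cpu(self, *out);		/* private copy */
		return 0;
	}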
diff --git a/init/Kconfig b/init/Kconfig
index 72f33faca44f..6cfd71d06463 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1414,8 +1414,8 @@ endif # MODULES
 config INIT_ALL_POSSIBLE
 	bool
 	help
-	  Back when each arch used to define their own cpu_online_map and
-	  cpu_possible_map, some of them chose to initialize cpu_possible_map
+	  Back when each arch used to define their own cpu_online_mask and
+	  cpu_possible_mask, some of them chose to initialize cpu_possible_mask
 	  with all 1s, and others with all 0s. When they were centralised,
 	  it was better to provide this option than to break all the archs
 	  and have several arch maintainers pursuing me down dark alleys.
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index b96ad75b7e64..14f7070b4ba2 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -270,11 +270,11 @@ static struct file_system_type cpuset_fs_type = {
  * are online. If none are online, walk up the cpuset hierarchy
  * until we find one that does have some online cpus. If we get
  * all the way to the top and still haven't found any online cpus,
- * return cpu_online_map. Or if passed a NULL cs from an exit'ing
- * task, return cpu_online_map.
+ * return cpu_online_mask. Or if passed a NULL cs from an exit'ing
+ * task, return cpu_online_mask.
  *
  * One way or another, we guarantee to return some non-empty subset
- * of cpu_online_map.
+ * of cpu_online_mask.
  *
  * Call with callback_mutex held.
  */
@@ -867,7 +867,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 	int retval;
 	int is_load_balanced;
 
-	/* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */
+	/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
 	if (cs == &top_cpuset)
 		return -EACCES;
 
@@ -2149,7 +2149,7 @@ void __init cpuset_init_smp(void)
  *
  * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
  * attached to the specified @tsk. Guaranteed to return some non-empty
- * subset of cpu_online_map, even if this means going outside the
+ * subset of cpu_online_mask, even if this means going outside the
  * tasks cpuset.
  **/
 