Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/kernel/kprobes.c            |  4
-rw-r--r--  arch/arm/kernel/smp.c                |  7
-rw-r--r--  arch/hexagon/kernel/smp.c            |  8
-rw-r--r--  arch/mips/cavium-octeon/smp.c        |  2
-rw-r--r--  arch/mips/kernel/mips-mt-fpaff.c     |  2
-rw-r--r--  arch/mips/kernel/proc.c              |  2
-rw-r--r--  arch/mips/kernel/smp-bmips.c         |  2
-rw-r--r--  arch/mips/kernel/smp.c               | 27
-rw-r--r--  arch/mips/kernel/smtc.c              |  2
-rw-r--r--  arch/mips/mm/c-octeon.c              |  6
-rw-r--r--  arch/mips/netlogic/common/smp.c      |  6
-rw-r--r--  arch/mips/pmc-sierra/yosemite/smp.c  |  6
-rw-r--r--  arch/mips/sgi-ip27/ip27-smp.c        |  2
-rw-r--r--  arch/mips/sibyte/bcm1480/smp.c       |  5
-rw-r--r--  arch/mips/sibyte/sb1250/smp.c        |  7
-rw-r--r--  arch/sparc/kernel/leon_kernel.c      |  6
-rw-r--r--  arch/tile/kernel/setup.c             |  6
-rw-r--r--  arch/um/kernel/skas/process.c        |  2
-rw-r--r--  arch/um/kernel/smp.c                 |  9
19 files changed, 53 insertions, 58 deletions
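
Taken together, these hunks are a mechanical conversion away from the old writable cpumask_t globals (cpu_online_map, cpu_possible_map, cpu_present_map) to the const struct cpumask pointers (cpu_online_mask, cpu_possible_mask) and their accessor helpers (set_cpu_online(), set_cpu_possible(), init_cpu_present(), cpumask_copy(), and friends). A minimal sketch of the before/after idioms follows; it is not taken from any file in this patch, and the function name and pr_info() are illustrative only.

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/printk.h>

/* Illustrative helper, not part of this patch. */
static void example_mark_and_signal(unsigned int ncpus)
{
        struct cpumask targets;
        unsigned int cpu;

        /* old: cpu_set(cpu, cpu_possible_map); */
        for (cpu = 0; cpu < ncpus; cpu++)
                set_cpu_possible(cpu, true);

        /* old: cpumask_t m = cpu_online_map; cpu_clear(me, m); */
        cpumask_copy(&targets, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &targets);

        /* old: for_each_cpu_mask(cpu, m) ... */
        for_each_cpu(cpu, &targets)
                pr_info("would signal CPU%u\n", cpu);
}
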
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 129c1163248b..a79e5c75a96e 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -127,7 +127,7 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
 		flush_insns(addr, sizeof(u16));
 	} else if (addr & 2) {
 		/* A 32-bit instruction spanning two words needs special care */
-		stop_machine(set_t32_breakpoint, (void *)addr, &cpu_online_map);
+		stop_machine(set_t32_breakpoint, (void *)addr, cpu_online_mask);
 	} else {
 		/* Word aligned 32-bit instruction can be written atomically */
 		u32 bkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION;
@@ -190,7 +190,7 @@ int __kprobes __arch_disarm_kprobe(void *p)
 
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
-	stop_machine(__arch_disarm_kprobe, p, &cpu_online_map);
+	stop_machine(__arch_disarm_kprobe, p, cpu_online_mask);
 }
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 8f8cce2c46c4..9a4bdde909ce 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -354,7 +354,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	 * re-initialize the map in platform_smp_prepare_cpus() if
 	 * present != possible (e.g. physical hotplug).
 	 */
-	init_cpu_present(&cpu_possible_map);
+	init_cpu_present(cpu_possible_mask);
 
 	/*
 	 * Initialise the SCU if there are more than one CPU
@@ -586,8 +586,9 @@ void smp_send_stop(void)
 	unsigned long timeout;
 
 	if (num_online_cpus() > 1) {
-		cpumask_t mask = cpu_online_map;
-		cpu_clear(smp_processor_id(), mask);
+		struct cpumask mask;
+		cpumask_copy(&mask, cpu_online_mask);
+		cpumask_clear_cpu(smp_processor_id(), &mask);
 
 		smp_cross_call(&mask, IPI_CPU_STOP);
 	}
diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c
index 15d1fd22bbc5..9b44a9e2d05a 100644
--- a/arch/hexagon/kernel/smp.c
+++ b/arch/hexagon/kernel/smp.c
@@ -35,7 +35,7 @@
 #define BASE_IPI_IRQ 26
 
 /*
- * cpu_possible_map needs to be filled out prior to setup_per_cpu_areas
+ * cpu_possible_mask needs to be filled out prior to setup_per_cpu_areas
  * (which is prior to any of our smp_prepare_cpu crap), in order to set
  * up the... per_cpu areas.
  */
@@ -208,7 +208,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	stack_start = ((void *) thread) + THREAD_SIZE;
 	__vmstart(start_secondary, stack_start);
 
-	while (!cpu_isset(cpu, cpu_online_map))
+	while (!cpu_online(cpu))
 		barrier();
 
 	return 0;
@@ -229,7 +229,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 
 	/* Right now, let's just fake it. */
 	for (i = 0; i < max_cpus; i++)
-		cpu_set(i, cpu_present_map);
+		set_cpu_present(i, true);
 
 	/* Also need to register the interrupts for IPI */
 	if (max_cpus > 1)
@@ -269,5 +269,5 @@ void smp_start_cpus(void)
 	int i;
 
 	for (i = 0; i < NR_CPUS; i++)
-		cpu_set(i, cpu_possible_map);
+		set_cpu_possible(i, true);
 }
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index c3e2b85c3b02..ef56d8a0215f 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -268,7 +268,7 @@ static int octeon_cpu_disable(void)
 
 	spin_lock(&smp_reserve_lock);
 
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 	cpu_clear(cpu, cpu_callin_map);
 	local_irq_disable();
 	fixup_irqs();
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 802e6160f37e..33f63bab478a 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -173,7 +173,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
 	if (retval)
 		goto out_unlock;
 
-	cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
+	cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
 
 out_unlock:
 	read_unlock(&tasklist_lock);
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index e309665b6c81..f8b2c592514d 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -25,7 +25,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	int i;
 
 #ifdef CONFIG_SMP
-	if (!cpu_isset(n, cpu_online_map))
+	if (!cpu_online(n))
 		return 0;
 #endif
 
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index ca673569fd24..3046e2986006 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -317,7 +317,7 @@ static int bmips_cpu_disable(void)
 
 	pr_info("SMP: CPU%d is offline\n", cpu);
 
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 	cpu_clear(cpu, cpu_callin_map);
 
 	local_flush_tlb_all();
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 9c1cce9de35f..ba9376bf52a1 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -148,7 +148,7 @@ static void stop_this_cpu(void *dummy)
 	/*
 	 * Remove this CPU:
 	 */
-	cpu_clear(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), false);
 	for (;;) {
 		if (cpu_wait)
 			(*cpu_wait)();	/* Wait if available. */
@@ -174,7 +174,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	mp_ops->prepare_cpus(max_cpus);
 	set_cpu_sibling_map(0);
 #ifndef CONFIG_HOTPLUG_CPU
-	init_cpu_present(&cpu_possible_map);
+	init_cpu_present(cpu_possible_mask);
 #endif
 }
 
@@ -248,7 +248,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	while (!cpu_isset(cpu, cpu_callin_map))
 		udelay(100);
 
-	cpu_set(cpu, cpu_online_map);
+	set_cpu_online(cpu, true);
 
 	return 0;
 }
@@ -320,13 +320,12 @@ void flush_tlb_mm(struct mm_struct *mm)
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
 		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
 	} else {
-		cpumask_t mask = cpu_online_map;
 		unsigned int cpu;
 
-		cpu_clear(smp_processor_id(), mask);
-		for_each_cpu_mask(cpu, mask)
-			if (cpu_context(cpu, mm))
+		for_each_online_cpu(cpu) {
+			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
 				cpu_context(cpu, mm) = 0;
+		}
 	}
 	local_flush_tlb_mm(mm);
 
@@ -360,13 +359,12 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
 
 		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
 	} else {
-		cpumask_t mask = cpu_online_map;
 		unsigned int cpu;
 
-		cpu_clear(smp_processor_id(), mask);
-		for_each_cpu_mask(cpu, mask)
-			if (cpu_context(cpu, mm))
+		for_each_online_cpu(cpu) {
+			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
 				cpu_context(cpu, mm) = 0;
+		}
 	}
 	local_flush_tlb_range(vma, start, end);
 	preempt_enable();
@@ -407,13 +405,12 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 
 		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
 	} else {
-		cpumask_t mask = cpu_online_map;
 		unsigned int cpu;
 
-		cpu_clear(smp_processor_id(), mask);
-		for_each_cpu_mask(cpu, mask)
-			if (cpu_context(cpu, vma->vm_mm))
+		for_each_online_cpu(cpu) {
+			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
 				cpu_context(cpu, vma->vm_mm) = 0;
+		}
 	}
 	local_flush_tlb_page(vma, page);
 	preempt_enable();
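
In the three MIPS TLB-flush paths above, the on-stack copy of cpu_online_map disappears entirely: rather than copying the online map, clearing the local CPU, and walking the copy, the new code walks for_each_online_cpu() and skips smp_processor_id() inline (the callers already run with preemption disabled). A sketch of the same transformation, with a hypothetical reset_context() standing in for cpu_context():

#include <linux/cpumask.h>
#include <linux/smp.h>

/* Illustrative only; reset_context() is a hypothetical stand-in. */
static void example_invalidate_other_cpus(void (*reset_context)(unsigned int cpu))
{
        unsigned int cpu;

        /*
         * Old pattern:
         *      cpumask_t mask = cpu_online_map;
         *      cpu_clear(smp_processor_id(), mask);
         *      for_each_cpu_mask(cpu, mask) ...
         */
        for_each_online_cpu(cpu) {
                if (cpu != smp_processor_id())
                        reset_context(cpu);
        }
}
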
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index c4f75bbc0bd6..f5dd38f1d015 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -291,7 +291,7 @@ static void smtc_configure_tlb(void)
  * possibly leave some TCs/VPEs as "slave" processors.
  *
  * Use c0_MVPConf0 to find out how many TCs are available, setting up
- * cpu_possible_map and the logical/physical mappings.
+ * cpu_possible_mask and the logical/physical mappings.
  */
 
 int __init smtc_build_cpu_map(int start_cpu_slot)
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 1f9ca07f53c8..47037ec5589b 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -80,9 +80,9 @@ static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
 	if (vma)
 		mask = *mm_cpumask(vma->vm_mm);
 	else
-		mask = cpu_online_map;
-	cpu_clear(cpu, mask);
-	for_each_cpu_mask(cpu, mask)
+		mask = *cpu_online_mask;
+	cpumask_clear_cpu(cpu, &mask);
+	for_each_cpu(cpu, &mask)
 		octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);
 
 	preempt_enable();
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index db17f49886c2..fab316de57e9 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -165,7 +165,7 @@ void __init nlm_smp_setup(void)
 	cpu_set(boot_cpu, phys_cpu_present_map);
 	__cpu_number_map[boot_cpu] = 0;
 	__cpu_logical_map[0] = boot_cpu;
-	cpu_set(0, cpu_possible_map);
+	set_cpu_possible(0, true);
 
 	num_cpus = 1;
 	for (i = 0; i < NR_CPUS; i++) {
@@ -177,14 +177,14 @@ void __init nlm_smp_setup(void)
 			cpu_set(i, phys_cpu_present_map);
 			__cpu_number_map[i] = num_cpus;
 			__cpu_logical_map[num_cpus] = i;
-			cpu_set(num_cpus, cpu_possible_map);
+			set_cpu_possible(num_cpus, true);
 			++num_cpus;
 		}
 	}
 
 	pr_info("Phys CPU present map: %lx, possible map %lx\n",
 		(unsigned long)phys_cpu_present_map.bits[0],
-		(unsigned long)cpu_possible_map.bits[0]);
+		(unsigned long)cpumask_bits(cpu_possible_mask)[0]);
 
 	pr_info("Detected %i Slave CPU(s)\n", num_cpus);
 	nlm_set_nmi_handler(nlm_boot_secondary_cpus);
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c
index 2608752898c0..00a0d2e90d04 100644
--- a/arch/mips/pmc-sierra/yosemite/smp.c
+++ b/arch/mips/pmc-sierra/yosemite/smp.c
@@ -155,10 +155,10 @@ static void __init yos_smp_setup(void)
 {
 	int i;
 
-	cpus_clear(cpu_possible_map);
+	init_cpu_possible(cpu_none_mask);
 
 	for (i = 0; i < 2; i++) {
-		cpu_set(i, cpu_possible_map);
+		set_cpu_possible(i, true);
 		__cpu_number_map[i] = i;
 		__cpu_logical_map[i] = i;
 	}
@@ -169,7 +169,7 @@ static void __init yos_prepare_cpus(unsigned int max_cpus)
 	/*
 	 * Be paranoid. Enable the IPI only if we're really about to go SMP.
 	 */
-	if (cpus_weight(cpu_possible_map))
+	if (num_possible_cpus())
 		set_c0_status(STATUSF_IP5);
 }
 
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index c6851df9ab74..735b43bf8f82 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -76,7 +76,7 @@ static int do_cpumask(cnodeid_t cnode, nasid_t nasid, int highest)
 		/* Only let it join in if it's marked enabled */
 		if ((acpu->cpu_info.flags & KLINFO_ENABLE) &&
 		    (tot_cpus_found != NR_CPUS)) {
-			cpu_set(cpuid, cpu_possible_map);
+			set_cpu_possible(cpuid, true);
 			alloc_cpupda(cpuid, tot_cpus_found);
 			cpus_found++;
 			tot_cpus_found++;
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index d667875be564..63d2211e6167 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -147,14 +147,13 @@ static void __init bcm1480_smp_setup(void)
 {
 	int i, num;
 
-	cpus_clear(cpu_possible_map);
-	cpu_set(0, cpu_possible_map);
+	init_cpu_possible(cpumask_of(0));
 	__cpu_number_map[0] = 0;
 	__cpu_logical_map[0] = 0;
 
 	for (i = 1, num = 0; i < NR_CPUS; i++) {
 		if (cfe_cpu_stop(i) == 0) {
-			cpu_set(i, cpu_possible_map);
+			set_cpu_possible(i, true);
 			__cpu_number_map[i] = ++num;
 			__cpu_logical_map[num] = i;
 		}
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index 38e7f6bd7922..285cfef4ebc0 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -126,7 +126,7 @@ static void __cpuinit sb1250_boot_secondary(int cpu, struct task_struct *idle)
 
 /*
  * Use CFE to find out how many CPUs are available, setting up
- * cpu_possible_map and the logical/physical mappings.
+ * cpu_possible_mask and the logical/physical mappings.
  * XXXKW will the boot CPU ever not be physical 0?
  *
  * Common setup before any secondaries are started
@@ -135,14 +135,13 @@ static void __init sb1250_smp_setup(void)
 {
 	int i, num;
 
-	cpus_clear(cpu_possible_map);
-	cpu_set(0, cpu_possible_map);
+	init_cpu_possible(cpumask_of(0));
 	__cpu_number_map[0] = 0;
 	__cpu_logical_map[0] = 0;
 
 	for (i = 1, num = 0; i < NR_CPUS; i++) {
 		if (cfe_cpu_stop(i) == 0) {
-			cpu_set(i, cpu_possible_map);
+			set_cpu_possible(i, true);
 			__cpu_number_map[i] = ++num;
 			__cpu_logical_map[num] = i;
 		}
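
Both SiByte setup routines (and yos_smp_setup() earlier) replace the clear-then-set sequence on cpu_possible_map with a single init_cpu_possible() call, seeded either with cpumask_of(0) for the boot CPU or with cpu_none_mask, and then grow the map with set_cpu_possible(). A sketch of that setup pattern, with a hypothetical probe_cpu() standing in for the firmware query (cfe_cpu_stop(i) == 0):

#include <linux/cpumask.h>
#include <linux/init.h>

/* Illustrative only; probe_cpu() is a hypothetical stand-in. */
static void __init example_smp_setup(int (*probe_cpu)(int cpu))
{
        int i;

        /* old: cpus_clear(cpu_possible_map); cpu_set(0, cpu_possible_map); */
        init_cpu_possible(cpumask_of(0));

        for (i = 1; i < NR_CPUS; i++)
                if (probe_cpu(i) == 0)
                        set_cpu_possible(i, true);   /* old: cpu_set(i, cpu_possible_map) */
}
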
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index a19c8a063683..35e43673c453 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -104,11 +104,11 @@ static int irq_choose_cpu(const struct cpumask *affinity)
 {
 	cpumask_t mask;
 
-	cpus_and(mask, cpu_online_map, *affinity);
-	if (cpus_equal(mask, cpu_online_map) || cpus_empty(mask))
+	cpumask_and(&mask, cpu_online_mask, affinity);
+	if (cpumask_equal(&mask, cpu_online_mask) || cpumask_empty(&mask))
 		return boot_cpu_id;
 	else
-		return first_cpu(mask);
+		return cpumask_first(&mask);
 }
 #else
 #define irq_choose_cpu(affinity) boot_cpu_id
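
The LEON change shows the pointer-based cpumask operators working on an on-stack cpumask_t: cpumask_and(), cpumask_equal(), and cpumask_empty() take explicit & arguments, and cpumask_first() replaces first_cpu(). A sketch of the resulting shape, with a hypothetical fallback_cpu in place of boot_cpu_id:

#include <linux/cpumask.h>

/* Illustrative only; fallback_cpu stands in for boot_cpu_id. */
static int example_choose_cpu(const struct cpumask *affinity, int fallback_cpu)
{
        cpumask_t mask;

        cpumask_and(&mask, cpu_online_mask, affinity);  /* old: cpus_and(mask, cpu_online_map, *affinity) */
        if (cpumask_equal(&mask, cpu_online_mask) || cpumask_empty(&mask))
                return fallback_cpu;

        return cpumask_first(&mask);                    /* old: first_cpu(mask) */
}
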
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 5f85d8b34dbb..5124093b2e1d 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1186,7 +1186,7 @@ static void __init setup_cpu_maps(void)
 			      sizeof(cpu_lotar_map));
 	if (rc < 0) {
 		pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n");
-		cpu_lotar_map = cpu_possible_map;
+		cpu_lotar_map = *cpu_possible_mask;
 	}
 
 #if CHIP_HAS_CBOX_HOME_MAP()
@@ -1196,9 +1196,9 @@ static void __init setup_cpu_maps(void)
 			      sizeof(hash_for_home_map));
 	if (rc < 0)
 		early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
-	cpumask_or(&cpu_cacheable_map, &cpu_possible_map, &hash_for_home_map);
+	cpumask_or(&cpu_cacheable_map, cpu_possible_mask, &hash_for_home_map);
 #else
-	cpu_cacheable_map = cpu_possible_map;
+	cpu_cacheable_map = *cpu_possible_mask;
 #endif
 }
 
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index 2e9852c0d487..0a9e57e7446b 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -41,7 +41,7 @@ static int __init start_kernel_proc(void *unused)
 	cpu_tasks[0].pid = pid;
 	cpu_tasks[0].task = current;
 #ifdef CONFIG_SMP
-	cpu_online_map = cpumask_of_cpu(0);
+	init_cpu_online(get_cpu_mask(0));
 #endif
 	start_kernel();
 	return 0;
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
index 155206a66908..6f588e160fb0 100644
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -76,7 +76,7 @@ static int idle_proc(void *cpup)
 		cpu_relax();
 
 	notify_cpu_starting(cpu);
-	cpu_set(cpu, cpu_online_map);
+	set_cpu_online(cpu, true);
 	default_idle();
 	return 0;
 }
@@ -110,8 +110,7 @@ void smp_prepare_cpus(unsigned int maxcpus)
 	for (i = 0; i < ncpus; ++i)
 		set_cpu_possible(i, true);
 
-	cpu_clear(me, cpu_online_map);
-	cpu_set(me, cpu_online_map);
+	set_cpu_online(me, true);
 	cpu_set(me, cpu_callin_map);
 
 	err = os_pipe(cpu_data[me].ipi_pipe, 1, 1);
@@ -138,13 +137,13 @@ void smp_prepare_cpus(unsigned int maxcpus)
 
 void smp_prepare_boot_cpu(void)
 {
-	cpu_set(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), true);
 }
 
 int __cpu_up(unsigned int cpu)
 {
 	cpu_set(cpu, smp_commenced_mask);
-	while (!cpu_isset(cpu, cpu_online_map))
+	while (!cpu_online(cpu))
 		mb();
 	return 0;
 }