author     KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>   2011-04-28 01:07:23 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2011-05-04 01:22:59 -0400
commit     104699c0ab473535793b5fea156adaf309afd29b (patch)
tree       6fb55df112b7beb3af4840378b5b3bb55565659b /arch
parent     48404f2e95ef0ffd8134d89c8abcd1a15e15f1b0 (diff)
powerpc: Convert old cpumask API into new one
Adapt to the new API. Almost all of the changes are trivial. The most
important one is the line below, because we plan to change the
task->cpus_allowed implementation:

    - ctx->cpus_allowed = current->cpus_allowed;

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
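For context, the conversion follows one consistent pattern throughout the
diff below: the old cpumask API operated on cpumask_t values, while the new
API operates on struct cpumask pointers. A minimal sketch of the
correspondence as it appears in this commit (the mask and cpu variables are
hypothetical, added only for illustration):

    cpumask_t mask;  /* hypothetical mask */
    int cpu = 0;     /* hypothetical CPU id */

    /* Old API (by value)      ...becomes...  new API (by pointer) */
    mask = CPU_MASK_NONE;       /* cpumask_clear(&mask);          */
    cpu_set(cpu, mask);         /* cpumask_set_cpu(cpu, &mask);   */
    cpu_clear(cpu, mask);       /* cpumask_clear_cpu(cpu, &mask); */
    cpu_isset(cpu, mask);       /* cpumask_test_cpu(cpu, &mask);  */
    cpus_weight(mask);          /* cpumask_weight(&mask);         */
    cpus_empty(mask);           /* cpumask_empty(&mask);          */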
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/cputhreads.h         12
-rw-r--r--  arch/powerpc/include/asm/kexec.h               2
-rw-r--r--  arch/powerpc/kernel/crash.c                   32
-rw-r--r--  arch/powerpc/kernel/setup-common.c             4
-rw-r--r--  arch/powerpc/kernel/smp.c                      4
-rw-r--r--  arch/powerpc/kernel/traps.c                    2
-rw-r--r--  arch/powerpc/mm/numa.c                         2
-rw-r--r--  arch/powerpc/platforms/cell/beat_smp.c         2
-rw-r--r--  arch/powerpc/platforms/cell/cbe_regs.c        11
-rw-r--r--  arch/powerpc/platforms/cell/smp.c             13
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c      2
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-cpu.c   2
-rw-r--r--  arch/powerpc/xmon/xmon.c                      16
13 files changed, 52 insertions(+), 52 deletions(-)
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index f71bb4c118b4..ce516e5eb0d3 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -37,16 +37,16 @@ extern cpumask_t threads_core_mask;
  * This can typically be used for things like IPI for tlb invalidations
  * since those need to be done only once per core/TLB
  */
-static inline cpumask_t cpu_thread_mask_to_cores(cpumask_t threads)
+static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
 {
         cpumask_t tmp, res;
         int i;
 
-        res = CPU_MASK_NONE;
+        cpumask_clear(&res);
         for (i = 0; i < NR_CPUS; i += threads_per_core) {
-                cpus_shift_left(tmp, threads_core_mask, i);
-                if (cpus_intersects(threads, tmp))
-                        cpu_set(i, res);
+                cpumask_shift_left(&tmp, &threads_core_mask, i);
+                if (cpumask_intersects(threads, &tmp))
+                        cpumask_set_cpu(i, &res);
         }
         return res;
 }
@@ -58,7 +58,7 @@ static inline int cpu_nr_cores(void)
 
 static inline cpumask_t cpu_online_cores_map(void)
 {
-        return cpu_thread_mask_to_cores(cpu_online_map);
+        return cpu_thread_mask_to_cores(cpu_online_mask);
 }
 
 #ifdef CONFIG_SMP
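Note the signature change in the hunk above: cpu_thread_mask_to_cores() now
takes const struct cpumask * rather than a by-value cpumask_t, so callers
pass cpu_online_mask (a pointer) instead of copying cpu_online_map onto the
stack. An illustrative view of what a caller sees (not part of this commit):

    /* Old: the whole NR_CPUS-bit bitmap is copied at the call site. */
    cpumask_t cores = cpu_thread_mask_to_cores(cpu_online_map);

    /* New: only a pointer is passed; no bitmap copy on the way in. */
    cpumask_t cores = cpu_thread_mask_to_cores(cpu_online_mask);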
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index f54408d995b5..8a33698c61bd 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -76,7 +76,7 @@ extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
 extern cpumask_t cpus_in_sr;
 static inline int kexec_sr_activated(int cpu)
 {
-        return cpu_isset(cpu,cpus_in_sr);
+        return cpumask_test_cpu(cpu, &cpus_in_sr);
 }
 
 struct kimage;
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 5b5e1f002a8e..ccc2198e6b23 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -64,9 +64,9 @@ void crash_ipi_callback(struct pt_regs *regs)
                 return;
 
         hard_irq_disable();
-        if (!cpu_isset(cpu, cpus_in_crash))
+        if (!cpumask_test_cpu(cpu, &cpus_in_crash))
                 crash_save_cpu(regs, cpu);
-        cpu_set(cpu, cpus_in_crash);
+        cpumask_set_cpu(cpu, &cpus_in_crash);
 
         /*
          * Entered via soft-reset - could be the kdump
@@ -77,8 +77,8 @@ void crash_ipi_callback(struct pt_regs *regs)
          * Tell the kexec CPU that entered via soft-reset and ready
          * to go down.
          */
-        if (cpu_isset(cpu, cpus_in_sr)) {
-                cpu_clear(cpu, cpus_in_sr);
+        if (cpumask_test_cpu(cpu, &cpus_in_sr)) {
+                cpumask_clear_cpu(cpu, &cpus_in_sr);
                 atomic_inc(&enter_on_soft_reset);
         }
 
@@ -87,7 +87,7 @@ void crash_ipi_callback(struct pt_regs *regs)
          * This barrier is needed to make sure that all CPUs are stopped.
          * If not, soft-reset will be invoked to bring other CPUs.
          */
-        while (!cpu_isset(crashing_cpu, cpus_in_crash))
+        while (!cpumask_test_cpu(crashing_cpu, &cpus_in_crash))
                 cpu_relax();
 
         if (ppc_md.kexec_cpu_down)
@@ -109,7 +109,7 @@ static void crash_soft_reset_check(int cpu)
 {
         unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
 
-        cpu_clear(cpu, cpus_in_sr);
+        cpumask_clear_cpu(cpu, &cpus_in_sr);
         while (atomic_read(&enter_on_soft_reset) != ncpus)
                 cpu_relax();
 }
@@ -132,7 +132,7 @@ static void crash_kexec_prepare_cpus(int cpu)
          */
         printk(KERN_EMERG "Sending IPI to other cpus...\n");
         msecs = 10000;
-        while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
+        while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
                 cpu_relax();
                 mdelay(1);
         }
@@ -144,20 +144,20 @@ static void crash_kexec_prepare_cpus(int cpu)
          * user to do soft reset such that we get all.
          * Soft-reset will be used until better mechanism is implemented.
          */
-        if (cpus_weight(cpus_in_crash) < ncpus) {
+        if (cpumask_weight(&cpus_in_crash) < ncpus) {
                 printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n",
-                        ncpus - cpus_weight(cpus_in_crash));
+                        ncpus - cpumask_weight(&cpus_in_crash));
                 printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n");
-                cpus_in_sr = CPU_MASK_NONE;
+                cpumask_clear(&cpus_in_sr);
                 atomic_set(&enter_on_soft_reset, 0);
-                while (cpus_weight(cpus_in_crash) < ncpus)
+                while (cpumask_weight(&cpus_in_crash) < ncpus)
                         cpu_relax();
         }
         /*
          * Make sure all CPUs are entered via soft-reset if the kdump is
          * invoked using soft-reset.
          */
-        if (cpu_isset(cpu, cpus_in_sr))
+        if (cpumask_test_cpu(cpu, &cpus_in_sr))
                 crash_soft_reset_check(cpu);
         /* Leave the IPI callback set */
 }
@@ -210,7 +210,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
                  * exited using 'x'(exit and recover) or
                  * kexec_should_crash() failed for all running tasks.
                  */
-                cpu_clear(cpu, cpus_in_sr);
+                cpumask_clear_cpu(cpu, &cpus_in_sr);
                 local_irq_restore(flags);
                 return;
         }
@@ -224,7 +224,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
          * then start kexec boot.
          */
         crash_soft_reset_check(cpu);
-        cpu_set(crashing_cpu, cpus_in_crash);
+        cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
         if (ppc_md.kexec_cpu_down)
                 ppc_md.kexec_cpu_down(1, 0);
         machine_kexec(kexec_crash_image);
@@ -253,7 +253,7 @@ static void crash_kexec_prepare_cpus(int cpu)
 
 void crash_kexec_secondary(struct pt_regs *regs)
 {
-        cpus_in_sr = CPU_MASK_NONE;
+        cpumask_clear(&cpus_in_sr);
 }
 #endif /* CONFIG_SMP */
 
@@ -345,7 +345,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
         crashing_cpu = smp_processor_id();
         crash_save_cpu(regs, crashing_cpu);
         crash_kexec_prepare_cpus(crashing_cpu);
-        cpu_set(crashing_cpu, cpus_in_crash);
+        cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
         crash_kexec_wait_realmode(crashing_cpu);
 
         machine_kexec_mask_interrupts();
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 21f30cb68077..1475df6e403f 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -381,7 +381,7 @@ static void __init cpu_init_thread_core_maps(int tpc)
         int i;
 
         threads_per_core = tpc;
-        threads_core_mask = CPU_MASK_NONE;
+        cpumask_clear(&threads_core_mask);
 
         /* This implementation only supports power of 2 number of threads
          * for simplicity and performance
@@ -390,7 +390,7 @@ static void __init cpu_init_thread_core_maps(int tpc)
         BUG_ON(tpc != (1 << threads_shift));
 
         for (i = 0; i < tpc; i++)
-                cpu_set(i, threads_core_mask);
+                cpumask_set_cpu(i, &threads_core_mask);
 
         printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
                tpc, tpc > 1 ? "s" : "");
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index b6083f4f39b1..87517ab6d365 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -513,7 +513,7 @@ int cpu_first_thread_of_core(int core)
 }
 EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
 
-/* Must be called when no change can occur to cpu_present_map,
+/* Must be called when no change can occur to cpu_present_mask,
  * i.e. during cpu online or offline.
  */
 static struct device_node *cpu_to_l2cache(int cpu)
@@ -614,7 +614,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
          * se we pin us down to CPU 0 for a short while
          */
         alloc_cpumask_var(&old_mask, GFP_NOWAIT);
-        cpumask_copy(old_mask, &current->cpus_allowed);
+        cpumask_copy(old_mask, tsk_cpus_allowed(current));
         set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
 
         if (smp_ops && smp_ops->setup_cpu)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 4a6a109b6816..06b9d457d0a7 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -221,7 +221,7 @@ void system_reset_exception(struct pt_regs *regs)
         }
 
 #ifdef CONFIG_KEXEC
-        cpu_set(smp_processor_id(), cpus_in_sr);
+        cpumask_set_cpu(smp_processor_id(), &cpus_in_sr);
 #endif
 
         die("System Reset", regs, SIGABRT);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index e49b799b59a3..2164006fe170 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1452,7 +1452,7 @@ int arch_update_cpu_topology(void)
         unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
         struct sys_device *sysdev;
 
-        for_each_cpu_mask(cpu, cpu_associativity_changes_mask) {
+        for_each_cpu(cpu,&cpu_associativity_changes_mask) {
                 vphn_get_associativity(cpu, associativity);
                 nid = associativity_to_nid(associativity);
 
diff --git a/arch/powerpc/platforms/cell/beat_smp.c b/arch/powerpc/platforms/cell/beat_smp.c
index 996df33165f1..3e86acbb0fb4 100644
--- a/arch/powerpc/platforms/cell/beat_smp.c
+++ b/arch/powerpc/platforms/cell/beat_smp.c
@@ -85,7 +85,7 @@ static void smp_beatic_message_pass(int target, int msg)
 
 static int __init smp_beatic_probe(void)
 {
-        return cpus_weight(cpu_possible_map);
+        return cpumask_weight(cpu_possible_mask);
 }
 
 static void __devinit smp_beatic_setup_cpu(int cpu)
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
index dbc338f187a2..f3917e7a5b44 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.c
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -45,8 +45,8 @@ static struct cbe_thread_map
         unsigned int cbe_id;
 } cbe_thread_map[NR_CPUS];
 
-static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = CPU_MASK_NONE };
-static cpumask_t cbe_first_online_cpu = CPU_MASK_NONE;
+static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = {CPU_BITS_NONE} };
+static cpumask_t cbe_first_online_cpu = { CPU_BITS_NONE };
 
 static struct cbe_regs_map *cbe_find_map(struct device_node *np)
 {
@@ -159,7 +159,8 @@ EXPORT_SYMBOL_GPL(cbe_cpu_to_node);
 
 u32 cbe_node_to_cpu(int node)
 {
-        return find_first_bit( (unsigned long *) &cbe_local_mask[node], sizeof(cpumask_t));
+        return cpumask_first(&cbe_local_mask[node]);
+
 }
 EXPORT_SYMBOL_GPL(cbe_node_to_cpu);
 
@@ -268,9 +269,9 @@ void __init cbe_regs_init(void)
                         thread->regs = map;
                         thread->cbe_id = cbe_id;
                         map->be_node = thread->be_node;
-                        cpu_set(i, cbe_local_mask[cbe_id]);
+                        cpumask_set_cpu(i, &cbe_local_mask[cbe_id]);
                         if(thread->thread_id == 0)
-                                cpu_set(i, cbe_first_online_cpu);
+                                cpumask_set_cpu(i, &cbe_first_online_cpu);
                 }
         }
 
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index 03d638e2f44f..a2161b91b0bf 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -77,7 +77,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
         unsigned int pcpu;
         int start_cpu;
 
-        if (cpu_isset(lcpu, of_spin_map))
+        if (cpumask_test_cpu(lcpu, &of_spin_map))
                 /* Already started by OF and sitting in spin loop */
                 return 1;
 
@@ -123,7 +123,7 @@ static int __init smp_iic_probe(void)
 {
         iic_request_IPIs();
 
-        return cpus_weight(cpu_possible_map);
+        return cpumask_weight(cpu_possible_mask);
 }
 
 static void __devinit smp_cell_setup_cpu(int cpu)
@@ -188,13 +188,12 @@ void __init smp_init_cell(void)
         if (cpu_has_feature(CPU_FTR_SMT)) {
                 for_each_present_cpu(i) {
                         if (cpu_thread_in_core(i) == 0)
-                                cpu_set(i, of_spin_map);
+                                cpumask_set_cpu(i, &of_spin_map);
                 }
-        } else {
-                of_spin_map = cpu_present_map;
-        }
+        } else
+                cpumask_copy(&of_spin_map, cpu_present_mask);
 
-        cpu_clear(boot_cpuid, of_spin_map);
+        cpumask_clear_cpu(boot_cpuid, &of_spin_map);
 
         /* Non-lpar has additional take/give timebase */
         if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 65203857b0ce..32cb4e66d2cd 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -141,7 +141,7 @@ void __spu_update_sched_info(struct spu_context *ctx)
          * runqueue. The context will be rescheduled on the proper node
          * if it is timesliced or preempted.
          */
-        ctx->cpus_allowed = current->cpus_allowed;
+        cpumask_copy(&ctx->cpus_allowed, tsk_cpus_allowed(current));
 
         /* Save the current cpu id for spu interrupt routing. */
         ctx->last_ran = raw_smp_processor_id();
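This is the hunk the commit message singles out: a direct struct assignment
bakes in the assumption that cpus_allowed is an embedded cpumask_t, and
would break if task->cpus_allowed later becomes a pointer. Using
cpumask_copy() with the tsk_cpus_allowed() accessor keeps the copy
semantics without naming the field directly. A sketch of the idea (assuming
the era's definition of tsk_cpus_allowed() as a macro expanding to
&(tsk)->cpus_allowed):

    /* Fragile: compiles only while cpus_allowed is an embedded cpumask_t. */
    ctx->cpus_allowed = current->cpus_allowed;

    /* Robust: keeps working however the accessor comes to produce its
     * struct cpumask * as the task_struct implementation evolves. */
    cpumask_copy(&ctx->cpus_allowed, tsk_cpus_allowed(current));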
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index ae6c27df4dc4..46f13a3c5d09 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -281,7 +281,7 @@ static int pseries_add_processor(struct device_node *np)
         }
 
         for_each_cpu(cpu, tmp) {
-                BUG_ON(cpumask_test_cpu(cpu, cpu_present_mask));
+                BUG_ON(cpu_present(cpu));
                 set_cpu_present(cpu, true);
                 set_hard_smp_processor_id(cpu, *intserv++);
         }
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 909804aaeebb..91309c5c00d7 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -334,7 +334,7 @@ static void release_output_lock(void)
 
 int cpus_are_in_xmon(void)
 {
-        return !cpus_empty(cpus_in_xmon);
+        return !cpumask_empty(&cpus_in_xmon);
 }
 #endif
 
@@ -373,7 +373,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
 
 #ifdef CONFIG_SMP
         cpu = smp_processor_id();
-        if (cpu_isset(cpu, cpus_in_xmon)) {
+        if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
                 get_output_lock();
                 excprint(regs);
                 printf("cpu 0x%x: Exception %lx %s in xmon, "
@@ -396,7 +396,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
         }
 
         xmon_fault_jmp[cpu] = recurse_jmp;
-        cpu_set(cpu, cpus_in_xmon);
+        cpumask_set_cpu(cpu, &cpus_in_xmon);
 
         bp = NULL;
         if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT))
@@ -440,7 +440,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
                 smp_send_debugger_break(MSG_ALL_BUT_SELF);
                 /* wait for other cpus to come in */
                 for (timeout = 100000000; timeout != 0; --timeout) {
-                        if (cpus_weight(cpus_in_xmon) >= ncpus)
+                        if (cpumask_weight(&cpus_in_xmon) >= ncpus)
                                 break;
                         barrier();
                 }
@@ -484,7 +484,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
                 }
         }
  leave:
-        cpu_clear(cpu, cpus_in_xmon);
+        cpumask_clear_cpu(cpu, &cpus_in_xmon);
         xmon_fault_jmp[cpu] = NULL;
 #else
         /* UP is simple... */
@@ -630,7 +630,7 @@ static int xmon_iabr_match(struct pt_regs *regs)
 static int xmon_ipi(struct pt_regs *regs)
 {
 #ifdef CONFIG_SMP
-        if (in_xmon && !cpu_isset(smp_processor_id(), cpus_in_xmon))
+        if (in_xmon && !cpumask_test_cpu(smp_processor_id(), &cpus_in_xmon))
                 xmon_core(regs, 1);
 #endif
         return 0;
@@ -976,7 +976,7 @@ static int cpu_cmd(void)
         printf("cpus stopped:");
         count = 0;
         for (cpu = 0; cpu < NR_CPUS; ++cpu) {
-                if (cpu_isset(cpu, cpus_in_xmon)) {
+                if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
                         if (count == 0)
                                 printf(" %x", cpu);
                         ++count;
@@ -992,7 +992,7 @@ static int cpu_cmd(void)
                 return 0;
         }
         /* try to switch to cpu specified */
-        if (!cpu_isset(cpu, cpus_in_xmon)) {
+        if (!cpumask_test_cpu(cpu, &cpus_in_xmon)) {
                 printf("cpu 0x%x isn't in xmon\n", cpu);
                 return 0;
         }