Diffstat (limited to 'arch/cris/arch-v32/kernel')
-rw-r--r--  arch/cris/arch-v32/kernel/irq.c   4
-rw-r--r--  arch/cris/arch-v32/kernel/smp.c  33
2 files changed, 20 insertions, 17 deletions
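
The hunks below swap the old cpumask value macros (cpu_isset(), cpu_set(), cpu_clear(), first_cpu(), cpus_and(), for_each_cpu_mask(), and the CPU_MASK_NONE/CPU_MASK_ALL initializers) for accessors that take a struct cpumask pointer, and route online-map updates through set_cpu_online(). As a rough sketch of that pattern (illustration only, not part of the patch; the helper names are made up, and it assumes a kernel tree providing <linux/cpumask.h>):

/* Illustration only, not from this patch: new-style cpumask accessors. */
#include <linux/cpumask.h>

/* Hypothetical helper: keep 'cur' if the mask allows it, else pick the
 * first CPU set in the mask. */
static int example_pick_cpu(const struct cpumask *allowed, int cur)
{
	if (cpumask_test_cpu(cur, allowed))	/* was: cpu_isset(cur, mask) */
		return cur;
	return cpumask_first(allowed);		/* was: first_cpu(mask) */
}

/* Hypothetical helper: build a single-CPU mask in caller-provided storage. */
static void example_build_mask(int cpu, cpumask_t *mask)
{
	cpumask_clear(mask);			/* was: *mask = CPU_MASK_NONE */
	cpumask_set_cpu(cpu, mask);		/* was: cpu_set(cpu, *mask) */
}

Direct writes to cpu_online_map likewise become set_cpu_online(cpu, true/false), as seen in smp_boot_one_cpu() and smp_callin() below.
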
diff --git a/arch/cris/arch-v32/kernel/irq.c b/arch/cris/arch-v32/kernel/irq.c
index 68a1a5901ca5..5ebe6e841820 100644
--- a/arch/cris/arch-v32/kernel/irq.c
+++ b/arch/cris/arch-v32/kernel/irq.c
@@ -266,11 +266,11 @@ static int irq_cpu(int irq)
 
 
 	/* Let the interrupt stay if possible */
-	if (cpu_isset(cpu, irq_allocations[irq - FIRST_IRQ].mask))
+	if (cpumask_test_cpu(cpu, &irq_allocations[irq - FIRST_IRQ].mask))
 		goto out;
 
 	/* IRQ must be moved to another CPU. */
-	cpu = first_cpu(irq_allocations[irq - FIRST_IRQ].mask);
+	cpu = cpumask_first(&irq_allocations[irq - FIRST_IRQ].mask);
 	irq_allocations[irq - FIRST_IRQ].cpu = cpu;
 out:
 	spin_unlock_irqrestore(&irq_lock, flags);
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index 66cc75657e2f..a0843a71aaee 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -81,7 +81,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 
 	/* Mark all possible CPUs as present */
 	for (i = 0; i < max_cpus; i++)
-		cpu_set(i, phys_cpu_present_map);
+		cpumask_set_cpu(i, &phys_cpu_present_map);
 }
 
 void __devinit smp_prepare_boot_cpu(void)
@@ -98,7 +98,7 @@ void __devinit smp_prepare_boot_cpu(void)
 	SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
 
 	set_cpu_online(0, true);
-	cpu_set(0, phys_cpu_present_map);
+	cpumask_set_cpu(0, &phys_cpu_present_map);
 	set_cpu_possible(0, true);
 }
 
@@ -112,8 +112,9 @@ smp_boot_one_cpu(int cpuid)
 {
 	unsigned timeout;
 	struct task_struct *idle;
-	cpumask_t cpu_mask = CPU_MASK_NONE;
+	cpumask_t cpu_mask;
 
+	cpumask_clear(&cpu_mask);
 	idle = fork_idle(cpuid);
 	if (IS_ERR(idle))
 		panic("SMP: fork failed for CPU:%d", cpuid);
@@ -125,10 +126,10 @@ smp_boot_one_cpu(int cpuid)
 	cpu_now_booting = cpuid;
 
 	/* Kick it */
-	cpu_set(cpuid, cpu_online_map);
-	cpu_set(cpuid, cpu_mask);
+	set_cpu_online(cpuid, true);
+	cpumask_set_cpu(cpuid, &cpu_mask);
 	send_ipi(IPI_BOOT, 0, cpu_mask);
-	cpu_clear(cpuid, cpu_online_map);
+	set_cpu_online(cpuid, false);
 
 	/* Wait for CPU to come online */
 	for (timeout = 0; timeout < 10000; timeout++) {
@@ -176,7 +177,7 @@ void __init smp_callin(void)
 	notify_cpu_starting(cpu);
 	local_irq_enable();
 
-	cpu_set(cpu, cpu_online_map);
+	set_cpu_online(cpu, true);
 	cpu_idle();
 }
 
@@ -214,8 +215,9 @@ int __cpuinit __cpu_up(unsigned int cpu)
 
 void smp_send_reschedule(int cpu)
 {
-	cpumask_t cpu_mask = CPU_MASK_NONE;
-	cpu_set(cpu, cpu_mask);
+	cpumask_t cpu_mask;
+	cpumask_clear(&cpu_mask);
+	cpumask_set_cpu(cpu, &cpu_mask);
 	send_ipi(IPI_SCHEDULE, 0, cpu_mask);
 }
 
@@ -232,7 +234,7 @@ void flush_tlb_common(struct mm_struct* mm, struct vm_area_struct* vma, unsigned
 
 	spin_lock_irqsave(&tlbstate_lock, flags);
 	cpu_mask = (mm == FLUSH_ALL ? cpu_all_mask : *mm_cpumask(mm));
-	cpu_clear(smp_processor_id(), cpu_mask);
+	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
 	flush_mm = mm;
 	flush_vma = vma;
 	flush_addr = addr;
@@ -277,10 +279,10 @@ int send_ipi(int vector, int wait, cpumask_t cpu_mask)
 	int ret = 0;
 
 	/* Calculate CPUs to send to. */
-	cpus_and(cpu_mask, cpu_mask, cpu_online_map);
+	cpumask_and(&cpu_mask, &cpu_mask, cpu_online_mask);
 
 	/* Send the IPI. */
-	for_each_cpu_mask(i, cpu_mask)
+	for_each_cpu(i, &cpu_mask)
 	{
 		ipi.vector |= vector;
 		REG_WR(intr_vect, irq_regs[i], rw_ipi, ipi);
@@ -288,7 +290,7 @@ int send_ipi(int vector, int wait, cpumask_t cpu_mask)
 
 	/* Wait for IPI to finish on other CPUS */
 	if (wait) {
-		for_each_cpu_mask(i, cpu_mask) {
+		for_each_cpu(i, &cpu_mask) {
 			int j;
 			for (j = 0 ; j < 1000; j++) {
 				ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
@@ -314,11 +316,12 @@ int send_ipi(int vector, int wait, cpumask_t cpu_mask)
  */
 int smp_call_function(void (*func)(void *info), void *info, int wait)
 {
-	cpumask_t cpu_mask = CPU_MASK_ALL;
+	cpumask_t cpu_mask;
 	struct call_data_struct data;
 	int ret;
 
-	cpu_clear(smp_processor_id(), cpu_mask);
+	cpumask_setall(&cpu_mask);
+	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
 
 	WARN_ON(irqs_disabled());
 
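
For comparison, a hedged sketch of how the converted send_ipi()-style code walks an on-stack mask with the new iterators; example_count_targets() is a made-up name, and its body only counts CPUs rather than writing IPI registers:

/* Illustration only, not from this patch. */
#include <linux/cpumask.h>

static int example_count_targets(cpumask_t cpu_mask)
{
	int cpu, n = 0;

	/* was: cpus_and(cpu_mask, cpu_mask, cpu_online_map) */
	cpumask_and(&cpu_mask, &cpu_mask, cpu_online_mask);

	/* was: for_each_cpu_mask(cpu, cpu_mask) */
	for_each_cpu(cpu, &cpu_mask)
		n++;

	return n;
}

On configurations with CONFIG_CPUMASK_OFFSTACK, later kernels generally avoid on-stack cpumask_t in favour of cpumask_var_t with alloc_cpumask_var(); this patch keeps the existing on-stack masks and only changes how they are initialised and accessed.
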