aboutsummaryrefslogtreecommitdiffstats
path: root/arch/m32r/kernel/smp.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/m32r/kernel/smp.c')
-rw-r--r--  arch/m32r/kernel/smp.c  31
1 files changed, 16 insertions, 15 deletions
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index 929e5c9d3ad9..8a88f1f0a3e2 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -17,6 +17,7 @@
17 17
18#include <linux/irq.h> 18#include <linux/irq.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/sched.h>
20#include <linux/spinlock.h> 21#include <linux/spinlock.h>
21#include <linux/mm.h> 22#include <linux/mm.h>
22#include <linux/smp.h> 23#include <linux/smp.h>
@@ -85,7 +86,7 @@ void smp_ipi_timer_interrupt(struct pt_regs *);
85void smp_local_timer_interrupt(void); 86void smp_local_timer_interrupt(void);
86 87
87static void send_IPI_allbutself(int, int); 88static void send_IPI_allbutself(int, int);
88static void send_IPI_mask(cpumask_t, int, int); 89static void send_IPI_mask(const struct cpumask *, int, int);
89unsigned long send_IPI_mask_phys(cpumask_t, int, int); 90unsigned long send_IPI_mask_phys(cpumask_t, int, int);
90 91
91/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ 92/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
@@ -113,7 +114,7 @@ unsigned long send_IPI_mask_phys(cpumask_t, int, int);
113void smp_send_reschedule(int cpu_id) 114void smp_send_reschedule(int cpu_id)
114{ 115{
115 WARN_ON(cpu_is_offline(cpu_id)); 116 WARN_ON(cpu_is_offline(cpu_id));
116 send_IPI_mask(cpumask_of_cpu(cpu_id), RESCHEDULE_IPI, 1); 117 send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1);
117} 118}
118 119
119/*==========================================================================* 120/*==========================================================================*
@@ -168,7 +169,7 @@ void smp_flush_cache_all(void)
168 spin_lock(&flushcache_lock); 169 spin_lock(&flushcache_lock);
169 mask=cpus_addr(cpumask); 170 mask=cpus_addr(cpumask);
170 atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask); 171 atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
171 send_IPI_mask(cpumask, INVALIDATE_CACHE_IPI, 0); 172 send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
172 _flush_cache_copyback_all(); 173 _flush_cache_copyback_all();
173 while (flushcache_cpumask) 174 while (flushcache_cpumask)
174 mb(); 175 mb();
@@ -264,7 +265,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
264 preempt_disable(); 265 preempt_disable();
265 cpu_id = smp_processor_id(); 266 cpu_id = smp_processor_id();
266 mmc = &mm->context[cpu_id]; 267 mmc = &mm->context[cpu_id];
267 cpu_mask = mm->cpu_vm_mask; 268 cpu_mask = *mm_cpumask(mm);
268 cpu_clear(cpu_id, cpu_mask); 269 cpu_clear(cpu_id, cpu_mask);
269 270
270 if (*mmc != NO_CONTEXT) { 271 if (*mmc != NO_CONTEXT) {
@@ -273,7 +274,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
273 if (mm == current->mm) 274 if (mm == current->mm)
274 activate_context(mm); 275 activate_context(mm);
275 else 276 else
276 cpu_clear(cpu_id, mm->cpu_vm_mask); 277 cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
277 local_irq_restore(flags); 278 local_irq_restore(flags);
278 } 279 }
279 if (!cpus_empty(cpu_mask)) 280 if (!cpus_empty(cpu_mask))
@@ -334,7 +335,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
334 preempt_disable(); 335 preempt_disable();
335 cpu_id = smp_processor_id(); 336 cpu_id = smp_processor_id();
336 mmc = &mm->context[cpu_id]; 337 mmc = &mm->context[cpu_id];
337 cpu_mask = mm->cpu_vm_mask; 338 cpu_mask = *mm_cpumask(mm);
338 cpu_clear(cpu_id, cpu_mask); 339 cpu_clear(cpu_id, cpu_mask);
339 340
340#ifdef DEBUG_SMP 341#ifdef DEBUG_SMP
@@ -424,7 +425,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
424 * We have to send the IPI only to 425 * We have to send the IPI only to
425 * CPUs affected. 426 * CPUs affected.
426 */ 427 */
427 send_IPI_mask(cpumask, INVALIDATE_TLB_IPI, 0); 428 send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
428 429
429 while (!cpus_empty(flush_cpumask)) { 430 while (!cpus_empty(flush_cpumask)) {
430 /* nothing. lockup detection does not belong here */ 431 /* nothing. lockup detection does not belong here */
@@ -469,7 +470,7 @@ void smp_invalidate_interrupt(void)
469 if (flush_mm == current->active_mm) 470 if (flush_mm == current->active_mm)
470 activate_context(flush_mm); 471 activate_context(flush_mm);
471 else 472 else
472 cpu_clear(cpu_id, flush_mm->cpu_vm_mask); 473 cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
473 } else { 474 } else {
474 unsigned long va = flush_va; 475 unsigned long va = flush_va;
475 476
@@ -546,14 +547,14 @@ static void stop_this_cpu(void *dummy)
546 for ( ; ; ); 547 for ( ; ; );
547} 548}
548 549
549void arch_send_call_function_ipi(cpumask_t mask) 550void arch_send_call_function_ipi_mask(const struct cpumask *mask)
550{ 551{
551 send_IPI_mask(mask, CALL_FUNCTION_IPI, 0); 552 send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
552} 553}
553 554
554void arch_send_call_function_single_ipi(int cpu) 555void arch_send_call_function_single_ipi(int cpu)
555{ 556{
556 send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0); 557 send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0);
557} 558}
558 559
559/*==========================================================================* 560/*==========================================================================*
@@ -729,7 +730,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
729 cpumask = cpu_online_map; 730 cpumask = cpu_online_map;
730 cpu_clear(smp_processor_id(), cpumask); 731 cpu_clear(smp_processor_id(), cpumask);
731 732
732 send_IPI_mask(cpumask, ipi_num, try); 733 send_IPI_mask(&cpumask, ipi_num, try);
733} 734}
734 735
735/*==========================================================================* 736/*==========================================================================*
@@ -752,7 +753,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
752 * ---------- --- -------------------------------------------------------- 753 * ---------- --- --------------------------------------------------------
753 * 754 *
754 *==========================================================================*/ 755 *==========================================================================*/
755static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try) 756static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
756{ 757{
757 cpumask_t physid_mask, tmp; 758 cpumask_t physid_mask, tmp;
758 int cpu_id, phys_id; 759 int cpu_id, phys_id;
@@ -761,11 +762,11 @@ static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
761 if (num_cpus <= 1) /* NO MP */ 762 if (num_cpus <= 1) /* NO MP */
762 return; 763 return;
763 764
764 cpus_and(tmp, cpumask, cpu_online_map); 765 cpumask_and(&tmp, cpumask, cpu_online_mask);
765 BUG_ON(!cpus_equal(cpumask, tmp)); 766 BUG_ON(!cpumask_equal(cpumask, &tmp));
766 767
767 physid_mask = CPU_MASK_NONE; 768 physid_mask = CPU_MASK_NONE;
768 for_each_cpu_mask(cpu_id, cpumask){ 769 for_each_cpu(cpu_id, cpumask) {
769 if ((phys_id = cpu_to_physid(cpu_id)) != -1) 770 if ((phys_id = cpu_to_physid(cpu_id)) != -1)
770 cpu_set(phys_id, physid_mask); 771 cpu_set(phys_id, physid_mask);
771 } 772 }