 arch/ia64/kernel/smp.c | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index c2d982385dce..5230eaafd83f 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -301,15 +301,12 @@ smp_flush_tlb_mm (struct mm_struct *mm)
 		return;
 	}
 
+	smp_call_function_mask(mm->cpu_vm_mask,
+		(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
+	local_irq_disable();
+	local_finish_flush_tlb_mm(mm);
+	local_irq_enable();
 	preempt_enable();
-	/*
-	 * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
-	 * have been running in the address space.  It's not clear that this is worth the
-	 * trouble though: to avoid races, we have to raise the IPI on the target CPU
-	 * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
-	 * rather trivial.
-	 */
-	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
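
In short, the patch stops broadcasting the TLB-flush IPI to every online CPU via on_each_cpu() and instead targets only the CPUs in mm->cpu_vm_mask, i.e. the CPUs that have actually run in this address space; the calling CPU then flushes its own TLB explicitly. Below is a minimal sketch of how smp_flush_tlb_mm() reads after the patch. The fast path at the top of the function sits above the hunk and is not part of this diff, so treat it as an assumption reconstructed from context, not as text confirmed by the patch.

	void
	smp_flush_tlb_mm (struct mm_struct *mm)
	{
		preempt_disable();
		/* Assumed fast path (outside the hunk): a single-threaded mm
		 * that is the current address space only needs a local flush. */
		if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) {
			local_finish_flush_tlb_mm(mm);
			preempt_enable();
			return;
		}

		/* Interrupt only the CPUs that have run this mm, rather than
		 * every online CPU as on_each_cpu() did. wait=1 means we do
		 * not return until every targeted CPU has finished flushing. */
		smp_call_function_mask(mm->cpu_vm_mask,
			(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);

		/* smp_call_function_mask() excludes the calling CPU, so flush
		 * the local TLB directly, with interrupts off to match the
		 * context the IPI handler would run in on remote CPUs. */
		local_irq_disable();
		local_finish_flush_tlb_mm(mm);
		local_irq_enable();
		preempt_enable();
	}

The explicit local flush is what lets the removed comment's concern about races go away: remote CPUs are flushed synchronously through the IPI, and the local CPU is flushed on the spot, so no CPU in cpu_vm_mask can be missed.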