about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorDimitri Sivanich <sivanich@sgi.com>2009-04-15 11:56:25 -0400
committerTony Luck <tony.luck@intel.com>2009-04-16 14:51:35 -0400
commitedb91dc01a216e84b78721b71a06db1e0db141b7 (patch)
tree9e030d23e6ccac225bce132d4bd5017720d8cfc8
parentc4cb768f027706b3a0190309416b13f07114fe56 (diff)
[IA64] smp_flush_tlb_mm() should only send IPIs to cpus in cpu_vm_mask
Having flush_tlb_mm->smp_flush_tlb_mm() send an IPI to every cpu on the system occasionally triggers spin_lock contention in generic_smp_call_function_interrupt(). Follow the x86 arch's lead and only send IPIs to the cpus in mm->cpu_vm_mask. Experiments with this change have shown significant improvement in this contention issue. Signed-off-by: Dimitri Sivanich <sivanich@sgi.com> Signed-off-by: Tony Luck <tony.luck@intel.com>
-rw-r--r--arch/ia64/kernel/smp.c13
1 file changed, 5 insertions, 8 deletions
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index c2d982385dce..5230eaafd83f 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -301,15 +301,12 @@ smp_flush_tlb_mm (struct mm_struct *mm)
301 return; 301 return;
302 } 302 }
303 303
304 smp_call_function_mask(mm->cpu_vm_mask,
305 (void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
306 local_irq_disable();
307 local_finish_flush_tlb_mm(mm);
308 local_irq_enable();
304 preempt_enable(); 309 preempt_enable();
305 /*
306 * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
307 * have been running in the address space. It's not clear that this is worth the
308 * trouble though: to avoid races, we have to raise the IPI on the target CPU
309 * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
310 * rather trivial.
311 */
312 on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
313} 310}
314 311
315void arch_send_call_function_single_ipi(int cpu) 312void arch_send_call_function_single_ipi(int cpu)