author    Ralf Baechle <ralf@linux-mips.org>    2006-06-22 17:42:32 -0400
committer Ralf Baechle <ralf@linux-mips.org>    2006-07-13 16:25:56 -0400
commit    25969354a385f347b55aafb1040dfc21263fa7c3
tree      1ba512ac5f4984260c32b02594e70fbefa6b9894
parent    f1aaee53f2877a7afa55e8245c241ff60a86367d
[MIPS] Avoid interprocessor function calls.

On the 34K, where multiple virtual processors are implemented in a
single core and share a single TLB, interprocessor function calls are
not needed to flush a cache, so avoid them.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
-rw-r--r--  arch/mips/kernel/smp.c  35
1 file changed, 30 insertions(+), 5 deletions(-)
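The patch routes the SMP TLB flush paths through two new helpers instead of calling smp_call_function() directly; note that smp_call_function(func, info, retry, wait) is the four-argument form used by kernels of this era. For readability, here are the two helpers the patch adds, reproduced with explanatory comments of mine that are not part of the patch:

/* The two helpers introduced below, with added commentary. */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
#ifndef CONFIG_MIPS_MT_SMTC
	/* Distinct TLBs per CPU: interrupt the other CPUs and wait
	 * (last argument) until each of them has run func(). */
	smp_call_function(func, info, 1, 1);
#endif
	/* Under SMTC, every virtual processor shares the core's single
	 * TLB, so a local flush is visible to all of them and no
	 * interprocessor call is needed: this helper becomes a no-op. */
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	/* Pin this thread to the current CPU so the set of "other"
	 * TLBs cannot change between the IPI and the local call. */
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);		/* then flush the local TLB too */

	preempt_enable();
}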
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 9096a5ea4229..221895802dca 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -319,6 +319,32 @@ static void flush_tlb_mm_ipi(void *mm)
 }
 
 /*
+ * Special Variant of smp_call_function for use by TLB functions:
+ *
+ * o No return value
+ * o collapses to normal function call on UP kernels
+ * o collapses to normal function call on systems with a single shared
+ *   primary cache.
+ * o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
+ */
+static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
+{
+#ifndef CONFIG_MIPS_MT_SMTC
+	smp_call_function(func, info, 1, 1);
+#endif
+}
+
+static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
+{
+	preempt_disable();
+
+	smp_on_other_tlbs(func, info);
+	func(info);
+
+	preempt_enable();
+}
+
+/*
  * The following tlb flush calls are invoked when old translations are
  * being torn down, or pte attributes are changing. For single threaded
  * address spaces, a new context is obtained on the current cpu, and tlb
@@ -336,7 +362,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 	preempt_disable();
 
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
-		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
+		smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm);
 	} else {
 		int i;
 		for (i = 0; i < num_online_cpus(); i++)
@@ -372,7 +398,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
 		fd.vma = vma;
 		fd.addr1 = start;
 		fd.addr2 = end;
-		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
+		smp_on_other_tlbs(flush_tlb_range_ipi, (void *)&fd);
 	} else {
 		int i;
 		for (i = 0; i < num_online_cpus(); i++)
@@ -414,7 +440,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 
 		fd.vma = vma;
 		fd.addr1 = page;
-		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
+		smp_on_other_tlbs(flush_tlb_page_ipi, (void *)&fd);
 	} else {
 		int i;
 		for (i = 0; i < num_online_cpus(); i++)
@@ -434,8 +460,7 @@ static void flush_tlb_one_ipi(void *info)
 
 void flush_tlb_one(unsigned long vaddr)
 {
-	smp_call_function(flush_tlb_one_ipi, (void *) vaddr, 1, 1);
-	local_flush_tlb_one(vaddr);
+	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
 }
 
 static DEFINE_PER_CPU(struct cpu, cpu_devices);
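
As a usage sketch (a hypothetical caller, not part of this commit), any code that needs a function run against every TLB in the system can follow the same shape flush_tlb_one() now has, instead of pairing smp_call_function() with an explicit local call:

/* Hypothetical example, not from this commit: invalidate one kernel
 * mapping in every TLB.  flush_tlb_one() in the patch is exactly this
 * shape; local_flush_tlb_one() is the existing per-CPU primitive. */
static void example_flush_one_ipi(void *info)
{
	local_flush_tlb_one((unsigned long) info);
}

static void example_flush_one_everywhere(unsigned long vaddr)
{
	/* One call covers remote CPUs (via IPI, unless SMTC makes that
	 * unnecessary) and then the local CPU. */
	smp_on_each_tlb(example_flush_one_ipi, (void *) vaddr);
}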