aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>2006-05-12 08:20:06 -0400
committerRalf Baechle <ralf@linux-mips.org>2006-05-31 19:28:33 -0400
commit7f3f1d01a9020cff2cb2390aaee3f8df0d70e203 (patch)
tree222c7f054eccce1b98fac8ceafa8dde2afd6b63f
parent78665aaa96fe62b4cee6c226680801c4480aa407 (diff)
[MIPS] Fix deadlock on MP with cache aliases.
A proper fix would involve introducing the notion of shared caches, but at this stage of 2.6.17 that's going to be too intrusive and is not needed for current hardware; besides, I think some discussion will be needed. So for now, on the affected SMP configurations which happen to suffer from cache aliases, we make use of the fact that a single cache will be shared by all processors. This solves the deadlock issue and will improve performance by getting rid of the smp_call_function overhead. Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
-rw-r--r--arch/mips/mm/c-r4k.c39
1 files changed, 30 insertions, 9 deletions
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 4420191795f3..570bc4e30fd5 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -29,6 +29,27 @@
29#include <asm/war.h> 29#include <asm/war.h>
30#include <asm/cacheflush.h> /* for run_uncached() */ 30#include <asm/cacheflush.h> /* for run_uncached() */
31 31
32
33/*
34 * Special Variant of smp_call_function for use by cache functions:
35 *
36 * o No return value
37 * o collapses to normal function call on UP kernels
38 * o collapses to normal function call on systems with a single shared
39 * primary cache.
40 */
41static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
42 int retry, int wait)
43{
44 preempt_disable();
45
46#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
47 smp_call_function(func, info, retry, wait);
48#endif
49 func(info);
50 preempt_enable();
51}
52
32/* 53/*
33 * Must die. 54 * Must die.
34 */ 55 */
@@ -299,7 +320,7 @@ static void r4k_flush_cache_all(void)
299 if (!cpu_has_dc_aliases) 320 if (!cpu_has_dc_aliases)
300 return; 321 return;
301 322
302 on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1); 323 r4k_on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
303} 324}
304 325
305static inline void local_r4k___flush_cache_all(void * args) 326static inline void local_r4k___flush_cache_all(void * args)
@@ -320,7 +341,7 @@ static inline void local_r4k___flush_cache_all(void * args)
320 341
321static void r4k___flush_cache_all(void) 342static void r4k___flush_cache_all(void)
322{ 343{
323 on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1); 344 r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
324} 345}
325 346
326static inline void local_r4k_flush_cache_range(void * args) 347static inline void local_r4k_flush_cache_range(void * args)
@@ -341,7 +362,7 @@ static inline void local_r4k_flush_cache_range(void * args)
341static void r4k_flush_cache_range(struct vm_area_struct *vma, 362static void r4k_flush_cache_range(struct vm_area_struct *vma,
342 unsigned long start, unsigned long end) 363 unsigned long start, unsigned long end)
343{ 364{
344 on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1); 365 r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
345} 366}
346 367
347static inline void local_r4k_flush_cache_mm(void * args) 368static inline void local_r4k_flush_cache_mm(void * args)
@@ -370,7 +391,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
370 if (!cpu_has_dc_aliases) 391 if (!cpu_has_dc_aliases)
371 return; 392 return;
372 393
373 on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1); 394 r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
374} 395}
375 396
376struct flush_cache_page_args { 397struct flush_cache_page_args {
@@ -461,7 +482,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
461 args.addr = addr; 482 args.addr = addr;
462 args.pfn = pfn; 483 args.pfn = pfn;
463 484
464 on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1); 485 r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
465} 486}
466 487
467static inline void local_r4k_flush_data_cache_page(void * addr) 488static inline void local_r4k_flush_data_cache_page(void * addr)
@@ -471,7 +492,7 @@ static inline void local_r4k_flush_data_cache_page(void * addr)
471 492
472static void r4k_flush_data_cache_page(unsigned long addr) 493static void r4k_flush_data_cache_page(unsigned long addr)
473{ 494{
474 on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1); 495 r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
475} 496}
476 497
477struct flush_icache_range_args { 498struct flush_icache_range_args {
@@ -514,7 +535,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
514 args.start = start; 535 args.start = start;
515 args.end = end; 536 args.end = end;
516 537
517 on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1); 538 r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
518 instruction_hazard(); 539 instruction_hazard();
519} 540}
520 541
@@ -590,7 +611,7 @@ static void r4k_flush_icache_page(struct vm_area_struct *vma,
590 args.vma = vma; 611 args.vma = vma;
591 args.page = page; 612 args.page = page;
592 613
593 on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1); 614 r4k_on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
594} 615}
595 616
596 617
@@ -689,7 +710,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
689 710
690static void r4k_flush_cache_sigtramp(unsigned long addr) 711static void r4k_flush_cache_sigtramp(unsigned long addr)
691{ 712{
692 on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1); 713 r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
693} 714}
694 715
695static void r4k_flush_icache_all(void) 716static void r4k_flush_icache_all(void)