author     Paul Mundt <lethal@linux-sh.org>  2009-08-14 13:21:16 -0400
committer  Paul Mundt <lethal@linux-sh.org>  2009-08-14 13:21:16 -0400
commit     e7b8b7f16edc9b363573eadf2ab2683473626071
tree       f5d578d8d594b09d968d579f000a3f9b03da10a9  /arch/sh/mm/cache-sh4.c
parent     795687265d1b6f666d02ff56f6c1679a8db160a9
sh: NO_CONTEXT ASID optimizations for SH-4 cache flush.
This optimizes for the case where a CPU does not yet have a valid ASID
context associated with an mm; in that case there is no work for any of
flush_cache_mm()/flush_cache_page()/flush_cache_range() to do. Based on
the MIPS implementation.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/mm/cache-sh4.c')
 arch/sh/mm/cache-sh4.c | 9 +++++++++
 1 file changed, 9 insertions(+), 0 deletions(-)
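For readers unfamiliar with the pattern, the sketch below shows the shape of the optimization in plain C: each flush entry point checks whether the current CPU has ever assigned an ASID to the mm and bails out before doing any cache work if not. This is a standalone, illustrative sketch, not the kernel code; the simplified mm_struct, the per-CPU context array, and the flush_dcache_all_for() helper are invented for the example, while NO_CONTEXT, cpu_context(), and smp_processor_id() only mirror the names used in the diff below.

/*
 * Minimal userspace sketch of the NO_CONTEXT early-return guard.
 * All types and helpers here are stand-ins; in the kernel,
 * cpu_context() and NO_CONTEXT come from the SH mmu_context code.
 */
#include <stdio.h>

#define NR_CPUS    4
#define NO_CONTEXT 0UL

/* Simplified stand-in for struct mm_struct: one ASID slot per CPU. */
struct mm_struct {
	unsigned long context_id[NR_CPUS];
};

/* Stand-ins for the kernel helpers referenced in the patch. */
static unsigned int smp_processor_id(void) { return 0; }

static unsigned long cpu_context(unsigned int cpu, struct mm_struct *mm)
{
	return mm->context_id[cpu];
}

/* Hypothetical "expensive" flush we want to skip when possible. */
static void flush_dcache_all_for(struct mm_struct *mm)
{
	printf("flushing cache lines for mm %p\n", (void *)mm);
}

void flush_cache_mm(struct mm_struct *mm)
{
	/* No ASID on this CPU means the mm never ran here: nothing to flush. */
	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
		return;

	flush_dcache_all_for(mm);
}

int main(void)
{
	struct mm_struct fresh  = { { NO_CONTEXT } }; /* never scheduled here */
	struct mm_struct active = { { 0x1001UL } };   /* has an ASID on CPU 0 */

	flush_cache_mm(&fresh);   /* returns immediately */
	flush_cache_mm(&active);  /* performs the flush */
	return 0;
}

In the actual patch, the same three-line guard is added at the top of flush_cache_mm(), flush_cache_page(), and flush_cache_range(), as the diff below shows.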
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index dfc1d0379479..92f87a460a81 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -330,6 +330,9 @@ loop_exit:
  */
 void flush_cache_mm(struct mm_struct *mm)
 {
+	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
+		return;
+
 	/*
 	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
 	 * the cache is physically tagged, the data can just be left in there.
@@ -371,6 +374,9 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
 	unsigned long phys = pfn << PAGE_SHIFT;
 	unsigned int alias_mask;
 
+	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
+		return;
+
 	alias_mask = boot_cpu_data.dcache.alias_mask;
 
 	/* We only need to flush D-cache when we have alias */
@@ -413,6 +419,9 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 		       unsigned long end)
 {
+	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
+		return;
+
 	/*
 	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
 	 * the cache is physically tagged, the data can just be left in there.