author     Ilya Loginov <isloginov@gmail.com>    2009-11-26 03:16:19 -0500
committer  Jens Axboe <jens.axboe@oracle.com>    2009-11-26 03:16:19 -0500
commit     2d4dc890b5c8fabd818a8586607e6843c4375e62
tree       9976ed7b0eed0056f8289aeb6a2b0abf8c940454  /arch/m32r
parent     3586e917f2c7df769d173c4ec99554cb40a911e5
block: add helpers to run flush_dcache_page() against a bio and a request's pages
The mtdblock driver doesn't call flush_dcache_page() for the pages in a request, which causes problems on architectures where the icache doesn't fill from the dcache, or where dcache aliases exist. The patch fixes this.
The ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE symbol was introduced to avoid
pointless empty cache-thrashing loops on architectures for which
flush_dcache_page() is a no-op. Every architecture now provides this
symbol: the new helpers flush the pages on architectures where
ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE is 1 and do nothing otherwise.
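To illustrate that gating (a minimal sketch only, assuming a kernel build context; the helper name and the plain page-array interface are hypothetical and are not the helpers this commit adds), a flush helper can be compiled down to nothing where the symbol is 0:

#include <asm/cacheflush.h>	/* flush_dcache_page(), ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */
#include <linux/mm_types.h>	/* struct page */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
/* Real cache maintenance: flush every data page of an I/O transfer. */
static void example_flush_io_pages(struct page **pages, unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++)
		flush_dcache_page(pages[i]);
}
#else
/* flush_dcache_page() is a no-op here, so avoid the empty per-page loop. */
static inline void example_flush_io_pages(struct page **pages, unsigned int nr)
{
}
#endif

Where the symbol is 0, as m32r sets it in the diff below, the helper reduces to an empty inline and the per-page loop disappears at compile time.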
See "fix mtd_blkdevs problem with caches on some architectures" discussion
on LKML for more information.
Signed-off-by: Ilya Loginov <isloginov@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Peter Horton <phorton@bitbox.co.uk>
Cc: "Ed L. Cashin" <ecashin@coraid.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'arch/m32r')
-rw-r--r--  arch/m32r/include/asm/cacheflush.h | 3
1 file changed, 3 insertions, 0 deletions
diff --git a/arch/m32r/include/asm/cacheflush.h b/arch/m32r/include/asm/cacheflush.h
index 78587c958146..8e8e04516c39 100644
--- a/arch/m32r/include/asm/cacheflush.h
+++ b/arch/m32r/include/asm/cacheflush.h
@@ -12,6 +12,7 @@ extern void _flush_cache_copyback_all(void);
 #define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
 #define flush_dcache_page(page) do { } while (0)
 #define flush_dcache_mmap_lock(mapping) do { } while (0)
 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
@@ -33,6 +34,7 @@ extern void smp_flush_cache_all(void);
 #define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
 #define flush_dcache_page(page) do { } while (0)
 #define flush_dcache_mmap_lock(mapping) do { } while (0)
 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
@@ -46,6 +48,7 @@ extern void smp_flush_cache_all(void);
 #define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
 #define flush_dcache_page(page) do { } while (0)
 #define flush_dcache_mmap_lock(mapping) do { } while (0)
 #define flush_dcache_mmap_unlock(mapping) do { } while (0)