author    Nicolas Pitre <nico@cam.org>    2009-06-11 22:09:29 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>    2009-06-14 06:20:37 -0400
commit    73be1591579084a8103a7005dd3172f3e9dd7362 (patch)
tree      0f9dc996b9effc48728b2e25f51c41eded8cb518 /arch/arm/include/asm/cacheflush.h
parent    44b7532b8b464f606053562400719c9c21276037 (diff)
[ARM] 5545/2: add flush_kernel_dcache_page() for ARM
Without this, the default implementation is a no-op, which is completely
wrong with a VIVT cache, and usage of sg_copy_buffer() produces
unpredictable results.

Tested-by: Sebastian Andrzej Siewior <bigeasy@breakpoint.cc>
CC: stable@kernel.org
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
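To illustrate the scenario the commit message describes: on a VIVT (or
aliasing VIPT) cache, data written through a kernel mapping can linger in
cache lines tagged with the kernel virtual address, invisible through other
mappings of the same page until flushed. The sketch below shows the
conventional kmap/modify/flush sequence that callers such as
sg_copy_buffer() rely on; the helper name copy_to_lowmem_page() and its
arguments are hypothetical, not part of this patch:

	/* hypothetical caller sketch; needs <linux/highmem.h> and <linux/string.h> */
	static void copy_to_lowmem_page(struct page *page, const void *src, size_t len)
	{
		void *vaddr = kmap(page);	/* map the page into kernel space */

		memcpy(vaddr, src, len);	/* modify it via the kernel alias */
		flush_kernel_dcache_page(page);	/* write dirty lines back so other
						 * mappings of the page see the data */
		kunmap(page);
	}

Before this patch, the flush_kernel_dcache_page() call above fell through to
the generic no-op, leaving stale data in the kernel alias on VIVT systems.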
Diffstat (limited to 'arch/arm/include/asm/cacheflush.h')
-rw-r--r--  arch/arm/include/asm/cacheflush.h  8
1 file changed, 8 insertions, 0 deletions
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index bb7d695f3900..1a711ea8418b 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -429,6 +429,14 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 	__flush_anon_page(vma, page, vmaddr);
 }
 
+#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+static inline void flush_kernel_dcache_page(struct page *page)
+{
+	/* highmem pages are always flushed upon kunmap already */
+	if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
+		__cpuc_flush_dcache_page(page_address(page));
+}
+
 #define flush_dcache_mmap_lock(mapping) \
 	spin_lock_irq(&(mapping)->tree_lock)
 #define flush_dcache_mmap_unlock(mapping) \