author    Catalin Marinas <catalin.marinas@arm.com>    2010-09-13 10:58:06 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>   2010-09-19 07:17:44 -0400
commit    6012191aa9c6ffff3a23b81162298318b56d7cb3 (patch)
tree      8f08d869b452d66f126743bcfd73aa6f5a701605 /arch/arm/include/asm/tlbflush.h
parent    c01778001a4f5ad9c62d882776235f3f31922fdd (diff)
ARM: 6380/1: Introduce __sync_icache_dcache() for VIPT caches
On SMP systems, there is a small chance of a PTE becoming visible to a
different CPU before the current cache maintenance operations in
update_mmu_cache(). To avoid this, cache maintenance must be handled in
set_pte_at() (similar to IA-64 and PowerPC).

This patch provides a unified VIPT cache handling mechanism and implements
the __sync_icache_dcache() function for ARMv6 and later architectures. It is
called from set_pte_at() and replaces update_mmu_cache(). The latter is still
used on VIVT hardware, where a vm_area_struct is required.

Tested-by: Rabin Vincent <rabin.vincent@stericsson.com>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
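For context, the new helper is wired into set_pte_at() by the companion change
to arch/arm/include/asm/pgtable.h (not shown in this file's diff). The sketch
below only illustrates the shape of that hook; the set_pte_ext()/PTE_EXT_NG
details and the TASK_SIZE check are assumptions for illustration and may not
match the committed hunk exactly.

/*
 * Illustrative sketch (not the committed hunk): __sync_icache_dcache() is
 * called from set_pte_at() so that D/I-cache maintenance happens before the
 * PTE can become visible to another CPU.  On pre-ARMv6 (VIVT) hardware the
 * helper is a no-op and update_mmu_cache() keeps doing the work.
 */
#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

#define set_pte_at(mm, addr, ptep, pteval)			\
	do {							\
		if (addr >= TASK_SIZE)				\
			set_pte_ext(ptep, pteval, 0);		\
		else {						\
			__sync_icache_dcache(pteval);		\
			set_pte_ext(ptep, pteval, PTE_EXT_NG);	\
		}						\
	} while (0)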
Diffstat (limited to 'arch/arm/include/asm/tlbflush.h')
-rw-r--r--  arch/arm/include/asm/tlbflush.h | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 9ad329ad745..989c9e57d92 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -562,10 +562,18 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 /*
  * If PG_dcache_clean is not set for the page, we need to ensure that any
  * cache entries for the kernels virtual memory range are written
- * back to the page.
+ * back to the page. On ARMv6 and later, the cache coherency is handled via
+ * the set_pte_at() function.
  */
+#if __LINUX_ARM_ARCH__ < 6
 extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 	pte_t *ptep);
+#else
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+				    unsigned long addr, pte_t *ptep)
+{
+}
+#endif
 
 #endif
 