author    Catalin Marinas <catalin.marinas@arm.com>    2010-09-13 10:57:36 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>   2010-09-19 07:17:43 -0400
commit    c01778001a4f5ad9c62d882776235f3f31922fdd
tree      a425979b236dd5c7757e9a1f0c66d3819ad99021 /arch/arm/mm/fault-armv.c
parent    0fc73099dd25df2c5181b7bad57d1faa5cd12d3c
ARM: 6379/1: Assume new page cache pages have dirty D-cache
There are places in Linux where writes to newly allocated page cache
pages happen without a subsequent call to flush_dcache_page() (several
PIO drivers, including USB HCD). This patch changes the meaning of
PG_arch_1 to be PG_dcache_clean and always flushes the D-cache for a
newly mapped page in update_mmu_cache(). The patch also sets the
PG_arch_1 bit in the DMA cache maintenance function to avoid
additional cache flushing in update_mmu_cache().

Tested-by: Rabin Vincent <rabin.vincent@stericsson.com>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
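The point of inverting the flag's polarity (visible in the second hunk
below, where test_and_clear_bit(PG_dcache_dirty, ...) becomes
!test_and_set_bit(PG_dcache_clean, ...)) is that a newly allocated page
starts with its arch flag cleared: under the old "dirty" meaning such a
page looked clean and the flush could be skipped even though a PIO
driver had written to it, whereas under the new "clean" meaning it
defaults to dirty and the first mapping always flushes. A minimal
userspace sketch of the two polarities, using plain bit operations as
hypothetical stand-ins for the kernel's atomic page-flag bitops:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the PG_arch_1 page flag bit. */
#define PG_ARCH_1 (1UL << 0)

/* Old polarity: PG_arch_1 means "D-cache dirty". A freshly allocated
 * page has flags == 0, so it looks clean and no flush happens --
 * wrong if the page was written without flush_dcache_page(). */
static bool needs_flush_old(unsigned long *flags)
{
	bool dirty = *flags & PG_ARCH_1;
	*flags &= ~PG_ARCH_1;	/* mimics test_and_clear_bit(PG_dcache_dirty) */
	return dirty;
}

/* New polarity: PG_arch_1 means "D-cache clean". A freshly allocated
 * page has flags == 0, so it is conservatively treated as dirty,
 * flushed on first mapping, and then marked clean. */
static bool needs_flush_new(unsigned long *flags)
{
	bool was_clean = *flags & PG_ARCH_1;
	*flags |= PG_ARCH_1;	/* mimics !test_and_set_bit(PG_dcache_clean) */
	return !was_clean;
}

int main(void)
{
	unsigned long flags = 0;	/* a newly allocated page's flags */

	printf("old polarity, new page: flush? %d\n", needs_flush_old(&flags));

	flags = 0;
	printf("new polarity, new page: flush? %d\n", needs_flush_new(&flags));
	printf("new polarity, remapped: flush? %d\n", needs_flush_new(&flags));
	return 0;
}

The second needs_flush_new() call returning 0 mirrors how
update_mmu_cache() skips the flush once a page has been marked clean,
e.g. after the DMA cache maintenance code has set the bit, as the
commit message describes.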
Diffstat (limited to 'arch/arm/mm/fault-armv.c')
-rw-r--r--  arch/arm/mm/fault-armv.c  4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 9b906dec1ca1..58846cbd0e0b 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -141,7 +141,7 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
  * a page table, or changing an existing PTE.  Basically, there are two
  * things that we need to take care of:
  *
- *  1. If PG_dcache_dirty is set for the page, we need to ensure
+ *  1. If PG_dcache_clean is not set for the page, we need to ensure
  *     that any cache entries for the kernels virtual memory
  *     range are written back to the page.
  *  2. If we have multiple shared mappings of the same space in
@@ -169,7 +169,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 
 	mapping = page_mapping(page);
 #ifndef CONFIG_SMP
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 		__flush_dcache_page(mapping, page);
 #endif
 	if (mapping) {