author	Catalin Marinas <catalin.marinas@arm.com>	2010-09-13 10:57:36 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2010-09-19 07:17:43 -0400
commit	c01778001a4f5ad9c62d882776235f3f31922fdd (patch)
tree	a425979b236dd5c7757e9a1f0c66d3819ad99021 /arch/arm
parent	0fc73099dd25df2c5181b7bad57d1faa5cd12d3c (diff)
ARM: 6379/1: Assume new page cache pages have dirty D-cache
There are places in Linux where writes to newly allocated page cache pages happen without a subsequent call to flush_dcache_page() (several PIO drivers, including USB HCD). This patch changes the meaning of PG_arch_1 to be PG_dcache_clean and always flushes the D-cache for a newly mapped page in update_mmu_cache().

The patch also sets the PG_arch_1 bit in the DMA cache maintenance function to avoid additional cache flushing in update_mmu_cache().

Tested-by: Rabin Vincent <rabin.vincent@stericsson.com>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
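Illustration (not part of the patch text): inverting the flag replaces the old flush condition one-for-one in each user of PG_dcache_dirty, as the hunks below show:

	/* before: flush if the page was marked dirty */
	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
		__flush_dcache_page(page_mapping(page), page);

	/* after: flush unless the page has already been marked clean */
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(page_mapping(page), page);

A freshly allocated page cache page has PG_arch_1 clear, so under the new meaning it is assumed to have a dirty D-cache and is flushed the first time it is mapped into user space, even if no driver called flush_dcache_page() after writing to it.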
Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/include/asm/cacheflush.h	6
-rw-r--r--	arch/arm/include/asm/tlbflush.h	2
-rw-r--r--	arch/arm/mm/copypage-v4mc.c	2
-rw-r--r--	arch/arm/mm/copypage-v6.c	2
-rw-r--r--	arch/arm/mm/copypage-xscale.c	2
-rw-r--r--	arch/arm/mm/dma-mapping.c	6
-rw-r--r--	arch/arm/mm/fault-armv.c	4
-rw-r--r--	arch/arm/mm/flush.c	3
8 files changed, 17 insertions(+), 10 deletions(-)
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 4656a24058d2..d3730f0f4b50 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -137,10 +137,10 @@
 #endif
 
 /*
- * This flag is used to indicate that the page pointed to by a pte
- * is dirty and requires cleaning before returning it to the user.
+ * This flag is used to indicate that the page pointed to by a pte is clean
+ * and does not require cleaning before returning it to the user.
  */
-#define PG_dcache_dirty PG_arch_1
+#define PG_dcache_clean PG_arch_1
 
 /*
  * MM Cache Management
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 33b546ae72d4..9ad329ad7458 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -560,7 +560,7 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 #endif
 
 /*
- * if PG_dcache_dirty is set for the page, we need to ensure that any
+ * If PG_dcache_clean is not set for the page, we need to ensure that any
  * cache entries for the kernels virtual memory range are written
  * back to the page.
  */
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 598c51ad5071..b8061519ce77 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -73,7 +73,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto = kmap_atomic(to, KM_USER1);
 
-	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
 	spin_lock(&minicache_lock);
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index f55fa1044f72..bdba6c65c901 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -79,7 +79,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long kfrom, kto;
 
-	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
 	/* FIXME: not highmem safe */
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 9920c0ae2096..649bbcd325bf 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -95,7 +95,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto = kmap_atomic(to, KM_USER1);
 
-	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
 	spin_lock(&minicache_lock);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4bc43e535d3b..e4dd0646e859 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -523,6 +523,12 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	outer_inv_range(paddr, paddr + size);
 
 	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+
+	/*
+	 * Mark the D-cache clean for this page to avoid extra flushing.
+	 */
+	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
+		set_bit(PG_dcache_clean, &page->flags);
 }
 EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 
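For context, a sketch of a driver path that benefits from this hunk (illustrative only, not part of the patch; the device and handle names are hypothetical):

	/* a device fills a whole page with data, then the driver unmaps it */
	dma_addr_t handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	/* ... device DMA writes PAGE_SIZE bytes into the page ... */
	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);

On ARM the unmap path ends up in ___dma_page_dev_to_cpu() with off == 0, size == PAGE_SIZE and dir == DMA_FROM_DEVICE, so the whole page has just had its cache lines invalidated for the CPU; marking it PG_dcache_clean here lets update_mmu_cache() skip a second flush when the page is later mapped into user space.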
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 9b906dec1ca1..58846cbd0e0b 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -141,7 +141,7 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
  * a page table, or changing an existing PTE. Basically, there are two
  * things that we need to take care of:
  *
- * 1. If PG_dcache_dirty is set for the page, we need to ensure
+ * 1. If PG_dcache_clean is not set for the page, we need to ensure
  *    that any cache entries for the kernels virtual memory
  *    range are written back to the page.
  * 2. If we have multiple shared mappings of the same space in
@@ -169,7 +169,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 
 	mapping = page_mapping(page);
 #ifndef CONFIG_SMP
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 		__flush_dcache_page(mapping, page);
 #endif
 	if (mapping) {
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 87dd5ffdfa2d..b4efce9b7985 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -248,7 +248,7 @@ void flush_dcache_page(struct page *page)
 
 #ifndef CONFIG_SMP
 	if (mapping && !mapping_mapped(mapping))
-		set_bit(PG_dcache_dirty, &page->flags);
+		clear_bit(PG_dcache_clean, &page->flags);
 	else
 #endif
 	{
@@ -257,6 +257,7 @@ void flush_dcache_page(struct page *page)
 			__flush_dcache_aliases(mapping, page);
 		else if (mapping)
 			__flush_icache_all();
+		set_bit(PG_dcache_clean, &page->flags);
 	}
 }
 EXPORT_SYMBOL(flush_dcache_page);