Diffstat (limited to 'arch/arm/mm/flush.c')
 arch/arm/mm/flush.c | 57 +++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 41 insertions(+), 16 deletions(-)
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index c6de48d89503..191788fb18d1 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -13,14 +13,32 @@
 
 #include <asm/cacheflush.h>
 #include <asm/system.h>
+#include <asm/tlbflush.h>
 
-static void __flush_dcache_page(struct address_space *mapping, struct page *page)
+#ifdef CONFIG_CPU_CACHE_VIPT
+#define ALIAS_FLUSH_START	0xffff4000
+
+#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
+
+static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 {
-	struct mm_struct *mm = current->active_mm;
-	struct vm_area_struct *mpnt;
-	struct prio_tree_iter iter;
-	pgoff_t pgoff;
+	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
 
+	set_pte(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL));
+	flush_tlb_kernel_page(to);
+
+	asm(	"mcrr	p15, 0, %1, %0, c14\n"
+	"	mcrr	p15, 0, %1, %0, c5\n"
+	    :
+	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES)
+	    : "cc");
+}
+#else
+#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
+#endif
+
+void __flush_dcache_page(struct address_space *mapping, struct page *page)
+{
 	/*
 	 * Writeback any data associated with the kernel mapping of this
 	 * page.  This ensures that data in the physical page is mutually
@@ -29,12 +47,21 @@ static void __flush_dcache_page(struct address_space *mapping, struct page *page
 	__cpuc_flush_dcache_page(page_address(page));
 
 	/*
-	 * If there's no mapping pointer here, then this page isn't
-	 * visible to userspace yet, so there are no cache lines
-	 * associated with any other aliases.
+	 * If this is a page cache page, and we have an aliasing VIPT cache,
+	 * we only need to do one flush - which would be at the relevant
+	 * userspace colour, which is congruent with page->index.
 	 */
-	if (!mapping)
-		return;
+	if (mapping && cache_is_vipt_aliasing())
+		flush_pfn_alias(page_to_pfn(page),
+				page->index << PAGE_CACHE_SHIFT);
+}
+
+static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
+{
+	struct mm_struct *mm = current->active_mm;
+	struct vm_area_struct *mpnt;
+	struct prio_tree_iter iter;
+	pgoff_t pgoff;
 
 	/*
 	 * There are possible user space mappings of this page:
@@ -57,8 +84,6 @@ static void __flush_dcache_page(struct address_space *mapping, struct page *page
 			continue;
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
 		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
-		if (cache_is_vipt())
-			break;
 	}
 	flush_dcache_mmap_unlock(mapping);
 }
@@ -83,12 +108,12 @@ void flush_dcache_page(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
 
-	if (cache_is_vipt_nonaliasing())
-		return;
-
 	if (mapping && !mapping_mapped(mapping))
 		set_bit(PG_dcache_dirty, &page->flags);
-	else
+	else {
 		__flush_dcache_page(mapping, page);
+		if (mapping && cache_is_vivt())
+			__flush_dcache_aliases(mapping, page);
+	}
 }
 EXPORT_SYMBOL(flush_dcache_page);
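
For readers following the change: the new flush_pfn_alias() relies on the fact that, with an aliasing VIPT cache, only the kernel alias whose virtual address is congruent with the user mapping's cache colour needs to be flushed. It therefore maps the page temporarily at ALIAS_FLUSH_START plus that colour offset and flushes the cache through the temporary window. The following user-space sketch shows just the address arithmetic; the SHMLBA value (four page colours) and the CACHE_COLOUR() definition below are illustrative assumptions, not taken from this diff.

/*
 * Illustrative sketch only (not kernel code): how the alias address used
 * by flush_pfn_alias() is derived.  CACHE_COLOUR() extracts the colour
 * bits of the user virtual address; the same offset applied to
 * ALIAS_FLUSH_START yields a kernel mapping that is congruent with the
 * user mapping in an aliasing VIPT cache.
 */
#include <stdio.h>

#define PAGE_SHIFT        12
#define PAGE_SIZE         (1UL << PAGE_SHIFT)
#define SHMLBA            (4 * PAGE_SIZE)            /* assumed: four page colours */
#define CACHE_COLOUR(v)   (((v) & (SHMLBA - 1)) >> PAGE_SHIFT)
#define ALIAS_FLUSH_START 0xffff4000UL

int main(void)
{
	unsigned long user_vaddr = 0x40003000UL;      /* hypothetical user mapping address */
	unsigned long to = ALIAS_FLUSH_START +
			   (CACHE_COLOUR(user_vaddr) << PAGE_SHIFT);

	/* 'to' shares the colour bits of 'user_vaddr', so cleaning the cache
	 * over [to, to + PAGE_SIZE) hits the same cache sets that the user
	 * mapping of the page occupies. */
	printf("colour %lu -> kernel alias at %#lx\n",
	       CACHE_COLOUR(user_vaddr), to);
	return 0;
}

With these assumed values, a user address such as 0x40003000 has colour 3 and would be flushed through the alias mapping at 0xffff7000.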