Diffstat (limited to 'arch/arm/mm/flush.c')
 arch/arm/mm/flush.c | 49 +++++++++++++++++++++++++++++--------------------
 1 file changed, 29 insertions(+), 20 deletions(-)
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 7f294f307c83..329594e760cd 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -35,14 +35,12 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 	    :
 	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
 	    : "cc");
-	__flush_icache_all();
 }
 
 void flush_cache_mm(struct mm_struct *mm)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
-			__cpuc_flush_user_all();
+		vivt_flush_cache_mm(mm);
 		return;
 	}
 
@@ -52,16 +50,13 @@ void flush_cache_mm(struct mm_struct *mm)
 		    :
 		    : "r" (0)
 		    : "cc");
-		__flush_icache_all();
 	}
 }
 
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
-			__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
-						vma->vm_flags);
+		vivt_flush_cache_range(vma, start, end);
 		return;
 	}
 
@@ -71,22 +66,26 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
 		    :
 		    : "r" (0)
 		    : "cc");
-		__flush_icache_all();
 	}
+
+	if (vma->vm_flags & VM_EXEC)
+		__flush_icache_all();
 }
 
 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-			unsigned long addr = user_addr & PAGE_MASK;
-			__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
-		}
+		vivt_flush_cache_page(vma, user_addr, pfn);
 		return;
 	}
 
-	if (cache_is_vipt_aliasing())
+	if (cache_is_vipt_aliasing()) {
 		flush_pfn_alias(pfn, user_addr);
+		__flush_icache_all();
+	}
+
+	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
+		__flush_icache_all();
 }
 
 void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
@@ -94,15 +93,13 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 			 unsigned long len, int write)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-			unsigned long addr = (unsigned long)kaddr;
-			__cpuc_coherent_kern_range(addr, addr + len);
-		}
+		vivt_flush_ptrace_access(vma, page, uaddr, kaddr, len, write);
 		return;
 	}
 
 	if (cache_is_vipt_aliasing()) {
 		flush_pfn_alias(page_to_pfn(page), uaddr);
+		__flush_icache_all();
 		return;
 	}
 
@@ -120,6 +117,8 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
+	void *addr = page_address(page);
+
 	/*
 	 * Writeback any data associated with the kernel mapping of this
 	 * page.  This ensures that data in the physical page is mutually
@@ -130,9 +129,9 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 * kmap_atomic() doesn't set the page virtual address, and
 	 * kunmap_atomic() takes care of cache flushing already.
 	 */
-	if (page_address(page))
+	if (addr)
 #endif
-		__cpuc_flush_dcache_page(page_address(page));
+		__cpuc_flush_dcache_page(addr);
 
 	/*
 	 * If this is a page cache page, and we have an aliasing VIPT cache,
@@ -196,7 +195,16 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
  */
 void flush_dcache_page(struct page *page)
 {
-	struct address_space *mapping = page_mapping(page);
+	struct address_space *mapping;
+
+	/*
+	 * The zero page is never written to, so never has any dirty
+	 * cache lines, and therefore never needs to be flushed.
+	 */
+	if (page == ZERO_PAGE(0))
+		return;
+
+	mapping = page_mapping(page);
 
 #ifndef CONFIG_SMP
 	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
@@ -242,6 +250,7 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l
 		 * userspace address only.
 		 */
 		flush_pfn_alias(pfn, vmaddr);
+		__flush_icache_all();
 	}
 
 	/*
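
For reference, the vivt_flush_cache_*() and vivt_flush_ptrace_access() helpers that this diff switches to are defined outside this file (in arch/arm/include/asm/cacheflush.h) and are not visible in the hunk context above. Below is a minimal sketch of what those helpers presumably contain, reconstructed from the lines this diff removes; the in-tree definitions may be spelled as macros rather than inline functions, and the exact signatures are an assumption here.

/* Hypothetical reconstruction, for illustration only: the real helpers
 * live in arch/arm/include/asm/cacheflush.h and may differ in form. */
static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	/* VIVT caches are virtually indexed and tagged, so only the CPU
	 * currently running this mm has anything to flush. */
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

static inline void vivt_flush_cache_range(struct vm_area_struct *vma,
					  unsigned long start, unsigned long end)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void vivt_flush_cache_page(struct vm_area_struct *vma,
					 unsigned long user_addr,
					 unsigned long pfn)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		unsigned long addr = user_addr & PAGE_MASK;

		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

Consolidating the VIVT paths behind these helpers keeps the cpumask check in one place, while the flush_cache_*() functions in flush.c gain the explicit I-cache maintenance (__flush_icache_all() for VM_EXEC ranges and for aliasing VIPT caches) that was previously buried in flush_pfn_alias().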