Diffstat (limited to 'arch/arm/mm/flush.c')
-rw-r--r--	arch/arm/mm/flush.c	74
1 file changed, 32 insertions(+), 42 deletions(-)
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index b27942909b23..329594e760cd 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -18,10 +18,6 @@
 
 #include "mm.h"
 
-#ifdef CONFIG_ARM_ERRATA_411920
-extern void v6_icache_inval_all(void);
-#endif
-
 #ifdef CONFIG_CPU_CACHE_VIPT
 
 #define ALIAS_FLUSH_START	0xffff4000
@@ -35,77 +31,61 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 	flush_tlb_kernel_page(to);
 
 	asm(	"mcrr	p15, 0, %1, %0, c14\n"
-	"	mcr	p15, 0, %2, c7, c10, 4\n"
-#ifndef CONFIG_ARM_ERRATA_411920
-	"	mcr	p15, 0, %2, c7, c5, 0\n"
-#endif
+	"	mcr	p15, 0, %2, c7, c10, 4"
 	    :
 	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
 	    : "cc");
-#ifdef CONFIG_ARM_ERRATA_411920
-	v6_icache_inval_all();
-#endif
 }
 
 void flush_cache_mm(struct mm_struct *mm)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
-			__cpuc_flush_user_all();
+		vivt_flush_cache_mm(mm);
 		return;
 	}
 
 	if (cache_is_vipt_aliasing()) {
 		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
-		"	mcr	p15, 0, %0, c7, c10, 4\n"
-#ifndef CONFIG_ARM_ERRATA_411920
-		"	mcr	p15, 0, %0, c7, c5, 0\n"
-#endif
+		"	mcr	p15, 0, %0, c7, c10, 4"
 		    :
 		    : "r" (0)
 		    : "cc");
-#ifdef CONFIG_ARM_ERRATA_411920
-		v6_icache_inval_all();
-#endif
 	}
 }
 
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
-			__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
-						vma->vm_flags);
+		vivt_flush_cache_range(vma, start, end);
 		return;
 	}
 
 	if (cache_is_vipt_aliasing()) {
 		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
-		"	mcr	p15, 0, %0, c7, c10, 4\n"
-#ifndef CONFIG_ARM_ERRATA_411920
-		"	mcr	p15, 0, %0, c7, c5, 0\n"
-#endif
+		"	mcr	p15, 0, %0, c7, c10, 4"
 		    :
 		    : "r" (0)
 		    : "cc");
-#ifdef CONFIG_ARM_ERRATA_411920
-		v6_icache_inval_all();
-#endif
 	}
+
+	if (vma->vm_flags & VM_EXEC)
+		__flush_icache_all();
 }
 
 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-			unsigned long addr = user_addr & PAGE_MASK;
-			__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
-		}
+		vivt_flush_cache_page(vma, user_addr, pfn);
 		return;
 	}
 
-	if (cache_is_vipt_aliasing())
+	if (cache_is_vipt_aliasing()) {
 		flush_pfn_alias(pfn, user_addr);
+		__flush_icache_all();
+	}
+
+	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
+		__flush_icache_all();
 }
 
 void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
@@ -113,15 +93,13 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 			 unsigned long len, int write)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-			unsigned long addr = (unsigned long)kaddr;
-			__cpuc_coherent_kern_range(addr, addr + len);
-		}
+		vivt_flush_ptrace_access(vma, page, uaddr, kaddr, len, write);
 		return;
 	}
 
 	if (cache_is_vipt_aliasing()) {
 		flush_pfn_alias(page_to_pfn(page), uaddr);
+		__flush_icache_all();
 		return;
 	}
 
@@ -139,6 +117,8 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
+	void *addr = page_address(page);
+
 	/*
 	 * Writeback any data associated with the kernel mapping of this
 	 * page.  This ensures that data in the physical page is mutually
@@ -149,9 +129,9 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 * kmap_atomic() doesn't set the page virtual address, and
 	 * kunmap_atomic() takes care of cache flushing already.
 	 */
-	if (page_address(page))
+	if (addr)
 #endif
-		__cpuc_flush_dcache_page(page_address(page));
+		__cpuc_flush_dcache_page(addr);
 
 	/*
 	 * If this is a page cache page, and we have an aliasing VIPT cache,
@@ -215,7 +195,16 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
  */
 void flush_dcache_page(struct page *page)
 {
-	struct address_space *mapping = page_mapping(page);
+	struct address_space *mapping;
+
+	/*
+	 * The zero page is never written to, so never has any dirty
+	 * cache lines, and therefore never needs to be flushed.
+	 */
+	if (page == ZERO_PAGE(0))
+		return;
+
+	mapping = page_mapping(page);
 
 #ifndef CONFIG_SMP
 	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
@@ -261,6 +250,7 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l
 		 * userspace address only.
 		 */
 		flush_pfn_alias(pfn, vmaddr);
+		__flush_icache_all();
 	}
 
 	/*
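
A note on the two kinds of helper this diff switches to (both sketches below are hypothetical reconstructions for illustration, not part of the diff):

The added __flush_icache_all() calls centralize the whole-I-cache invalidate that every call site previously open-coded behind CONFIG_ARM_ERRATA_411920 guards. Assuming a definition along the lines of the era's arch/arm/include/asm/cacheflush.h (the in-tree version may differ, e.g. in how it hooks the 411920 workaround), a minimal sketch:

/*
 * Hypothetical sketch: invalidate the entire I-cache with one
 * co-processor write -- the same "mcr p15, 0, %0, c7, c5, 0"
 * instruction the diff removes from the inline asm blocks above.
 */
static inline void __flush_icache_all(void)
{
	asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache"
	    :
	    : "r" (0)
	    : "cc");
}

Likewise, the vivt_flush_cache_mm()/_range()/_page() and vivt_flush_ptrace_access() helpers presumably bundle exactly the cpumask_test_cpu() check and __cpuc_flush_user_*() call that each VIVT path open-coded before. For the mm case, under that assumption:

/*
 * Hypothetical sketch: flush the whole user address space, but only
 * if this CPU has ever run the mm -- the same test flush_cache_mm()
 * carried inline before this change.
 */
static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

Folding the errata handling and the CPU-mask test into single helpers is what lets the diff delete the triplicated #ifdef blocks while keeping the VIVT flush policy in one place.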