author:    David Woodhouse <David.Woodhouse@intel.com>  2010-10-30 07:35:11 -0400
committer: David Woodhouse <David.Woodhouse@intel.com>  2010-10-30 07:35:11 -0400
commit:    67577927e8d7a1f4b09b4992df640eadc6aacb36
tree:      2e9efe6b5745965faf0dcc084d4613d9356263f9 /arch/arm/mm/flush.c
parent:    6fe4c590313133ebd5dadb769031489ff178ece1
parent:    51f00a471ce8f359627dd99aeac322947a0e491b

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git

Conflicts:
	drivers/mtd/mtd_blkdevs.c

Merge Grant's device-tree bits so that we can apply the subsequent fixes.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'arch/arm/mm/flush.c')

-rw-r--r--	arch/arm/mm/flush.c	69
1 file changed, 56 insertions(+), 13 deletions(-)
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index c6844cb9b508..391ffae75098 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -17,6 +17,7 @@
 #include <asm/smp_plat.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
+#include <asm/smp_plat.h>
 
 #include "mm.h"
 
@@ -39,6 +40,18 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 	    : "cc");
 }
 
+static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
+{
+	unsigned long colour = CACHE_COLOUR(vaddr);
+	unsigned long offset = vaddr & (PAGE_SIZE - 1);
+	unsigned long to;
+
+	set_pte_ext(TOP_PTE(ALIAS_FLUSH_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0);
+	to = ALIAS_FLUSH_START + (colour << PAGE_SHIFT) + offset;
+	flush_tlb_kernel_page(to);
+	flush_icache_range(to, to + len);
+}
+
 void flush_cache_mm(struct mm_struct *mm)
 {
 	if (cache_is_vivt()) {
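The new flush_icache_alias() deals with a VIPT aliasing I-cache by temporarily mapping the page at a kernel address that has the same cache colour as the user address: the flush performed through that alias then hits exactly the I-cache sets the user mapping occupies. Below is a minimal userspace sketch of the address arithmetic, assuming the usual ARM definitions (SHMLBA == 4 * PAGE_SIZE, CACHE_COLOUR(v) == (v & (SHMLBA - 1)) >> PAGE_SHIFT); treat the ALIAS_FLUSH_START value and the example address as illustrative assumptions, not facts from this diff.

```c
/* Illustrative userspace sketch of flush_icache_alias()'s colour
 * arithmetic -- not kernel code. */
#include <stdio.h>

#define PAGE_SHIFT        12
#define PAGE_SIZE         (1UL << PAGE_SHIFT)
#define SHMLBA            (4 * PAGE_SIZE)     /* assumed colouring granule */
#define ALIAS_FLUSH_START 0xffff4000UL        /* assumed alias window base */

#define CACHE_COLOUR(v)   (((v) & (SHMLBA - 1)) >> PAGE_SHIFT)

int main(void)
{
	unsigned long uaddr  = 0x00013456UL;  /* hypothetical user vaddr */
	unsigned long colour = CACHE_COLOUR(uaddr);
	unsigned long offset = uaddr & (PAGE_SIZE - 1);
	unsigned long to     = ALIAS_FLUSH_START + (colour << PAGE_SHIFT) + offset;

	/* 'to' indexes the same cache sets as 'uaddr' on a VIPT cache,
	 * so flushing [to, to + len) hits the user mapping's lines. */
	printf("colour=%lu alias=0x%08lx\n", colour, to);
	return 0;
}
```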
@@ -89,16 +102,16 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
 	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
 		__flush_icache_all();
 }
+
 #else
 #define flush_pfn_alias(pfn,vaddr)		do { } while (0)
+#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
 #endif
 
-#ifdef CONFIG_SMP
 static void flush_ptrace_access_other(void *args)
 {
 	__flush_icache_all();
 }
-#endif
 
 static
 void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
@@ -118,15 +131,16 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 		return;
 	}
 
-	/* VIPT non-aliasing cache */
+	/* VIPT non-aliasing D-cache */
 	if (vma->vm_flags & VM_EXEC) {
 		unsigned long addr = (unsigned long)kaddr;
-		__cpuc_coherent_kern_range(addr, addr + len);
-#ifdef CONFIG_SMP
+		if (icache_is_vipt_aliasing())
+			flush_icache_alias(page_to_pfn(page), uaddr, len);
+		else
+			__cpuc_coherent_kern_range(addr, addr + len);
 		if (cache_ops_need_broadcast())
 			smp_call_function(flush_ptrace_access_other,
 					  NULL, 1);
-#endif
 	}
 }
 
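Two things change in flush_ptrace_access(): the I-cache flush is routed through the colour-matched alias when the I-cache is VIPT aliasing (flushing the kernel mapping directly would index the wrong sets), and the compile-time #ifdef CONFIG_SMP guards are replaced by the runtime cache_ops_need_broadcast() test, so kernels running on cores whose cache maintenance is broadcast in hardware skip the IPI. A condensed, runnable model of the resulting dispatch follows; the wrapper name flush_icache_user_range() is hypothetical, the predicates are stubbed, and printf stands in for real cache operations.

```c
/* Control-flow model of the dispatch above -- not kernel code. */
#include <stdbool.h>
#include <stdio.h>

static bool icache_is_vipt_aliasing(void)  { return true;  }  /* assumed */
static bool cache_ops_need_broadcast(void) { return false; }  /* assumed */

static void flush_icache_user_range(unsigned long pfn, unsigned long uaddr,
				    unsigned long kaddr, unsigned long len)
{
	if (icache_is_vipt_aliasing())
		/* kernel alias indexes different I-cache sets: flush via a
		 * colour-matched alias of the user address instead */
		printf("flush_icache_alias(pfn=%lu, uaddr=%#lx, len=%lu)\n",
		       pfn, uaddr, len);
	else
		/* non-aliasing: the kernel mapping hits the same lines */
		printf("__cpuc_coherent_kern_range(%#lx, %#lx)\n",
		       kaddr, kaddr + len);

	if (cache_ops_need_broadcast())
		/* local maintenance is invisible to other CPUs: IPI them */
		printf("smp_call_function(__flush_icache_all)\n");
}

int main(void)
{
	flush_icache_user_range(42, 0x13456UL, 0xc0013456UL, 64);
	return 0;
}
```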
@@ -215,6 +229,36 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
 	flush_dcache_mmap_unlock(mapping);
 }
 
+#if __LINUX_ARM_ARCH__ >= 6
+void __sync_icache_dcache(pte_t pteval)
+{
+	unsigned long pfn;
+	struct page *page;
+	struct address_space *mapping;
+
+	if (!pte_present_user(pteval))
+		return;
+	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
+		/* only flush non-aliasing VIPT caches for exec mappings */
+		return;
+	pfn = pte_pfn(pteval);
+	if (!pfn_valid(pfn))
+		return;
+
+	page = pfn_to_page(pfn);
+	if (cache_is_vipt_aliasing())
+		mapping = page_mapping(page);
+	else
+		mapping = NULL;
+
+	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+		__flush_dcache_page(mapping, page);
+	/* pte_exec() already checked above for non-aliasing VIPT cache */
+	if (cache_is_vipt_nonaliasing() || pte_exec(pteval))
+		__flush_icache_all();
+}
+#endif
+
 /*
  * Ensure cache coherency between kernel mapping and userspace mapping
  * of this page.
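This diff does not show who calls __sync_icache_dcache(); the companion change outside this file hooks it into the PTE-set path, so caches are synchronised when a page is mapped into userspace rather than when the kernel dirties it. The runnable stub model below illustrates that idea; set_pte_at(), set_pte_ext() and the TASK_SIZE test are assumptions about that companion patch, not code from this commit.

```c
/* Stub model of the assumed call site -- not kernel code. */
#include <stdio.h>

typedef unsigned long pte_t;
#define TASK_SIZE 0xbf000000UL		/* assumed user/kernel split */

static void __sync_icache_dcache(pte_t pteval)
{
	printf("sync I/D caches for pte %#lx\n", pteval);
}

static void set_pte_ext(pte_t *ptep, pte_t pteval, unsigned int ext)
{
	(void)ext;
	*ptep = pteval;			/* model: just store the PTE */
}

static void set_pte_at(unsigned long addr, pte_t *ptep, pte_t pteval)
{
	if (addr < TASK_SIZE)		/* user mapping: sync caches first */
		__sync_icache_dcache(pteval);
	set_pte_ext(ptep, pteval, 0);	/* then install the PTE */
}

int main(void)
{
	pte_t pte = 0;
	set_pte_at(0x8000UL, &pte, 0x12345043UL);
	return 0;
}
```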
@@ -246,17 +290,16 @@ void flush_dcache_page(struct page *page)
 
 	mapping = page_mapping(page);
 
-#ifndef CONFIG_SMP
-	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
-		set_bit(PG_dcache_dirty, &page->flags);
-	else
-#endif
-	{
+	if (!cache_ops_need_broadcast() &&
+	    mapping && !mapping_mapped(mapping))
+		clear_bit(PG_dcache_clean, &page->flags);
+	else {
 		__flush_dcache_page(mapping, page);
 		if (mapping && cache_is_vivt())
 			__flush_dcache_aliases(mapping, page);
 		else if (mapping)
 			__flush_icache_all();
+		set_bit(PG_dcache_clean, &page->flags);
 	}
 }
 EXPORT_SYMBOL(flush_dcache_page);
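flush_dcache_page() now records deferred work by clearing PG_dcache_clean, the inverse of the old PG_dcache_dirty bit. The inversion matters: freshly allocated pages have such flags clear, so a new page-cache page is treated as having a dirty D-cache even if flush_dcache_page() is never called, and the deferred flush is guaranteed to happen in __sync_icache_dcache() when the page is first mapped. The runnable model below walks through that handshake; the printf calls stand in for real cache maintenance.

```c
/* Userspace model of the PG_dcache_clean handshake -- not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct page { bool dcache_clean; };	/* models PG_dcache_clean */

/* models test_and_set_bit(PG_dcache_clean, &page->flags) */
static bool test_and_set_clean(struct page *p)
{
	bool was_set = p->dcache_clean;
	p->dcache_clean = true;
	return was_set;
}

/* kernel wrote to the page: defer the flush if no user mapping exists */
static void flush_dcache_page(struct page *p, bool mapped_to_user)
{
	if (!mapped_to_user) {
		p->dcache_clean = false;
		printf("flush deferred (page marked not clean)\n");
	} else {
		printf("D-cache flushed eagerly\n");
		p->dcache_clean = true;
	}
}

/* page is being mapped into userspace: perform any deferred flush once */
static void __sync_icache_dcache(struct page *p)
{
	if (!test_and_set_clean(p))
		printf("deferred D-cache flush performed at map time\n");
	else
		printf("page already clean, nothing to do\n");
}

int main(void)
{
	struct page p = { .dcache_clean = true };

	flush_dcache_page(&p, false);	/* unmapped page-cache page */
	__sync_icache_dcache(&p);	/* first user mapping: flush now */
	__sync_icache_dcache(&p);	/* later mapping: skipped */
	return 0;
}
```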