author      George G. Davis <davis_g@mvista.com>            2006-09-02 13:43:20 -0400
committer   Russell King <rmk+kernel@arm.linux.org.uk>      2006-09-02 13:43:20 -0400
commit      a188ad2bc7dbfa16ccdcaa8d43ade185b969baff (patch)
tree        7938fff8dded204e92bd7c3149875abb8d7060aa /arch/arm/mm
parent      57bcdafcb1e0782e7ae13471d9223c69e3a6cba2 (diff)
[ARM] 3762/1: Fix ptrace cache coherency bug for ARM1136 VIPT nonaliasing Harvard caches
Patch from George G. Davis

Resolve ARM1136 VIPT non-aliasing cache coherency issues observed when
using ptrace to set breakpoints, and clean up copy_{to,from}_user_page()
while we're here, as requested by Russell King because "it's also far too
heavy on non-v6 CPUs".

NOTES:

1. Only access_process_vm() calls copy_{to,from}_user_page().

2. access_process_vm() calls get_user_pages() to pin down the "page".

3. get_user_pages() calls flush_dcache_page(page), which ensures cache
   coherency between the kernel and userspace mappings of "page". However,
   flush_dcache_page(page) may not invalidate the I-cache over this range
   in all cases; specifically, the I-cache is not invalidated for the VIPT
   non-aliasing case. So memory is consistent between the kernel and user
   space mappings of "page", but the I-cache may still be hot over this
   range. In other words, we don't have to worry about flush_cache_page()
   before the memcpy().

4. For the copy_to_user_page() case, after the memcpy() we must flush the
   caches so memory is consistent with the kernel cache entries, and
   invalidate the I-cache if this mm region is executable. We don't need
   to do anything after the memcpy() for the copy_from_user_page() case,
   since kernel cache entries will be invalidated via the same process
   above if we access "page" again. A sketch of what this means for the
   two helpers is shown below.

The flush_ptrace_access() function (borrowed from the SPARC64
implementation) is added to handle cache flushing after the memcpy() for
the copy_to_user_page() case.

Signed-off-by: George G. Davis <gdavis@mvista.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
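The copy_{to,from}_user_page() cleanup referred to above lives in
include/asm-arm/cacheflush.h and therefore does not appear in the diffstat
below, which is limited to arch/arm/mm. As a rough sketch only (the actual
cacheflush.h hunk is not shown here; the macro bodies are an assumption
based on NOTE 4 and on the flush_ptrace_access() signature added in the
diff below), the macros would end up along these lines, with
copy_to_user_page() flushing after the memcpy() and copy_from_user_page()
reduced to a plain memcpy():

#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		memcpy(dst, src, len);					\
		/* flush so RAM matches the kernel cache entries and */	\
		/* invalidate the I-cache if the region is executable */\
		flush_ptrace_access(vma, page, vaddr, dst, len, 1);	\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		/* no post-flush needed: flush_dcache_page() in */	\
		/* get_user_pages() already made "page" coherent */	\
		memcpy(dst, src, len);					\
	} while (0)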
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--   arch/arm/mm/flush.c   26
1 file changed, 26 insertions, 0 deletions
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index b103e56806bd..d438ce41cdd5 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -87,6 +87,32 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 	if (cache_is_vipt_aliasing())
 		flush_pfn_alias(pfn, user_addr);
 }
+
+void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+			 unsigned long uaddr, void *kaddr,
+			 unsigned long len, int write)
+{
+	if (cache_is_vivt()) {
+		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+			unsigned long addr = (unsigned long)kaddr;
+			__cpuc_coherent_kern_range(addr, addr + len);
+		}
+		return;
+	}
+
+	if (cache_is_vipt_aliasing()) {
+		flush_pfn_alias(page_to_pfn(page), uaddr);
+		return;
+	}
+
+	/* VIPT non-aliasing cache */
+	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) &&
+	    vma->vm_flags | VM_EXEC) {
+		unsigned long addr = (unsigned long)kaddr;
+		/* only flushing the kernel mapping on non-aliasing VIPT */
+		__cpuc_coherent_kern_range(addr, addr + len);
+	}
+}
 #else
 #define flush_pfn_alias(pfn,vaddr)	do { } while (0)
 #endif
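For reference, the only path that reaches the new function is ptrace poking
memory through access_process_vm() (NOTE 1 in the commit message). The
following is an abridged, illustrative sketch of the relevant loop body in
mm/memory.c of this era, not the exact code (variable names such as maddr,
offset, buf and bytes are assumed); it shows where copy_to_user_page(), and
hence flush_ptrace_access(), gets invoked after get_user_pages() has pinned
the page:

	/* inside access_process_vm(): "page" is pinned and		*/
	/* flush_dcache_page() has already run via get_user_pages()	*/
	maddr = kmap(page);
	if (write) {
		/* e.g. ptrace writing a breakpoint instruction:	*/
		/* copy via the kernel mapping, then copy_to_user_page()*/
		/* makes the user's I-cache see the new instruction	*/
		copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
		set_page_dirty_lock(page);
	} else {
		/* read side: no extra flushing required (NOTE 4)	*/
		copy_from_user_page(vma, page, addr, buf, maddr + offset, bytes);
	}
	kunmap(page);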