-rw-r--r--  arch/arm/include/asm/cacheflush.h | 24 ++----------------------
-rw-r--r--  arch/arm/include/asm/smp_plat.h   |  5 +++++
-rw-r--r--  arch/arm/mm/flush.c               | 51 +++++++++++++++++++++++++++++++++++++++++++++--------
 3 files changed, 50 insertions(+), 30 deletions(-)
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 730aefcfbee3..3d2ef54c7cb9 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -316,12 +316,8 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
  * processes address space. Really, we want to allow our "user
  * space" model to handle this.
  */
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	do {							\
-		memcpy(dst, src, len);				\
-		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
-	} while (0)
-
+extern void copy_to_user_page(struct vm_area_struct *, struct page *,
+	unsigned long, void *, const void *, unsigned long);
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	do {							\
 		memcpy(dst, src, len);				\
@@ -355,17 +351,6 @@ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
 	}
 }
 
-static inline void
-vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-			 unsigned long uaddr, void *kaddr,
-			 unsigned long len, int write)
-{
-	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-		unsigned long addr = (unsigned long)kaddr;
-		__cpuc_coherent_kern_range(addr, addr + len);
-	}
-}
-
 #ifndef CONFIG_CPU_CACHE_VIPT
 #define flush_cache_mm(mm) \
 		vivt_flush_cache_mm(mm)
@@ -373,15 +358,10 @@ vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 		vivt_flush_cache_range(vma,start,end)
 #define flush_cache_page(vma,addr,pfn) \
 		vivt_flush_cache_page(vma,addr,pfn)
-#define flush_ptrace_access(vma,page,ua,ka,len,write) \
-		vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
 #else
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
-extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-				unsigned long uaddr, void *kaddr,
-				unsigned long len, int write);
 #endif
 
 #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
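
For context, the new out-of-line copy_to_user_page() keeps the old macro's
signature, so existing call sites need no change. A minimal sketch of the
canonical caller, modelled on the ptrace write path in mm/memory.c (variable
names here are illustrative, not quoted from this patch):

    void *maddr = kmap(page);               /* kernel alias of the target page */
    copy_to_user_page(vma, page, addr,      /* vma + user address being poked  */
                      maddr + offset,       /* destination inside that alias   */
                      buf, bytes);          /* data written by the tracer      */
    set_page_dirty_lock(page);
    kunmap(page);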
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 59303e200845..e6215305544a 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -13,4 +13,9 @@ static inline int tlb_ops_need_broadcast(void)
 	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
 }
 
+static inline int cache_ops_need_broadcast(void)
+{
+	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1;
+}
+
 #endif
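
Both helpers decode ID_MMFR3 bits [15:12], the "maintenance broadcast" field.
A sketch of the decode they rely on; the field values below follow the ARMv7
ARM as best recalled here and should be treated as an assumption:

    /*
     * ID_MMFR3[15:12] - maintenance broadcast:
     *   0: CP15 maintenance operations affect the local CPU only
     *   1: cache and branch predictor maintenance is broadcast in
     *      hardware, TLB maintenance is not
     *   2: cache, branch predictor and TLB maintenance are all broadcast
     *
     * Hence tlb_ops_need_broadcast() is true for values < 2, while
     * cache_ops_need_broadcast() is true only for value 0.
     */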
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 6f3a4b7a3b82..e34f095e2090 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -13,6 +13,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
+#include <asm/smp_plat.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
 
@@ -87,13 +88,26 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
 	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
 		__flush_icache_all();
 }
+#else
+#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
+#endif
 
+#ifdef CONFIG_SMP
+static void flush_ptrace_access_other(void *args)
+{
+	__flush_icache_all();
+}
+#endif
+
+static
 void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-			 unsigned long uaddr, void *kaddr,
-			 unsigned long len, int write)
+			 unsigned long uaddr, void *kaddr, unsigned long len)
 {
 	if (cache_is_vivt()) {
-		vivt_flush_ptrace_access(vma, page, uaddr, kaddr, len, write);
+		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
+			unsigned long addr = (unsigned long)kaddr;
+			__cpuc_coherent_kern_range(addr, addr + len);
+		}
 		return;
 	}
 
@@ -104,16 +118,37 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 	}
 
 	/* VIPT non-aliasing cache */
-	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) &&
-	    vma->vm_flags & VM_EXEC) {
+	if (vma->vm_flags & VM_EXEC) {
 		unsigned long addr = (unsigned long)kaddr;
-		/* only flushing the kernel mapping on non-aliasing VIPT */
 		__cpuc_coherent_kern_range(addr, addr + len);
+#ifdef CONFIG_SMP
+		if (cache_ops_need_broadcast())
+			smp_call_function(flush_ptrace_access_other,
+					  NULL, 1);
+#endif
 	}
 }
-#else
-#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
+
+/*
+ * Copy user data from/to a page which is mapped into a different
+ * processes address space. Really, we want to allow our "user
+ * space" model to handle this.
+ *
+ * Note that this code needs to run on the current CPU.
+ */
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+		       unsigned long uaddr, void *dst, const void *src,
+		       unsigned long len)
+{
+#ifdef CONFIG_SMP
+	preempt_disable();
 #endif
+	memcpy(dst, src, len);
+	flush_ptrace_access(vma, page, uaddr, dst, len);
+#ifdef CONFIG_SMP
+	preempt_enable();
+#endif
+}
 
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {