Diffstat (limited to 'arch/arm/include/asm/cacheflush.h')
 arch/arm/include/asm/cacheflush.h | 24 ++----------------------
 1 file changed, 2 insertions(+), 22 deletions(-)
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 730aefcfbee3..3d2ef54c7cb9 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -316,12 +316,8 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
  * processes address space. Really, we want to allow our "user
  * space" model to handle this.
  */
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	do {							\
-		memcpy(dst, src, len);				\
-		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
-	} while (0)
-
+extern void copy_to_user_page(struct vm_area_struct *, struct page *,
+	unsigned long, void *, const void *, unsigned long);
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	do {							\
 		memcpy(dst, src, len);				\
@@ -355,17 +351,6 @@ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
 	}
 }
 
-static inline void
-vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-			 unsigned long uaddr, void *kaddr,
-			 unsigned long len, int write)
-{
-	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-		unsigned long addr = (unsigned long)kaddr;
-		__cpuc_coherent_kern_range(addr, addr + len);
-	}
-}
-
 #ifndef CONFIG_CPU_CACHE_VIPT
 #define flush_cache_mm(mm) \
 		vivt_flush_cache_mm(mm)
@@ -373,15 +358,10 @@ vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 		vivt_flush_cache_range(vma,start,end)
 #define flush_cache_page(vma,addr,pfn) \
 		vivt_flush_cache_page(vma,addr,pfn)
-#define flush_ptrace_access(vma,page,ua,ka,len,write) \
-		vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
 #else
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
-extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-				unsigned long uaddr, void *kaddr,
-				unsigned long len, int write);
 #endif
 
 #define flush_cache_dup_mm(mm)		flush_cache_mm(mm)
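
Note: the new extern declaration means the copy_to_user_page() body now lives out of line (in arch/arm/mm/flush.c in mainline) rather than in this header. That definition is not part of this diff; the following is only a rough sketch of what such an out-of-line version could look like, reusing the memcpy + flush_ptrace_access(..., 1) sequence from the removed macro. The preempt_disable()/preempt_enable() pair is an assumption about how the cache maintenance might be kept on the CPU that did the write, not something shown by this patch.

/*
 * Hypothetical sketch only: restates the removed macro's behaviour as an
 * out-of-line function. The real definition (and whatever helper replaces
 * flush_ptrace_access() once its declaration is gone from this header) is
 * not part of this diff.
 */
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/preempt.h>
#include <asm/cacheflush.h>

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	/* assumed: keep the copy and the flush on the same CPU */
	preempt_disable();

	/* same copy the old macro performed */
	memcpy(dst, src, len);

	/* then make the caches coherent for the written kernel mapping,
	 * as the old macro did via flush_ptrace_access(..., write=1) */
	flush_ptrace_access(vma, page, uaddr, dst, len, 1);

	preempt_enable();
}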