Diffstat (limited to 'arch/arm/include/asm/cacheflush.h')
-rw-r--r--	arch/arm/include/asm/cacheflush.h	69
1 file changed, 23 insertions(+), 46 deletions(-)
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index c77d2fa1f6e5..72da7e045c6b 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -42,7 +42,8 @@
 #endif
 
 #if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
-    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
+    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
+    defined(CONFIG_CPU_ARM1026)
 # define MULTI_CACHE 1
 #endif
 
@@ -196,21 +197,6 @@
  * DMA Cache Coherency
  * ===================
  *
- * dma_inv_range(start, end)
- *
- *	Invalidate (discard) the specified virtual address range.
- *	May not write back any entries.  If 'start' or 'end'
- *	are not cache line aligned, those lines must be written
- *	back.
- *	- start  - virtual start address
- *	- end    - virtual end address
- *
- * dma_clean_range(start, end)
- *
- *	Clean (write back) the specified virtual address range.
- *	- start  - virtual start address
- *	- end    - virtual end address
- *
  * dma_flush_range(start, end)
  *
  *	Clean and invalidate the specified virtual address range.
@@ -227,8 +213,9 @@ struct cpu_cache_fns {
 	void (*coherent_user_range)(unsigned long, unsigned long);
 	void (*flush_kern_dcache_area)(void *, size_t);
 
-	void (*dma_inv_range)(const void *, const void *);
-	void (*dma_clean_range)(const void *, const void *);
+	void (*dma_map_area)(const void *, size_t, int);
+	void (*dma_unmap_area)(const void *, size_t, int);
+
 	void (*dma_flush_range)(const void *, const void *);
 };
 
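The new dma_map_area()/dma_unmap_area() hooks take (addr, size, direction) instead of (start, end), folding the old invalidate-vs-clean choice into the DMA direction argument. A minimal sketch of how a per-CPU implementation might dispatch on that argument follows; the example_* names are illustrative stand-ins, not symbols introduced by this diff:

/*
 * Sketch only: a possible dma_map_area() dispatching on the DMA
 * direction (assumed to carry enum dma_data_direction values).  The
 * example_* helpers stand in for the CPU-specific invalidate, clean
 * and flush routines; they are assumptions, not names from this diff.
 */
static void example_dma_map_area(const void *kaddr, size_t size, int dir)
{
	const void *end = kaddr + size;

	if (dir == DMA_FROM_DEVICE)	/* device will write: discard stale lines */
		example_dma_inv_range(kaddr, end);
	else if (dir == DMA_TO_DEVICE)	/* device will read: write back dirty lines */
		example_dma_clean_range(kaddr, end);
	else				/* DMA_BIDIRECTIONAL: clean and invalidate */
		example_dma_flush_range(kaddr, end);
}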
@@ -258,8 +245,8 @@ extern struct cpu_cache_fns cpu_cache;
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_inv_range			cpu_cache.dma_inv_range
-#define dmac_clean_range		cpu_cache.dma_clean_range
+#define dmac_map_area			cpu_cache.dma_map_area
+#define dmac_unmap_area			cpu_cache.dma_unmap_area
 #define dmac_flush_range		cpu_cache.dma_flush_range
 
 #else
@@ -284,12 +271,12 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
-#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
+#define dmac_map_area			__glue(_CACHE,_dma_map_area)
+#define dmac_unmap_area			__glue(_CACHE,_dma_unmap_area)
 #define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
 
-extern void dmac_inv_range(const void *, const void *);
-extern void dmac_clean_range(const void *, const void *);
+extern void dmac_map_area(const void *, size_t, int);
+extern void dmac_unmap_area(const void *, size_t, int);
 extern void dmac_flush_range(const void *, const void *);
 
 #endif
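In this single-cache (non-MULTI_CACHE) branch, __glue() pastes the configured cache type into the symbol at preprocessing time, so the same dmac_map_area call site binds directly to one CPU's routine rather than going through the cpu_cache ops table. Assuming _CACHE expands to v7 (an illustrative choice, not implied by this diff), the expansion works like this:

/*
 * How the __glue() name pasting resolves, assuming _CACHE is "v7"
 * (any configured cache type works the same way).
 */
#define ____glue(name, fn)	name##fn
#define __glue(name, fn)	____glue(name, fn)

/* dmac_map_area  ->  __glue(v7, _dma_map_area)  ->  v7_dma_map_area */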
@@ -330,12 +317,8 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
  * processes address space.  Really, we want to allow our "user
  * space" model to handle this.
  */
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	do {							\
-		memcpy(dst, src, len);				\
-		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
-	} while (0)
-
+extern void copy_to_user_page(struct vm_area_struct *, struct page *,
+	unsigned long, void *, const void *, unsigned long);
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	do {							\
 		memcpy(dst, src, len);				\
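With copy_to_user_page() demoted from a macro to an extern function, its body moves out of this header and into the mm code. A hedged sketch of what the out-of-line definition could look like, assuming it keeps the memcpy-then-flush shape of the macro it replaces and that the flush helper removed from this header lives on beside it as an internal function:

/*
 * Sketch of an out-of-line copy_to_user_page(), assuming it keeps the
 * memcpy + cache-maintenance shape of the removed macro.  The flush
 * helper called here is assumed to be internal to the mm code now
 * that its declaration is gone from this header.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len, 1);	/* 1 == write */
}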
@@ -369,17 +352,6 @@ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
 	}
 }
 
-static inline void
-vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-			 unsigned long uaddr, void *kaddr,
-			 unsigned long len, int write)
-{
-	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-		unsigned long addr = (unsigned long)kaddr;
-		__cpuc_coherent_kern_range(addr, addr + len);
-	}
-}
-
 #ifndef CONFIG_CPU_CACHE_VIPT
 #define flush_cache_mm(mm) \
 		vivt_flush_cache_mm(mm)
@@ -387,15 +359,10 @@ vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 		vivt_flush_cache_range(vma,start,end)
 #define flush_cache_page(vma,addr,pfn) \
 		vivt_flush_cache_page(vma,addr,pfn)
-#define flush_ptrace_access(vma,page,ua,ka,len,write) \
-		vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
 #else
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
-extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-				unsigned long uaddr, void *kaddr,
-				unsigned long len, int write);
 #endif
 
 #define flush_cache_dup_mm(mm)		flush_cache_mm(mm)
@@ -446,6 +413,16 @@ static inline void __flush_icache_all(void)
 		    : "r" (0));
 #endif
 }
+static inline void flush_kernel_vmap_range(void *addr, int size)
+{
+	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
+		__cpuc_flush_dcache_area(addr, (size_t)size);
+}
+static inline void invalidate_kernel_vmap_range(void *addr, int size)
+{
+	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
+		__cpuc_flush_dcache_area(addr, (size_t)size);
+}
 
 #define ARCH_HAS_FLUSH_ANON_PAGE
 static inline void flush_anon_page(struct vm_area_struct *vma,
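The newly added flush_kernel_vmap_range() and invalidate_kernel_vmap_range() support drivers that do I/O on pages also reachable through a vmap() alias: on VIVT and aliasing VIPT caches the alias's lines must be written back before the device reads the pages, and discarded before the CPU reads what the device wrote. A hedged usage sketch (vaddr, size and submit_io() are hypothetical placeholders, not names from this diff):

/*
 * Hypothetical driver path doing I/O on a vmap()ed buffer.  vaddr,
 * size and submit_io() are illustrative only.
 */
static void example_io_on_vmap_alias(void *vaddr, int size)
{
	/* write back CPU stores made through the vmap alias */
	flush_kernel_vmap_range(vaddr, size);

	submit_io();	/* device reads and writes the underlying pages */

	/* drop stale alias lines before the CPU reads the device's data */
	invalidate_kernel_vmap_range(vaddr, size);
}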