Diffstat (limited to 'arch/arm/include/asm/cacheflush.h')
 -rw-r--r--  arch/arm/include/asm/cacheflush.h | 56 ++++----------------
 1 file changed, 11 insertions(+), 45 deletions(-)
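This change replaces the direction-specific dma_inv_range()/dma_clean_range() hooks with direction-aware dma_map_area()/dma_unmap_area() hooks, and moves copy_to_user_page() and flush_ptrace_access() out of the header. The signature change at the core of it, copied from the hunks below:

	/* Old: the caller chose the maintenance operation and passed a
	 * [start, end) virtual address range. */
	void (*dma_inv_range)(const void *, const void *);
	void (*dma_clean_range)(const void *, const void *);

	/* New: the caller passes start, size and the DMA direction; the
	 * implementation picks clean, invalidate or both as needed. */
	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);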
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 5fe4a2ad7fa3..72da7e045c6b 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -197,21 +197,6 @@
  * DMA Cache Coherency
  * ===================
  *
- * dma_inv_range(start, end)
- *
- *	Invalidate (discard) the specified virtual address range.
- *	May not write back any entries.  If 'start' or 'end'
- *	are not cache line aligned, those lines must be written
- *	back.
- *	- start  - virtual start address
- *	- end    - virtual end address
- *
- * dma_clean_range(start, end)
- *
- *	Clean (write back) the specified virtual address range.
- *	- start  - virtual start address
- *	- end    - virtual end address
- *
  * dma_flush_range(start, end)
  *
  *	Clean and invalidate the specified virtual address range.
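The hunk above drops the documentation for the two range operations; dma_flush_range() is now the only documented range primitive, and the mapping paths express intent through a direction argument instead. A minimal caller sketch, assuming the int carries the usual enum dma_data_direction values (illustrative only, not the actual arch/arm/mm/dma-mapping.c code):

	#include <linux/dma-mapping.h>	/* enum dma_data_direction */

	/* Prepare a kernel buffer for DMA: the per-CPU backend decides
	 * which cache maintenance the direction requires. */
	static void sketch_map_for_dma(const void *kaddr, size_t size,
				       enum dma_data_direction dir)
	{
		dmac_map_area(kaddr, size, dir);	/* before the transfer */
	}

	/* Tear down after the transfer completes. */
	static void sketch_unmap_after_dma(const void *kaddr, size_t size,
					   enum dma_data_direction dir)
	{
		dmac_unmap_area(kaddr, size, dir);
	}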
@@ -228,8 +213,9 @@ struct cpu_cache_fns {
 	void (*coherent_user_range)(unsigned long, unsigned long);
 	void (*flush_kern_dcache_area)(void *, size_t);
 
-	void (*dma_inv_range)(const void *, const void *);
-	void (*dma_clean_range)(const void *, const void *);
+	void (*dma_map_area)(const void *, size_t, int);
+	void (*dma_unmap_area)(const void *, size_t, int);
+
 	void (*dma_flush_range)(const void *, const void *);
 };
 
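Every cache backend now has to supply the two new function pointers. In the tree these are assembly routines in arch/arm/mm/cache-*.S; a C-level picture of the table, with names invented purely for illustration:

	/* Hypothetical backend; real instances are built in assembly. */
	void example_dma_map_area(const void *start, size_t size, int dir);
	void example_dma_unmap_area(const void *start, size_t size, int dir);
	void example_dma_flush_range(const void *start, const void *end);

	struct cpu_cache_fns example_cache = {
		.dma_map_area	 = example_dma_map_area,
		.dma_unmap_area	 = example_dma_unmap_area,
		.dma_flush_range = example_dma_flush_range,	/* unchanged */
		/* ...remaining hooks omitted for brevity... */
	};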
@@ -259,8 +245,8 @@ extern struct cpu_cache_fns cpu_cache;
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_inv_range			cpu_cache.dma_inv_range
-#define dmac_clean_range		cpu_cache.dma_clean_range
+#define dmac_map_area			cpu_cache.dma_map_area
+#define dmac_unmap_area			cpu_cache.dma_unmap_area
 #define dmac_flush_range		cpu_cache.dma_flush_range
 
 #else
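In a multi-cache build the macros above are plain member lookups on the cpu_cache table installed at boot, so the new entry points dispatch at runtime. Spelled out as an equivalent function (a sketch, not real kernel code):

	/* What dmac_map_area(p, size, dir) amounts to here. */
	static inline void sketch_dmac_map_area(const void *p, size_t size,
						int dir)
	{
		cpu_cache.dma_map_area(p, size, dir);
	}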
@@ -285,12 +271,12 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
-#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
+#define dmac_map_area			__glue(_CACHE,_dma_map_area)
+#define dmac_unmap_area			__glue(_CACHE,_dma_unmap_area)
 #define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
 
-extern void dmac_inv_range(const void *, const void *);
-extern void dmac_clean_range(const void *, const void *);
+extern void dmac_map_area(const void *, size_t, int);
+extern void dmac_unmap_area(const void *, size_t, int);
 extern void dmac_flush_range(const void *, const void *);
 
 #endif
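In a single-cache build, __glue() (from asm/glue.h) pastes the configured cache type into the symbol name, so the extern declarations bind directly to one backend; for example, with _CACHE defined as v6, dmac_map_area resolves to v6_dma_map_area. A sketch of the token pasting, assuming the usual two-level expansion so that _CACHE is expanded before pasting:

	#define ____sketch_glue(name, fn)	name##fn
	#define __sketch_glue(name, fn)		____sketch_glue(name, fn)
	/* __sketch_glue(_CACHE, _dma_map_area) -> v6_dma_map_area
	 * when _CACHE is defined as v6. */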
@@ -331,12 +317,8 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
  * processes address space.  Really, we want to allow our "user
  * space" model to handle this.
  */
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	do {							\
-		memcpy(dst, src, len);				\
-		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
-	} while (0)
-
+extern void copy_to_user_page(struct vm_area_struct *, struct page *,
+	unsigned long, void *, const void *, unsigned long);
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	do {							\
 		memcpy(dst, src, len);				\
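copy_to_user_page() stops being a header macro and becomes a real function defined in the arm mm code, so the cache handling around the copy can live out of line. A rough shape, assuming it keeps the old macro's memcpy-then-flush behaviour (the real definition may add more, e.g. preemption handling on SMP):

	/* Sketch only; the actual definition lives in arch/arm/mm/. */
	void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
			       unsigned long uaddr, void *dst, const void *src,
			       unsigned long len)
	{
		memcpy(dst, src, len);
		/* write = 1: the kernel has just modified the user page */
		flush_ptrace_access(vma, page, uaddr, dst, len, 1);
	}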
@@ -370,17 +352,6 @@ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
 	}
 }
 
-static inline void
-vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-			 unsigned long uaddr, void *kaddr,
-			 unsigned long len, int write)
-{
-	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-		unsigned long addr = (unsigned long)kaddr;
-		__cpuc_coherent_kern_range(addr, addr + len);
-	}
-}
-
 #ifndef CONFIG_CPU_CACHE_VIPT
 #define flush_cache_mm(mm) \
 		vivt_flush_cache_mm(mm)
@@ -388,15 +359,10 @@ vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 		vivt_flush_cache_range(vma,start,end)
 #define flush_cache_page(vma,addr,pfn) \
 		vivt_flush_cache_page(vma,addr,pfn)
-#define flush_ptrace_access(vma,page,ua,ka,len,write) \
-		vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
 #else
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
-extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-				unsigned long uaddr, void *kaddr,
-				unsigned long len, int write);
 #endif
 
 #define flush_cache_dup_mm(mm)		flush_cache_mm(mm)
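With flush_ptrace_access() and its VIVT helper gone from the header, the coherence step they performed (check whether this CPU is in the mm's cpumask, then run __cpuc_coherent_kern_range() over the kernel alias) becomes an implementation detail behind the out-of-line copy_to_user_page(). An illustrative caller, following the generic access_process_vm() pattern (names invented):

	/* Not kernel code: write into a traced task's page through its
	 * kernel mapping; the arch keeps I/D caches coherent. */
	static void sketch_poke_text(struct vm_area_struct *vma,
				     struct page *page, unsigned long uaddr,
				     void *kaddr, const void *src,
				     unsigned long len)
	{
		copy_to_user_page(vma, page, uaddr, kaddr, src, len);
	}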