path: root/arch/arm/include/asm/cacheflush.h
Diffstat (limited to 'arch/arm/include/asm/cacheflush.h')
-rw-r--r--  arch/arm/include/asm/cacheflush.h | 56
1 file changed, 11 insertions(+), 45 deletions(-)
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 730aefcfbee3..be8b4d79cf41 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -182,21 +182,6 @@
  *	DMA Cache Coherency
  *	===================
  *
- *	dma_inv_range(start, end)
- *
- *		Invalidate (discard) the specified virtual address range.
- *		May not write back any entries.  If 'start' or 'end'
- *		are not cache line aligned, those lines must be written
- *		back.
- *		- start  - virtual start address
- *		- end    - virtual end address
- *
- *	dma_clean_range(start, end)
- *
- *		Clean (write back) the specified virtual address range.
- *		- start  - virtual start address
- *		- end    - virtual end address
- *
  *	dma_flush_range(start, end)
  *
  *		Clean and invalidate the specified virtual address range.
@@ -213,8 +198,9 @@ struct cpu_cache_fns {
 	void (*coherent_user_range)(unsigned long, unsigned long);
 	void (*flush_kern_dcache_area)(void *, size_t);
 
-	void (*dma_inv_range)(const void *, const void *);
-	void (*dma_clean_range)(const void *, const void *);
+	void (*dma_map_area)(const void *, size_t, int);
+	void (*dma_unmap_area)(const void *, size_t, int);
+
 	void (*dma_flush_range)(const void *, const void *);
 };
 
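What changed in struct cpu_cache_fns: the two (start, end) range operations, dma_inv_range and dma_clean_range, give way to two (start, size, direction) operations, dma_map_area and dma_unmap_area. On multi-cache kernels every dmac_* call is dispatched at run time through this table. Below is a minimal stand-alone C sketch of that dispatch pattern; the fake_v7_* backends and the trimmed-down table are hypothetical stand-ins for the real per-CPU implementations in arch/arm/mm/cache-*.S.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical backends; the real ones are per-CPU assembly. */
static void fake_v7_dma_map_area(const void *start, size_t size, int dir)
{
	printf("map   %p len=%zu dir=%d\n", start, size, dir);
}

static void fake_v7_dma_unmap_area(const void *start, size_t size, int dir)
{
	printf("unmap %p len=%zu dir=%d\n", start, size, dir);
}

/* Trimmed-down shape of the ops table from the hunk above. */
struct cache_fns_sketch {
	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);
};

/* On a multi-cache kernel the table is filled in at boot for the
 * CPU that was detected; this one global mimics cpu_cache. */
static struct cache_fns_sketch cache_sketch = {
	.dma_map_area	= fake_v7_dma_map_area,
	.dma_unmap_area	= fake_v7_dma_unmap_area,
};

int main(void)
{
	char buf[64];

	/* What a dmac_map_area()/dmac_unmap_area() pair boils down to. */
	cache_sketch.dma_map_area(buf, sizeof(buf), 1);
	cache_sketch.dma_unmap_area(buf, sizeof(buf), 1);
	return 0;
}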
@@ -244,8 +230,8 @@ extern struct cpu_cache_fns cpu_cache;
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_inv_range			cpu_cache.dma_inv_range
-#define dmac_clean_range		cpu_cache.dma_clean_range
+#define dmac_map_area			cpu_cache.dma_map_area
+#define dmac_unmap_area			cpu_cache.dma_unmap_area
 #define dmac_flush_range		cpu_cache.dma_flush_range
 
 #else
@@ -270,12 +256,12 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
-#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
+#define dmac_map_area			__glue(_CACHE,_dma_map_area)
+#define dmac_unmap_area			__glue(_CACHE,_dma_unmap_area)
 #define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
 
-extern void dmac_inv_range(const void *, const void *);
-extern void dmac_clean_range(const void *, const void *);
+extern void dmac_map_area(const void *, size_t, int);
+extern void dmac_unmap_area(const void *, size_t, int);
 extern void dmac_flush_range(const void *, const void *);
 
 #endif
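The single direction argument is what lets two entry points replace three. On the map side a DMA_FROM_DEVICE buffer only needs its stale lines discarded, while anything else must be written back first; on the unmap side there is nothing left to do for DMA_TO_DEVICE. Here is a hedged, stand-alone C sketch of that dispatch, the same choice the per-CPU assembly (e.g. cache-v7.S) makes; the sketch_* helpers are hypothetical stand-ins for the old clean/invalidate range primitives.

#include <stddef.h>

/* Mirrors enum dma_data_direction: 1 = to device, 2 = from device. */
#define DMA_TO_DEVICE	1
#define DMA_FROM_DEVICE	2

/* Hypothetical stand-ins for the removed range primitives. */
static void sketch_clean_range(const void *s, const void *e)
{
	(void)s; (void)e;	/* write [s, e) back to memory */
}

static void sketch_inv_range(const void *s, const void *e)
{
	(void)s; (void)e;	/* discard cached copies of [s, e) */
}

/* Map: the device is about to own the buffer. */
static void sketch_dma_map_area(const void *start, size_t size, int dir)
{
	const void *end = (const char *)start + size;

	if (dir == DMA_FROM_DEVICE)
		sketch_inv_range(start, end);	/* device will overwrite it */
	else
		sketch_clean_range(start, end);	/* push CPU writes out first */
}

/* Unmap: the CPU takes the buffer back. */
static void sketch_dma_unmap_area(const void *start, size_t size, int dir)
{
	if (dir != DMA_TO_DEVICE)		/* device may have written */
		sketch_inv_range(start, (const char *)start + size);
}

int main(void)
{
	char buf[32];

	sketch_dma_map_area(buf, sizeof(buf), DMA_FROM_DEVICE);
	sketch_dma_unmap_area(buf, sizeof(buf), DMA_FROM_DEVICE);
	return 0;
}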
@@ -316,12 +302,8 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
  * processes address space.  Really, we want to allow our "user
  * space" model to handle this.
  */
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	do {							\
-		memcpy(dst, src, len);				\
-		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
-	} while (0)
-
+extern void copy_to_user_page(struct vm_area_struct *, struct page *,
+	unsigned long, void *, const void *, unsigned long);
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	do {							\
 		memcpy(dst, src, len);				\
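copy_to_user_page() turns from a header macro into a real out-of-line function, which frees the implementation (on the arch/arm/mm side of this series) to wrap the cache maintenance in whatever locking or preemption control it needs. A rough sketch of the shape such a function takes, as illustration rather than the committed implementation; flush_ptrace_access() here is assumed to be the now-private helper that left this header:

/* Sketch only: kernel-style outline of the out-of-line version. */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	memcpy(dst, src, len);		/* same copy the macro did */
	/* ...then make the I/D caches agree about what is at dst */
	flush_ptrace_access(vma, page, uaddr, dst, len);
}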
@@ -355,17 +337,6 @@ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
 	}
 }
 
-static inline void
-vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-			 unsigned long uaddr, void *kaddr,
-			 unsigned long len, int write)
-{
-	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-		unsigned long addr = (unsigned long)kaddr;
-		__cpuc_coherent_kern_range(addr, addr + len);
-	}
-}
-
 #ifndef CONFIG_CPU_CACHE_VIPT
 #define flush_cache_mm(mm) \
 		vivt_flush_cache_mm(mm)
@@ -373,15 +344,10 @@ vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 		vivt_flush_cache_range(vma,start,end)
 #define flush_cache_page(vma,addr,pfn) \
 		vivt_flush_cache_page(vma,addr,pfn)
-#define flush_ptrace_access(vma,page,ua,ka,len,write) \
-		vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
 #else
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
-extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-		unsigned long uaddr, void *kaddr,
-		unsigned long len, int write);
 #endif
 
 #define flush_cache_dup_mm(mm)		flush_cache_mm(mm)
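End to end, the new entry points slot into a streaming DMA cycle like this. Drivers do not call dmac_* directly; the dma-mapping layer (dma_map_single() and friends) is what normally reaches these on ARM, but a hedged sketch at this level shows why map and unmap take a direction. The sketch_device_start_*() calls are hypothetical; the constants are from enum dma_data_direction.

/* Transmit: the CPU filled tx_buf, the device will read it. */
dmac_map_area(tx_buf, tx_len, DMA_TO_DEVICE);	/* clean: write back dirty lines */
sketch_device_start_tx(tx_buf, tx_len);		/* hypothetical device kick */
/* ... wait for the TX-complete interrupt ... */
dmac_unmap_area(tx_buf, tx_len, DMA_TO_DEVICE);	/* nothing to do: device only read */

/* Receive: the device fills rx_buf, the CPU reads it afterwards. */
dmac_map_area(rx_buf, rx_len, DMA_FROM_DEVICE);	/* invalidate: drop stale lines */
sketch_device_start_rx(rx_buf, rx_len);
/* ... wait for the RX-complete interrupt ... */
dmac_unmap_area(rx_buf, rx_len, DMA_FROM_DEVICE); /* invalidate again: the CPU may
						     have speculatively refetched */

Choosing clean versus invalidate from the direction at map/unmap time is exactly the decision the removed dma_inv_range()/dma_clean_range() pair forced every caller to make by hand.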