Diffstat (limited to 'arch/arm/include')

-rw-r--r--  arch/arm/include/asm/cacheflush.h  | 56
-rw-r--r--  arch/arm/include/asm/dma-mapping.h | 79
-rw-r--r--  arch/arm/include/asm/page.h        |  7
-rw-r--r--  arch/arm/include/asm/smp_plat.h    |  5

4 files changed, 80 insertions(+), 67 deletions(-)
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 730aefcfbee3..be8b4d79cf41 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -182,21 +182,6 @@
  *	DMA Cache Coherency
  *	===================
  *
- *	dma_inv_range(start, end)
- *
- *		Invalidate (discard) the specified virtual address range.
- *		May not write back any entries.  If 'start' or 'end'
- *		are not cache line aligned, those lines must be written
- *		back.
- *		- start  - virtual start address
- *		- end    - virtual end address
- *
- *	dma_clean_range(start, end)
- *
- *		Clean (write back) the specified virtual address range.
- *		- start  - virtual start address
- *		- end    - virtual end address
- *
  *	dma_flush_range(start, end)
  *
  *		Clean and invalidate the specified virtual address range.
@@ -213,8 +198,9 @@ struct cpu_cache_fns {
 	void (*coherent_user_range)(unsigned long, unsigned long);
 	void (*flush_kern_dcache_area)(void *, size_t);
 
-	void (*dma_inv_range)(const void *, const void *);
-	void (*dma_clean_range)(const void *, const void *);
+	void (*dma_map_area)(const void *, size_t, int);
+	void (*dma_unmap_area)(const void *, size_t, int);
+
 	void (*dma_flush_range)(const void *, const void *);
 };
 
@@ -244,8 +230,8 @@ extern struct cpu_cache_fns cpu_cache;
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_inv_range			cpu_cache.dma_inv_range
-#define dmac_clean_range		cpu_cache.dma_clean_range
+#define dmac_map_area			cpu_cache.dma_map_area
+#define dmac_unmap_area			cpu_cache.dma_unmap_area
 #define dmac_flush_range		cpu_cache.dma_flush_range
 
 #else
@@ -270,12 +256,12 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
-#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
+#define dmac_map_area			__glue(_CACHE,_dma_map_area)
+#define dmac_unmap_area			__glue(_CACHE,_dma_unmap_area)
 #define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
 
-extern void dmac_inv_range(const void *, const void *);
-extern void dmac_clean_range(const void *, const void *);
+extern void dmac_map_area(const void *, size_t, int);
+extern void dmac_unmap_area(const void *, size_t, int);
 extern void dmac_flush_range(const void *, const void *);
 
 #endif
@@ -316,12 +302,8 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
  * processes address space.  Really, we want to allow our "user
  * space" model to handle this.
  */
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	do {							\
-		memcpy(dst, src, len);				\
-		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
-	} while (0)
-
+extern void copy_to_user_page(struct vm_area_struct *, struct page *,
+	unsigned long, void *, const void *, unsigned long);
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	do {							\
 		memcpy(dst, src, len);				\
@@ -355,17 +337,6 @@ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
 	}
 }
 
-static inline void
-vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-			 unsigned long uaddr, void *kaddr,
-			 unsigned long len, int write)
-{
-	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-		unsigned long addr = (unsigned long)kaddr;
-		__cpuc_coherent_kern_range(addr, addr + len);
-	}
-}
-
 #ifndef CONFIG_CPU_CACHE_VIPT
 #define flush_cache_mm(mm) \
 		vivt_flush_cache_mm(mm)
@@ -373,15 +344,10 @@ vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 		vivt_flush_cache_range(vma,start,end)
 #define flush_cache_page(vma,addr,pfn) \
 		vivt_flush_cache_page(vma,addr,pfn)
-#define flush_ptrace_access(vma,page,ua,ka,len,write) \
-		vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
 #else
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
-extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-				unsigned long uaddr, void *kaddr,
-				unsigned long len, int write);
 #endif
 
 #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
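The change above replaces the explicit invalidate/clean entry points with direction-aware map/unmap hooks, so each cache implementation chooses the operation itself. As a minimal C sketch of the semantics those hooks are expected to provide for a speculatively-prefetching CPU (the real implementations are per-CPU assembly routines; the hyp_* primitives below are hypothetical stand-ins, not code from this diff):

	/*
	 * Hedged sketch only: a C model of the new dma_map_area /
	 * dma_unmap_area hook semantics.  hyp_dma_clean_range() and
	 * hyp_dma_inv_range() are hypothetical placeholders for the
	 * per-CPU assembly cache primitives.
	 */
	#include <linux/dma-mapping.h>	/* enum dma_data_direction */

	extern void hyp_dma_clean_range(const void *start, const void *end);
	extern void hyp_dma_inv_range(const void *start, const void *end);

	static void sketch_dma_map_area(const void *kaddr, size_t size, int dir)
	{
		/*
		 * Before the transfer: write back dirty lines the device
		 * must see; for device-to-memory transfers, discard our
		 * stale copy instead.
		 */
		if (dir == DMA_FROM_DEVICE)
			hyp_dma_inv_range(kaddr, kaddr + size);
		else
			hyp_dma_clean_range(kaddr, kaddr + size);
	}

	static void sketch_dma_unmap_area(const void *kaddr, size_t size, int dir)
	{
		/*
		 * After the transfer: discard lines the CPU may have
		 * speculatively fetched while the device owned the buffer.
		 */
		if (dir != DMA_TO_DEVICE)
			hyp_dma_inv_range(kaddr, kaddr + size);
	}

This mirrors the "clean before transfer, invalidate after completion" policy described in the dma-mapping.h comment below.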
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index a96300bf83fd..256ee1c9f51a 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -57,18 +57,58 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 #endif
 
 /*
- * DMA-consistent mapping functions.  These allocate/free a region of
- * uncached, unwrite-buffered mapped memory space for use with DMA
- * devices.  This is the "generic" version.  The PCI specific version
- * is in pci.h
+ * The DMA API is built upon the notion of "buffer ownership".  A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device.  These helper functions
+ * represent the transitions between these two ownership states.
  *
- * Note: Drivers should NOT use this function directly, as it will break
- * platforms with CONFIG_DMABOUNCE.
- * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches.  We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ *
+ * Private support functions: these are not part of the API and are
+ * liable to change.  Drivers must not use these.
  */
-extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
-extern void dma_cache_maint_page(struct page *page, unsigned long offset,
-				 size_t size, int rw);
+static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	extern void ___dma_single_cpu_to_dev(const void *, size_t,
+		enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_single_cpu_to_dev(kaddr, size, dir);
+}
+
+static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	extern void ___dma_single_dev_to_cpu(const void *, size_t,
+		enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_single_dev_to_cpu(kaddr, size, dir);
+}
+
+static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_page_cpu_to_dev(page, off, size, dir);
+}
+
+static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_page_dev_to_cpu(page, off, size, dir);
+}
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -304,8 +344,7 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 {
 	BUG_ON(!valid_dma_direction(dir));
 
-	if (!arch_is_coherent())
-		dma_cache_maint(cpu_addr, size, dir);
+	__dma_single_cpu_to_dev(cpu_addr, size, dir);
 
 	return virt_to_dma(dev, cpu_addr);
 }
@@ -329,8 +368,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 {
 	BUG_ON(!valid_dma_direction(dir));
 
-	if (!arch_is_coherent())
-		dma_cache_maint_page(page, offset, size, dir);
+	__dma_page_cpu_to_dev(page, offset, size, dir);
 
 	return page_to_dma(dev, page) + offset;
 }
@@ -352,7 +390,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	/* nothing to do */
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
 }
 
 /**
@@ -372,7 +410,8 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	/* nothing to do */
+	__dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK,
+		size, dir);
 }
 #endif /* CONFIG_DMABOUNCE */
 
@@ -400,7 +439,10 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 {
 	BUG_ON(!valid_dma_direction(dir));
 
-	dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
+	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
+		return;
+
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
 }
 
 static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -412,8 +454,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
 		return;
 
-	if (!arch_is_coherent())
-		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
+	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
 }
 
 static inline void dma_sync_single_for_cpu(struct device *dev,
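In the ownership model above, the CPU must not touch a buffer while the device owns it. A hedged usage sketch showing the two transitions from a driver's point of view (the device pointer and buffer are hypothetical; dma_map_single()/dma_unmap_single() are the calls declared in this header):

	/*
	 * Hedged usage sketch: between dma_map_single() and
	 * dma_unmap_single() the device owns "buf" and the CPU must
	 * not read or write it.  "mydev" and "buf" are hypothetical.
	 */
	#include <linux/dma-mapping.h>

	static int send_block(struct device *mydev, void *buf, size_t len)
	{
		dma_addr_t handle;

		/* CPU -> device: caches are cleaned, device now owns buf */
		handle = dma_map_single(mydev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(mydev, handle))
			return -ENOMEM;

		/* ... program the device and wait for completion ... */

		/* device -> CPU: ownership returns to the CPU */
		dma_unmap_single(mydev, handle, len, DMA_TO_DEVICE);
		return 0;
	}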
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 3a32af4cce30..a485ac3c8696 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -117,11 +117,12 @@
 #endif
 
 struct page;
+struct vm_area_struct;
 
 struct cpu_user_fns {
 	void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
 	void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
-			unsigned long vaddr);
+			unsigned long vaddr, struct vm_area_struct *vma);
 };
 
 #ifdef MULTI_USER
@@ -137,7 +138,7 @@ extern struct cpu_user_fns cpu_user;
 
 extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
 extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
-	unsigned long vaddr);
+	unsigned long vaddr, struct vm_area_struct *vma);
 #endif
 
 #define clear_user_highpage(page,vaddr)		\
@@ -145,7 +146,7 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
 
 #define __HAVE_ARCH_COPY_USER_HIGHPAGE
 #define copy_user_highpage(to,from,vaddr,vma)	\
-	__cpu_copy_user_highpage(to, from, vaddr, vma)
+	__cpu_copy_user_highpage(to, from, vaddr, vma)
 
 #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, const void *from);
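Passing the VMA down to cpu_copy_user_highpage() lets an implementation make cache decisions that depend on the mapping, e.g. whether the source page is actually mapped in the faulting address space. A hedged sketch of the shape such an implementation could take (hypothetical function, era-appropriate kmap_atomic() API assumed; not code from this diff):

	/*
	 * Hedged sketch: why cpu_copy_user_highpage() now receives the
	 * VMA.  hyp_copy_user_highpage() is hypothetical; copy_page()
	 * is the helper declared in this header.
	 */
	#include <linux/mm.h>
	#include <linux/highmem.h>

	static void hyp_copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
	{
		void *kto = kmap_atomic(to, KM_USER0);
		void *kfrom = kmap_atomic(from, KM_USER1);

		copy_page(kto, kfrom);

		/*
		 * With the VMA in hand, the implementation can e.g.
		 * consult vma->vm_mm before flushing cache aliases
		 * (decision deliberately elided in this sketch).
		 */

		kunmap_atomic(kfrom, KM_USER1);
		kunmap_atomic(kto, KM_USER0);
	}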
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 59303e200845..e6215305544a 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -13,4 +13,9 @@ static inline int tlb_ops_need_broadcast(void)
 	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
 }
 
+static inline int cache_ops_need_broadcast(void)
+{
+	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1;
+}
+
 #endif
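Both helpers decode the "maintenance broadcast" field, ID_MMFR3 bits [15:12]: a value of 0 means neither cache nor TLB maintenance is broadcast by hardware, 1 means TLB maintenance only, 2 means both — hence the < 1 test for caches and < 2 for TLBs. A hedged sketch of how a caller might use the new helper on SMP (the hyp_* functions are hypothetical; smp_call_function() is the standard kernel API):

	/*
	 * Hedged sketch: on SMP cores whose hardware does not broadcast
	 * cache maintenance, run the flush on every other CPU too.
	 */
	#include <linux/smp.h>

	static void hyp_flush_one_cpu(void *unused)
	{
		/* per-CPU local cache flush would go here */
	}

	static void hyp_flush_everywhere(void)
	{
		if (cache_ops_need_broadcast())
			smp_call_function(hyp_flush_one_cpu, NULL, 1);
		hyp_flush_one_cpu(NULL);	/* and on this CPU */
	}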