author		Russell King <rmk+kernel@arm.linux.org.uk>	2009-10-31 12:52:16 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2010-02-15 10:21:43 -0500
commit		18eabe2347ae7a11b3db768695913724166dfb0e (patch)
tree		2f6a9bb654d01e07a62be75adc1282e97b5c16d4
parent		bf32eb85492af197ea5ff20e0be56f667a80584d (diff)
ARM: dma-mapping: introduce the idea of buffer ownership
The DMA API has the notion of buffer ownership; make it explicit in the
ARM implementation of this API.  This gives us a set of hooks to allow
us to deal with CPU cache issues arising from non-cache coherent DMA.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Tested-By: Santosh Shilimkar <santosh.shilimkar@ti.com>
Tested-By: Jamie Iles <jamie@jamieiles.com>
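As a reading aid, the ownership model works like this: a buffer is handed
from the CPU to the device before DMA starts, and handed back before the
CPU touches it again.  A minimal sketch using the helpers this patch
introduces (the device, buffer and transfer are hypothetical, and real
drivers must use the public dma_map_*/dma_sync_* API, never these private
helpers):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Illustration only: drivers must not call the __dma_* helpers. */
static void ownership_sketch(struct device *dev)
{
	void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);

	if (!buf)
		return;

	/* The CPU owns the buffer: it may read and write it freely. */
	memset(buf, 0xa5, PAGE_SIZE);

	/*
	 * CPU -> device: cache maintenance happens here when the CPU
	 * is not cache coherent with the device.
	 */
	__dma_single_cpu_to_dev(buf, PAGE_SIZE, DMA_TO_DEVICE);

	/* ... the device reads the buffer; the CPU must not touch it ... */

	/* Device -> CPU: only after this may the CPU use the buffer. */
	__dma_single_dev_to_cpu(buf, PAGE_SIZE, DMA_TO_DEVICE);

	kfree(buf);
}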
-rw-r--r--	arch/arm/common/dmabounce.c		4
-rw-r--r--	arch/arm/include/asm/dma-mapping.h	64
-rw-r--r--	arch/arm/mm/dma-mapping.c		13
3 files changed, 58 insertions(+), 23 deletions(-)
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index bc90364a96c7..51499d68b161 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -277,7 +277,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
 		 * We don't need to sync the DMA buffer since
 		 * it was allocated via the coherent allocators.
 		 */
-		dma_cache_maint(ptr, size, dir);
+		__dma_single_cpu_to_dev(ptr, size, dir);
 	}
 
 	return dma_addr;
@@ -315,6 +315,8 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
 			__cpuc_flush_kernel_dcache_area(ptr, size);
 		}
 		free_safe_buffer(dev->archdata.dmabounce, buf);
+	} else {
+		__dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
 	}
 }
 
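With CONFIG_DMABOUNCE, the public streaming calls land in map_single()
and unmap_single() above, so bounced and non-bounced buffers now follow
the same ownership discipline.  A minimal driver-side sketch (device,
buffer and transfer are hypothetical):

#include <linux/dma-mapping.h>

/* Hypothetical transmit path; only the public API is used here. */
static void tx_sketch(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* CPU -> device: with CONFIG_DMABOUNCE this reaches map_single(). */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* ... program the device with 'handle', wait for completion ... */

	/*
	 * Device -> CPU: reaches unmap_single(), whose new else branch
	 * performs the dev-to-CPU transition for non-bounced buffers.
	 */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}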
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index a96300bf83fd..e850f5c1607b 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -57,20 +57,49 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 #endif
 
 /*
- * DMA-consistent mapping functions.  These allocate/free a region of
- * uncached, unwrite-buffered mapped memory space for use with DMA
- * devices.  This is the "generic" version.  The PCI specific version
- * is in pci.h
- *
- * Note: Drivers should NOT use this function directly, as it will break
- * platforms with CONFIG_DMABOUNCE.
- * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ * Private support functions: these are not part of the API and are
+ * liable to change.  Drivers must not use these.
  */
 extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
 extern void dma_cache_maint_page(struct page *page, unsigned long offset,
 	size_t size, int rw);
 
 /*
+ * The DMA API is built upon the notion of "buffer ownership".  A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device.  These helper functions
+ * represent the transitions between these two ownership states.
+ *
+ * As above, these are private support functions and not part of the API.
+ * Drivers must not use these.
+ */
+static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	if (!arch_is_coherent())
+		dma_cache_maint(kaddr, size, dir);
+}
+
+static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	/* nothing to do */
+}
+
+static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	if (!arch_is_coherent())
+		dma_cache_maint_page(page, off, size, dir);
+}
+
+static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	/* nothing to do */
+}
+
+/*
  * Return whether the given device DMA address mask can be supported
  * properly.  For example, if your device can only drive the low 24-bits
  * during bus mastering, then you would pass 0x00ffffff as the mask
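The four helpers pair up: every *_cpu_to_dev() transition must be matched
by a *_dev_to_cpu() transition before the CPU accesses the buffer again.
The _dev_to_cpu variants are no-ops at this point, but they give later
work a place to hook cache invalidation.  A sketch of the pairing for the
page variants (page and device hypothetical):

	/* CPU -> device: the page is about to be written by DMA. */
	__dma_page_cpu_to_dev(page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	/* ... the device DMAs into the page; the CPU must not read it ... */

	/* Device -> CPU: the CPU may now read what the device wrote. */
	__dma_page_dev_to_cpu(page, 0, PAGE_SIZE, DMA_FROM_DEVICE);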
@@ -304,8 +333,7 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 {
 	BUG_ON(!valid_dma_direction(dir));
 
-	if (!arch_is_coherent())
-		dma_cache_maint(cpu_addr, size, dir);
+	__dma_single_cpu_to_dev(cpu_addr, size, dir);
 
 	return virt_to_dma(dev, cpu_addr);
 }
@@ -329,8 +357,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 {
 	BUG_ON(!valid_dma_direction(dir));
 
-	if (!arch_is_coherent())
-		dma_cache_maint_page(page, offset, size, dir);
+	__dma_page_cpu_to_dev(page, offset, size, dir);
 
 	return page_to_dma(dev, page) + offset;
 }
@@ -352,7 +379,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	/* nothing to do */
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
 }
 
 /**
@@ -372,7 +399,8 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	/* nothing to do */
+	__dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK,
+		size, dir);
 }
 #endif /* CONFIG_DMABOUNCE */
 
@@ -400,7 +428,10 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 {
 	BUG_ON(!valid_dma_direction(dir));
 
-	dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
+	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
+		return;
+
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
 }
 
 static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -412,8 +443,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
 		return;
 
-	if (!arch_is_coherent())
-		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
+	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
 }
 
 static inline void dma_sync_single_for_cpu(struct device *dev,
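The single-buffer sync paths are now symmetric: sync_for_cpu performs a
device-to-CPU transition and sync_for_device a CPU-to-device transition,
with the dmabounce path short-circuiting both.  A driver-side sketch of a
streaming mapping reused across transfers (device and buffer hypothetical):

#include <linux/dma-mapping.h>

/* Hypothetical receive buffer reused via the sync API. */
static void rx_sketch(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	/* ... the device fills the buffer via DMA ... */

	/* Device -> CPU: safe to read the received data. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... inspect buf here ... */

	/* CPU -> device: hand the buffer back for the next fill. */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);

	/* ... eventually ... */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
}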
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 26325cb5d368..a316c9459526 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -573,8 +573,12 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i) {
-		dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
-				       sg_dma_len(s), dir);
+		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
+					    sg_dma_len(s), dir))
+			continue;
+
+		__dma_page_dev_to_cpu(sg_page(s), s->offset,
+				      s->length, dir);
 	}
 }
 EXPORT_SYMBOL(dma_sync_sg_for_cpu);
@@ -597,9 +601,8 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 					sg_dma_len(s), dir))
 			continue;
 
-		if (!arch_is_coherent())
-			dma_cache_maint_page(sg_page(s), s->offset,
-					     s->length, dir);
+		__dma_page_cpu_to_dev(sg_page(s), s->offset,
+				      s->length, dir);
 	}
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
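The scatter-gather sync paths apply the same per-entry transitions via
sg_page(), s->offset and s->length.  A minimal driver-side sketch (list,
device and processing step hypothetical):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical receive path over a scatterlist. */
static void sg_rx_sketch(struct device *dev, struct scatterlist *sgl,
		int nents)
{
	int mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);

	if (!mapped)
		return;

	/* ... the device fills the buffers via DMA ... */

	/* Device -> CPU for every entry; the CPU may read the data. */
	dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);

	/* ... process the data, then hand the list back ... */
	dma_sync_sg_for_device(dev, sgl, nents, DMA_FROM_DEVICE);

	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
}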