author		Russell King <rmk+kernel@arm.linux.org.uk>	2009-10-31 12:52:16 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2010-02-15 10:21:43 -0500
commit		18eabe2347ae7a11b3db768695913724166dfb0e (patch)
tree		2f6a9bb654d01e07a62be75adc1282e97b5c16d4 /arch/arm/include/asm
parent		bf32eb85492af197ea5ff20e0be56f667a80584d (diff)
ARM: dma-mapping: introduce the idea of buffer ownership

The DMA API has the notion of buffer ownership; make it explicit in the
ARM implementation of this API.  This gives us a set of hooks to allow
us to deal with CPU cache issues arising from non-cache coherent DMA.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Tested-By: Santosh Shilimkar <santosh.shilimkar@ti.com>
Tested-By: Jamie Iles <jamie@jamieiles.com>
Diffstat (limited to 'arch/arm/include/asm')

-rw-r--r--  arch/arm/include/asm/dma-mapping.h | 64 +++++++++++++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 47 insertions(+), 17 deletions(-)
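The "buffer ownership" model the commit message refers to is the standard DMA API contract: between map and unmap (or between a sync_for_device and the matching sync_for_cpu) the buffer belongs exclusively to the device, and the CPU must not touch it. As a rough illustration of how a driver exercises these transitions — a hypothetical fragment, not part of this patch; the function name, "dev", "buf" and "len" are invented — a streaming DMA transmit might look like:

	#include <linux/dma-mapping.h>

	static int example_start_tx(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		/* CPU -> device: the CPU must not touch buf after this point */
		handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;

		/* ... hand "handle" to the hardware, wait for completion ... */

		/* device -> CPU: ownership returns to the CPU on unmap */
		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}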
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index a96300bf83fd..e850f5c1607b 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -57,20 +57,49 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 #endif
 
 /*
- * DMA-consistent mapping functions.  These allocate/free a region of
- * uncached, unwrite-buffered mapped memory space for use with DMA
- * devices.  This is the "generic" version.  The PCI specific version
- * is in pci.h
- *
- * Note: Drivers should NOT use this function directly, as it will break
- * platforms with CONFIG_DMABOUNCE.
- * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ * Private support functions: these are not part of the API and are
+ * liable to change.  Drivers must not use these.
  */
 extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
 extern void dma_cache_maint_page(struct page *page, unsigned long offset,
 			size_t size, int rw);
 
 /*
+ * The DMA API is built upon the notion of "buffer ownership".  A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device.  These helper functions
+ * represent the transitions between these two ownership states.
+ *
+ * As above, these are private support functions and not part of the API.
+ * Drivers must not use these.
+ */
+static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	if (!arch_is_coherent())
+		dma_cache_maint(kaddr, size, dir);
+}
+
+static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	/* nothing to do */
+}
+
+static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	if (!arch_is_coherent())
+		dma_cache_maint_page(page, off, size, dir);
+}
+
+static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	/* nothing to do */
+}
+
+/*
  * Return whether the given device DMA address mask can be supported
  * properly.  For example, if your device can only drive the low 24-bits
  * during bus mastering, then you would pass 0x00ffffff as the mask
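The four helpers above make the ownership transitions explicit: the cpu_to_dev hooks perform cache maintenance on non-coherent systems, while the dev_to_cpu hooks are (so far) empty. What dma_cache_maint() has to decide for a CPU-to-device transition depends on the transfer direction; the following sketch is illustrative only (the real routine lives in arch/arm/mm/dma-mapping.c, and the function name here is invented):

	static void example_cpu_to_dev_maint(const void *kaddr, size_t size,
		enum dma_data_direction dir)
	{
		switch (dir) {
		case DMA_TO_DEVICE:
			/* clean: write dirty cache lines back so the
			   device reads up-to-date data */
			break;
		case DMA_FROM_DEVICE:
			/* invalidate: discard cached lines the device is
			   about to overwrite */
			break;
		case DMA_BIDIRECTIONAL:
			/* both: clean, then invalidate */
			break;
		default:
			BUG();
		}
	}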
@@ -304,8 +333,7 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 {
 	BUG_ON(!valid_dma_direction(dir));
 
-	if (!arch_is_coherent())
-		dma_cache_maint(cpu_addr, size, dir);
+	__dma_single_cpu_to_dev(cpu_addr, size, dir);
 
 	return virt_to_dma(dev, cpu_addr);
 }
@@ -329,8 +357,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 {
 	BUG_ON(!valid_dma_direction(dir));
 
-	if (!arch_is_coherent())
-		dma_cache_maint_page(page, offset, size, dir);
+	__dma_page_cpu_to_dev(page, offset, size, dir);
 
 	return page_to_dma(dev, page) + offset;
 }
@@ -352,7 +379,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	/* nothing to do */
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
 }
 
 /**
@@ -372,7 +399,8 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	/* nothing to do */
+	__dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK,
+		size, dir);
 }
 #endif /* CONFIG_DMABOUNCE */
 
@@ -400,7 +428,10 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 {
 	BUG_ON(!valid_dma_direction(dir));
 
-	dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
+	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
+		return;
+
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
 }
 
 static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -412,8 +443,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
 		return;
 
-	if (!arch_is_coherent())
-		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
+	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
 }
 
 static inline void dma_sync_single_for_cpu(struct device *dev,
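Both sync hunks above rely on the return convention of the dmabounce hooks: a zero return means the dmabounce layer already performed the sync against its bounce buffer, so the caller returns early; nonzero means the buffer was not bounced and the ownership-transition helper must still run. When CONFIG_DMABOUNCE is disabled, the stubs in this header reduce to something like the following sketch (paraphrased from the convention, not taken verbatim from the patch):

	static inline int dmabounce_sync_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
	{
		return 1;	/* not bounced: caller must do cache maintenance */
	}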