author:    Russell King <rmk@dyn-67.arm.linux.org.uk>  2008-09-25 17:16:22 -0400
committer: Russell King <rmk+kernel@arm.linux.org.uk>  2008-09-29 05:40:22 -0400
commit:    125ab12acf64ff86b55d20e14db20becd917b7c4
tree:      ce39f81e6a3a409314735ce0cca1e366ea8fd94e /arch/arm/include/asm/dma-mapping.h
parent:    8c8a0ec57ee285ff407e9a64b3a5a37eaf800ad8
[ARM] dma: fix dmabounce dma_sync_xxx() implementations
The dmabounce dma_sync_xxx() implementations have been broken for quite some time; they all copy data between the DMA buffer and the CPU-visible buffer irrespective of the change of ownership. (IOW, a DMA_FROM_DEVICE mapping copies data from the DMA buffer to the CPU buffer during a call to dma_sync_single_for_device().)

Fix it by getting rid of sync_single(), moving the contents into the recently created dmabounce_sync_for_xxx() functions and adjusting them appropriately. This also makes it possible to properly support the DMA range sync functions.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
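To make the ownership rule concrete, here is a minimal, self-contained C sketch of the copy directions a bounce-buffer sync must honour. This is illustrative only, not the dmabounce.c code; bounce_sync_for_cpu(), bounce_sync_for_device(), cpu_buf and safe_buf are hypothetical names.

#include <stddef.h>
#include <string.h>

enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_NONE };

/* sync-for-cpu: the CPU reclaims the buffer, so anything the device may
 * have written must be copied from the DMA-safe bounce buffer back to
 * the CPU-visible buffer. */
void bounce_sync_for_cpu(void *cpu_buf, const void *safe_buf, size_t sz,
			 enum dma_data_direction dir)
{
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		memcpy(cpu_buf, safe_buf, sz);
}

/* sync-for-device: the device takes the buffer, so anything the CPU
 * wrote must be copied out to the bounce buffer.  The pre-fix code
 * copied based on the mapping direction alone, ignoring which way
 * ownership was moving. */
void bounce_sync_for_device(const void *cpu_buf, void *safe_buf, size_t sz,
			    enum dma_data_direction dir)
{
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		memcpy(safe_buf, cpu_buf, sz);
}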
Diffstat (limited to 'arch/arm/include/asm/dma-mapping.h')
-rw-r--r--  arch/arm/include/asm/dma-mapping.h  26
1 file changed, 11 insertions(+), 15 deletions(-)
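For orientation, the net effect of the patch on this header can be condensed as below: the per-function #ifndef CONFIG_DMABOUNCE / #else / #endif triplets are folded into one conditional. A sketch of the resulting structure, not the verbatim file:

#ifdef CONFIG_DMABOUNCE
/* dmabounce builds: the streaming API is out of line, in dmabounce.c */
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
				 enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
			       unsigned long, size_t, enum dma_data_direction);
extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
			     enum dma_data_direction);
#else
/* all other builds: trivial inline implementations */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size, enum dma_data_direction dir)
{
	/* ... cache maintenance ... */
	return virt_to_dma(dev, cpu_addr);
}
/* ... dma_map_page() and dma_unmap_single() likewise ... */
#endif /* CONFIG_DMABOUNCE */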
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index c003ad390def..1204dc958c43 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -242,6 +242,15 @@ extern void dmabounce_unregister_dev(struct device *);
 extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
 
 /*
+ * The DMA API, implemented by dmabounce.c.  See below for descriptions.
+ */
+extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
+extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size,
+		enum dma_data_direction dir);
+extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
+
+/*
  * Private functions
  */
 int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
@@ -251,7 +260,6 @@ int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
 #else
 #define dmabounce_sync_for_cpu(dev,dma,off,sz,dir)	(1)
 #define dmabounce_sync_for_device(dev,dma,off,sz,dir)	(1)
-#endif /* CONFIG_DMABOUNCE */
 
 
 /**
@@ -268,7 +276,6 @@ int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
  * can regain ownership by calling dma_unmap_single() or
  * dma_sync_single_for_cpu().
  */
-#ifndef CONFIG_DMABOUNCE
 static inline dma_addr_t
 dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 		enum dma_data_direction dir)
@@ -278,9 +285,7 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 
 	return virt_to_dma(dev, cpu_addr);
 }
-#else
-extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
-#endif
+
 
 /**
  * dma_map_page - map a portion of a page for streaming DMA
@@ -297,7 +302,6 @@ extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_d
  * can regain ownership by calling dma_unmap_page() or
  * dma_sync_single_for_cpu().
  */
-#ifndef CONFIG_DMABOUNCE
 static inline dma_addr_t
 dma_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size,
@@ -308,11 +312,6 @@ dma_map_page(struct device *dev, struct page *page,
 
 	return page_to_dma(dev, page) + offset;
 }
-#else
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size,
-	     enum dma_data_direction dir);
-#endif
 
 /**
  * dma_unmap_single - unmap a single buffer previously mapped
@@ -328,16 +327,13 @@ extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
  * After this call, reads by the CPU to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
-#ifndef CONFIG_DMABOUNCE
 static inline void
 dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
 		enum dma_data_direction dir)
 {
 	/* nothing to do */
 }
-#else
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
-#endif
+#endif /* CONFIG_DMABOUNCE */
 
 /**
  * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
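As a usage note, the ownership hand-offs these functions implement look like this in a driver receive path. A hypothetical fragment, not taken from any real driver; rx_example(), dev, buf and len are assumed names:

#include <linux/dma-mapping.h>

static void rx_example(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* map: the device owns buf until we sync or unmap */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	/* ... device DMAs into the buffer, wait for completion ... */

	/* CPU reclaims ownership; with dmabounce this copies bounce buffer -> buf */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... CPU inspects buf ... */

	/* device owns buf again; with the fix, no copy happens for DMA_FROM_DEVICE */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);

	/* ... second transfer completes ... */

	/* unmap: CPU owns buf; device-written data is now visible */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
}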