path: root/arch/arm/common/dmabounce.c
Diffstat (limited to 'arch/arm/common/dmabounce.c')
-rw-r--r--	arch/arm/common/dmabounce.c	37
1 files changed, 25 insertions, 12 deletions
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 734ac9135998..cc0a932bbea9 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -277,7 +277,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
 		 * We don't need to sync the DMA buffer since
 		 * it was allocated via the coherent allocators.
 		 */
-		dma_cache_maint(ptr, size, dir);
+		__dma_single_cpu_to_dev(ptr, size, dir);
 	}
 
 	return dma_addr;
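The hunk above swaps the open-coded dma_cache_maint() call for __dma_single_cpu_to_dev(), the ARM helper that performs the CPU-to-device half of the streaming-DMA ownership transfer when no bounce buffer is needed. For context, here is a minimal sketch of the driver-side call this maintenance serves; my_map_tx_buffer() and its error handling are hypothetical, only dma_map_single() and dma_mapping_error() are the real DMA API.

#include <linux/dma-mapping.h>

/* Hedged sketch: hand a CPU-written buffer to the device (transmit-style).
 * The map call is where dmabounce either substitutes a safe buffer or
 * performs the cpu_to_dev cache maintenance shown in the hunk above. */
static int my_map_tx_buffer(struct device *dev, void *buf, size_t len,
			    dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;

	/* program the device with *handle here, then start the transfer */
	return 0;
}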
@@ -308,17 +308,15 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
 			memcpy(ptr, buf->safe, size);
 
 			/*
-			 * DMA buffers must have the same cache properties
-			 * as if they were really used for DMA - which means
-			 * data must be written back to RAM. Note that
-			 * we don't use dmac_flush_range() here for the
-			 * bidirectional case because we know the cache
-			 * lines will be coherent with the data written.
+			 * Since we may have written to a page cache page,
+			 * we need to ensure that the data will be coherent
+			 * with user mappings.
 			 */
-			dmac_clean_range(ptr, ptr + size);
-			outer_clean_range(__pa(ptr), __pa(ptr) + size);
+			__cpuc_flush_dcache_area(ptr, size);
 		}
 		free_safe_buffer(dev->archdata.dmabounce, buf);
+	} else {
+		__dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
 	}
 }
 
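This hunk makes the non-bounced path explicit: when no safe buffer was used, __dma_single_dev_to_cpu() hands the buffer back to the CPU, and in the bounced copy-back case the D-cache is flushed so page cache pages stay coherent with user mappings. Below is a hedged sketch of the receive-completion pattern this serves; my_complete_rx() and process_rx_data() are hypothetical, only dma_unmap_single() is the real API.

#include <linux/dma-mapping.h>

/* hypothetical consumer of the received data */
static void process_rx_data(void *buf, size_t len);

/* Hedged sketch: the CPU must not read device-written data until the
 * unmap (or a dma_sync_single_for_cpu()) has transferred ownership back. */
static void my_complete_rx(struct device *dev, void *buf, size_t len,
			   dma_addr_t handle)
{
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);

	/* Safe now: dmabounce copied the data out of the safe buffer, or the
	 * dev_to_cpu maintenance above made the device's writes visible. */
	process_rx_data(buf, len);
}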
@@ -342,6 +340,22 @@ dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
 }
 EXPORT_SYMBOL(dma_map_single);
 
+/*
+ * see if a mapped address was really a "safe" buffer and if so, copy
+ * the data from the safe buffer back to the unsafe buffer and free up
+ * the safe buffer. (basically return things back to the way they
+ * should be)
+ */
+void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+		enum dma_data_direction dir)
+{
+	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
+		__func__, (void *) dma_addr, size, dir);
+
+	unmap_single(dev, dma_addr, size, dir);
+}
+EXPORT_SYMBOL(dma_unmap_single);
+
 dma_addr_t dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir)
 {
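The new dma_unmap_single() above is the dmabounce-aware counterpart to dma_map_single(); the following hunks repurpose the old definition as dma_unmap_page(), the page-based variant whose prototype appears in the trailing context. A hedged sketch of that page interface, useful when a buffer is only available as a struct page plus offset; my_map_rx_page() is hypothetical.

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Hedged sketch: map part of a page for a device-to-memory transfer.
 * Pairs with dma_unmap_page() once the transfer has completed. */
static int my_map_rx_page(struct device *dev, struct page *page,
			  unsigned long offset, size_t len, dma_addr_t *handle)
{
	*handle = dma_map_page(dev, page, offset, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;
	return 0;
}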
@@ -366,8 +380,7 @@ EXPORT_SYMBOL(dma_map_page);
  * the safe buffer. (basically return things back to the way they
  * should be)
  */
-
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction dir)
 {
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -375,7 +388,7 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 
 	unmap_single(dev, dma_addr, size, dir);
 }
-EXPORT_SYMBOL(dma_unmap_single);
+EXPORT_SYMBOL(dma_unmap_page);
 
 int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 		unsigned long off, size_t sz, enum dma_data_direction dir)
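The dmabounce_sync_for_cpu() helper visible in the trailing context backs the sync entry points a driver uses to peek at a buffer that stays mapped across transfers. A hedged sketch of that pattern using the generic DMA API; everything other than the dma_sync_single_for_cpu()/dma_sync_single_for_device() calls, including the "done" bit, is hypothetical.

#include <linux/dma-mapping.h>
#include <linux/types.h>

/* Hedged sketch: inspect a long-lived streaming mapping between transfers
 * without unmapping it.  Ownership must go back to the device afterwards. */
static bool my_descriptor_done(struct device *dev, dma_addr_t handle,
			       void *desc, size_t len)
{
	bool done;

	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	done = (((u8 *)desc)[0] & 0x80) != 0;	/* hypothetical "done" bit */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);

	return done;
}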