Diffstat (limited to 'arch/arm/common/dmabounce.c')
-rw-r--r--  arch/arm/common/dmabounce.c  56
1 file changed, 29 insertions(+), 27 deletions(-)
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 69130f365904..aecc6c3f908f 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -246,9 +246,9 @@ map_single(struct device *dev, void *ptr, size_t size,
 	}
 
 	dev_dbg(dev,
-		"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
-		__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
-		buf->safe, (void *) buf->safe_dma_addr);
+		"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+		buf->safe, buf->safe_dma_addr);
 
 	if ((dir == DMA_TO_DEVICE) ||
 	    (dir == DMA_BIDIRECTIONAL)) {
@@ -292,9 +292,9 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 	BUG_ON(buf->size != size);
 
 	dev_dbg(dev,
-		"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
-		__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
-		buf->safe, (void *) buf->safe_dma_addr);
+		"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+		buf->safe, buf->safe_dma_addr);
 
 	DO_STATS ( device_info->bounce_count++ );
 
@@ -321,9 +321,8 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 	}
 }
 
-static inline void
-sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction dir)
+static int sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+		enum dma_data_direction dir)
 {
 	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
 	struct safe_buffer *buf = NULL;
@@ -355,9 +354,9 @@ sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 	 */
 
 	dev_dbg(dev,
-		"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
-		__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
-		buf->safe, (void *) buf->safe_dma_addr);
+		"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+		buf->safe, buf->safe_dma_addr);
 
 	DO_STATS ( device_info->bounce_count++ );
 
@@ -383,8 +382,9 @@ sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 	 * No need to sync the safe buffer - it was allocated
 	 * via the coherent allocators.
 	 */
+		return 0;
 	} else {
-		dma_cache_maint(dma_to_virt(dev, dma_addr), size, dir);
+		return 1;
 	}
 }
 
@@ -474,25 +474,29 @@ dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 	}
 }
 
-void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size,
-				enum dma_data_direction dir)
+void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_addr,
+		unsigned long offset, size_t size,
+		enum dma_data_direction dir)
 {
-	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
-		__func__, (void *) dma_addr, size, dir);
+	dev_dbg(dev, "%s(dma=%#x,off=%#lx,size=%zx,dir=%x)\n",
+		__func__, dma_addr, offset, size, dir);
 
-	sync_single(dev, dma_addr, size, dir);
+	if (sync_single(dev, dma_addr, offset + size, dir))
+		dma_cache_maint(dma_to_virt(dev, dma_addr) + offset, size, dir);
 }
+EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
 
-void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, size_t size,
-				enum dma_data_direction dir)
+void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_addr,
+		unsigned long offset, size_t size,
+		enum dma_data_direction dir)
 {
-	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
-		__func__, (void *) dma_addr, size, dir);
+	dev_dbg(dev, "%s(dma=%#x,off=%#lx,size=%zx,dir=%x)\n",
+		__func__, dma_addr, offset, size, dir);
 
-	sync_single(dev, dma_addr, size, dir);
+	if (sync_single(dev, dma_addr, offset + size, dir))
+		dma_cache_maint(dma_to_virt(dev, dma_addr) + offset, size, dir);
 }
+EXPORT_SYMBOL(dma_sync_single_range_for_device);
 
 void
 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
@@ -644,8 +648,6 @@ EXPORT_SYMBOL(dma_map_single);
 EXPORT_SYMBOL(dma_unmap_single);
 EXPORT_SYMBOL(dma_map_sg);
 EXPORT_SYMBOL(dma_unmap_sg);
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
-EXPORT_SYMBOL(dma_sync_single_for_device);
 EXPORT_SYMBOL(dma_sync_sg_for_cpu);
 EXPORT_SYMBOL(dma_sync_sg_for_device);
 EXPORT_SYMBOL(dmabounce_register_dev);
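
The newly exported dma_sync_single_range_for_cpu()/_for_device() pair lets a
driver sync just a sub-range of a streaming mapping: sync_single() now returns
nonzero when the address is not backed by a bounce buffer, and the caller then
performs cache maintenance on only that sub-range. Below is a minimal usage
sketch, not part of the patch: the function name mydrv_peek_region() and the
offset/length values are hypothetical, and error handling is elided.

#include <linux/dma-mapping.h>

/*
 * Hypothetical example: "handle" maps a DMA_FROM_DEVICE buffer obtained
 * earlier from dma_map_single().  The CPU wants to inspect bytes 512..767
 * between transfers without syncing the whole buffer.
 */
static void mydrv_peek_region(struct device *dev, void *buf,
			      dma_addr_t handle)
{
	/*
	 * Give bytes 512..767 back to the CPU.  On a dmabounce device this
	 * copies from the safe buffer; otherwise sync_single() returns 1
	 * and dma_cache_maint() runs on just this range.
	 */
	dma_sync_single_range_for_cpu(dev, handle, 512, 256,
				      DMA_FROM_DEVICE);

	/* ... the CPU may now read buf + 512 ... */

	/* Hand the range back to the device before the next transfer. */
	dma_sync_single_range_for_device(dev, handle, 512, 256,
					 DMA_FROM_DEVICE);
}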