path: root/arch/arm/common/dmabounce.c
author	Marek Szyprowski <m.szyprowski@samsung.com>	2012-02-10 13:55:20 -0500
committer	Marek Szyprowski <m.szyprowski@samsung.com>	2012-05-21 09:06:13 -0400
commit	a227fb92a0f5f0dd8282719386e9b3a29f0d16b2 (patch)
tree	b990f647dcb38e17a4eef63b14f3358e6698e5c3 /arch/arm/common/dmabounce.c
parent	553ac78877242b6d8b591323731df304140d0f99 (diff)
ARM: dma-mapping: remove offset parameter to prepare for generic dma_ops
This patch removes the need for the offset parameter in the dma bounce functions. This is required to let the dma-mapping framework on the ARM architecture use the common, generic dma_map_ops based dma-mapping helpers.

Background and more detailed explanation:

The dma_*_range_* functions have been available since the early days of the dma mapping api. They are the correct way of doing partial syncs on a buffer (usually used by network device drivers). This patch changes only the internal implementation of the dma bounce functions to let them tunnel through the dma_map_ops structure. The driver api stays unchanged, so drivers are still obliged to call the dma_*_range_* functions to keep the code clean and easy to understand.

The only drawback of this patch is reduced detection of dma api abuse. Let us consider the following code:

dma_addr = dma_map_single(dev, ptr, 64, DMA_TO_DEVICE);
dma_sync_single_range_for_cpu(dev, dma_addr+16, 0, 32, DMA_TO_DEVICE);

Without the patch such code fails, because the dma bounce code is unable to find the bounce buffer for the given dma_address. After the patch the above sync call is equivalent to:

dma_sync_single_range_for_cpu(dev, dma_addr, 16, 32, DMA_TO_DEVICE);

which succeeds. I don't consider this a real problem, because DMA API abuse should be caught by the debug_dma_* function family. This patch lets us simplify the internal low-level implementation without changing the driver-visible API.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Tested-By: Subash Patel <subash.ramaswamy@linaro.org>
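For reference, a minimal sketch of the driver-visible usage described above (the device pointer, buffer and sizes are illustrative placeholders, not taken from this patch):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Illustrative only: 'dev' and 'data' are hypothetical. The point is that
 * drivers keep passing the handle returned by the mapping call plus an
 * explicit offset/length to the dma_*_range_* helpers.
 */
static int partial_sync_example(struct device *dev, void *data)
{
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, data, 64, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	/* sync only bytes 16..47 of the mapped region back to the CPU */
	dma_sync_single_range_for_cpu(dev, dma_addr, 16, 32, DMA_TO_DEVICE);

	/* ... CPU inspects or modifies that sub-range ... */

	/* hand the same sub-range back to the device */
	dma_sync_single_range_for_device(dev, dma_addr, 16, 32, DMA_TO_DEVICE);

	dma_unmap_single(dev, dma_addr, 64, DMA_TO_DEVICE);
	return 0;
}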
Diffstat (limited to 'arch/arm/common/dmabounce.c')
-rw-r--r--	arch/arm/common/dmabounce.c	13
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 210ad1bef7e5..32e9cc6ca7d9 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -173,7 +173,8 @@ find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_
 	read_lock_irqsave(&device_info->lock, flags);
 
 	list_for_each_entry(b, &device_info->safe_buffers, node)
-		if (b->safe_dma_addr == safe_dma_addr) {
+		if (b->safe_dma_addr <= safe_dma_addr &&
+		    b->safe_dma_addr + b->size > safe_dma_addr) {
 			rb = b;
 			break;
 		}
@@ -362,9 +363,10 @@ void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 EXPORT_SYMBOL(__dma_unmap_page);
 
 int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
-		unsigned long off, size_t sz, enum dma_data_direction dir)
+		size_t sz, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
+	unsigned long off;
 
 	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
 		__func__, addr, off, sz, dir);
@@ -373,6 +375,8 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 	if (!buf)
 		return 1;
 
+	off = addr - buf->safe_dma_addr;
+
 	BUG_ON(buf->direction != dir);
 
 	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -391,9 +395,10 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 EXPORT_SYMBOL(dmabounce_sync_for_cpu);
 
 int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
-		unsigned long off, size_t sz, enum dma_data_direction dir)
+		size_t sz, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
+	unsigned long off;
 
 	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
 		__func__, addr, off, sz, dir);
@@ -402,6 +407,8 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 	if (!buf)
 		return 1;
 
+	off = addr - buf->safe_dma_addr;
+
 	BUG_ON(buf->direction != dir);
 
 	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
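Taken together, the hunks above reduce to one pattern inside dmabounce: look up the bounce buffer that contains a given dma address (rather than one whose base address matches it exactly) and recompute the offset locally instead of taking it from the caller. A condensed, illustrative sketch only; the struct types are the ones defined earlier in dmabounce.c, the helper name is made up, and the read_lock_irqsave()/read_unlock_irqrestore() handling of device_info->lock used by the real code is omitted:

#include <linux/list.h>
#include <linux/dma-mapping.h>

/*
 * Sketch, not a drop-in replacement: struct dmabounce_device_info and
 * struct safe_buffer come from dmabounce.c; locking is omitted for brevity.
 */
static struct safe_buffer *
lookup_and_offset(struct dmabounce_device_info *device_info,
		  dma_addr_t dma_addr, unsigned long *off)
{
	struct safe_buffer *b;

	list_for_each_entry(b, &device_info->safe_buffers, node) {
		/* range check replaces the old exact base-address match */
		if (b->safe_dma_addr <= dma_addr &&
		    b->safe_dma_addr + b->size > dma_addr) {
			*off = dma_addr - b->safe_dma_addr;
			return b;
		}
	}
	return NULL;
}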