author		Christoph Hellwig <hch@lst.de>	2017-05-22 04:53:03 -0400
committer	Christoph Hellwig <hch@lst.de>	2017-06-28 09:54:36 -0400
commit		9eef8b8cc26559fe5f2575daf7d08c6a17e81ff8 (patch)
tree		e841db81e1a71ac48162fa402d42ee2526d7b71b /arch/arm/common/dmabounce.c
parent		a760088b45186d6235e05a75788d142bf203a927 (diff)
arm: implement ->mapping_error
DMA_ERROR_CODE is going to go away, so don't rely on it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Diffstat (limited to 'arch/arm/common/dmabounce.c')
-rw-r--r--	arch/arm/common/dmabounce.c	13
1 file changed, 10 insertions(+), 3 deletions(-)
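
Note (editorial, not part of the commit): with a ->mapping_error callback in place, drivers detect a failed mapping through the generic dma_mapping_error() helper instead of comparing the returned handle against DMA_ERROR_CODE. A minimal sketch of the caller side, assuming a hypothetical driver function with placeholder names (example_map, buf, len):

#include <linux/dma-mapping.h>

/* Hypothetical caller: map a buffer and check the result the portable way. */
static int example_map(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/*
	 * dma_mapping_error() dispatches to the dma_map_ops' ->mapping_error,
	 * which for a dmabounce device ends up in dmabounce_mapping_error().
	 */
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... hand "dma" to the hardware, then unmap when done ... */
	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}
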
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 9b1b7be2ec0e..4060378e0f14 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -33,6 +33,7 @@
 #include <linux/scatterlist.h>
 
 #include <asm/cacheflush.h>
+#include <asm/dma-iommu.h>
 
 #undef STATS
 
@@ -256,7 +257,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
 	if (buf == NULL) {
 		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
 			__func__, ptr);
-		return DMA_ERROR_CODE;
+		return ARM_MAPPING_ERROR;
 	}
 
 	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -326,7 +327,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
 
 	ret = needs_bounce(dev, dma_addr, size);
 	if (ret < 0)
-		return DMA_ERROR_CODE;
+		return ARM_MAPPING_ERROR;
 
 	if (ret == 0) {
 		arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
@@ -335,7 +336,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
 
 	if (PageHighMem(page)) {
 		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
-		return DMA_ERROR_CODE;
+		return ARM_MAPPING_ERROR;
 	}
 
 	return map_single(dev, page_address(page) + offset, size, dir, attrs);
@@ -452,6 +453,11 @@ static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
 	return arm_dma_ops.set_dma_mask(dev, dma_mask);
 }
 
+static int dmabounce_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return arm_dma_ops.mapping_error(dev, dma_addr);
+}
+
 static const struct dma_map_ops dmabounce_ops = {
 	.alloc			= arm_dma_alloc,
 	.free			= arm_dma_free,
@@ -466,6 +472,7 @@ static const struct dma_map_ops dmabounce_ops = {
 	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
 	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
 	.set_dma_mask		= dmabounce_set_mask,
+	.mapping_error		= dmabounce_mapping_error,
 };
 
 static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
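
For context, the arm_dma_ops.mapping_error callback that dmabounce_mapping_error() forwards to is added elsewhere in the same patch; dmabounce.c gains the <asm/dma-iommu.h> include above because the ARM_MAPPING_ERROR sentinel lives there. A rough sketch of that side, treating the sentinel value and the callback body as assumptions rather than a verbatim quote of the commit:

/* Sketch (assumed): an all-ones dma_addr_t serves as the error sentinel. */
#define ARM_MAPPING_ERROR	(~(dma_addr_t)0x0)

/* ->mapping_error simply recognizes the sentinel returned by the map ops. */
static int arm_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == ARM_MAPPING_ERROR;
}
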