author	Marek Szyprowski <m.szyprowski@samsung.com>	2012-02-10 13:55:20 -0500
committer	Marek Szyprowski <m.szyprowski@samsung.com>	2012-05-21 09:06:18 -0400
commit	15237e1f505b3e5c2276f240b01cd2133e110cbc (patch)
tree	989e8a8580420ad3759a7bab81cd86347a3dadca /arch/arm/common/dmabounce.c
parent	2a550e73d3e5f040a3e8eb733c942ab352eafb36 (diff)
ARM: dma-mapping: move all dma bounce code to separate dma ops structure
This patch removes dma bounce hooks from the common dma mapping implementation on ARM architecture and creates a separate set of dma_map_ops for dma bounce devices.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Tested-By: Subash Patel <subash.ramaswamy@linaro.org>
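For context (not part of this commit): a self-contained dma_map_ops structure is sufficient here because the common DMA entry points dispatch through a per-device ops pointer. A minimal sketch of that indirection, assuming the companion dma-mapping plumbing from this series (the exact field and helper bodies below are an assumption for illustration, not taken from this patch):

	/*
	 * Sketch only: a device with its own ops uses them; everyone else
	 * falls back to the generic ARM ops.  dmabounce_register_dev()
	 * below points a device at dmabounce_ops through set_dma_ops().
	 */
	static inline struct dma_map_ops *get_dma_ops(struct device *dev)
	{
		if (dev && dev->archdata.dma_ops)
			return dev->archdata.dma_ops;
		return &arm_dma_ops;
	}

	static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
	{
		dev->archdata.dma_ops = ops;
	}

	/* a dma_map_page() call on a bounce device thus resolves to
	 * dmabounce_map_page() with no dmabounce test left in the common path */
	static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size, enum dma_data_direction dir)
	{
		return get_dma_ops(dev)->map_page(dev, page, offset, size, dir, NULL);
	}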
Diffstat (limited to 'arch/arm/common/dmabounce.c')
-rw-r--r--	arch/arm/common/dmabounce.c	62
1 file changed, 49 insertions(+), 13 deletions(-)
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 32e9cc6ca7d9..813c29dc6613 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -308,8 +308,9 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t __dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, enum dma_data_direction dir)
+static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
 {
 	dma_addr_t dma_addr;
 	int ret;
@@ -324,7 +325,7 @@ dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 		return DMA_ERROR_CODE;
 
 	if (ret == 0) {
-		__dma_page_cpu_to_dev(page, offset, size, dir);
+		arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
 		return dma_addr;
 	}
 
@@ -335,7 +336,6 @@ dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 
 	return map_single(dev, page_address(page) + offset, size, dir);
 }
-EXPORT_SYMBOL(__dma_map_page);
 
 /*
  * see if a mapped address was really a "safe" buffer and if so, copy
@@ -343,8 +343,8 @@ EXPORT_SYMBOL(__dma_map_page);
  * the safe buffer. (basically return things back to the way they
  * should be)
  */
-void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction dir)
+static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+		enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct safe_buffer *buf;
 
@@ -353,16 +353,14 @@ void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 
 	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
 	if (!buf) {
-		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
-			dma_addr & ~PAGE_MASK, size, dir);
+		arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
 		return;
 	}
 
 	unmap_single(dev, buf, size, dir);
 }
-EXPORT_SYMBOL(__dma_unmap_page);
 
-int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 		size_t sz, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
@@ -392,9 +390,17 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 	}
 	return 0;
 }
-EXPORT_SYMBOL(dmabounce_sync_for_cpu);
 
-int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+static void dmabounce_sync_for_cpu(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
+		return;
+
+	arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
+}
+
+static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 		size_t sz, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
@@ -424,7 +430,35 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 	}
 	return 0;
 }
-EXPORT_SYMBOL(dmabounce_sync_for_device);
+
+static void dmabounce_sync_for_device(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (!__dmabounce_sync_for_device(dev, handle, size, dir))
+		return;
+
+	arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
+}
+
+static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (dev->archdata.dmabounce)
+		return 0;
+
+	return arm_dma_ops.set_dma_mask(dev, dma_mask);
+}
+
+static struct dma_map_ops dmabounce_ops = {
+	.map_page		= dmabounce_map_page,
+	.unmap_page		= dmabounce_unmap_page,
+	.sync_single_for_cpu	= dmabounce_sync_for_cpu,
+	.sync_single_for_device	= dmabounce_sync_for_device,
+	.map_sg			= arm_dma_map_sg,
+	.unmap_sg		= arm_dma_unmap_sg,
+	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
+	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
+	.set_dma_mask		= dmabounce_set_mask,
+};
 
 static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
 		const char *name, unsigned long size)
@@ -486,6 +520,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 #endif
 
 	dev->archdata.dmabounce = device_info;
+	set_dma_ops(dev, &dmabounce_ops);
 
 	dev_info(dev, "dmabounce: registered device\n");
 
@@ -504,6 +539,7 @@ void dmabounce_unregister_dev(struct device *dev)
 	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
 
 	dev->archdata.dmabounce = NULL;
+	set_dma_ops(dev, NULL);
 
 	if (!device_info) {
 		dev_warn(dev,
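For reference (not part of this diff): the dmabounce registration API is unchanged by this patch; what changes is that registering now also installs dmabounce_ops on the device. A hypothetical caller, with the foo_* names and the 64MB window invented purely for illustration:

	/* Devices behind this (hypothetical) bridge can only address the
	 * first 64MB; anything beyond that must go through a bounce buffer. */
	static int foo_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
	{
		return (addr + size) > SZ_64M;
	}

	static int foo_probe(struct device *dev)
	{
		/* 2KB pool for small buffers, 64KB pool for large ones */
		int ret = dmabounce_register_dev(dev, 2048, 65536,
						 foo_needs_bounce);
		if (ret)
			return ret;

		/* from here on, dma_map_*() and dma_sync_*() calls on this
		 * device dispatch through dmabounce_ops, installed by the
		 * set_dma_ops() call added in dmabounce_register_dev() */
		return 0;
	}

	static void foo_remove(struct device *dev)
	{
		/* clears archdata.dmabounce and resets the device's dma ops */
		dmabounce_unregister_dev(dev);
	}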