diff options
| author | Russell King <rmk@dyn-67.arm.linux.org.uk> | 2008-08-10 07:18:26 -0400 |
|---|---|---|
| committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2008-08-10 09:05:14 -0400 |
| commit | 9dd428680573d7867ee5e40fa3f059a98301d416 (patch) | |
| tree | cedec454e5490d2f09b3cad4b6c2fed46a6f857b | |
| parent | 98ed7d4b1a4eebc1ac25929b6968673bef4d54c3 (diff) | |
[ARM] dma-mapping: provide sync_range APIs
Convert the existing dma_sync_single_for_* APIs to the new range based
APIs, and make the dma_sync_single_for_* API a superset of it.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
| -rw-r--r-- | arch/arm/common/dmabounce.c | 38 | ||||
| -rw-r--r-- | arch/arm/include/asm/dma-mapping.h | 39 |
2 files changed, 48 insertions(+), 29 deletions(-)
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 1ea6482cdf6e..aecc6c3f908f 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c | |||
| @@ -321,9 +321,8 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | |||
| 321 | } | 321 | } |
| 322 | } | 322 | } |
| 323 | 323 | ||
| 324 | static inline void | 324 | static int sync_single(struct device *dev, dma_addr_t dma_addr, size_t size, |
| 325 | sync_single(struct device *dev, dma_addr_t dma_addr, size_t size, | 325 | enum dma_data_direction dir) |
| 326 | enum dma_data_direction dir) | ||
| 327 | { | 326 | { |
| 328 | struct dmabounce_device_info *device_info = dev->archdata.dmabounce; | 327 | struct dmabounce_device_info *device_info = dev->archdata.dmabounce; |
| 329 | struct safe_buffer *buf = NULL; | 328 | struct safe_buffer *buf = NULL; |
| @@ -383,8 +382,9 @@ sync_single(struct device *dev, dma_addr_t dma_addr, size_t size, | |||
| 383 | * No need to sync the safe buffer - it was allocated | 382 | * No need to sync the safe buffer - it was allocated |
| 384 | * via the coherent allocators. | 383 | * via the coherent allocators. |
| 385 | */ | 384 | */ |
| 385 | return 0; | ||
| 386 | } else { | 386 | } else { |
| 387 | dma_cache_maint(dma_to_virt(dev, dma_addr), size, dir); | 387 | return 1; |
| 388 | } | 388 | } |
| 389 | } | 389 | } |
| 390 | 390 | ||
| @@ -474,25 +474,29 @@ dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | |||
| 474 | } | 474 | } |
| 475 | } | 475 | } |
| 476 | 476 | ||
| 477 | void | 477 | void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_addr, |
| 478 | dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size, | 478 | unsigned long offset, size_t size, |
| 479 | enum dma_data_direction dir) | 479 | enum dma_data_direction dir) |
| 480 | { | 480 | { |
| 481 | dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", | 481 | dev_dbg(dev, "%s(dma=%#x,off=%#lx,size=%zx,dir=%x)\n", |
| 482 | __func__, (void *) dma_addr, size, dir); | 482 | __func__, dma_addr, offset, size, dir); |
| 483 | 483 | ||
| 484 | sync_single(dev, dma_addr, size, dir); | 484 | if (sync_single(dev, dma_addr, offset + size, dir)) |
| 485 | dma_cache_maint(dma_to_virt(dev, dma_addr) + offset, size, dir); | ||
| 485 | } | 486 | } |
| 487 | EXPORT_SYMBOL(dma_sync_single_range_for_cpu); | ||
| 486 | 488 | ||
| 487 | void | 489 | void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_addr, |
| 488 | dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, size_t size, | 490 | unsigned long offset, size_t size, |
| 489 | enum dma_data_direction dir) | 491 | enum dma_data_direction dir) |
| 490 | { | 492 | { |
| 491 | dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", | 493 | dev_dbg(dev, "%s(dma=%#x,off=%#lx,size=%zx,dir=%x)\n", |
| 492 | __func__, (void *) dma_addr, size, dir); | 494 | __func__, dma_addr, offset, size, dir); |
| 493 | 495 | ||
| 494 | sync_single(dev, dma_addr, size, dir); | 496 | if (sync_single(dev, dma_addr, offset + size, dir)) |
| 497 | dma_cache_maint(dma_to_virt(dev, dma_addr) + offset, size, dir); | ||
| 495 | } | 498 | } |
| 499 | EXPORT_SYMBOL(dma_sync_single_range_for_device); | ||
| 496 | 500 | ||
| 497 | void | 501 | void |
| 498 | dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents, | 502 | dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents, |
| @@ -644,8 +648,6 @@ EXPORT_SYMBOL(dma_map_single); | |||
| 644 | EXPORT_SYMBOL(dma_unmap_single); | 648 | EXPORT_SYMBOL(dma_unmap_single); |
| 645 | EXPORT_SYMBOL(dma_map_sg); | 649 | EXPORT_SYMBOL(dma_map_sg); |
| 646 | EXPORT_SYMBOL(dma_unmap_sg); | 650 | EXPORT_SYMBOL(dma_unmap_sg); |
| 647 | EXPORT_SYMBOL(dma_sync_single_for_cpu); | ||
| 648 | EXPORT_SYMBOL(dma_sync_single_for_device); | ||
| 649 | EXPORT_SYMBOL(dma_sync_sg_for_cpu); | 651 | EXPORT_SYMBOL(dma_sync_sg_for_cpu); |
| 650 | EXPORT_SYMBOL(dma_sync_sg_for_device); | 652 | EXPORT_SYMBOL(dma_sync_sg_for_device); |
| 651 | EXPORT_SYMBOL(dmabounce_register_dev); | 653 | EXPORT_SYMBOL(dmabounce_register_dev); |
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 6f45959fe2cc..7b95d2058395 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h | |||
| @@ -351,11 +351,12 @@ extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_da | |||
| 351 | 351 | ||
| 352 | 352 | ||
| 353 | /** | 353 | /** |
| 354 | * dma_sync_single_for_cpu | 354 | * dma_sync_single_range_for_cpu |
| 355 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | 355 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices |
| 356 | * @handle: DMA address of buffer | 356 | * @handle: DMA address of buffer |
| 357 | * @size: size of buffer to map | 357 | * @offset: offset of region to start sync |
| 358 | * @dir: DMA transfer direction | 358 | * @size: size of region to sync |
| 359 | * @dir: DMA transfer direction (same as passed to dma_map_single) | ||
| 359 | * | 360 | * |
| 360 | * Make physical memory consistent for a single streaming mode DMA | 361 | * Make physical memory consistent for a single streaming mode DMA |
| 361 | * translation after a transfer. | 362 | * translation after a transfer. |
| @@ -369,25 +370,41 @@ extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_da | |||
| 369 | */ | 370 | */ |
| 370 | #ifndef CONFIG_DMABOUNCE | 371 | #ifndef CONFIG_DMABOUNCE |
| 371 | static inline void | 372 | static inline void |
| 372 | dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size, | 373 | dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle, |
| 373 | enum dma_data_direction dir) | 374 | unsigned long offset, size_t size, |
| 375 | enum dma_data_direction dir) | ||
| 374 | { | 376 | { |
| 375 | if (!arch_is_coherent()) | 377 | if (!arch_is_coherent()) |
| 376 | dma_cache_maint(dma_to_virt(dev, handle), size, dir); | 378 | dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir); |
| 377 | } | 379 | } |
| 378 | 380 | ||
| 379 | static inline void | 381 | static inline void |
| 380 | dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size, | 382 | dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle, |
| 381 | enum dma_data_direction dir) | 383 | unsigned long offset, size_t size, |
| 384 | enum dma_data_direction dir) | ||
| 382 | { | 385 | { |
| 383 | if (!arch_is_coherent()) | 386 | if (!arch_is_coherent()) |
| 384 | dma_cache_maint(dma_to_virt(dev, handle), size, dir); | 387 | dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir); |
| 385 | } | 388 | } |
| 386 | #else | 389 | #else |
| 387 | extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction); | 390 | extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction); |
| 388 | extern void dma_sync_single_for_device(struct device*, dma_addr_t, size_t, enum dma_data_direction); | 391 | extern void dma_sync_single_range_for_device(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction); |
| 389 | #endif | 392 | #endif |
| 390 | 393 | ||
| 394 | static inline void | ||
| 395 | dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size, | ||
| 396 | enum dma_data_direction dir) | ||
| 397 | { | ||
| 398 | dma_sync_single_range_for_cpu(dev, handle, 0, size, dir); | ||
| 399 | } | ||
| 400 | |||
| 401 | static inline void | ||
| 402 | dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size, | ||
| 403 | enum dma_data_direction dir) | ||
| 404 | { | ||
| 405 | dma_sync_single_range_for_device(dev, handle, 0, size, dir); | ||
| 406 | } | ||
| 407 | |||
| 391 | 408 | ||
| 392 | /** | 409 | /** |
| 393 | * dma_sync_sg_for_cpu | 410 | * dma_sync_sg_for_cpu |
