Diffstat (limited to 'arch/arm/include')

 arch/arm/include/asm/dma-mapping.h | 65 ++++++++++++++++++++++++++++--------
 1 file changed, 53 insertions(+), 12 deletions(-)
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 8f69b98f68fc..4fff837363ed 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -5,6 +5,7 @@

 #include <linux/mm_types.h>
 #include <linux/scatterlist.h>
+#include <linux/dma-debug.h>

 #include <asm-generic/dma-coherent.h>
 #include <asm/memory.h>
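The only change here is the new <linux/dma-debug.h> include, which declares the debug_dma_*() hooks used throughout the rest of the patch. With CONFIG_DMA_API_DEBUG enabled they record every mapping and report errors such as double unmaps or direction mismatches; when it is disabled they are empty inlines, so the added calls cost nothing in production builds. A condensed sketch of that header pattern (illustrative, not the verbatim kernel header):

    #ifdef CONFIG_DMA_API_DEBUG
    extern void debug_dma_map_page(struct device *dev, struct page *page,
    			       size_t offset, size_t size, int direction,
    			       dma_addr_t dma_addr, bool map_single);
    #else
    /* no-op stub: the compiler removes the call entirely */
    static inline void debug_dma_map_page(struct device *dev, struct page *page,
    				      size_t offset, size_t size, int direction,
    				      dma_addr_t dma_addr, bool map_single)
    {
    }
    #endif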
@@ -297,13 +298,13 @@ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
 /*
  * The DMA API, implemented by dmabounce.c. See below for descriptions.
  */
-extern dma_addr_t dma_map_single(struct device *, void *, size_t,
+extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
 		enum dma_data_direction);
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
 		enum dma_data_direction);
-extern dma_addr_t dma_map_page(struct device *, struct page *,
+extern dma_addr_t __dma_map_page(struct device *, struct page *,
 		unsigned long, size_t, enum dma_data_direction);
-extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
+extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
 		enum dma_data_direction);

 /*
@@ -327,6 +328,34 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
 }


+static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
+		size_t size, enum dma_data_direction dir)
+{
+	__dma_single_cpu_to_dev(cpu_addr, size, dir);
+	return virt_to_dma(dev, cpu_addr);
+}
+
+static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir)
+{
+	__dma_page_cpu_to_dev(page, offset, size, dir);
+	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
+}
+
+static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+		handle & ~PAGE_MASK, size, dir);
+}
+#endif /* CONFIG_DMABOUNCE */
+
 /**
  * dma_map_single - map a single buffer for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
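With this hunk the four __dma_*() helpers exist in both configurations: when CONFIG_DMABOUNCE is set they are the extern functions renamed in the previous hunk and implemented in arch/arm/common/dmabounce.c; otherwise they are these new inlines performing only cache maintenance and address translation. The added #endif now closes the non-dmabounce branch here, so the public dma_map_*()/dma_unmap_*() wrappers that follow are compiled unconditionally. Condensed to one helper, the resulting split looks like this (sketch, not the literal header):

    #ifdef CONFIG_DMABOUNCE
    /* bounce-buffer aware version, out of line in arch/arm/common/dmabounce.c */
    extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
    		enum dma_data_direction);
    #else
    /* plain version: cache maintenance plus a trivial address translation */
    static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
    		size_t size, enum dma_data_direction dir)
    {
    	__dma_single_cpu_to_dev(cpu_addr, size, dir);
    	return virt_to_dma(dev, cpu_addr);
    }
    #endif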
@@ -344,11 +373,16 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
 static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 		size_t size, enum dma_data_direction dir)
 {
+	dma_addr_t addr;
+
 	BUG_ON(!valid_dma_direction(dir));

-	__dma_single_cpu_to_dev(cpu_addr, size, dir);
+	addr = __dma_map_single(dev, cpu_addr, size, dir);
+	debug_dma_map_page(dev, virt_to_page(cpu_addr),
+			(unsigned long)cpu_addr & ~PAGE_MASK, size,
+			dir, addr, true);

-	return virt_to_dma(dev, cpu_addr);
+	return addr;
 }

 /**
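dma_map_single() now funnels through __dma_map_single() and reports the result to dma-debug. Because dma-debug tracks single mappings through its page interface, the buffer is described as a page plus offset and flagged with map_single == true, so a later dma_unmap_page() on the same handle can be diagnosed as a map/unmap mismatch. Driver-visible behaviour is unchanged; a typical caller still looks like this (hypothetical snippet, my_start_tx(), buf and len are invented):

    static int my_start_tx(struct device *dev, void *buf, size_t len)
    {
    	dma_addr_t handle;

    	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    	if (dma_mapping_error(dev, handle))
    		return -ENOMEM;

    	/* ... hand 'handle' to the DMA engine, wait for completion ... */

    	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
    	return 0;
    }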
@@ -368,11 +402,14 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir)
 {
+	dma_addr_t addr;
+
 	BUG_ON(!valid_dma_direction(dir));

-	__dma_page_cpu_to_dev(page, offset, size, dir);
+	addr = __dma_map_page(dev, page, offset, size, dir);
+	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

-	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+	return addr;
 }

 /**
@@ -392,7 +429,8 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
+	debug_dma_unmap_page(dev, handle, size, dir, true);
+	__dma_unmap_single(dev, handle, size, dir);
 }

 /**
@@ -412,10 +450,9 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
-		handle & ~PAGE_MASK, size, dir);
+	debug_dma_unmap_page(dev, handle, size, dir, false);
+	__dma_unmap_page(dev, handle, size, dir);
 }
-#endif /* CONFIG_DMABOUNCE */

 /**
  * dma_sync_single_range_for_cpu
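The #endif removed here is the counterpart of the one added earlier: before this patch these two unmap wrappers were compiled only when CONFIG_DMABOUNCE was disabled, with dmabounce.c providing out-of-line replacements. Now the wrappers are built unconditionally and only the __dma_unmap_*() layer varies, which guarantees the debug hook fires in both configurations. The resulting call chain (sketch):

    dma_unmap_page(dev, handle, size, dir)
      -> debug_dma_unmap_page(dev, handle, size, dir, false)
      -> __dma_unmap_page(dev, handle, size, dir)
           CONFIG_DMABOUNCE: out of line, in arch/arm/common/dmabounce.c
           otherwise:        inline __dma_page_dev_to_cpu() cache maintenance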
@@ -441,6 +478,8 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 {
 	BUG_ON(!valid_dma_direction(dir));

+	debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
+
 	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
 		return;

@@ -453,6 +492,8 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 {
 	BUG_ON(!valid_dma_direction(dir));

+	debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
+
 	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
 		return;

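Both sync paths gain their debug hook after the direction check but before the dmabounce short cut, so dma-debug sees the sync whether or not the buffer was bounced. These are the calls that police CPU/device ownership handoffs on a long-lived streaming mapping; an illustrative sequence (assuming handle came from an earlier dma_map_single() with DMA_FROM_DEVICE, and consume() is an invented helper):

    dma_sync_single_for_cpu(dev, handle, size, DMA_FROM_DEVICE);
    /* the CPU owns the buffer and may read what the device wrote */
    consume(buf, size);
    dma_sync_single_for_device(dev, handle, size, DMA_FROM_DEVICE);
    /* ownership returns to the device; it may DMA into the buffer again */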