author     Russell King <rmk+kernel@arm.linux.org.uk>   2011-01-03 06:29:28 -0500
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2011-01-06 17:31:11 -0500
commit     24056f525051a9e186af28904b396320e18bf9a0 (patch)
tree       a8580f24820e21ad48333fce6b5f03be55edd561 /arch/arm
parent     9eedd96301cad8ab58ee8c1e579677d0a75c2ba1 (diff)
ARM: DMA: add support for DMA debugging
Add ARM support for the DMA debug infrastructure, which allows DMA API
usage to be debugged.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
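
For illustration only (not part of the patch): with CONFIG_DMA_API_DEBUG
enabled, every streaming mapping a driver makes is now tracked by the
generic debug core, which warns when an unmap or sync call disagrees with
the matching map. A minimal sketch of the kind of driver code being
checked, with hypothetical names (my_dev_tx, buf):

        #include <linux/dma-mapping.h>

        /* Hypothetical driver fragment; names are illustrative only. */
        static int my_dev_tx(struct device *dev, void *buf, size_t len)
        {
                dma_addr_t handle;

                handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, handle))
                        return -ENOMEM;

                /* ... start the transfer and wait for it to complete ... */

                /*
                 * dma-debug warns here if handle, len or direction
                 * disagree with the dma_map_single() call above.
                 */
                dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
                return 0;
        }
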
Diffstat (limited to 'arch/arm')
 arch/arm/Kconfig                   |  1
 arch/arm/common/dmabounce.c        | 16
 arch/arm/include/asm/dma-mapping.h | 65
 arch/arm/mm/dma-mapping.c          | 24
 4 files changed, 83 insertions(+), 23 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 47694865018..ca2ab3d2863 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2,6 +2,7 @@ config ARM
         bool
         default y
         select HAVE_AOUT
+        select HAVE_DMA_API_DEBUG
         select HAVE_IDE
         select HAVE_MEMBLOCK
         select RTC_LIB
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index cc0a932bbea..e5681636626 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -328,7 +328,7 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
+dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size,
                 enum dma_data_direction dir)
 {
         dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -338,7 +338,7 @@ dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
 
         return map_single(dev, ptr, size, dir);
 }
-EXPORT_SYMBOL(dma_map_single);
+EXPORT_SYMBOL(__dma_map_single);
 
 /*
  * see if a mapped address was really a "safe" buffer and if so, copy
@@ -346,7 +346,7 @@ EXPORT_SYMBOL(dma_map_single);
  * the safe buffer. (basically return things back to the way they
  * should be)
  */
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction dir)
 {
         dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -354,9 +354,9 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 
         unmap_single(dev, dma_addr, size, dir);
 }
-EXPORT_SYMBOL(dma_unmap_single);
+EXPORT_SYMBOL(__dma_unmap_single);
 
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
+dma_addr_t __dma_map_page(struct device *dev, struct page *page,
                 unsigned long offset, size_t size, enum dma_data_direction dir)
 {
         dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
@@ -372,7 +372,7 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
 
         return map_single(dev, page_address(page) + offset, size, dir);
 }
-EXPORT_SYMBOL(dma_map_page);
+EXPORT_SYMBOL(__dma_map_page);
 
 /*
  * see if a mapped address was really a "safe" buffer and if so, copy
@@ -380,7 +380,7 @@ EXPORT_SYMBOL(dma_map_page);
  * the safe buffer. (basically return things back to the way they
  * should be)
  */
-void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction dir)
 {
         dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -388,7 +388,7 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 
         unmap_single(dev, dma_addr, size, dir);
 }
-EXPORT_SYMBOL(dma_unmap_page);
+EXPORT_SYMBOL(__dma_unmap_page);
 
 int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
                 unsigned long off, size_t sz, enum dma_data_direction dir)
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 8f69b98f68f..4fff837363e 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -5,6 +5,7 @@
 
 #include <linux/mm_types.h>
 #include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
 
 #include <asm-generic/dma-coherent.h>
 #include <asm/memory.h>
@@ -297,13 +298,13 @@ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
 /*
  * The DMA API, implemented by dmabounce.c. See below for descriptions.
  */
-extern dma_addr_t dma_map_single(struct device *, void *, size_t,
+extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
                 enum dma_data_direction);
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
                 enum dma_data_direction);
-extern dma_addr_t dma_map_page(struct device *, struct page *,
+extern dma_addr_t __dma_map_page(struct device *, struct page *,
                 unsigned long, size_t, enum dma_data_direction);
-extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
+extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
                 enum dma_data_direction);
 
 /*
@@ -327,6 +328,34 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
 }
 
 
+static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
+                size_t size, enum dma_data_direction dir)
+{
+        __dma_single_cpu_to_dev(cpu_addr, size, dir);
+        return virt_to_dma(dev, cpu_addr);
+}
+
+static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
+                unsigned long offset, size_t size, enum dma_data_direction dir)
+{
+        __dma_page_cpu_to_dev(page, offset, size, dir);
+        return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
+                size_t size, enum dma_data_direction dir)
+{
+        __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
+}
+
+static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
+                size_t size, enum dma_data_direction dir)
+{
+        __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+                handle & ~PAGE_MASK, size, dir);
+}
+#endif /* CONFIG_DMABOUNCE */
+
 /**
  * dma_map_single - map a single buffer for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -344,11 +373,16 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
 static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
                 size_t size, enum dma_data_direction dir)
 {
+        dma_addr_t addr;
+
         BUG_ON(!valid_dma_direction(dir));
 
-        __dma_single_cpu_to_dev(cpu_addr, size, dir);
+        addr = __dma_map_single(dev, cpu_addr, size, dir);
+        debug_dma_map_page(dev, virt_to_page(cpu_addr),
+                        (unsigned long)cpu_addr & ~PAGE_MASK, size,
+                        dir, addr, true);
 
-        return virt_to_dma(dev, cpu_addr);
+        return addr;
 }
 
 /**
@@ -368,11 +402,14 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                 unsigned long offset, size_t size, enum dma_data_direction dir)
 {
+        dma_addr_t addr;
+
         BUG_ON(!valid_dma_direction(dir));
 
-        __dma_page_cpu_to_dev(page, offset, size, dir);
+        addr = __dma_map_page(dev, page, offset, size, dir);
+        debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 
-        return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+        return addr;
 }
 
 /**
@@ -392,7 +429,8 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
                 size_t size, enum dma_data_direction dir)
 {
-        __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
+        debug_dma_unmap_page(dev, handle, size, dir, true);
+        __dma_unmap_single(dev, handle, size, dir);
 }
 
 /**
@@ -412,10 +450,9 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
                 size_t size, enum dma_data_direction dir)
 {
-        __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
-                handle & ~PAGE_MASK, size, dir);
+        debug_dma_unmap_page(dev, handle, size, dir, false);
+        __dma_unmap_page(dev, handle, size, dir);
 }
-#endif /* CONFIG_DMABOUNCE */
 
 /**
  * dma_sync_single_range_for_cpu
@@ -441,6 +478,8 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 {
         BUG_ON(!valid_dma_direction(dir));
 
+        debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
+
         if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
                 return;
 
@@ -453,6 +492,8 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 {
         BUG_ON(!valid_dma_direction(dir));
 
+        debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
+
         if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
                 return;
 
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 44e72108d7a..85f9361f3e0 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -554,17 +554,20 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
         struct scatterlist *s;
         int i, j;
 
+        BUG_ON(!valid_dma_direction(dir));
+
         for_each_sg(sg, s, nents, i) {
-                s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+                s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
                                                 s->length, dir);
                 if (dma_mapping_error(dev, s->dma_address))
                         goto bad_mapping;
         }
+        debug_dma_map_sg(dev, sg, nents, nents, dir);
         return nents;
 
  bad_mapping:
         for_each_sg(sg, s, i, j)
-                dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+                __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
         return 0;
 }
 EXPORT_SYMBOL(dma_map_sg);
@@ -585,8 +588,10 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
         struct scatterlist *s;
         int i;
 
+        debug_dma_unmap_sg(dev, sg, nents, dir);
+
         for_each_sg(sg, s, nents, i)
-                dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+                __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
 }
 EXPORT_SYMBOL(dma_unmap_sg);
 
@@ -611,6 +616,8 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                 __dma_page_dev_to_cpu(sg_page(s), s->offset,
                                         s->length, dir);
         }
+
+        debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
 }
 EXPORT_SYMBOL(dma_sync_sg_for_cpu);
 
@@ -635,5 +642,16 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                 __dma_page_cpu_to_dev(sg_page(s), s->offset,
                                         s->length, dir);
         }
+
+        debug_dma_sync_sg_for_device(dev, sg, nents, dir);
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+#define PREALLOC_DMA_DEBUG_ENTRIES        4096
+
+static int __init dma_debug_do_init(void)
+{
+        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+        return 0;
+}
+fs_initcall(dma_debug_do_init);
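
Note (not part of the patch): the generic dma-debug core hooked up here can
be tuned from the kernel command line. Per Documentation/DMA-API.txt,
booting with dma_debug=off disables the checks, and
dma_debug_entries=<number> overrides the PREALLOC_DMA_DEBUG_ENTRIES
default of 4096 chosen above.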