Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
 arch/arm/mm/dma-mapping.c | 20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1aa664a1999..db23ae4aaaa 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -214,7 +214,8 @@ static int __init consistent_init(void)
 core_initcall(consistent_init);
 
 static void *
-__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
+__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
+	const void *caller)
 {
 	struct arm_vmregion *c;
 	size_t align;
@@ -241,7 +242,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
 	 * Allocate a virtual address in the consistent mapping region.
 	 */
 	c = arm_vmregion_alloc(&consistent_head, align, size,
-			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
+			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM), caller);
 	if (c) {
 		pte_t *pte;
 		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
@@ -320,14 +321,14 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
 
 #else	/* !CONFIG_MMU */
 
-#define __dma_alloc_remap(page, size, gfp, prot)	page_address(page)
+#define __dma_alloc_remap(page, size, gfp, prot, c)	page_address(page)
 #define __dma_free_remap(addr, size)			do { } while (0)
 
 #endif	/* CONFIG_MMU */
 
 static void *
 __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
-	    pgprot_t prot)
+	    pgprot_t prot, const void *caller)
 {
 	struct page *page;
 	void *addr;
@@ -349,7 +350,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 		return NULL;
 
 	if (!arch_is_coherent())
-		addr = __dma_alloc_remap(page, size, gfp, prot);
+		addr = __dma_alloc_remap(page, size, gfp, prot, caller);
 	else
 		addr = page_address(page);
 
@@ -374,7 +375,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gf
 		return memory;
 
 	return __dma_alloc(dev, size, handle, gfp,
-			   pgprot_dmacoherent(pgprot_kernel));
+			   pgprot_dmacoherent(pgprot_kernel),
+			   __builtin_return_address(0));
 }
 EXPORT_SYMBOL(dma_alloc_coherent);
 
@@ -386,7 +388,8 @@ void *
 dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
 {
 	return __dma_alloc(dev, size, handle, gfp,
-			   pgprot_writecombine(pgprot_kernel));
+			   pgprot_writecombine(pgprot_kernel),
+			   __builtin_return_address(0));
 }
 EXPORT_SYMBOL(dma_alloc_writecombine);
 
@@ -723,6 +726,9 @@ EXPORT_SYMBOL(dma_set_mask);
 
 static int __init dma_debug_do_init(void)
 {
+#ifdef CONFIG_MMU
+	arm_vmregion_create_proc("dma-mappings", &consistent_head);
+#endif
 	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
 	return 0;
 }
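
The pattern this diff introduces can be illustrated outside the kernel: each exported allocation entry point captures its own return address with __builtin_return_address(0) and passes it down to the internal allocator, which records it alongside the region so a later /proc-style dump can attribute every mapping to the code that requested it. The userspace sketch below is only an illustration of that shape under assumed names (tracked_alloc, my_alloc, dump_allocations are hypothetical and do not exist in the kernel); only the use of __builtin_return_address(0) mirrors the change above.

/*
 * Minimal userspace sketch of caller tracking for allocations.
 * tracked_alloc() plays the role of the internal allocator that
 * takes an explicit caller pointer; my_alloc() plays the role of
 * the exported entry point that records its own return address.
 * All names here are illustrative, not kernel APIs.
 */
#include <stdio.h>
#include <stdlib.h>

struct tracked_region {
	void *ptr;
	size_t size;
	const void *caller;	/* return address of the public entry point's caller */
};

static struct tracked_region regions[16];
static int nregions;

/* Internal helper: receives the caller explicitly, like __dma_alloc() above. */
static void *tracked_alloc(size_t size, const void *caller)
{
	void *p = malloc(size);

	if (p && nregions < 16) {
		regions[nregions].ptr = p;
		regions[nregions].size = size;
		regions[nregions].caller = caller;
		nregions++;
	}
	return p;
}

/* Public entry point: records who called it, like dma_alloc_coherent() above. */
void *my_alloc(size_t size)
{
	return tracked_alloc(size, __builtin_return_address(0));
}

/* Rough analogue of what a "dma-mappings" proc dump would print. */
static void dump_allocations(void)
{
	for (int i = 0; i < nregions; i++)
		printf("%p: %zu bytes, allocated from %p\n",
		       regions[i].ptr, regions[i].size,
		       (void *)regions[i].caller);
}

int main(void)
{
	my_alloc(64);
	my_alloc(128);
	dump_allocations();
	return 0;
}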