author     Laura Abbott <lauraa@codeaurora.org>    2014-10-09 18:26:40 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2014-10-09 22:25:52 -0400
commit     513510ddba9650fc7da456eefeb0ead7632324f6 (patch)
tree       8e21369797216203ba554d99d031403c5b8d95cb /arch
parent     9efb3a421d55d30b65fb0dbee05108d15c6c55f7 (diff)
common: dma-mapping: introduce common remapping functions
For architectures without coherent DMA, memory for DMA may need to be
remapped with coherent attributes.  Factor out the remapping code from
arm and put it in a common location to reduce code duplication.

As part of this, the arm APIs are migrated away from ioremap_page_range
to the common APIs, which use map_vm_area for remapping.  This should be
an equivalent change, and using map_vm_area is more correct:
ioremap_page_range is intended for bringing I/O addresses into the CPU
address space, not for regular kernel-managed memory.

Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: David Riley <davidriley@chromium.org>
Cc: Olof Johansson <olof@lixom.net>
Cc: Ritesh Harjain <ritesh.harjani@gmail.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Thierry Reding <thierry.reding@gmail.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: Laura Abbott <lauraa@codeaurora.org>
Cc: Mitchel Humpherys <mitchelh@codeaurora.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
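The common helpers called from the arm side below (dma_common_contiguous_remap,
dma_common_pages_remap, dma_common_free_remap) land in generic code and are
outside this arch-limited diffstat.  As a rough sketch of the alloc-side
helpers, assuming the map_vm_area()-based approach the message describes
(bodies approximated from the description, not quoted from the patch):

/*
 * Sketch of the common alloc-side remapping helpers; approximated,
 * the exact upstream bodies may differ in detail.
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, vm_flags, caller);
	if (!area)
		return NULL;

	area->pages = pages;

	/*
	 * map_vm_area(), not ioremap_page_range(): these are ordinary
	 * kernel-managed pages, not I/O addresses.
	 */
	if (map_vm_area(area, prot, pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}

void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller)
{
	int i;
	struct page **pages;
	void *ptr;
	unsigned long pfn;

	/*
	 * Assumed shape: build a temporary page array so the contiguous
	 * case can reuse the pages-based remap above.
	 */
	pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0, pfn = page_to_pfn(page); i < (size >> PAGE_SHIFT); i++)
		pages[i] = pfn_to_page(pfn + i);

	ptr = dma_common_pages_remap(pages, size, vm_flags, prot, caller);

	kfree(pages);

	return ptr;
}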
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/mm/dma-mapping.c  57
1 file changed, 9 insertions(+), 48 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 7a996aaa061e..eecc8e60deea 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -298,37 +298,19 @@ static void *
 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
 	const void *caller)
 {
-	struct vm_struct *area;
-	unsigned long addr;
-
 	/*
 	 * DMA allocation can be mapped to user space, so lets
 	 * set VM_USERMAP flags too.
 	 */
-	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
-				  caller);
-	if (!area)
-		return NULL;
-	addr = (unsigned long)area->addr;
-	area->phys_addr = __pfn_to_phys(page_to_pfn(page));
-
-	if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
-		vunmap((void *)addr);
-		return NULL;
-	}
-	return (void *)addr;
+	return dma_common_contiguous_remap(page, size,
+			VM_ARM_DMA_CONSISTENT | VM_USERMAP,
+			prot, caller);
 }
 
 static void __dma_free_remap(void *cpu_addr, size_t size)
 {
-	unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
-	struct vm_struct *area = find_vm_area(cpu_addr);
-	if (!area || (area->flags & flags) != flags) {
-		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
-		return;
-	}
-	unmap_kernel_range((unsigned long)cpu_addr, size);
-	vunmap(cpu_addr);
+	dma_common_free_remap(cpu_addr, size,
+			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
 }
 
 #define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
@@ -1271,29 +1253,8 @@ static void *
 __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
 		    const void *caller)
 {
-	unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	struct vm_struct *area;
-	unsigned long p;
-
-	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
-				  caller);
-	if (!area)
-		return NULL;
-
-	area->pages = pages;
-	area->nr_pages = nr_pages;
-	p = (unsigned long)area->addr;
-
-	for (i = 0; i < nr_pages; i++) {
-		phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
-		if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
-			goto err;
-		p += PAGE_SIZE;
-	}
-	return area->addr;
-err:
-	unmap_kernel_range((unsigned long)area->addr, size);
-	vunmap(area->addr);
+	return dma_common_pages_remap(pages, size,
+			VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
 	return NULL;
 }
 
@@ -1501,8 +1462,8 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	}
 
 	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
-		unmap_kernel_range((unsigned long)cpu_addr, size);
-		vunmap(cpu_addr);
+		dma_common_free_remap(cpu_addr, size,
+				VM_ARM_DMA_CONSISTENT | VM_USERMAP);
 	}
 
 	__iommu_remove_mapping(dev, handle, size);
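For reference, the free-side counterpart that the new call sites rely on can
be sketched from the ARM code removed above: the flag check, the WARN on a
bogus area, and the unmap_kernel_range/vunmap steps move essentially
unchanged into a common helper, with the VM flags passed in by the caller
(again an approximation, not the verbatim patch):

/*
 * Sketch of the common free-side helper; mirrors the deleted ARM
 * __dma_free_remap() body, generalized over vm_flags.
 */
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	/* Refuse to free mappings we did not create. */
	if (!area || (area->flags & vm_flags) != vm_flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	/* Tear down the page tables, then release the vmalloc area. */
	unmap_kernel_range((unsigned long)cpu_addr, size);
	vunmap(cpu_addr);
}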