author		Robin Murphy <robin.murphy@arm.com>		2015-11-04 08:23:52 -0500
committer	Catalin Marinas <catalin.marinas@arm.com>	2015-11-16 05:05:35 -0500
commit		bd1c6ff74ce0bbd8cda6eb7763fa0e2625dfcc8b
tree		c9077a8f5382b8c535779396e67d44ff30c0f3fb
parent		8005c49d9aea74d382f474ce11afbbc7d7130bec
arm64/dma-mapping: Fix sizes in __iommu_{alloc,free}_attrs
The iommu-dma layer does its own size-alignment for coherent DMA
allocations based on IOMMU page sizes, but we still need to consider
CPU page sizes for the cases where a non-cacheable CPU mapping is
created. Whilst everything on the alloc/map path seems to implicitly
align things enough to make it work, some functions used by the
corresponding unmap/free path do not, which leads to problems freeing
odd-sized allocations. Either way it's something we really should be
handling explicitly, so do that to make both paths suitably robust.

Reported-by: Yong Wu <yong.wu@mediatek.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
 arch/arm64/mm/dma-mapping.c | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)
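The fix follows a simple pattern: keep the caller's original size for
everything the IOMMU layer sees (it performs its own alignment
internally), and page-align the size used for CPU-side mappings and
page-level frees. Below is a minimal standalone sketch of that pattern,
not kernel code: iommu_layer_free(), cpu_mapping_free() and the 4K
PAGE_SIZE are hypothetical stand-ins for illustration only.

/*
 * Illustrative sketch of the size-splitting pattern the patch applies.
 * The helper names and PAGE_SIZE value are assumptions, not kernel APIs.
 */
#include <stddef.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(s)	(((s) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

void iommu_layer_free(size_t iosize);	/* aligns internally: wants the original size */
void cpu_mapping_free(size_t size);	/* operates on whole pages: wants aligned size */

static void free_buffer(size_t size)
{
	size_t iosize = size;		/* preserve the size the IOMMU layer saw */

	size = PAGE_ALIGN(size);	/* CPU mappings always span whole pages */

	iommu_layer_free(iosize);	/* cf. iommu_dma_free()/iommu_dma_unmap_page() */
	cpu_mapping_free(size);		/* cf. dma_common_free_remap()/__free_pages() */
}

Passing the unaligned size to the CPU-side frees is what broke: an
odd-sized length gets truncated when converted to a whole number of
pages, so part of the allocation is never released.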
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 131a199114b4..97fd714035f9 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -552,10 +552,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 {
 	bool coherent = is_device_dma_coherent(dev);
 	int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
+	size_t iosize = size;
 	void *addr;
 
 	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
 		return NULL;
+
+	size = PAGE_ALIGN(size);
+
 	/*
 	 * Some drivers rely on this, and we probably don't want the
 	 * possibility of stale kernel data being read by devices anyway.
@@ -566,7 +570,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 		struct page **pages;
 		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
 
-		pages = iommu_dma_alloc(dev, size, gfp, ioprot, handle,
+		pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle,
 					flush_page);
 		if (!pages)
 			return NULL;
@@ -574,7 +578,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
 					      __builtin_return_address(0));
 		if (!addr)
-			iommu_dma_free(dev, pages, size, handle);
+			iommu_dma_free(dev, pages, iosize, handle);
 	} else {
 		struct page *page;
 		/*
@@ -591,7 +595,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 		if (!addr)
 			return NULL;
 
-		*handle = iommu_dma_map_page(dev, page, 0, size, ioprot);
+		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
 		if (iommu_dma_mapping_error(dev, *handle)) {
 			if (coherent)
 				__free_pages(page, get_order(size));
@@ -606,6 +610,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 			       dma_addr_t handle, struct dma_attrs *attrs)
 {
+	size_t iosize = size;
+
+	size = PAGE_ALIGN(size);
 	/*
 	 * @cpu_addr will be one of 3 things depending on how it was allocated:
 	 * - A remapped array of pages from iommu_dma_alloc(), for all
@@ -617,17 +624,17 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	 * Hence how dodgy the below logic looks...
 	 */
 	if (__in_atomic_pool(cpu_addr, size)) {
-		iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+		iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
 		__free_from_pool(cpu_addr, size);
 	} else if (is_vmalloc_addr(cpu_addr)){
 		struct vm_struct *area = find_vm_area(cpu_addr);
 
 		if (WARN_ON(!area || !area->pages))
 			return;
-		iommu_dma_free(dev, area->pages, size, &handle);
+		iommu_dma_free(dev, area->pages, iosize, &handle);
 		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
 	} else {
-		iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+		iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
 		__free_pages(virt_to_page(cpu_addr), get_order(size));
 	}
 }