author		Lorenzo Nava <lorenx4@gmail.com>	2015-07-02 12:28:03 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2015-08-04 11:16:21 -0400
commit		21caf3a765b0a88f8fedf63b36e5d15683b73fe5 (patch)
tree		98536a516686bbf3a717e47955cc1c8b2af251a4
parent		1234e3fda9aa24b2d650bbcd9ef09d5f6a12dc86 (diff)
ARM: 8398/1: arm DMA: Fix allocation from CMA for coherent DMA
This patch allows the use of CMA for DMA coherent memory allocation.
At the moment, if the input parameter "is_coherent" is set to true,
the allocation is not made using CMA, which I think is not the
desired behaviour. The patch covers both the allocation and the
freeing of coherent DMA memory.

Signed-off-by: Lorenzo Nava <lorenx4@gmail.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
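For context, this allocator sits behind the standard DMA API, so the
behaviour change is visible to any driver that calls dma_alloc_coherent().
A minimal sketch of such a caller follows (the helper function, device
pointer and buffer size are hypothetical; dma_alloc_coherent() and
dma_free_coherent() are the real kernel interface):

	#include <linux/dma-mapping.h>
	#include <linux/sizes.h>

	/* Hypothetical driver helper: allocate a 64 KiB coherent DMA
	 * buffer. With this patch, a GFP_KERNEL allocation like this can
	 * be satisfied from CMA even on the coherent DMA ops path,
	 * provided a CMA area is available for the device.
	 */
	static int example_map_buffer(struct device *dev)
	{
		dma_addr_t dma_handle;
		void *vaddr;

		vaddr = dma_alloc_coherent(dev, SZ_64K, &dma_handle,
					   GFP_KERNEL);
		if (!vaddr)
			return -ENOMEM;

		/* ... hand dma_handle to the device, use vaddr from the
		 * CPU ... */

		dma_free_coherent(dev, SZ_64K, vaddr, dma_handle);
		return 0;
	}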
-rw-r--r--	arch/arm/mm/dma-mapping.c	21
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 5edf17cf043d..ad7419e69967 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -649,14 +649,18 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	size = PAGE_ALIGN(size);
 	want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 
-	if (is_coherent || nommu())
+	if (nommu())
+		addr = __alloc_simple_buffer(dev, size, gfp, &page);
+	else if (dev_get_cma_area(dev) && (gfp & __GFP_WAIT))
+		addr = __alloc_from_contiguous(dev, size, prot, &page,
+					       caller, want_vaddr);
+	else if (is_coherent)
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
 	else if (!(gfp & __GFP_WAIT))
 		addr = __alloc_from_pool(size, &page);
-	else if (!dev_get_cma_area(dev))
-		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
 	else
-		addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr);
+		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
+					    caller, want_vaddr);
 
 	if (page)
 		*handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -684,13 +688,12 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
 	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
-	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
 	void *memory;
 
 	if (dma_alloc_from_coherent(dev, size, handle, &memory))
 		return memory;
 
-	return __dma_alloc(dev, size, handle, gfp, prot, true,
+	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
 			   attrs, __builtin_return_address(0));
 }
 
@@ -754,12 +757,12 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 
 	size = PAGE_ALIGN(size);
 
-	if (is_coherent || nommu()) {
+	if (nommu()) {
 		__dma_free_buffer(page, size);
-	} else if (__free_from_pool(cpu_addr, size)) {
+	} else if (!is_coherent && __free_from_pool(cpu_addr, size)) {
 		return;
 	} else if (!dev_get_cma_area(dev)) {
-		if (want_vaddr)
+		if (want_vaddr && !is_coherent)
 			__dma_free_remap(cpu_addr, size);
 		__dma_free_buffer(page, size);
 	} else {
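Read flat, the new allocation order in __dma_alloc() looks as follows
(assembled from the first hunk above; the comments are added here for
illustration and are not part of the patch):

	if (nommu())
		/* no MMU: only a simple contiguous buffer is possible */
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (dev_get_cma_area(dev) && (gfp & __GFP_WAIT))
		/* a CMA area exists and we may sleep: allocate from CMA,
		 * now for coherent as well as non-coherent requests */
		addr = __alloc_from_contiguous(dev, size, prot, &page,
					       caller, want_vaddr);
	else if (is_coherent)
		/* coherent but no usable CMA: plain buffer, no remap */
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (!(gfp & __GFP_WAIT))
		/* atomic context: take memory from the atomic pool */
		addr = __alloc_from_pool(size, &page);
	else
		/* non-coherent, no CMA: allocate and remap */
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
					    caller, want_vaddr);

The free path mirrors this: __free_from_pool() and __dma_free_remap() are
now skipped for coherent buffers, while CMA-backed buffers of either kind
fall through to the final else branch and are returned to the CMA area.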