author	Rob Herring <rob.herring@calxeda.com>	2012-08-21 06:20:17 -0400
committer	Marek Szyprowski <m.szyprowski@samsung.com>	2012-10-02 02:58:06 -0400
commit	dd37e9405a8e85be49a60b2530efeb5f06bcb753 (patch)
tree	0480f0c6ed0941eeeba4f94190814e8c710504b6 /arch/arm/mm
parent	a0d271cbfed1dd50278c6b06bead3d00ba0a88f9 (diff)
ARM: add coherent dma ops
arch_is_coherent is problematic as it is a global symbol. This doesn't work for multi-platform kernels or platforms which can support per device coherent DMA.

This adds arm_coherent_dma_ops to be used for devices which are connected coherently (i.e. to the ACP port on Cortex-A9 or A15). The arm_dma_ops are modified at boot when arch_is_coherent is true.

Signed-off-by: Rob Herring <rob.herring@calxeda.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
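For context, a minimal sketch (not part of this patch) of how a platform might attach the new ops to a device it knows is wired to a coherent port such as the Cortex-A9 ACP. It assumes the extern declaration of arm_coherent_dma_ops that this series exposes in the ARM headers; example_init_coherent_device() is a hypothetical helper, while set_dma_ops() is ARM's existing per-device hook:

/*
 * Hypothetical platform code, sketch only: route one device's DMA
 * through arm_coherent_dma_ops so no CPU cache maintenance is done
 * on map/unmap/sync for that device.
 */
#include <linux/device.h>
#include <asm/dma-mapping.h>

extern struct dma_map_ops arm_coherent_dma_ops;	/* added by this series */

static void example_init_coherent_device(struct device *dev)
{
	/* per-device override replaces the global arch_is_coherent() test */
	set_dma_ops(dev, &arm_coherent_dma_ops);
}

With per-device ops, the coherent/non-coherent decision lives in the device's dma_map_ops rather than in a kernel-wide switch, which is what lets a single image support both coherent and non-coherent DMA masters.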
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/dma-mapping.c	71
1 file changed, 59 insertions(+), 12 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 13f555d6249..7d772c0a93f 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -73,11 +73,18 @@ static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     struct dma_attrs *attrs)
 {
-	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__dma_page_cpu_to_dev(page, offset, size, dir);
 	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
 }
 
+static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
+	     struct dma_attrs *attrs)
+{
+	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
 /**
  * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -96,7 +103,7 @@ static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
 				      handle & ~PAGE_MASK, size, dir);
 }
@@ -106,8 +113,7 @@ static void arm_dma_sync_single_for_cpu(struct device *dev,
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-	if (!arch_is_coherent())
-		__dma_page_dev_to_cpu(page, offset, size, dir);
+	__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
 static void arm_dma_sync_single_for_device(struct device *dev,
@@ -115,8 +121,7 @@ static void arm_dma_sync_single_for_device(struct device *dev,
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-	if (!arch_is_coherent())
-		__dma_page_cpu_to_dev(page, offset, size, dir);
+	__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
 static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
@@ -138,6 +143,22 @@ struct dma_map_ops arm_dma_ops = {
 };
 EXPORT_SYMBOL(arm_dma_ops);
 
+static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
+	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
+static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
+				  dma_addr_t handle, struct dma_attrs *attrs);
+
+struct dma_map_ops arm_coherent_dma_ops = {
+	.alloc			= arm_coherent_dma_alloc,
+	.free			= arm_coherent_dma_free,
+	.mmap			= arm_dma_mmap,
+	.get_sgtable		= arm_dma_get_sgtable,
+	.map_page		= arm_coherent_dma_map_page,
+	.map_sg			= arm_dma_map_sg,
+	.set_dma_mask		= arm_dma_set_mask,
+};
+EXPORT_SYMBOL(arm_coherent_dma_ops);
+
 static u64 get_coherent_dma_mask(struct device *dev)
 {
 	u64 mask = (u64)arm_dma_limit;
@@ -586,7 +607,7 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
 
 
 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-			 gfp_t gfp, pgprot_t prot, const void *caller)
+			 gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
 {
 	u64 mask = get_coherent_dma_mask(dev);
 	struct page *page;
@@ -619,7 +640,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	*handle = DMA_ERROR_CODE;
 	size = PAGE_ALIGN(size);
 
-	if (arch_is_coherent() || nommu())
+	if (is_coherent || nommu())
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
 	else if (gfp & GFP_ATOMIC)
 		addr = __alloc_from_pool(size, &page);
@@ -647,7 +668,20 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	if (dma_alloc_from_coherent(dev, size, handle, &memory))
 		return memory;
 
-	return __dma_alloc(dev, size, handle, gfp, prot,
+	return __dma_alloc(dev, size, handle, gfp, prot, false,
+			   __builtin_return_address(0));
+}
+
+static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
+	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+	void *memory;
+
+	if (dma_alloc_from_coherent(dev, size, handle, &memory))
+		return memory;
+
+	return __dma_alloc(dev, size, handle, gfp, prot, true,
 			   __builtin_return_address(0));
 }
 
@@ -684,8 +718,9 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 /*
  * Free a buffer as defined by the above mapping.
  */
-void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
-		  dma_addr_t handle, struct dma_attrs *attrs)
+static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+			   dma_addr_t handle, struct dma_attrs *attrs,
+			   bool is_coherent)
 {
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
 
@@ -694,7 +729,7 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 
 	size = PAGE_ALIGN(size);
 
-	if (arch_is_coherent() || nommu()) {
+	if (is_coherent || nommu()) {
 		__dma_free_buffer(page, size);
 	} else if (__free_from_pool(cpu_addr, size)) {
 		return;
@@ -710,6 +745,18 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 	}
 }
 
+void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+		  dma_addr_t handle, struct dma_attrs *attrs)
+{
+	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
+}
+
+static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
+				  dma_addr_t handle, struct dma_attrs *attrs)
+{
+	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
+}
+
 int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 		 void *cpu_addr, dma_addr_t handle, size_t size,
 		 struct dma_attrs *attrs)