author		Catalin Marinas <catalin.marinas@arm.com>	2013-05-21 12:35:19 -0400
committer	Catalin Marinas <catalin.marinas@arm.com>	2014-02-27 12:16:59 -0500
commit		7363590d2c4691593fd280f94b3deaeb5e83dbbd (patch)
tree		892b7ac3974015dd23401a1d993cbd930cee2fb9 /arch/arm64/mm
parent		3690951fc6d42f3a0903987677d0e592c49dd8db (diff)
arm64: Implement coherent DMA API based on swiotlb
This patch adds support for DMA API cache maintenance on SoCs without
hardware device cache coherency.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
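For orientation, the sketch below shows the generic DMA API calls that land in the new ops on a non-coherent SoC. It is illustrative only and not part of the patch; the device, buffer and sizes are placeholders.

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Illustrative only: "mydev" is a placeholder struct device. */
static int dma_api_example(struct device *mydev)
{
	dma_addr_t bus_addr, map_addr;
	void *desc;
	char *buf;
	int ret = -ENOMEM;

	/* coherent allocation: on a non-coherent SoC this reaches
	 * __dma_alloc_noncoherent(), which remaps cacheable pages with a
	 * non-cacheable attribute for the CPU */
	desc = dma_alloc_coherent(mydev, PAGE_SIZE, &bus_addr, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	buf = kmalloc(512, GFP_KERNEL);
	if (!buf)
		goto free_desc;

	/* streaming mapping: __swiotlb_map_page() performs the cache
	 * maintenance that matches the transfer direction */
	map_addr = dma_map_single(mydev, buf, 512, DMA_TO_DEVICE);
	if (dma_mapping_error(mydev, map_addr))
		goto free_buf;

	/* ... start the transfer and wait for completion ... */

	dma_unmap_single(mydev, map_addr, 512, DMA_TO_DEVICE);
	ret = 0;
free_buf:
	kfree(buf);
free_desc:
	dma_free_coherent(mydev, PAGE_SIZE, desc, bus_addr);
	return ret;
}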
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--	arch/arm64/mm/cache.S		78
-rw-r--r--	arch/arm64/mm/dma-mapping.c	162
2 files changed, 239 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 1ea9f26d1b70..97fcef535a8a 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -166,3 +166,81 @@ ENTRY(__flush_dcache_area)
 	dsb	sy
 	ret
 ENDPROC(__flush_dcache_area)
+
+/*
+ *	__dma_inv_range(start, end)
+ *	- start   - virtual start address of region
+ *	- end     - virtual end address of region
+ */
+__dma_inv_range:
+	dcache_line_size x2, x3
+	sub	x3, x2, #1
+	bic	x0, x0, x3
+	bic	x1, x1, x3
+1:	dc	ivac, x0			// invalidate D / U line
+	add	x0, x0, x2
+	cmp	x0, x1
+	b.lo	1b
+	dsb	sy
+	ret
+ENDPROC(__dma_inv_range)
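A rough C rendering of __dma_inv_range may help readers less fluent in A64 assembly. This is an illustrative sketch with a hypothetical name, not part of the patch: both addresses are rounded down to a D-cache line boundary, one `dc ivac` (invalidate to the point of coherency) is issued per line, and a final `dsb sy` ensures the maintenance completes before the routine returns.

#include <linux/types.h>

/* Illustrative C equivalent of __dma_inv_range (not in the patch). */
static void dma_inv_range_sketch(unsigned long start, unsigned long end,
				 unsigned long line)	/* D-cache line size */
{
	unsigned long mask = line - 1;

	start &= ~mask;					/* bic x0, x0, x3 */
	end &= ~mask;					/* bic x1, x1, x3 */
	do {						/* 1: */
		asm volatile("dc ivac, %0" : : "r" (start) : "memory");
		start += line;				/* add x0, x0, x2 */
	} while (start < end);				/* cmp + b.lo 1b */
	asm volatile("dsb sy" : : : "memory");
}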
+
+/*
+ *	__dma_clean_range(start, end)
+ *	- start   - virtual start address of region
+ *	- end     - virtual end address of region
+ */
+__dma_clean_range:
+	dcache_line_size x2, x3
+	sub	x3, x2, #1
+	bic	x0, x0, x3
+1:	dc	cvac, x0			// clean D / U line
+	add	x0, x0, x2
+	cmp	x0, x1
+	b.lo	1b
+	dsb	sy
+	ret
+ENDPROC(__dma_clean_range)
+
+/*
+ *	__dma_flush_range(start, end)
+ *	- start   - virtual start address of region
+ *	- end     - virtual end address of region
+ */
+ENTRY(__dma_flush_range)
+	dcache_line_size x2, x3
+	sub	x3, x2, #1
+	bic	x0, x0, x3
+1:	dc	civac, x0			// clean & invalidate D / U line
+	add	x0, x0, x2
+	cmp	x0, x1
+	b.lo	1b
+	dsb	sy
+	ret
+ENDPROC(__dma_flush_range)
+
+/*
+ *	__dma_map_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(__dma_map_area)
+	add	x1, x1, x0
+	cmp	w2, #DMA_FROM_DEVICE
+	b.eq	__dma_inv_range
+	b	__dma_clean_range
+ENDPROC(__dma_map_area)
+
+/*
+ *	__dma_unmap_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(__dma_unmap_area)
+	add	x1, x1, x0
+	cmp	w2, #DMA_TO_DEVICE
+	b.ne	__dma_inv_range
+	ret
+ENDPROC(__dma_unmap_area)
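In C terms the two entry points encode the following policy (the `add x1, x1, x0` above turns the (start, size) arguments into the (start, end) pair the range routines expect). Before a transfer, a buffer the device will write (DMA_FROM_DEVICE) has its stale CPU cache lines invalidated, while data the CPU produced is cleaned out to memory; after a transfer, any buffer the device may have written is invalidated so the CPU cannot consume lines it speculatively fetched while the DMA was in flight. The sketch below is illustrative only; the function names and C prototypes are hypothetical, the real routines are the assembly above.

#include <linux/dma-mapping.h>

/* Hypothetical C prototypes for the assembly range routines. */
extern void __dma_inv_range(const void *start, const void *end);
extern void __dma_clean_range(const void *start, const void *end);

static void dma_map_area_sketch(const void *start, size_t size,
				enum dma_data_direction dir)
{
	const void *end = start + size;		/* add x1, x1, x0 */

	if (dir == DMA_FROM_DEVICE)		/* device writes the buffer */
		__dma_inv_range(start, end);
	else					/* CPU wrote: push to memory */
		__dma_clean_range(start, end);
}

static void dma_unmap_area_sketch(const void *start, size_t size,
				  enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE)		/* device may have written */
		__dma_inv_range(start, start + size);
}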
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index afa91a26a72b..88fbc5e5bae7 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -78,7 +78,166 @@ static void __dma_free_coherent(struct device *dev, size_t size,
 	}
 }
 
-static struct dma_map_ops coherent_swiotlb_dma_ops = {
+static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
+				     dma_addr_t *dma_handle, gfp_t flags,
+				     struct dma_attrs *attrs)
+{
+	struct page *page, **map;
+	void *ptr, *coherent_ptr;
+	int order, i;
+
+	size = PAGE_ALIGN(size);
+	order = get_order(size);
+
+	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
+	if (!ptr)
+		goto no_mem;
+	map = kmalloc(sizeof(struct page *) << order, flags & ~GFP_DMA);
+	if (!map)
+		goto no_map;
+
+	/* remove any dirty cache lines on the kernel alias */
+	__dma_flush_range(ptr, ptr + size);
+
+	/* create a coherent mapping */
+	page = virt_to_page(ptr);
+	for (i = 0; i < (size >> PAGE_SHIFT); i++)
+		map[i] = page + i;
+	coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
+			    pgprot_dmacoherent(pgprot_default));
+	kfree(map);
+	if (!coherent_ptr)
+		goto no_map;
+
+	return coherent_ptr;
+
+no_map:
+	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
+no_mem:
+	*dma_handle = ~0;
+	return NULL;
+}
+
+static void __dma_free_noncoherent(struct device *dev, size_t size,
+				   void *vaddr, dma_addr_t dma_handle,
+				   struct dma_attrs *attrs)
+{
+	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
+
+	vunmap(vaddr);
+	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
+}
+
+static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
+				     unsigned long offset, size_t size,
+				     enum dma_data_direction dir,
+				     struct dma_attrs *attrs)
+{
+	dma_addr_t dev_addr;
+
+	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
+	__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+
+	return dev_addr;
+}
+
+
+static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
+				 size_t size, enum dma_data_direction dir,
+				 struct dma_attrs *attrs)
+{
+	__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
+}
+
+static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+				  int nelems, enum dma_data_direction dir,
+				  struct dma_attrs *attrs)
+{
+	struct scatterlist *sg;
+	int i, ret;
+
+	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
+	for_each_sg(sgl, sg, ret, i)
+		__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+			       sg->length, dir);
+
+	return ret;
+}
+
+static void __swiotlb_unmap_sg_attrs(struct device *dev,
+				     struct scatterlist *sgl, int nelems,
+				     enum dma_data_direction dir,
+				     struct dma_attrs *attrs)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgl, sg, nelems, i)
+		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+				 sg->length, dir);
+	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
+}
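As with the single-page path, the scatter-gather helpers call into swiotlb first (or last) and then walk the list applying per-range cache maintenance. An illustrative driver-side use follows; "mydev" and the sg_table are placeholders, not from the patch.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical driver using the scatter-gather path. */
static int start_sg_dma(struct device *mydev, struct sg_table *table)
{
	int count;

	/* reaches __swiotlb_map_sg_attrs() on a non-coherent SoC */
	count = dma_map_sg(mydev, table->sgl, table->nents, DMA_TO_DEVICE);
	if (!count)
		return -ENOMEM;

	/* ... program the device using sg_dma_address()/sg_dma_len()
	 * for the first "count" entries, then wait for completion ... */

	dma_unmap_sg(mydev, table->sgl, table->nents, DMA_TO_DEVICE);
	return 0;
}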
+
+static void __swiotlb_sync_single_for_cpu(struct device *dev,
+					  dma_addr_t dev_addr, size_t size,
+					  enum dma_data_direction dir)
+{
+	__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
+}
+
+static void __swiotlb_sync_single_for_device(struct device *dev,
+					     dma_addr_t dev_addr, size_t size,
+					     enum dma_data_direction dir)
+{
+	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
+	__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+}
+
+static void __swiotlb_sync_sg_for_cpu(struct device *dev,
+				      struct scatterlist *sgl, int nelems,
+				      enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgl, sg, nelems, i)
+		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+				 sg->length, dir);
+	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
+}
+
+static void __swiotlb_sync_sg_for_device(struct device *dev,
+					 struct scatterlist *sgl, int nelems,
+					 enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
+	for_each_sg(sgl, sg, nelems, i)
+		__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+			       sg->length, dir);
+}
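Note the ordering in the helpers above: the for_cpu variants perform the cache maintenance before calling into swiotlb, so a bounce-buffer copy made on behalf of the CPU reads data the device actually wrote; the for_device variants do it afterwards, so the copy is pushed out to memory before the device looks at it. An illustrative driver reusing one streaming mapping across transfers would pair the calls like this (all names are placeholders, not from the patch):

#include <linux/dma-mapping.h>

/* Placeholder consumer, not part of the patch. */
extern void consume_data(void *buf, size_t len);

/* Hypothetical reuse of one mapping across several RX transfers. */
static void rx_loop(struct device *mydev, void *buf, size_t len)
{
	dma_addr_t addr;
	int i;

	addr = dma_map_single(mydev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(mydev, addr))
		return;

	for (i = 0; i < 8; i++) {
		/* ... wait for the device to finish writing ... */

		/* hand the buffer to the CPU before reading it */
		dma_sync_single_for_cpu(mydev, addr, len, DMA_FROM_DEVICE);
		consume_data(buf, len);

		/* hand it back to the device for the next transfer */
		dma_sync_single_for_device(mydev, addr, len, DMA_FROM_DEVICE);
	}

	dma_unmap_single(mydev, addr, len, DMA_FROM_DEVICE);
}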
+
+struct dma_map_ops noncoherent_swiotlb_dma_ops = {
+	.alloc = __dma_alloc_noncoherent,
+	.free = __dma_free_noncoherent,
+	.map_page = __swiotlb_map_page,
+	.unmap_page = __swiotlb_unmap_page,
+	.map_sg = __swiotlb_map_sg_attrs,
+	.unmap_sg = __swiotlb_unmap_sg_attrs,
+	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
+	.sync_single_for_device = __swiotlb_sync_single_for_device,
+	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
+	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
+	.dma_supported = swiotlb_dma_supported,
+	.mapping_error = swiotlb_dma_mapping_error,
+};
+EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);
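Both ops tables are exported so the rest of the kernel can install the appropriate one per device; the selection mechanism itself is outside this diff (later series drive it from e.g. a "dma-coherent" devicetree property). As a sketch only, assuming the per-device dma_ops hook in arm64's dev->archdata of that era, a caller might do:

#include <linux/device.h>
#include <linux/dma-mapping.h>

extern struct dma_map_ops coherent_swiotlb_dma_ops;
extern struct dma_map_ops noncoherent_swiotlb_dma_ops;

/* Hypothetical per-device selection; not part of this patch. */
static void set_device_dma_ops(struct device *dev, bool coherent)
{
	dev->archdata.dma_ops = coherent ? &coherent_swiotlb_dma_ops
					 : &noncoherent_swiotlb_dma_ops;
}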
+
+struct dma_map_ops coherent_swiotlb_dma_ops = {
 	.alloc	= __dma_alloc_coherent,
 	.free	= __dma_free_coherent,
 	.map_page	= swiotlb_map_page,
@@ -92,6 +251,7 @@ static struct dma_map_ops coherent_swiotlb_dma_ops = {
 	.dma_supported	= swiotlb_dma_supported,
 	.mapping_error	= swiotlb_dma_mapping_error,
 };
+EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
 
 extern int swiotlb_late_init_with_default_size(size_t default_size);
 