Diffstat (limited to 'arch/arm64/mm/dma-mapping.c')
-rw-r--r--  arch/arm64/mm/dma-mapping.c  116
1 file changed, 52 insertions, 64 deletions
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index d92094203913..0a24b9b8c698 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -134,16 +134,17 @@ static void __dma_free_coherent(struct device *dev, size_t size,
 	swiotlb_free_coherent(dev, size, vaddr, dma_handle);
 }
 
-static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
-				      dma_addr_t *dma_handle, gfp_t flags,
-				      struct dma_attrs *attrs)
+static void *__dma_alloc(struct device *dev, size_t size,
+			 dma_addr_t *dma_handle, gfp_t flags,
+			 struct dma_attrs *attrs)
 {
 	struct page *page;
 	void *ptr, *coherent_ptr;
+	bool coherent = is_device_dma_coherent(dev);
 
 	size = PAGE_ALIGN(size);
 
-	if (!(flags & __GFP_WAIT)) {
+	if (!coherent && !(flags & __GFP_WAIT)) {
 		struct page *page = NULL;
 		void *addr = __alloc_from_pool(size, &page);
 
@@ -151,13 +152,16 @@ static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
 			*dma_handle = phys_to_dma(dev, page_to_phys(page));
 
 		return addr;
-
 	}
 
 	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
 	if (!ptr)
 		goto no_mem;
 
+	/* no need for non-cacheable mapping if coherent */
+	if (coherent)
+		return ptr;
+
 	/* remove any dirty cache lines on the kernel alias */
 	__dma_flush_range(ptr, ptr + size);
 
@@ -179,15 +183,17 @@ no_mem:
 	return NULL;
 }
 
-static void __dma_free_noncoherent(struct device *dev, size_t size,
-				    void *vaddr, dma_addr_t dma_handle,
-				    struct dma_attrs *attrs)
+static void __dma_free(struct device *dev, size_t size,
+		       void *vaddr, dma_addr_t dma_handle,
+		       struct dma_attrs *attrs)
 {
 	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
 
-	if (__free_from_pool(vaddr, size))
-		return;
-	vunmap(vaddr);
+	if (!is_device_dma_coherent(dev)) {
+		if (__free_from_pool(vaddr, size))
+			return;
+		vunmap(vaddr);
+	}
 	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
 }
 
@@ -199,7 +205,8 @@ static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
 	dma_addr_t dev_addr;
 
 	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
-	__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+	if (!is_device_dma_coherent(dev))
+		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
 
 	return dev_addr;
 }
@@ -209,7 +216,8 @@ static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
 				 size_t size, enum dma_data_direction dir,
 				 struct dma_attrs *attrs)
 {
-	__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+	if (!is_device_dma_coherent(dev))
+		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
 	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
 }
 
@@ -221,9 +229,10 @@ static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
 	int i, ret;
 
 	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
-	for_each_sg(sgl, sg, ret, i)
-		__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
-			       sg->length, dir);
+	if (!is_device_dma_coherent(dev))
+		for_each_sg(sgl, sg, ret, i)
+			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+				       sg->length, dir);
 
 	return ret;
 }
@@ -236,9 +245,10 @@ static void __swiotlb_unmap_sg_attrs(struct device *dev,
 	struct scatterlist *sg;
 	int i;
 
-	for_each_sg(sgl, sg, nelems, i)
-		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
-				 sg->length, dir);
+	if (!is_device_dma_coherent(dev))
+		for_each_sg(sgl, sg, nelems, i)
+			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+					 sg->length, dir);
 	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
 }
 
@@ -246,7 +256,8 @@ static void __swiotlb_sync_single_for_cpu(struct device *dev,
 					  dma_addr_t dev_addr, size_t size,
 					  enum dma_data_direction dir)
 {
-	__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+	if (!is_device_dma_coherent(dev))
+		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
 	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
 }
 
@@ -255,7 +266,8 @@ static void __swiotlb_sync_single_for_device(struct device *dev,
 					     enum dma_data_direction dir)
 {
 	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
-	__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+	if (!is_device_dma_coherent(dev))
+		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
 }
 
 static void __swiotlb_sync_sg_for_cpu(struct device *dev,
@@ -265,9 +277,10 @@ static void __swiotlb_sync_sg_for_cpu(struct device *dev,
 	struct scatterlist *sg;
 	int i;
 
-	for_each_sg(sgl, sg, nelems, i)
-		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
-				 sg->length, dir);
+	if (!is_device_dma_coherent(dev))
+		for_each_sg(sgl, sg, nelems, i)
+			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+					 sg->length, dir);
 	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
 }
 
@@ -279,9 +292,10 @@ static void __swiotlb_sync_sg_for_device(struct device *dev,
 	int i;
 
 	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
-	for_each_sg(sgl, sg, nelems, i)
-		__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
-			       sg->length, dir);
+	if (!is_device_dma_coherent(dev))
+		for_each_sg(sgl, sg, nelems, i)
+			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+				       sg->length, dir);
 }
 
 /* vma->vm_page_prot must be set appropriately before calling this function */
@@ -308,28 +322,20 @@ static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	return ret;
 }
 
-static int __swiotlb_mmap_noncoherent(struct device *dev,
+static int __swiotlb_mmap(struct device *dev,
 		struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		struct dma_attrs *attrs)
-{
-	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, false);
-	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-
-static int __swiotlb_mmap_coherent(struct device *dev,
-		struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		struct dma_attrs *attrs)
 {
-	/* Just use whatever page_prot attributes were specified */
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+					     is_device_dma_coherent(dev));
 	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
 }
 
-struct dma_map_ops noncoherent_swiotlb_dma_ops = {
-	.alloc = __dma_alloc_noncoherent,
-	.free = __dma_free_noncoherent,
-	.mmap = __swiotlb_mmap_noncoherent,
+static struct dma_map_ops swiotlb_dma_ops = {
+	.alloc = __dma_alloc,
+	.free = __dma_free,
+	.mmap = __swiotlb_mmap,
 	.map_page = __swiotlb_map_page,
 	.unmap_page = __swiotlb_unmap_page,
 	.map_sg = __swiotlb_map_sg_attrs,
@@ -341,24 +347,6 @@ struct dma_map_ops noncoherent_swiotlb_dma_ops = {
 	.dma_supported = swiotlb_dma_supported,
 	.mapping_error = swiotlb_dma_mapping_error,
 };
-EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);
-
-struct dma_map_ops coherent_swiotlb_dma_ops = {
-	.alloc = __dma_alloc_coherent,
-	.free = __dma_free_coherent,
-	.mmap = __swiotlb_mmap_coherent,
-	.map_page = swiotlb_map_page,
-	.unmap_page = swiotlb_unmap_page,
-	.map_sg = swiotlb_map_sg_attrs,
-	.unmap_sg = swiotlb_unmap_sg_attrs,
-	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
-	.sync_single_for_device = swiotlb_sync_single_for_device,
-	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
-	.sync_sg_for_device = swiotlb_sync_sg_for_device,
-	.dma_supported = swiotlb_dma_supported,
-	.mapping_error = swiotlb_dma_mapping_error,
-};
-EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
 
 extern int swiotlb_late_init_with_default_size(size_t default_size);
 
@@ -427,7 +415,7 @@ static int __init swiotlb_late_init(void)
 {
 	size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
 
-	dma_ops = &noncoherent_swiotlb_dma_ops;
+	dma_ops = &swiotlb_dma_ops;
 
 	return swiotlb_late_init_with_default_size(swiotlb_size);
 }