author     Christoph Hellwig <hch@lst.de>    2018-12-06 15:43:30 -0500
committer  Christoph Hellwig <hch@lst.de>    2018-12-13 15:06:10 -0500
commit     7249c1a52df9967cd23550f3dc24fb6ca43cdc6a
tree       c87a0b9027e18b36cbb7ef9b87fb2db30619067f  /kernel/dma/mapping.c
parent     05887cb610a54bf568de7f0bc07c4a64e45ac6f9
dma-mapping: move various slow path functions out of line
There is no need to have all setup and coherent allocation / freeing
routines inline. Move them out of line to keep the implementation
nicely encapsulated and save some kernel text size.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'kernel/dma/mapping.c')
 -rw-r--r--   kernel/dma/mapping.c   140
 1 file changed, 138 insertions, 2 deletions
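The pattern being applied is the usual "uninline" move: wrappers that previously lived as static inlines in include/linux/dma-mapping.h become single, exported definitions in kernel/dma/mapping.c, so each call site shrinks from an expanded wrapper body to a plain function call. The header side of the change is outside this diffstat-limited view; the sketch below only illustrates that before/after shape for one of the affected wrappers and is not the literal header diff.

/* Illustration only -- not the literal include/linux/dma-mapping.h change. */

/* Before: the wrapper body was expanded inline at every call site. */
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
				attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
			attrs);
}

/*
 * After: the header carries only a declaration; the body is emitted once in
 * kernel/dma/mapping.c (see the hunks below) and exported for modules.
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);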
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index dfe29d18dba1..176ae3e08916 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -223,7 +223,20 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
 		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
 	return ret;
 }
-EXPORT_SYMBOL(dma_common_get_sgtable);
+
+int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+	BUG_ON(!ops);
+	if (ops->get_sgtable)
+		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
+				attrs);
+	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
+			attrs);
+}
+EXPORT_SYMBOL(dma_get_sgtable_attrs);
 
 /*
  * Create userspace mapping for the DMA-coherent memory.
@@ -261,7 +274,31 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	return -ENXIO;
 #endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
 }
-EXPORT_SYMBOL(dma_common_mmap);
+
+/**
+ * dma_mmap_attrs - map a coherent DMA allocation into user space
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @vma: vm_area_struct describing requested user mapping
+ * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
+ * @dma_addr: device-view address returned from dma_alloc_attrs
+ * @size: size of memory originally requested in dma_alloc_attrs
+ * @attrs: attributes of mapping properties requested in dma_alloc_attrs
+ *
+ * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
+ * space. The coherent DMA buffer must not be freed by the driver until the
+ * user space mapping has been released.
+ */
+int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+	BUG_ON(!ops);
+	if (ops->mmap)
+		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
+EXPORT_SYMBOL(dma_mmap_attrs);
 
 #ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
 static u64 dma_default_get_required_mask(struct device *dev)
@@ -294,3 +331,102 @@ u64 dma_get_required_mask(struct device *dev)
 EXPORT_SYMBOL_GPL(dma_get_required_mask);
 #endif
 
+#ifndef arch_dma_alloc_attrs
+#define arch_dma_alloc_attrs(dev)	(true)
+#endif
+
+void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		gfp_t flag, unsigned long attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+	void *cpu_addr;
+
+	BUG_ON(!ops);
+	WARN_ON_ONCE(dev && !dev->coherent_dma_mask);
+
+	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
+		return cpu_addr;
+
+	/* let the implementation decide on the zone to allocate from: */
+	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
+
+	if (!arch_dma_alloc_attrs(&dev))
+		return NULL;
+	if (!ops->alloc)
+		return NULL;
+
+	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
+	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+	return cpu_addr;
+}
+EXPORT_SYMBOL(dma_alloc_attrs);
+
+void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+		dma_addr_t dma_handle, unsigned long attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!ops);
+
+	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
+		return;
+	/*
+	 * On non-coherent platforms which implement DMA-coherent buffers via
+	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
+	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
+	 * sleep on some machines, and b) an indication that the driver is
+	 * probably misusing the coherent API anyway.
+	 */
+	WARN_ON(irqs_disabled());
+
+	if (!ops->free || !cpu_addr)
+		return;
+
+	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+	ops->free(dev, size, cpu_addr, dma_handle, attrs);
+}
+EXPORT_SYMBOL(dma_free_attrs);
+
+static inline void dma_check_mask(struct device *dev, u64 mask)
+{
+	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
+		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
+}
+
+int dma_supported(struct device *dev, u64 mask)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (!ops)
+		return 0;
+	if (!ops->dma_supported)
+		return 1;
+	return ops->dma_supported(dev, mask);
+}
+EXPORT_SYMBOL(dma_supported);
+
+#ifndef HAVE_ARCH_DMA_SET_MASK
+int dma_set_mask(struct device *dev, u64 mask)
+{
+	if (!dev->dma_mask || !dma_supported(dev, mask))
+		return -EIO;
+
+	dma_check_mask(dev, mask);
+	*dev->dma_mask = mask;
+	return 0;
+}
+EXPORT_SYMBOL(dma_set_mask);
+#endif
+
+#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+	if (!dma_supported(dev, mask))
+		return -EIO;
+
+	dma_check_mask(dev, mask);
+	dev->coherent_dma_mask = mask;
+	return 0;
+}
+EXPORT_SYMBOL(dma_set_coherent_mask);
+#endif
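From a driver's point of view nothing changes except code size: the calls below keep their existing signatures, they just now resolve to the exported out-of-line functions added above instead of being expanded at every call site. A minimal usage sketch follows, with hypothetical foo_* helper names and mask/size choices picked purely for illustration.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical helpers; only the dma_* calls are real kernel API. */
static int foo_alloc_ring(struct device *dev, size_t ring_size,
		void **ring, dma_addr_t *ring_dma)
{
	int ret;

	/* dma_set_mask()/dma_set_coherent_mask() are now out of line too */
	ret = dma_set_mask(dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;
	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	/* resolves to the exported dma_alloc_attrs() in kernel/dma/mapping.c */
	*ring = dma_alloc_attrs(dev, ring_size, ring_dma, GFP_KERNEL, 0);
	if (!*ring)
		return -ENOMEM;
	return 0;
}

static void foo_free_ring(struct device *dev, size_t ring_size,
		void *ring, dma_addr_t ring_dma)
{
	/* resolves to the exported dma_free_attrs() */
	dma_free_attrs(dev, ring_size, ring, ring_dma, 0);
}

Drivers using the dma_alloc_coherent()/dma_free_coherent() convenience wrappers end up on the same out-of-line paths, since those wrappers call dma_alloc_attrs()/dma_free_attrs() with attrs == 0.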