 arch/arm64/mm/dma-mapping.c | 65 +++++++++++++++++++++++++++++++++++-----------
 1 file changed, 49 insertions(+), 16 deletions(-)

diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index f7b54019ef55..c9e53dec3695 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -308,24 +308,15 @@ static void __swiotlb_sync_sg_for_device(struct device *dev,
 				       sg->length, dir);
 }
 
-static int __swiotlb_mmap(struct device *dev,
-			  struct vm_area_struct *vma,
-			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
-			  unsigned long attrs)
+static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
+			      unsigned long pfn, size_t size)
 {
 	int ret = -ENXIO;
 	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
 					PAGE_SHIFT;
 	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
 	unsigned long off = vma->vm_pgoff;
 
-	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
-					     is_device_dma_coherent(dev));
-
-	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
-		return ret;
-
 	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
 		ret = remap_pfn_range(vma, vma->vm_start,
 				      pfn + off,
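The partial-mmap check that __swiotlb_mmap_pfn() inherits unchanged is easy to misread, so here is a worked example. This is an illustrative stand-alone program, not kernel code; it only reproduces the off/nr_pages predicate from the helper.

/*
 * Worked example of the bounds check in __swiotlb_mmap_pfn(): with a
 * 3-page buffer (nr_pages = 3), a 2-page VMA at pgoff 1 maps pages 1..2
 * and is allowed; the same VMA at pgoff 2 would run past the buffer.
 */
#include <stdio.h>

static int mmap_range_ok(unsigned long nr_vma_pages,
			 unsigned long nr_pages, unsigned long off)
{
	/* Same predicate as the kernel helper. */
	return off < nr_pages && nr_vma_pages <= (nr_pages - off);
}

int main(void)
{
	printf("%d\n", mmap_range_ok(2, 3, 1));	/* 1: fits */
	printf("%d\n", mmap_range_ok(2, 3, 2));	/* 0: overruns buffer */
	return 0;
}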
@@ -336,19 +327,43 @@ static int __swiotlb_mmap(struct device *dev,
 	return ret;
 }
 
-static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
-				 void *cpu_addr, dma_addr_t handle, size_t size,
-				 unsigned long attrs)
+static int __swiotlb_mmap(struct device *dev,
+			  struct vm_area_struct *vma,
+			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
+			  unsigned long attrs)
+{
+	int ret;
+	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
+
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+					     is_device_dma_coherent(dev));
+
+	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+		return ret;
+
+	return __swiotlb_mmap_pfn(vma, pfn, size);
+}
+
+static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
+				      struct page *page, size_t size)
 {
 	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
 
 	if (!ret)
-		sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
-			    PAGE_ALIGN(size), 0);
+		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
 
 	return ret;
 }
 
+static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
+				 void *cpu_addr, dma_addr_t handle, size_t size,
+				 unsigned long attrs)
+{
+	struct page *page = phys_to_page(dma_to_phys(dev, handle));
+
+	return __swiotlb_get_sgtable_page(sgt, page, size);
+}
+
 static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
 	if (swiotlb)
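For context, a hedged sketch of how a driver would reach the helpers split out above: dma_mmap_coherent() and dma_get_sgtable() are the generic DMA API entry points that, on an arm64 device using the swiotlb ops, dispatch to __swiotlb_mmap() and __swiotlb_get_sgtable(). The my_buf structure and function names below are hypothetical.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

struct my_buf {
	void *cpu_addr;
	dma_addr_t dma_addr;
	size_t size;
};

static int my_mmap(struct device *dev, struct my_buf *buf,
		   struct vm_area_struct *vma)
{
	/* Dispatches to __swiotlb_mmap() -> __swiotlb_mmap_pfn(). */
	return dma_mmap_coherent(dev, vma, buf->cpu_addr,
				 buf->dma_addr, buf->size);
}

static int my_export(struct device *dev, struct my_buf *buf,
		     struct sg_table *sgt)
{
	/* Dispatches to __swiotlb_get_sgtable() -> __swiotlb_get_sgtable_page(). */
	return dma_get_sgtable(dev, sgt, buf->cpu_addr,
			       buf->dma_addr, buf->size);
}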
@@ -703,6 +718,15 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
+	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+		/*
+		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
+		 * hence in the vmalloc space.
+		 */
+		unsigned long pfn = vmalloc_to_pfn(cpu_addr);
+		return __swiotlb_mmap_pfn(vma, pfn, size);
+	}
+
 	area = find_vm_area(cpu_addr);
 	if (WARN_ON(!area || !area->pages))
 		return -ENXIO;
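A hedged sketch of the path this hunk enables: a buffer allocated through the IOMMU ops with DMA_ATTR_FORCE_CONTIGUOUS is physically contiguous but remapped into vmalloc space, so find_vm_area() would not yield a ->pages array; vmalloc_to_pfn() recovers the PFN directly. The function below is hypothetical driver code using only standard DMA API calls.

#include <linux/dma-mapping.h>

static int my_alloc_and_mmap(struct device *dev, struct vm_area_struct *vma,
			     size_t size)
{
	dma_addr_t dma;
	void *cpu_addr;
	int ret;

	/* Contiguous allocation; on the IOMMU path it is remapped via vmalloc. */
	cpu_addr = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL,
				   DMA_ATTR_FORCE_CONTIGUOUS);
	if (!cpu_addr)
		return -ENOMEM;

	/* Reaches __iommu_mmap_attrs(), which now takes the new branch. */
	ret = dma_mmap_attrs(dev, vma, cpu_addr, dma, size,
			     DMA_ATTR_FORCE_CONTIGUOUS);
	if (ret)
		dma_free_attrs(dev, size, cpu_addr, dma,
			       DMA_ATTR_FORCE_CONTIGUOUS);
	return ret;
}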
@@ -717,6 +741,15 @@ static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	struct vm_struct *area = find_vm_area(cpu_addr);
 
+	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+		/*
+		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
+		 * hence in the vmalloc space.
+		 */
+		struct page *page = vmalloc_to_page(cpu_addr);
+		return __swiotlb_get_sgtable_page(sgt, page, size);
+	}
+
 	if (WARN_ON(!area || !area->pages))
 		return -ENXIO;
 
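Finally, a hedged sketch of exporting the same DMA_ATTR_FORCE_CONTIGUOUS allocation as an sg_table, e.g. for dma-buf sharing. Because the allocation is physically contiguous, a single scatterlist entry covers it; dma_get_sgtable_attrs() reaches __iommu_get_sgtable(), which now builds the table from vmalloc_to_page() via __swiotlb_get_sgtable_page(). The wrapper name is hypothetical.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int my_export_contiguous(struct device *dev, struct sg_table *sgt,
				void *cpu_addr, dma_addr_t dma, size_t size)
{
	/* Passing the attr again tells the IOMMU path to take the new branch. */
	return dma_get_sgtable_attrs(dev, sgt, cpu_addr, dma, size,
				     DMA_ATTR_FORCE_CONTIGUOUS);
}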