author		Alexey Brodkin <abrodkin@synopsys.com>		2016-11-03 11:06:13 -0400
committer	Vineet Gupta <vgupta@synopsys.com>		2016-11-03 13:01:07 -0400
commit		a79a812131b07254c09cf325ec68c0d05aaed0b5 (patch)
tree		0b4d5bd96a3999319b2f4f6c31c558bcda35c1b0
parent		8f6d9eb2a3f38f1acd04efa0aeb8b81f5373c923 (diff)
arc: Implement arch-specific dma_map_ops.mmap
We used to use the generic implementation of dma_map_ops.mmap, which is
dma_common_mmap(), but that only works for the simpler cached mappings
where vaddr == paddr.

If a driver requests an uncached DMA buffer, the kernel maps it to a
virtual address so that the MMU gets involved and the page's uncached
status is taken into account. In that case dma_common_mmap() ends up
mapping vaddr to vaddr for user-space, which is obviously wrong. For more
details please refer to the verbose explanation here [1].
So here we implement our own version of mmap() which always deals with
dma_addr and maps the underlying memory to user-space properly (note that
a DMA buffer mapped to user-space is always uncached because there's no
way to properly manage the cache from user-space).
[1] https://lkml.org/lkml/2016/10/26/973
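For context, below is a minimal driver-side sketch (not part of this patch) of how such a buffer typically reaches user-space: the driver allocates it with dma_alloc_coherent() and forwards its file mmap() handler to dma_mmap_coherent(), which in turn dispatches to the arch-specific dma_map_ops.mmap added by this patch. The foo_* names and fields are hypothetical.

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

struct foo_dev {			/* hypothetical driver state */
	struct device *dev;
	void *buf_cpu;			/* kernel vaddr from dma_alloc_coherent() */
	dma_addr_t buf_dma;		/* bus/DMA address of the same buffer */
	size_t buf_size;
};

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct foo_dev *foo = file->private_data;

	/*
	 * Hand the coherent buffer to user-space; with this patch the
	 * mapping is built from buf_dma (i.e. the underlying physical
	 * pages), not from the kernel's uncached vaddr.
	 */
	return dma_mmap_coherent(foo->dev, vma, foo->buf_cpu,
				 foo->buf_dma, foo->buf_size);
}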
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: <stable@vger.kernel.org> #4.5+
Signed-off-by: Alexey Brodkin <abrodkin@synopsys.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
-rw-r--r--	arch/arc/mm/dma.c	26
1 file changed, 26 insertions, 0 deletions
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 60aab5a7522b..cd8aad8226dd 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -105,6 +105,31 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
 	__free_pages(page, get_order(size));
 }
 
+static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+			void *cpu_addr, dma_addr_t dma_addr, size_t size,
+			unsigned long attrs)
+{
+	unsigned long user_count = vma_pages(vma);
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long pfn = __phys_to_pfn(plat_dma_to_phys(dev, dma_addr));
+	unsigned long off = vma->vm_pgoff;
+	int ret = -ENXIO;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+		return ret;
+
+	if (off < count && user_count <= (count - off)) {
+		ret = remap_pfn_range(vma, vma->vm_start,
+				      pfn + off,
+				      user_count << PAGE_SHIFT,
+				      vma->vm_page_prot);
+	}
+
+	return ret;
+}
+
 /*
  * streaming DMA Mapping API...
  * CPU accesses page via normal paddr, thus needs to explicitly made
@@ -193,6 +218,7 @@ static int arc_dma_supported(struct device *dev, u64 dma_mask)
 struct dma_map_ops arc_dma_ops = {
 	.alloc = arc_dma_alloc,
 	.free = arc_dma_free,
+	.mmap = arc_dma_mmap,
 	.map_page = arc_dma_map_page,
 	.map_sg = arc_dma_map_sg,
 	.sync_single_for_device = arc_dma_sync_single_for_device,
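And a user-space counterpart (again illustrative only, assuming the hypothetical driver above exposes a /dev/foo node and a 4 KiB buffer): since arc_dma_mmap() marks the mapping non-cached, the program observes device DMA writes directly, without any explicit cache maintenance.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4096;			/* assumed buffer size */
	int fd = open("/dev/foo", O_RDWR);	/* hypothetical device node */
	volatile uint32_t *buf;

	if (fd < 0)
		return 1;

	buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED) {
		close(fd);
		return 1;
	}

	/* Reads see device DMA writes directly: the mapping is uncached. */
	printf("first word: 0x%x\n", (unsigned)buf[0]);

	munmap((void *)buf, len);
	close(fd);
	return 0;
}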