diff options
| author | Laura Abbott <lauraa@codeaurora.org> | 2014-03-14 15:52:23 -0400 |
|---|---|---|
| committer | Catalin Marinas <catalin.marinas@arm.com> | 2014-03-24 06:01:17 -0400 |
| commit | 6e8d7968e92f7668a2a615773ad3940f0219dcbd (patch) | |
| tree | 5a8f533606bb3d3e611472f72ec369714c5f450e | |
| parent | 31b1e940c5d47ee1a01baeccfb1b2b8890822d1a (diff) | |
arm64: Implement custom mmap functions for dma mapping
The current dma_ops do not specify an mmap function so mapping
falls back to the default implementation. There are at least
two issues with using the default implementation:
1) The pgprot is always pgprot_noncached (strongly ordered)
memory even with coherent operations
2) dma_common_mmap calls virt_to_page on the remapped non-coherent
address which leads to invalid memory being mapped.
Fix both of these issues by implementing a custom mmap function which
correctly accounts for remapped addresses and sets vm_page_prot
appropriately.
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
[catalin.marinas@arm.com: replaced "arm64_" with "__" prefix for consistency]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
| -rw-r--r-- | arch/arm64/mm/dma-mapping.c | 44 |
1 files changed, 44 insertions, 0 deletions
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 88fbc5e5bae7..81eea3d3249f 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c | |||
| @@ -221,9 +221,52 @@ static void __swiotlb_sync_sg_for_device(struct device *dev, | |||
| 221 | sg->length, dir); | 221 | sg->length, dir); |
| 222 | } | 222 | } |
| 223 | 223 | ||
| 224 | /* vma->vm_page_prot must be set appropriately before calling this function */ | ||
| 225 | static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma, | ||
| 226 | void *cpu_addr, dma_addr_t dma_addr, size_t size) | ||
| 227 | { | ||
| 228 | int ret = -ENXIO; | ||
| 229 | unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> | ||
| 230 | PAGE_SHIFT; | ||
| 231 | unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; | ||
| 232 | unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT; | ||
| 233 | unsigned long off = vma->vm_pgoff; | ||
| 234 | |||
| 235 | if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) | ||
| 236 | return ret; | ||
| 237 | |||
| 238 | if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) { | ||
| 239 | ret = remap_pfn_range(vma, vma->vm_start, | ||
| 240 | pfn + off, | ||
| 241 | vma->vm_end - vma->vm_start, | ||
| 242 | vma->vm_page_prot); | ||
| 243 | } | ||
| 244 | |||
| 245 | return ret; | ||
| 246 | } | ||
| 247 | |||
| 248 | static int __swiotlb_mmap_noncoherent(struct device *dev, | ||
| 249 | struct vm_area_struct *vma, | ||
| 250 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | ||
| 251 | struct dma_attrs *attrs) | ||
| 252 | { | ||
| 253 | vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot); | ||
| 254 | return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); | ||
| 255 | } | ||
| 256 | |||
| 257 | static int __swiotlb_mmap_coherent(struct device *dev, | ||
| 258 | struct vm_area_struct *vma, | ||
| 259 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | ||
| 260 | struct dma_attrs *attrs) | ||
| 261 | { | ||
| 262 | /* Just use whatever page_prot attributes were specified */ | ||
| 263 | return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); | ||
| 264 | } | ||
| 265 | |||
| 224 | struct dma_map_ops noncoherent_swiotlb_dma_ops = { | 266 | struct dma_map_ops noncoherent_swiotlb_dma_ops = { |
| 225 | .alloc = __dma_alloc_noncoherent, | 267 | .alloc = __dma_alloc_noncoherent, |
| 226 | .free = __dma_free_noncoherent, | 268 | .free = __dma_free_noncoherent, |
| 269 | .mmap = __swiotlb_mmap_noncoherent, | ||
| 227 | .map_page = __swiotlb_map_page, | 270 | .map_page = __swiotlb_map_page, |
| 228 | .unmap_page = __swiotlb_unmap_page, | 271 | .unmap_page = __swiotlb_unmap_page, |
| 229 | .map_sg = __swiotlb_map_sg_attrs, | 272 | .map_sg = __swiotlb_map_sg_attrs, |
| @@ -240,6 +283,7 @@ EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops); | |||
| 240 | struct dma_map_ops coherent_swiotlb_dma_ops = { | 283 | struct dma_map_ops coherent_swiotlb_dma_ops = { |
| 241 | .alloc = __dma_alloc_coherent, | 284 | .alloc = __dma_alloc_coherent, |
| 242 | .free = __dma_free_coherent, | 285 | .free = __dma_free_coherent, |
| 286 | .mmap = __swiotlb_mmap_coherent, | ||
| 243 | .map_page = swiotlb_map_page, | 287 | .map_page = swiotlb_map_page, |
| 244 | .unmap_page = swiotlb_unmap_page, | 288 | .unmap_page = swiotlb_unmap_page, |
| 245 | .map_sg = swiotlb_map_sg_attrs, | 289 | .map_sg = swiotlb_map_sg_attrs, |
