author     Christoph Hellwig <hch@lst.de>    2019-07-26 03:26:40 -0400
committer  Christoph Hellwig <hch@lst.de>    2019-08-10 13:52:45 -0400
commit     33dcb37cef741294b481f4d889a465b8091f11bf (patch)
tree       129f9cde5b52cd77e04382c99ff0421aa862354b /kernel/dma/mapping.c
parent     d8ad55538abe443919e20e0bb996561bca9cad84 (diff)
dma-mapping: fix page attributes for dma_mmap_*
All the way back to the introduction of dma_common_mmap we've defaulted to
marking the pages as uncached, but this is wrong for DMA coherent devices.
Later on, DMA_ATTR_WRITE_COMBINE also got incorrect treatment, as that flag
is only treated specially on the alloc side, and only for non-coherent
devices.
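For reference, the generic fallback that produced this behaviour looked
roughly like the following (a sketch of the pre-patch
include/linux/dma-noncoherent.h declaration, quoted from memory rather than
verbatim): every architecture without its own hook fell through to uncached
attributes, coherent device or not.

/* Sketch of the pre-patch fallback: without an arch override, every
 * dma_mmap_* mapping was forced uncached, even for coherent devices. */
#ifdef CONFIG_ARCH_HAS_DMA_MMAP_PGPROT
pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs);
#else
# define arch_dma_mmap_pgprot(dev, prot, attrs)	pgprot_noncached(prot)
#endif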
Introduce a new dma_pgprot helper that handles the check for coherent
devices, so that only the remapping cases ever reach arch_dma_mmap_pgprot
and no aliasing of page attributes can happen. This makes the powerpc
version of arch_dma_mmap_pgprot obsolete and simplifies the remaining ones.
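To show the path this affects, here is a minimal driver-side sketch; the
foo_* names and fields are hypothetical, only the dma_mmap_coherent() call
is real API:

/* Hypothetical driver mmap handler (foo_dev and its fields are made up). */
static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct foo_dev *fd = file->private_data;

	/*
	 * dma_mmap_coherent() ends up in dma_common_mmap(), which now
	 * derives vma->vm_page_prot via dma_pgprot(): the caller's
	 * cacheable protection is kept for coherent devices, and only
	 * the non-coherent remapping cases go uncached.
	 */
	return dma_mmap_coherent(fd->dev, vma, fd->cpu_addr, fd->dma_handle,
				 vma->vm_end - vma->vm_start);
}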
Note that this means arch_dma_mmap_pgprot is a bit misnamed now, but
we'll phase it out soon.
Fixes: 64ccc9c033c6 ("common: dma-mapping: add support for generic dma_mmap_* calls")
Reported-by: Shawn Anastasio <shawn@anastas.io>
Reported-by: Gavin Li <git@thegavinli.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Catalin Marinas <catalin.marinas@arm.com> # arm64
Diffstat (limited to 'kernel/dma/mapping.c')
 kernel/dma/mapping.c | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index b945239621d8..b0038ca3aa92 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -150,6 +150,23 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
 }
 EXPORT_SYMBOL(dma_get_sgtable_attrs);
 
+#ifdef CONFIG_MMU
+/*
+ * Return the page attributes used for mapping dma_alloc_* memory, either in
+ * kernel space if remapping is needed, or to userspace through dma_mmap_*.
+ */
+pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
+{
+	if (dev_is_dma_coherent(dev) ||
+	    (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
+	     (attrs & DMA_ATTR_NON_CONSISTENT)))
+		return prot;
+	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_MMAP_PGPROT))
+		return arch_dma_mmap_pgprot(dev, prot, attrs);
+	return pgprot_noncached(prot);
+}
+#endif /* CONFIG_MMU */
+
 /*
  * Create userspace mapping for the DMA-coherent memory.
  */
@@ -164,7 +181,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	unsigned long pfn;
 	int ret = -ENXIO;
 
-	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
+	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
 
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
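With coherent devices filtered out in dma_pgprot(), an arch override only
has to pick attributes for the genuine remapping cases. A simplified,
purely illustrative shape (not any particular architecture's actual code):

/* Illustrative only: what an arch hook can reduce to now that
 * dma_pgprot() returns early for coherent devices. */
pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
	return pgprot_noncached(prot);
}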