author    Christoph Hellwig <hch@lst.de>    2019-07-26 03:26:40 -0400
committer Christoph Hellwig <hch@lst.de>    2019-08-10 13:52:45 -0400
commit    33dcb37cef741294b481f4d889a465b8091f11bf (patch)
tree      129f9cde5b52cd77e04382c99ff0421aa862354b
parent    d8ad55538abe443919e20e0bb996561bca9cad84 (diff)
dma-mapping: fix page attributes for dma_mmap_*
All the way back to the introduction of dma_common_mmap we have defaulted to
marking the pages as uncached.  But this is wrong for DMA coherent devices.
Later on DMA_ATTR_WRITE_COMBINE also got incorrect treatment, as that flag is
only treated specially on the alloc side for non-coherent devices.

Introduce a new dma_pgprot helper that deals with the check for coherent
devices, so that only the remapping cases ever reach arch_dma_mmap_pgprot and
no aliasing of page attributes can happen.  This makes the powerpc version of
arch_dma_mmap_pgprot obsolete and simplifies the remaining ones.

Note that this means arch_dma_mmap_pgprot is a bit misnamed now, but we'll
phase it out soon.

Fixes: 64ccc9c033c6 ("common: dma-mapping: add support for generic dma_mmap_* calls")
Reported-by: Shawn Anastasio <shawn@anastas.io>
Reported-by: Gavin Li <git@thegavinli.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Catalin Marinas <catalin.marinas@arm.com> # arm64
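For context, the path this fixes is the one a driver takes when it hands a
dma_alloc_coherent() buffer to userspace with dma_mmap_coherent(): after this
patch the resulting vma->vm_page_prot is derived through dma_pgprot(), so a
dma-coherent device no longer gets its pages forced to uncached.  A minimal
sketch of such a driver mmap handler (the my_drv_* names and fields are
illustrative, not part of this patch):

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

/* Illustrative per-device state, not from this patch. */
struct my_drv_data {
	struct device	*dev;
	void		*cpu_addr;	/* from dma_alloc_coherent() */
	dma_addr_t	dma_handle;
	size_t		size;
};

static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_drv_data *priv = file->private_data;

	/*
	 * dma_mmap_coherent() ends up in dma_common_mmap() (or
	 * iommu_dma_mmap()), which now picks vma->vm_page_prot via
	 * dma_pgprot(): left unchanged for coherent devices, and made
	 * uncached/write-combined only for the non-coherent remapping cases.
	 */
	return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
				 priv->dma_handle, priv->size);
}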
-rw-r--r--  arch/arm/mm/dma-mapping.c          4
-rw-r--r--  arch/arm64/mm/dma-mapping.c        4
-rw-r--r--  arch/powerpc/Kconfig               1
-rw-r--r--  arch/powerpc/kernel/Makefile       3
-rw-r--r--  arch/powerpc/kernel/dma-common.c  17
-rw-r--r--  drivers/iommu/dma-iommu.c          6
-rw-r--r--  include/linux/dma-noncoherent.h   13
-rw-r--r--  kernel/dma/mapping.c              19
-rw-r--r--  kernel/dma/remap.c                 2
9 files changed, 34 insertions(+), 35 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 6774b03aa405..d42557ee69c2 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2405,9 +2405,7 @@ long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
 		unsigned long attrs)
 {
-	if (!dev_is_dma_coherent(dev))
-		return __get_dma_pgprot(attrs, prot);
-	return prot;
+	return __get_dma_pgprot(attrs, prot);
 }
 
 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 1d3f0b5a9940..bd2b039f43a6 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -14,9 +14,7 @@
 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
 		unsigned long attrs)
 {
-	if (!dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_WRITE_COMBINE))
-		return pgprot_writecombine(prot);
-	return prot;
+	return pgprot_writecombine(prot);
 }
 
 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 77f6ebf97113..d8dcd8820369 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -121,7 +121,6 @@ config PPC
 	select ARCH_32BIT_OFF_T if PPC32
 	select ARCH_HAS_DEBUG_VIRTUAL
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
-	select ARCH_HAS_DMA_MMAP_PGPROT
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_GCOV_PROFILE_ALL
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index ea0c69236789..56dfa7a2a6f2 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -49,8 +49,7 @@ obj-y := cputable.o ptrace.o syscalls.o \
 				   signal.o sysfs.o cacheinfo.o time.o \
 				   prom.o traps.o setup-common.o \
 				   udbg.o misc.o io.o misc_$(BITS).o \
-				   of_platform.o prom_parse.o \
-				   dma-common.o
+				   of_platform.o prom_parse.o
 obj-$(CONFIG_PPC64)		+= setup_64.o sys_ppc32.o \
 				   signal_64.o ptrace32.o \
 				   paca.o nvram_64.o firmware.o
diff --git a/arch/powerpc/kernel/dma-common.c b/arch/powerpc/kernel/dma-common.c
deleted file mode 100644
index dc7ef6b17b69..000000000000
--- a/arch/powerpc/kernel/dma-common.c
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Contains common dma routines for all powerpc platforms.
- *
- * Copyright (C) 2019 Shawn Anastasio.
- */
-
-#include <linux/mm.h>
-#include <linux/dma-noncoherent.h>
-
-pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
-		unsigned long attrs)
-{
-	if (!dev_is_dma_coherent(dev))
-		return pgprot_noncached(prot);
-	return prot;
-}
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index a7f9c3edbcb2..0015fe610b23 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -574,7 +574,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 	struct iova_domain *iovad = &cookie->iovad;
 	bool coherent = dev_is_dma_coherent(dev);
 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
-	pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+	pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
 	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
 	struct page **pages;
 	struct sg_table sgt;
@@ -975,7 +975,7 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
 		return NULL;
 
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
-		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
 
 		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
 				VM_USERMAP, prot, __builtin_return_address(0));
@@ -1035,7 +1035,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 	unsigned long pfn, off = vma->vm_pgoff;
 	int ret;
 
-	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
+	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
 
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index 3813211a9aad..0bff3d7fac92 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -42,13 +42,18 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs);
 long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
 		dma_addr_t dma_addr);
-
-#ifdef CONFIG_ARCH_HAS_DMA_MMAP_PGPROT
 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
 		unsigned long attrs);
+
+#ifdef CONFIG_MMU
+pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
 #else
-# define arch_dma_mmap_pgprot(dev, prot, attrs)	pgprot_noncached(prot)
-#endif
+static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
+		unsigned long attrs)
+{
+	return prot;	/* no protection bits supported without page tables */
+}
+#endif /* CONFIG_MMU */
 
 #ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
 void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index b945239621d8..b0038ca3aa92 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -150,6 +150,23 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
 }
 EXPORT_SYMBOL(dma_get_sgtable_attrs);
 
+#ifdef CONFIG_MMU
+/*
+ * Return the page attributes used for mapping dma_alloc_* memory, either in
+ * kernel space if remapping is needed, or to userspace through dma_mmap_*.
+ */
+pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
+{
+	if (dev_is_dma_coherent(dev) ||
+	    (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
+	     (attrs & DMA_ATTR_NON_CONSISTENT)))
+		return prot;
+	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_MMAP_PGPROT))
+		return arch_dma_mmap_pgprot(dev, prot, attrs);
+	return pgprot_noncached(prot);
+}
+#endif /* CONFIG_MMU */
+
 /*
  * Create userspace mapping for the DMA-coherent memory.
  */
@@ -164,7 +181,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	unsigned long pfn;
 	int ret = -ENXIO;
 
-	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
+	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
 
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
index a594aec07882..ffe78f0b2fe4 100644
--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c
@@ -218,7 +218,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 
 	/* create a coherent mapping */
 	ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
-			arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs),
+			dma_pgprot(dev, PAGE_KERNEL, attrs),
 			__builtin_return_address(0));
 	if (!ret) {
 		__dma_direct_free_pages(dev, size, page);