author		Christoph Hellwig <hch@lst.de>		2019-08-30 02:51:01 -0400
committer	Christoph Hellwig <hch@lst.de>		2019-09-04 05:13:20 -0400
commit		512317401f6a337e617ec284d20dec5fa3a951ec (patch)
tree		a835b3bccfb35b7c6498d7cede836bebb05b14af
parent		fe9041c245196c6c61091ccc2c74b73ab9a5fc50 (diff)
dma-mapping: always use VM_DMA_COHERENT for generic DMA remap
Currently the generic DMA remap allocator is passed a vm_flags value by
each caller, which is a little confusing. We just introduced a generic
vmalloc-level flag, VM_DMA_COHERENT, to identify DMA coherent
allocations, so use that flag everywhere and remove the now-pointless
argument.
Signed-off-by: Christoph Hellwig <hch@lst.de>
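
[Editor's note: to make the mechanical change below easier to scan, here is a condensed before/after sketch of one affected call site, taken from the atomic-pool setup in kernel/dma/remap.c in this patch. The only visible difference at each caller is the dropped vm_flags argument; the remap helpers now tag every mapping with VM_DMA_COHERENT themselves.]

	/* Before: each caller picked its own vmalloc flags (VM_USERMAP
	 * here; VM_MAP and VM_DMA_COHERENT at other call sites): */
	addr = dma_common_contiguous_remap(page, atomic_pool_size, VM_USERMAP,
					   pgprot_dmacoherent(PAGE_KERNEL),
					   __builtin_return_address(0));

	/* After: no flags argument; __dma_common_pages_remap() now always
	 * passes VM_DMA_COHERENT to get_vm_area_caller() internally: */
	addr = dma_common_contiguous_remap(page, atomic_pool_size,
					   pgprot_dmacoherent(PAGE_KERNEL),
					   __builtin_return_address(0));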
-rw-r--r--	arch/arm/mm/dma-mapping.c	10
-rw-r--r--	arch/xtensa/kernel/pci-dma.c	4
-rw-r--r--	drivers/iommu/dma-iommu.c	6
-rw-r--r--	include/linux/dma-mapping.h	6
-rw-r--r--	kernel/dma/remap.c	23
5 files changed, 21 insertions, 28 deletions
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index aec31f9b918b..fd02c982e36a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -343,13 +343,12 @@ static void *
 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
 	const void *caller)
 {
-	return dma_common_contiguous_remap(page, size, VM_DMA_COHERENT,
-			prot, caller);
+	return dma_common_contiguous_remap(page, size, prot, caller);
 }
 
 static void __dma_free_remap(void *cpu_addr, size_t size)
 {
-	dma_common_free_remap(cpu_addr, size, VM_DMA_COHERENT);
+	dma_common_free_remap(cpu_addr, size);
 }
 
 #define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
@@ -1365,8 +1364,7 @@ static void *
 __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
 	const void *caller)
 {
-	return dma_common_pages_remap(pages, size, VM_DMA_COHERENT, prot,
-			caller);
+	return dma_common_pages_remap(pages, size, prot, caller);
 }
 
 /*
@@ -1609,7 +1607,7 @@ void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	}
 
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
-		dma_common_free_remap(cpu_addr, size, VM_DMA_COHERENT);
+		dma_common_free_remap(cpu_addr, size);
 
 	__iommu_remove_mapping(dev, handle, size);
 	__iommu_free_buffer(dev, pages, size, attrs);
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 65f05776d827..154979d62b73 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -167,7 +167,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	if (PageHighMem(page)) {
 		void *p;
 
-		p = dma_common_contiguous_remap(page, size, VM_MAP,
+		p = dma_common_contiguous_remap(page, size,
 						pgprot_noncached(PAGE_KERNEL),
 						__builtin_return_address(0));
 		if (!p) {
@@ -192,7 +192,7 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 		page = virt_to_page(platform_vaddr_to_cached(vaddr));
 	} else {
 #ifdef CONFIG_MMU
-		dma_common_free_remap(vaddr, size, VM_MAP);
+		dma_common_free_remap(vaddr, size);
 #endif
 		page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
 	}
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index ef407e4eccde..949e341bf2f3 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -617,7 +617,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 			< size)
 		goto out_free_sg;
 
-	vaddr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
+	vaddr = dma_common_pages_remap(pages, size, prot,
 			__builtin_return_address(0));
 	if (!vaddr)
 		goto out_unmap;
@@ -941,7 +941,7 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
 		pages = __iommu_dma_get_pages(cpu_addr);
 		if (!pages)
 			page = vmalloc_to_page(cpu_addr);
-		dma_common_free_remap(cpu_addr, alloc_size, VM_USERMAP);
+		dma_common_free_remap(cpu_addr, alloc_size);
 	} else {
 		/* Lowmem means a coherent atomic or CMA allocation */
 		page = virt_to_page(cpu_addr);
@@ -979,7 +979,7 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
 		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
 
 		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
-				VM_USERMAP, prot, __builtin_return_address(0));
+				prot, __builtin_return_address(0));
 		if (!cpu_addr)
 			goto out_free_pages;
 
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 80063b0fdea8..86223bc24d82 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -627,13 +627,11 @@ extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 		unsigned long attrs);
 
 void *dma_common_contiguous_remap(struct page *page, size_t size,
-			unsigned long vm_flags,
 			pgprot_t prot, const void *caller);
 
 void *dma_common_pages_remap(struct page **pages, size_t size,
-			unsigned long vm_flags, pgprot_t prot,
-			const void *caller);
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
+			pgprot_t prot, const void *caller);
+void dma_common_free_remap(void *cpu_addr, size_t size);
 
 bool dma_in_atomic_pool(void *start, size_t size);
 void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
index 838123f79639..f6b90521a7e4 100644
--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c
@@ -12,12 +12,11 @@
 #include <linux/vmalloc.h>
 
 static struct vm_struct *__dma_common_pages_remap(struct page **pages,
-			size_t size, unsigned long vm_flags, pgprot_t prot,
-			const void *caller)
+			size_t size, pgprot_t prot, const void *caller)
 {
 	struct vm_struct *area;
 
-	area = get_vm_area_caller(size, vm_flags, caller);
+	area = get_vm_area_caller(size, VM_DMA_COHERENT, caller);
 	if (!area)
 		return NULL;
 
@@ -34,12 +33,11 @@ static struct vm_struct *__dma_common_pages_remap(struct page **pages,
  * Cannot be used in non-sleeping contexts
  */
 void *dma_common_pages_remap(struct page **pages, size_t size,
-			unsigned long vm_flags, pgprot_t prot,
-			const void *caller)
+			pgprot_t prot, const void *caller)
 {
 	struct vm_struct *area;
 
-	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
+	area = __dma_common_pages_remap(pages, size, prot, caller);
 	if (!area)
 		return NULL;
 
@@ -53,7 +51,6 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
 * Cannot be used in non-sleeping contexts
 */
 void *dma_common_contiguous_remap(struct page *page, size_t size,
-			unsigned long vm_flags,
 			pgprot_t prot, const void *caller)
 {
 	int i;
@@ -67,7 +64,7 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
 	for (i = 0; i < (size >> PAGE_SHIFT); i++)
 		pages[i] = nth_page(page, i);
 
-	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
+	area = __dma_common_pages_remap(pages, size, prot, caller);
 
 	kfree(pages);
 
@@ -79,11 +76,11 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
 /*
  * Unmaps a range previously mapped by dma_common_*_remap
  */
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
+void dma_common_free_remap(void *cpu_addr, size_t size)
 {
 	struct vm_struct *area = find_vm_area(cpu_addr);
 
-	if (!area || (area->flags & vm_flags) != vm_flags) {
+	if (!area || area->flags != VM_DMA_COHERENT) {
 		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
 		return;
 	}
@@ -136,7 +133,7 @@ static int __init dma_atomic_pool_init(void)
 	if (!atomic_pool)
 		goto free_page;
 
-	addr = dma_common_contiguous_remap(page, atomic_pool_size, VM_USERMAP,
+	addr = dma_common_contiguous_remap(page, atomic_pool_size,
 					   pgprot_dmacoherent(PAGE_KERNEL),
 					   __builtin_return_address(0));
 	if (!addr)
@@ -153,7 +150,7 @@ static int __init dma_atomic_pool_init(void)
 	return 0;
 
 remove_mapping:
-	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
+	dma_common_free_remap(addr, atomic_pool_size);
 destroy_genpool:
 	gen_pool_destroy(atomic_pool);
 	atomic_pool = NULL;
@@ -228,7 +225,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	arch_dma_prep_coherent(page, size);
 
 	/* create a coherent mapping */
-	ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
+	ret = dma_common_contiguous_remap(page, size,
 			dma_pgprot(dev, PAGE_KERNEL, attrs),
 			__builtin_return_address(0));
 	if (!ret) {
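
[Editor's note: one behavioral detail worth flagging in dma_common_free_remap() above. The old code accepted any vm_struct whose flags contained the caller-supplied vm_flags bits; the new code requires area->flags to equal VM_DMA_COHERENT exactly, so only areas created through __dma_common_pages_remap() now pass the sanity check. Condensed directly from the hunk above:]

	struct vm_struct *area = find_vm_area(cpu_addr);

	/* Old: subset test against whatever flags the caller passed in */
	if (!area || (area->flags & vm_flags) != vm_flags)
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);

	/* New: exact match against the single flag the allocator sets */
	if (!area || area->flags != VM_DMA_COHERENT)
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);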