author     Christoph Hellwig <hch@lst.de>    2018-04-16 09:24:51 -0400
committer  Christoph Hellwig <hch@lst.de>    2018-05-19 02:46:12 -0400
commit     782e6769c0df744e773dc2acff71c974b3bba4e9
tree       4b7f513da50a3bce1cca7a28949258642e9c8ab7
parent     35ddb69cd223eea5b1c68af753ed014208b6144e
dma-mapping: provide a generic dma-noncoherent implementation
Add a new dma_map_ops implementation that uses dma-direct for the
address mapping of streaming mappings, and which requires arch-specific
implementations of the coherent allocate/free routines.

Architectures have to provide flushing helpers for ownership transfers
to the device and/or CPU, and can provide optional implementations of
the coherent mmap functionality and of the cache_sync routine used for
non-coherent long-term allocations.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Alexey Brodkin <abrodkin@synopsys.com>
Acked-by: Vineet Gupta <vgupta@synopsys.com>
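
The arch-side contract described above boils down to a handful of hooks declared in the new include/linux/dma-noncoherent.h. As a rough illustration only (not part of this patch), a port selecting DMA_NONCOHERENT_OPS would supply something along these lines; my_cache_wb(), my_cache_inv(), my_alloc_uncached() and my_free_uncached() are hypothetical stand-ins for whatever cache-maintenance and uncached-mapping primitives the architecture actually has:

```c
#include <linux/dma-noncoherent.h>

/*
 * Hypothetical example: hand ownership of [paddr, paddr + size) to the
 * device by pushing dirty cache lines out to memory.  A real port would
 * usually look at 'dir' to decide between writeback and invalidate.
 */
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	my_cache_wb(paddr, size);
}

/*
 * Hypothetical example: take ownership back for the CPU by discarding
 * stale lines that may have been speculatively fetched during the DMA.
 */
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	my_cache_inv(paddr, size);
}

/* Coherent ("long term") allocations return an uncached CPU mapping. */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	return my_alloc_uncached(dev, size, dma_handle, gfp);
}

void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	my_free_uncached(dev, size, cpu_addr, dma_addr);
}
```

The corresponding Kconfig symbols (DMA_NONCOHERENT_OPS, ARCH_HAS_SYNC_DMA_FOR_DEVICE, ARCH_HAS_SYNC_DMA_FOR_CPU) are selected by the architecture, as added to lib/Kconfig below.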
 MAINTAINERS                        |   2
 include/asm-generic/dma-mapping.h  |   9
 include/linux/dma-direct.h         |   7
 include/linux/dma-mapping.h        |   1
 include/linux/dma-noncoherent.h    |  47
 lib/Kconfig                        |  20
 lib/Makefile                       |   1
 lib/dma-direct.c                   |   8
 lib/dma-noncoherent.c              | 102
 9 files changed, 192 insertions, 5 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 79bb02ff812f..08d0d15d4958 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4334,12 +4334,14 @@ W: http://git.infradead.org/users/hch/dma-mapping.git
 S: Supported
 F: lib/dma-debug.c
 F: lib/dma-direct.c
+F: lib/dma-noncoherent.c
 F: lib/dma-virt.c
 F: drivers/base/dma-mapping.c
 F: drivers/base/dma-coherent.c
 F: include/asm-generic/dma-mapping.h
 F: include/linux/dma-direct.h
 F: include/linux/dma-mapping.h
+F: include/linux/dma-noncoherent.h
 
 DME1737 HARDWARE MONITOR DRIVER
 M: Juerg Haefliger <juergh@gmail.com>
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
index 880a292d792f..ad2868263867 100644
--- a/include/asm-generic/dma-mapping.h
+++ b/include/asm-generic/dma-mapping.h
@@ -4,7 +4,16 @@
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
+        /*
+         * Use the non-coherent ops if available.  If an architecture wants a
+         * more fine-grained selection of operations it will have to implement
+         * get_arch_dma_ops itself or use the per-device dma_ops.
+         */
+#ifdef CONFIG_DMA_NONCOHERENT_OPS
+        return &dma_noncoherent_ops;
+#else
         return &dma_direct_ops;
+#endif
 }
 
 #endif /* _ASM_GENERIC_DMA_MAPPING_H */
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 53ad6a47f513..8d9f33febde5 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -59,6 +59,11 @@ void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                 gfp_t gfp, unsigned long attrs);
 void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
                 dma_addr_t dma_addr, unsigned long attrs);
+dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
+                unsigned long offset, size_t size, enum dma_data_direction dir,
+                unsigned long attrs);
+int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
+                enum dma_data_direction dir, unsigned long attrs);
 int dma_direct_supported(struct device *dev, u64 mask);
-
+int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr);
 #endif /* _LINUX_DMA_DIRECT_H */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 25a9a2b04f78..4be070df5fc5 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -136,6 +136,7 @@ struct dma_map_ops {
 };
 
 extern const struct dma_map_ops dma_direct_ops;
+extern const struct dma_map_ops dma_noncoherent_ops;
 extern const struct dma_map_ops dma_virt_ops;
 
 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
new file mode 100644
index 000000000000..10b2654d549b
--- /dev/null
+++ b/include/linux/dma-noncoherent.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_DMA_NONCOHERENT_H
+#define _LINUX_DMA_NONCOHERENT_H 1
+
+#include <linux/dma-mapping.h>
+
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+                gfp_t gfp, unsigned long attrs);
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+                dma_addr_t dma_addr, unsigned long attrs);
+
+#ifdef CONFIG_DMA_NONCOHERENT_MMAP
+int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+                void *cpu_addr, dma_addr_t dma_addr, size_t size,
+                unsigned long attrs);
+#else
+#define arch_dma_mmap NULL
+#endif /* CONFIG_DMA_NONCOHERENT_MMAP */
+
+#ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
+void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+                enum dma_data_direction direction);
+#else
+#define arch_dma_cache_sync NULL
+#endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+                size_t size, enum dma_data_direction dir);
+#else
+static inline void arch_sync_dma_for_device(struct device *dev,
+                phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+{
+}
+#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+                size_t size, enum dma_data_direction dir);
+#else
+static inline void arch_sync_dma_for_cpu(struct device *dev,
+                phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+{
+}
+#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
+
+#endif /* _LINUX_DMA_NONCOHERENT_H */
diff --git a/lib/Kconfig b/lib/Kconfig
index 6c4e9d0ce5d1..7a913937888b 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -441,10 +441,30 @@ config ARCH_DMA_ADDR_T_64BIT
 config IOMMU_HELPER
         bool
 
+config ARCH_HAS_SYNC_DMA_FOR_DEVICE
+        bool
+
+config ARCH_HAS_SYNC_DMA_FOR_CPU
+        bool
+        select NEED_DMA_MAP_STATE
+
 config DMA_DIRECT_OPS
         bool
         depends on HAS_DMA
 
+config DMA_NONCOHERENT_OPS
+        bool
+        depends on HAS_DMA
+        select DMA_DIRECT_OPS
+
+config DMA_NONCOHERENT_MMAP
+        bool
+        depends on DMA_NONCOHERENT_OPS
+
+config DMA_NONCOHERENT_CACHE_SYNC
+        bool
+        depends on DMA_NONCOHERENT_OPS
+
 config DMA_VIRT_OPS
         bool
         depends on HAS_DMA
diff --git a/lib/Makefile b/lib/Makefile
index 94203b5eecd4..9f18c8152281 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -30,6 +30,7 @@ lib-$(CONFIG_PRINTK) += dump_stack.o
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
 lib-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o
+lib-$(CONFIG_DMA_NONCOHERENT_OPS) += dma-noncoherent.o
 lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o
 
 lib-y += kobject.o klist.o
diff --git a/lib/dma-direct.c b/lib/dma-direct.c
index df9e726e0712..b824eb218782 100644
--- a/lib/dma-direct.c
+++ b/lib/dma-direct.c
@@ -128,7 +128,7 @@ void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
                 free_pages((unsigned long)cpu_addr, page_order);
 }
 
-static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
+dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
                 unsigned long offset, size_t size, enum dma_data_direction dir,
                 unsigned long attrs)
 {
@@ -139,8 +139,8 @@ static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
         return dma_addr;
 }
 
-static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
-                int nents, enum dma_data_direction dir, unsigned long attrs)
+int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
+                enum dma_data_direction dir, unsigned long attrs)
 {
         int i;
         struct scatterlist *sg;
@@ -175,7 +175,7 @@ int dma_direct_supported(struct device *dev, u64 mask)
         return 1;
 }
 
-static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
+int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
         return dma_addr == DIRECT_MAPPING_ERROR;
 }
diff --git a/lib/dma-noncoherent.c b/lib/dma-noncoherent.c
new file mode 100644
index 000000000000..79e9a757387f
--- /dev/null
+++ b/lib/dma-noncoherent.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Christoph Hellwig.
+ *
+ * DMA operations that map physical memory directly without providing cache
+ * coherence.
+ */
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
+#include <linux/scatterlist.h>
+
+static void dma_noncoherent_sync_single_for_device(struct device *dev,
+                dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+        arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
+}
+
+static void dma_noncoherent_sync_sg_for_device(struct device *dev,
+                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+        struct scatterlist *sg;
+        int i;
+
+        for_each_sg(sgl, sg, nents, i)
+                arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+}
+
+static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page,
+                unsigned long offset, size_t size, enum dma_data_direction dir,
+                unsigned long attrs)
+{
+        dma_addr_t addr;
+
+        addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
+        if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+                arch_sync_dma_for_device(dev, page_to_phys(page) + offset,
+                                size, dir);
+        return addr;
+}
+
+static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl,
+                int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+        nents = dma_direct_map_sg(dev, sgl, nents, dir, attrs);
+        if (nents > 0 && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+                dma_noncoherent_sync_sg_for_device(dev, sgl, nents, dir);
+        return nents;
+}
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+static void dma_noncoherent_sync_single_for_cpu(struct device *dev,
+                dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+        arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
+}
+
+static void dma_noncoherent_sync_sg_for_cpu(struct device *dev,
+                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+        struct scatterlist *sg;
+        int i;
+
+        for_each_sg(sgl, sg, nents, i)
+                arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+}
+
+static void dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr,
+                size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+                dma_noncoherent_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static void dma_noncoherent_unmap_sg(struct device *dev, struct scatterlist *sgl,
+                int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+                dma_noncoherent_sync_sg_for_cpu(dev, sgl, nents, dir);
+}
+#endif
+
+const struct dma_map_ops dma_noncoherent_ops = {
+        .alloc = arch_dma_alloc,
+        .free = arch_dma_free,
+        .mmap = arch_dma_mmap,
+        .sync_single_for_device = dma_noncoherent_sync_single_for_device,
+        .sync_sg_for_device = dma_noncoherent_sync_sg_for_device,
+        .map_page = dma_noncoherent_map_page,
+        .map_sg = dma_noncoherent_map_sg,
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+        .sync_single_for_cpu = dma_noncoherent_sync_single_for_cpu,
+        .sync_sg_for_cpu = dma_noncoherent_sync_sg_for_cpu,
+        .unmap_page = dma_noncoherent_unmap_page,
+        .unmap_sg = dma_noncoherent_unmap_sg,
+#endif
+        .dma_supported = dma_direct_supported,
+        .mapping_error = dma_direct_mapping_error,
+        .cache_sync = arch_dma_cache_sync,
+};
+EXPORT_SYMBOL(dma_noncoherent_ops);
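
For context on how these ops are exercised, here is a hedged driver-side sketch of the standard streaming-mapping sequence (dev, buf and len are placeholders supplied by the caller). With dma_noncoherent_ops installed, dma_map_single() ends up in dma_noncoherent_map_page() and therefore calls arch_sync_dma_for_device(); dma_unmap_single() ends up in dma_noncoherent_unmap_page() and, on architectures that select ARCH_HAS_SYNC_DMA_FOR_CPU, calls arch_sync_dma_for_cpu():

```c
#include <linux/dma-mapping.h>

/* Illustration only: dev, buf and len come from the calling driver. */
static int example_stream_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr;

	/* Ownership moves to the device; caches are made consistent here. */
	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... program the device with 'addr' and wait for it to finish ... */

	/* Ownership moves back to the CPU before the buffer is touched again. */
	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
	return 0;
}
```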
