author    Christoph Hellwig <hch@lst.de>  2018-04-16 09:24:51 -0400
committer Christoph Hellwig <hch@lst.de>  2018-05-19 02:46:12 -0400
commit    782e6769c0df744e773dc2acff71c974b3bba4e9 (patch)
tree      4b7f513da50a3bce1cca7a28949258642e9c8ab7 /lib
parent    35ddb69cd223eea5b1c68af753ed014208b6144e (diff)
dma-mapping: provide a generic dma-noncoherent implementation
Add a new dma_map_ops implementation that uses dma-direct for the address mapping of streaming mappings, and which requires arch-specific implementations of coherent allocate/free.

Architectures have to provide flushing helpers for ownership transfers to the device and/or CPU, and can provide optional implementations of the coherent mmap functionality and the cache_sync routines for non-coherent long-term allocations.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Alexey Brodkin <abrodkin@synopsys.com>
Acked-by: Vineet Gupta <vgupta@synopsys.com>
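For orientation, a sketch (not part of this patch) of the arch-side contract: the two sync hooks below are the ones declared in <linux/dma-noncoherent.h> and called by the new ops; my_cache_wback()/my_cache_inv() are hypothetical stand-ins for whatever cache-maintenance primitives the CPU actually provides. arch_dma_alloc()/arch_dma_free() must be implemented as well, and arch_dma_mmap()/arch_dma_cache_sync() only when DMA_NONCOHERENT_MMAP/DMA_NONCOHERENT_CACHE_SYNC are selected.

#include <linux/dma-noncoherent.h>

/* Hypothetical CPU cache-maintenance primitives; every arch has its own. */
extern void my_cache_wback(phys_addr_t start, size_t size);
extern void my_cache_inv(phys_addr_t start, size_t size);

/* Ownership moves to the device: make memory hold what the CPU wrote. */
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	if (dir != DMA_FROM_DEVICE)
		my_cache_wback(paddr, size);	/* push dirty lines to memory */
	if (dir != DMA_TO_DEVICE)
		my_cache_inv(paddr, size);	/* drop lines the device will overwrite */
}

/* Ownership returns to the CPU: discard lines that went stale under DMA. */
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE)
		my_cache_inv(paddr, size);
}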
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig            20
-rw-r--r--  lib/Makefile            1
-rw-r--r--  lib/dma-direct.c        8
-rw-r--r--  lib/dma-noncoherent.c 102
4 files changed, 127 insertions(+), 4 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 6c4e9d0ce5d1..7a913937888b 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -441,10 +441,30 @@ config ARCH_DMA_ADDR_T_64BIT
 config IOMMU_HELPER
 	bool
 
+config ARCH_HAS_SYNC_DMA_FOR_DEVICE
+	bool
+
+config ARCH_HAS_SYNC_DMA_FOR_CPU
+	bool
+	select NEED_DMA_MAP_STATE
+
 config DMA_DIRECT_OPS
 	bool
 	depends on HAS_DMA
 
+config DMA_NONCOHERENT_OPS
+	bool
+	depends on HAS_DMA
+	select DMA_DIRECT_OPS
+
+config DMA_NONCOHERENT_MMAP
+	bool
+	depends on DMA_NONCOHERENT_OPS
+
+config DMA_NONCOHERENT_CACHE_SYNC
+	bool
+	depends on DMA_NONCOHERENT_OPS
+
 config DMA_VIRT_OPS
 	bool
 	depends on HAS_DMA
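As a usage note (illustrative, not part of this patch), an architecture opts in by selecting the new symbols from its own Kconfig, declaring which sync hooks it actually implements; MY_ARCH is a made-up placeholder:

# Hypothetical arch Kconfig fragment -- MY_ARCH is not a real symbol.
config MY_ARCH
	bool
	select DMA_NONCOHERENT_OPS
	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
	select ARCH_HAS_SYNC_DMA_FOR_CPU

DMA_NONCOHERENT_OPS pulls in DMA_DIRECT_OPS via select, so the architecture does not need to select that separately.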
diff --git a/lib/Makefile b/lib/Makefile
index 94203b5eecd4..9f18c8152281 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -30,6 +30,7 @@ lib-$(CONFIG_PRINTK) += dump_stack.o
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
 lib-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o
+lib-$(CONFIG_DMA_NONCOHERENT_OPS) += dma-noncoherent.o
 lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o
 
 lib-y += kobject.o klist.o
diff --git a/lib/dma-direct.c b/lib/dma-direct.c
index df9e726e0712..b824eb218782 100644
--- a/lib/dma-direct.c
+++ b/lib/dma-direct.c
@@ -128,7 +128,7 @@ void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
 	free_pages((unsigned long)cpu_addr, page_order);
 }
 
-static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
+dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir,
 		unsigned long attrs)
 {
@@ -139,8 +139,8 @@ static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
 	return dma_addr;
 }
 
-static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
+int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
+		enum dma_data_direction dir, unsigned long attrs)
 {
 	int i;
 	struct scatterlist *sg;
@@ -175,7 +175,7 @@ int dma_direct_supported(struct device *dev, u64 mask)
 	return 1;
 }
 
-static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
+int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return dma_addr == DIRECT_MAPPING_ERROR;
 }
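The three helpers above lose their static qualifier so the new dma-noncoherent code can call them directly. The matching prototypes go into a header outside lib/ (presumably <linux/dma-direct.h>) and are therefore not visible in this 'lib'-limited diffstat; taken from the definitions above, they would read:

/* For orientation only; the header change itself is outside this diffstat. */
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);
int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr);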
diff --git a/lib/dma-noncoherent.c b/lib/dma-noncoherent.c
new file mode 100644
index 000000000000..79e9a757387f
--- /dev/null
+++ b/lib/dma-noncoherent.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Christoph Hellwig.
+ *
+ * DMA operations that map physical memory directly without providing cache
+ * coherence.
+ */
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
+#include <linux/scatterlist.h>
+
+static void dma_noncoherent_sync_single_for_device(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+	arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
+}
+
+static void dma_noncoherent_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgl, sg, nents, i)
+		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+}
+
+static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	dma_addr_t addr;
+
+	addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
+	if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		arch_sync_dma_for_device(dev, page_to_phys(page) + offset,
+				size, dir);
+	return addr;
+}
+
+static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+	nents = dma_direct_map_sg(dev, sgl, nents, dir, attrs);
+	if (nents > 0 && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_noncoherent_sync_sg_for_device(dev, sgl, nents, dir);
+	return nents;
+}
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+static void dma_noncoherent_sync_single_for_cpu(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+	arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
+}
+
+static void dma_noncoherent_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgl, sg, nents, i)
+		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+}
+
+static void dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_noncoherent_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static void dma_noncoherent_unmap_sg(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_noncoherent_sync_sg_for_cpu(dev, sgl, nents, dir);
+}
+#endif
+
+const struct dma_map_ops dma_noncoherent_ops = {
+	.alloc = arch_dma_alloc,
+	.free = arch_dma_free,
+	.mmap = arch_dma_mmap,
+	.sync_single_for_device = dma_noncoherent_sync_single_for_device,
+	.sync_sg_for_device = dma_noncoherent_sync_sg_for_device,
+	.map_page = dma_noncoherent_map_page,
+	.map_sg = dma_noncoherent_map_sg,
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+	.sync_single_for_cpu = dma_noncoherent_sync_single_for_cpu,
+	.sync_sg_for_cpu = dma_noncoherent_sync_sg_for_cpu,
+	.unmap_page = dma_noncoherent_unmap_page,
+	.unmap_sg = dma_noncoherent_unmap_sg,
+#endif
+	.dma_supported = dma_direct_supported,
+	.mapping_error = dma_direct_mapping_error,
+	.cache_sync = arch_dma_cache_sync,
+};
+EXPORT_SYMBOL(dma_noncoherent_ops);
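From a driver's point of view nothing changes: the regular streaming DMA API dispatches into these ops on architectures that use dma_noncoherent_ops. A minimal sketch of the resulting ownership handshake (my_driver_rx, dev, and buf are made up for illustration):

#include <linux/dma-mapping.h>

/* Illustrative only: receive into a streaming mapping. On a
 * dma_noncoherent_ops architecture, dma_map_single() performs cache
 * maintenance via arch_sync_dma_for_device(), and dma_unmap_single()
 * invalidates again via arch_sync_dma_for_cpu() before the CPU reads. */
static int my_driver_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... device DMAs into buf; the CPU must not touch it here ... */

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;	/* CPU may now safely read buf */
}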