Diffstat (limited to 'lib/dma-direct.c')
-rw-r--r--	lib/dma-direct.c	204
1 file changed, 0 insertions, 204 deletions
diff --git a/lib/dma-direct.c b/lib/dma-direct.c
deleted file mode 100644
index 8be8106270c2..000000000000
--- a/lib/dma-direct.c
+++ /dev/null
@@ -1,204 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DMA operations that map physical memory directly without using an IOMMU or
- * flushing caches.
- */
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-direct.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-contiguous.h>
-#include <linux/pfn.h>
-#include <linux/set_memory.h>
-
-#define DIRECT_MAPPING_ERROR		0
-
-/*
- * Most architectures use ZONE_DMA for the first 16 Megabytes, but
- * some use it for entirely different regions:
- */
-#ifndef ARCH_ZONE_DMA_BITS
-#define ARCH_ZONE_DMA_BITS 24
-#endif
-
-/*
- * For AMD SEV all DMA must be to unencrypted addresses.
- */
-static inline bool force_dma_unencrypted(void)
-{
-	return sev_active();
-}
-
-static bool
-check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
-		const char *caller)
-{
-	if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
-		if (!dev->dma_mask) {
-			dev_err(dev,
-				"%s: call on device without dma_mask\n",
-				caller);
-			return false;
-		}
-
-		if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
-			dev_err(dev,
-				"%s: overflow %pad+%zu of device mask %llx\n",
-				caller, &dma_addr, size, *dev->dma_mask);
-		}
-		return false;
-	}
-	return true;
-}
-
-static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
-{
-	dma_addr_t addr = force_dma_unencrypted() ?
-		__phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
-	return addr + size - 1 <= dev->coherent_dma_mask;
-}
-
-void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		gfp_t gfp, unsigned long attrs)
-{
-	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	int page_order = get_order(size);
-	struct page *page = NULL;
-	void *ret;
-
-	/* we always manually zero the memory once we are done: */
-	gfp &= ~__GFP_ZERO;
-
-	/* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
-	if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
-		gfp |= GFP_DMA;
-	if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
-		gfp |= GFP_DMA32;
-
-again:
-	/* CMA can be used only in the context which permits sleeping */
-	if (gfpflags_allow_blocking(gfp)) {
-		page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
-		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-			dma_release_from_contiguous(dev, page, count);
-			page = NULL;
-		}
-	}
-	if (!page)
-		page = alloc_pages_node(dev_to_node(dev), gfp, page_order);
-
-	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-		__free_pages(page, page_order);
-		page = NULL;
-
-		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
-		    dev->coherent_dma_mask < DMA_BIT_MASK(64) &&
-		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
-			gfp |= GFP_DMA32;
-			goto again;
-		}
-
-		if (IS_ENABLED(CONFIG_ZONE_DMA) &&
-		    dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
-		    !(gfp & GFP_DMA)) {
-			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
-			goto again;
-		}
-	}
-
-	if (!page)
-		return NULL;
-	ret = page_address(page);
-	if (force_dma_unencrypted()) {
-		set_memory_decrypted((unsigned long)ret, 1 << page_order);
-		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
-	} else {
-		*dma_handle = phys_to_dma(dev, page_to_phys(page));
-	}
-	memset(ret, 0, size);
-	return ret;
-}
-
-/*
- * NOTE: this function must never look at the dma_addr argument, because we want
- * to be able to use it as a helper for iommu implementations as well.
- */
-void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t dma_addr, unsigned long attrs)
-{
-	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned int page_order = get_order(size);
-
-	if (force_dma_unencrypted())
-		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
-	if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
-		free_pages((unsigned long)cpu_addr, page_order);
-}
-
-dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;
-
-	if (!check_addr(dev, dma_addr, size, __func__))
-		return DIRECT_MAPPING_ERROR;
-	return dma_addr;
-}
-
-int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
-		enum dma_data_direction dir, unsigned long attrs)
-{
-	int i;
-	struct scatterlist *sg;
-
-	for_each_sg(sgl, sg, nents, i) {
-		BUG_ON(!sg_page(sg));
-
-		sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
-		if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
-			return 0;
-		sg_dma_len(sg) = sg->length;
-	}
-
-	return nents;
-}
-
-int dma_direct_supported(struct device *dev, u64 mask)
-{
-#ifdef CONFIG_ZONE_DMA
-	if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
-		return 0;
-#else
-	/*
-	 * Because 32-bit DMA masks are so common we expect every architecture
-	 * to be able to satisfy them - either by not supporting more physical
-	 * memory, or by providing a ZONE_DMA32. If neither is the case, the
-	 * architecture needs to use an IOMMU instead of the direct mapping.
-	 */
-	if (mask < DMA_BIT_MASK(32))
-		return 0;
-#endif
-	/*
-	 * Various PCI/PCIe bridges have broken support for > 32bit DMA even
-	 * if the device itself might support it.
-	 */
-	if (dev->dma_32bit_limit && mask > DMA_BIT_MASK(32))
-		return 0;
-	return 1;
-}
-
-int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return dma_addr == DIRECT_MAPPING_ERROR;
-}
-
-const struct dma_map_ops dma_direct_ops = {
-	.alloc			= dma_direct_alloc,
-	.free			= dma_direct_free,
-	.map_page		= dma_direct_map_page,
-	.map_sg			= dma_direct_map_sg,
-	.dma_supported		= dma_direct_supported,
-	.mapping_error		= dma_direct_mapping_error,
-};
-EXPORT_SYMBOL(dma_direct_ops);
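
For context, a minimal sketch (not part of the commit above, and hedged as an assumption about the pre-removal interfaces) of how an architecture's <asm/dma-mapping.h> could have handed the exported dma_direct_ops table to the DMA mapping core via the then-current get_arch_dma_ops() hook:

/*
 * Hypothetical <asm/dma-mapping.h> excerpt; illustrates consuming the
 * exported dma_direct_ops table, not code taken from this diff.
 * Assumes dma_direct_ops is declared through <linux/dma-mapping.h>.
 */
#include <linux/dma-mapping.h>

static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
	/* no IOMMU and no cache maintenance needed: use the direct mapping */
	return &dma_direct_ops;
}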