diff options
author | Christoph Hellwig <hch@lst.de> | 2018-06-12 13:01:45 -0400 |
---|---|---|
committer | Christoph Hellwig <hch@lst.de> | 2018-06-14 02:50:37 -0400 |
commit | cf65a0f6f6ff7631ba0ac0513a14ca5b65320d80 (patch) | |
tree | a81edcdf00e5a6e99fc2064fbcd9de4f33a4684f /drivers | |
parent | e37460c1ca08cf9d3b82eb3b6f205888d8d01182 (diff) |
dma-mapping: move all DMA mapping code to kernel/dma
Currently the code is split over various files with dma- prefixes in the
lib/ and drivers/base directories, and the number of files keeps growing.
Move them into a single directory to keep the code together and remove
the file name prefixes. To match the irq infrastructure this directory
is placed under the kernel/ directory.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/base/Makefile | 3 | ||||
-rw-r--r-- | drivers/base/dma-coherent.c | 434 | ||||
-rw-r--r-- | drivers/base/dma-contiguous.c | 278 | ||||
-rw-r--r-- | drivers/base/dma-mapping.c | 345 |
4 files changed, 0 insertions, 1060 deletions
diff --git a/drivers/base/Makefile b/drivers/base/Makefile index b074f242a435..704f44295810 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile | |||
@@ -8,10 +8,7 @@ obj-y := component.o core.o bus.o dd.o syscore.o \ | |||
8 | topology.o container.o property.o cacheinfo.o \ | 8 | topology.o container.o property.o cacheinfo.o \ |
9 | devcon.o | 9 | devcon.o |
10 | obj-$(CONFIG_DEVTMPFS) += devtmpfs.o | 10 | obj-$(CONFIG_DEVTMPFS) += devtmpfs.o |
11 | obj-$(CONFIG_DMA_CMA) += dma-contiguous.o | ||
12 | obj-y += power/ | 11 | obj-y += power/ |
13 | obj-$(CONFIG_HAS_DMA) += dma-mapping.o | ||
14 | obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o | ||
15 | obj-$(CONFIG_ISA_BUS_API) += isa.o | 12 | obj-$(CONFIG_ISA_BUS_API) += isa.o |
16 | obj-y += firmware_loader/ | 13 | obj-y += firmware_loader/ |
17 | obj-$(CONFIG_NUMA) += node.o | 14 | obj-$(CONFIG_NUMA) += node.o |
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c deleted file mode 100644 index 597d40893862..000000000000 --- a/drivers/base/dma-coherent.c +++ /dev/null | |||
@@ -1,434 +0,0 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Coherent per-device memory handling. | ||
4 | * Borrowed from i386 | ||
5 | */ | ||
6 | #include <linux/io.h> | ||
7 | #include <linux/slab.h> | ||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/dma-mapping.h> | ||
11 | |||
12 | struct dma_coherent_mem { | ||
13 | void *virt_base; | ||
14 | dma_addr_t device_base; | ||
15 | unsigned long pfn_base; | ||
16 | int size; | ||
17 | int flags; | ||
18 | unsigned long *bitmap; | ||
19 | spinlock_t spinlock; | ||
20 | bool use_dev_dma_pfn_offset; | ||
21 | }; | ||
22 | |||
23 | static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init; | ||
24 | |||
25 | static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev) | ||
26 | { | ||
27 | if (dev && dev->dma_mem) | ||
28 | return dev->dma_mem; | ||
29 | return NULL; | ||
30 | } | ||
31 | |||
32 | static inline dma_addr_t dma_get_device_base(struct device *dev, | ||
33 | struct dma_coherent_mem * mem) | ||
34 | { | ||
35 | if (mem->use_dev_dma_pfn_offset) | ||
36 | return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT; | ||
37 | else | ||
38 | return mem->device_base; | ||
39 | } | ||
40 | |||
41 | static int dma_init_coherent_memory( | ||
42 | phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags, | ||
43 | struct dma_coherent_mem **mem) | ||
44 | { | ||
45 | struct dma_coherent_mem *dma_mem = NULL; | ||
46 | void __iomem *mem_base = NULL; | ||
47 | int pages = size >> PAGE_SHIFT; | ||
48 | int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); | ||
49 | int ret; | ||
50 | |||
51 | if (!size) { | ||
52 | ret = -EINVAL; | ||
53 | goto out; | ||
54 | } | ||
55 | |||
56 | mem_base = memremap(phys_addr, size, MEMREMAP_WC); | ||
57 | if (!mem_base) { | ||
58 | ret = -EINVAL; | ||
59 | goto out; | ||
60 | } | ||
61 | dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); | ||
62 | if (!dma_mem) { | ||
63 | ret = -ENOMEM; | ||
64 | goto out; | ||
65 | } | ||
66 | dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); | ||
67 | if (!dma_mem->bitmap) { | ||
68 | ret = -ENOMEM; | ||
69 | goto out; | ||
70 | } | ||
71 | |||
72 | dma_mem->virt_base = mem_base; | ||
73 | dma_mem->device_base = device_addr; | ||
74 | dma_mem->pfn_base = PFN_DOWN(phys_addr); | ||
75 | dma_mem->size = pages; | ||
76 | dma_mem->flags = flags; | ||
77 | spin_lock_init(&dma_mem->spinlock); | ||
78 | |||
79 | *mem = dma_mem; | ||
80 | return 0; | ||
81 | |||
82 | out: | ||
83 | kfree(dma_mem); | ||
84 | if (mem_base) | ||
85 | memunmap(mem_base); | ||
86 | return ret; | ||
87 | } | ||
88 | |||
89 | static void dma_release_coherent_memory(struct dma_coherent_mem *mem) | ||
90 | { | ||
91 | if (!mem) | ||
92 | return; | ||
93 | |||
94 | memunmap(mem->virt_base); | ||
95 | kfree(mem->bitmap); | ||
96 | kfree(mem); | ||
97 | } | ||
98 | |||
99 | static int dma_assign_coherent_memory(struct device *dev, | ||
100 | struct dma_coherent_mem *mem) | ||
101 | { | ||
102 | if (!dev) | ||
103 | return -ENODEV; | ||
104 | |||
105 | if (dev->dma_mem) | ||
106 | return -EBUSY; | ||
107 | |||
108 | dev->dma_mem = mem; | ||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, | ||
113 | dma_addr_t device_addr, size_t size, int flags) | ||
114 | { | ||
115 | struct dma_coherent_mem *mem; | ||
116 | int ret; | ||
117 | |||
118 | ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem); | ||
119 | if (ret) | ||
120 | return ret; | ||
121 | |||
122 | ret = dma_assign_coherent_memory(dev, mem); | ||
123 | if (ret) | ||
124 | dma_release_coherent_memory(mem); | ||
125 | return ret; | ||
126 | } | ||
127 | EXPORT_SYMBOL(dma_declare_coherent_memory); | ||
128 | |||
129 | void dma_release_declared_memory(struct device *dev) | ||
130 | { | ||
131 | struct dma_coherent_mem *mem = dev->dma_mem; | ||
132 | |||
133 | if (!mem) | ||
134 | return; | ||
135 | dma_release_coherent_memory(mem); | ||
136 | dev->dma_mem = NULL; | ||
137 | } | ||
138 | EXPORT_SYMBOL(dma_release_declared_memory); | ||
139 | |||
140 | void *dma_mark_declared_memory_occupied(struct device *dev, | ||
141 | dma_addr_t device_addr, size_t size) | ||
142 | { | ||
143 | struct dma_coherent_mem *mem = dev->dma_mem; | ||
144 | unsigned long flags; | ||
145 | int pos, err; | ||
146 | |||
147 | size += device_addr & ~PAGE_MASK; | ||
148 | |||
149 | if (!mem) | ||
150 | return ERR_PTR(-EINVAL); | ||
151 | |||
152 | spin_lock_irqsave(&mem->spinlock, flags); | ||
153 | pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem)); | ||
154 | err = bitmap_allocate_region(mem->bitmap, pos, get_order(size)); | ||
155 | spin_unlock_irqrestore(&mem->spinlock, flags); | ||
156 | |||
157 | if (err != 0) | ||
158 | return ERR_PTR(err); | ||
159 | return mem->virt_base + (pos << PAGE_SHIFT); | ||
160 | } | ||
161 | EXPORT_SYMBOL(dma_mark_declared_memory_occupied); | ||
162 | |||
163 | static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem, | ||
164 | ssize_t size, dma_addr_t *dma_handle) | ||
165 | { | ||
166 | int order = get_order(size); | ||
167 | unsigned long flags; | ||
168 | int pageno; | ||
169 | void *ret; | ||
170 | |||
171 | spin_lock_irqsave(&mem->spinlock, flags); | ||
172 | |||
173 | if (unlikely(size > (mem->size << PAGE_SHIFT))) | ||
174 | goto err; | ||
175 | |||
176 | pageno = bitmap_find_free_region(mem->bitmap, mem->size, order); | ||
177 | if (unlikely(pageno < 0)) | ||
178 | goto err; | ||
179 | |||
180 | /* | ||
181 | * Memory was found in the coherent area. | ||
182 | */ | ||
183 | *dma_handle = mem->device_base + (pageno << PAGE_SHIFT); | ||
184 | ret = mem->virt_base + (pageno << PAGE_SHIFT); | ||
185 | spin_unlock_irqrestore(&mem->spinlock, flags); | ||
186 | memset(ret, 0, size); | ||
187 | return ret; | ||
188 | err: | ||
189 | spin_unlock_irqrestore(&mem->spinlock, flags); | ||
190 | return NULL; | ||
191 | } | ||
192 | |||
193 | /** | ||
194 | * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool | ||
195 | * @dev: device from which we allocate memory | ||
196 | * @size: size of requested memory area | ||
197 | * @dma_handle: This will be filled with the correct dma handle | ||
198 | * @ret: This pointer will be filled with the virtual address | ||
199 | * to allocated area. | ||
200 | * | ||
201 | * This function should be only called from per-arch dma_alloc_coherent() | ||
202 | * to support allocation from per-device coherent memory pools. | ||
203 | * | ||
204 | * Returns 0 if dma_alloc_coherent should continue with allocating from | ||
205 | * generic memory areas, or !0 if dma_alloc_coherent should return @ret. | ||
206 | */ | ||
207 | int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size, | ||
208 | dma_addr_t *dma_handle, void **ret) | ||
209 | { | ||
210 | struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); | ||
211 | |||
212 | if (!mem) | ||
213 | return 0; | ||
214 | |||
215 | *ret = __dma_alloc_from_coherent(mem, size, dma_handle); | ||
216 | if (*ret) | ||
217 | return 1; | ||
218 | |||
219 | /* | ||
220 | * In the case where the allocation can not be satisfied from the | ||
221 | * per-device area, try to fall back to generic memory if the | ||
222 | * constraints allow it. | ||
223 | */ | ||
224 | return mem->flags & DMA_MEMORY_EXCLUSIVE; | ||
225 | } | ||
226 | EXPORT_SYMBOL(dma_alloc_from_dev_coherent); | ||
227 | |||
228 | void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle) | ||
229 | { | ||
230 | if (!dma_coherent_default_memory) | ||
231 | return NULL; | ||
232 | |||
233 | return __dma_alloc_from_coherent(dma_coherent_default_memory, size, | ||
234 | dma_handle); | ||
235 | } | ||
236 | |||
237 | static int __dma_release_from_coherent(struct dma_coherent_mem *mem, | ||
238 | int order, void *vaddr) | ||
239 | { | ||
240 | if (mem && vaddr >= mem->virt_base && vaddr < | ||
241 | (mem->virt_base + (mem->size << PAGE_SHIFT))) { | ||
242 | int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; | ||
243 | unsigned long flags; | ||
244 | |||
245 | spin_lock_irqsave(&mem->spinlock, flags); | ||
246 | bitmap_release_region(mem->bitmap, page, order); | ||
247 | spin_unlock_irqrestore(&mem->spinlock, flags); | ||
248 | return 1; | ||
249 | } | ||
250 | return 0; | ||
251 | } | ||
252 | |||
253 | /** | ||
254 | * dma_release_from_dev_coherent() - free memory to device coherent memory pool | ||
255 | * @dev: device from which the memory was allocated | ||
256 | * @order: the order of pages allocated | ||
257 | * @vaddr: virtual address of allocated pages | ||
258 | * | ||
259 | * This checks whether the memory was allocated from the per-device | ||
260 | * coherent memory pool and if so, releases that memory. | ||
261 | * | ||
262 | * Returns 1 if we correctly released the memory, or 0 if the caller should | ||
263 | * proceed with releasing memory from generic pools. | ||
264 | */ | ||
265 | int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr) | ||
266 | { | ||
267 | struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); | ||
268 | |||
269 | return __dma_release_from_coherent(mem, order, vaddr); | ||
270 | } | ||
271 | EXPORT_SYMBOL(dma_release_from_dev_coherent); | ||
272 | |||
273 | int dma_release_from_global_coherent(int order, void *vaddr) | ||
274 | { | ||
275 | if (!dma_coherent_default_memory) | ||
276 | return 0; | ||
277 | |||
278 | return __dma_release_from_coherent(dma_coherent_default_memory, order, | ||
279 | vaddr); | ||
280 | } | ||
281 | |||
282 | static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem, | ||
283 | struct vm_area_struct *vma, void *vaddr, size_t size, int *ret) | ||
284 | { | ||
285 | if (mem && vaddr >= mem->virt_base && vaddr + size <= | ||
286 | (mem->virt_base + (mem->size << PAGE_SHIFT))) { | ||
287 | unsigned long off = vma->vm_pgoff; | ||
288 | int start = (vaddr - mem->virt_base) >> PAGE_SHIFT; | ||
289 | int user_count = vma_pages(vma); | ||
290 | int count = PAGE_ALIGN(size) >> PAGE_SHIFT; | ||
291 | |||
292 | *ret = -ENXIO; | ||
293 | if (off < count && user_count <= count - off) { | ||
294 | unsigned long pfn = mem->pfn_base + start + off; | ||
295 | *ret = remap_pfn_range(vma, vma->vm_start, pfn, | ||
296 | user_count << PAGE_SHIFT, | ||
297 | vma->vm_page_prot); | ||
298 | } | ||
299 | return 1; | ||
300 | } | ||
301 | return 0; | ||
302 | } | ||
303 | |||
304 | /** | ||
305 | * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool | ||
306 | * @dev: device from which the memory was allocated | ||
307 | * @vma: vm_area for the userspace memory | ||
308 | * @vaddr: cpu address returned by dma_alloc_from_dev_coherent | ||
309 | * @size: size of the memory buffer allocated | ||
310 | * @ret: result from remap_pfn_range() | ||
311 | * | ||
312 | * This checks whether the memory was allocated from the per-device | ||
313 | * coherent memory pool and if so, maps that memory to the provided vma. | ||
314 | * | ||
315 | * Returns 1 if @vaddr belongs to the device coherent pool and the caller | ||
316 | * should return @ret, or 0 if they should proceed with mapping memory from | ||
317 | * generic areas. | ||
318 | */ | ||
319 | int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, | ||
320 | void *vaddr, size_t size, int *ret) | ||
321 | { | ||
322 | struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); | ||
323 | |||
324 | return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret); | ||
325 | } | ||
326 | EXPORT_SYMBOL(dma_mmap_from_dev_coherent); | ||
327 | |||
328 | int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr, | ||
329 | size_t size, int *ret) | ||
330 | { | ||
331 | if (!dma_coherent_default_memory) | ||
332 | return 0; | ||
333 | |||
334 | return __dma_mmap_from_coherent(dma_coherent_default_memory, vma, | ||
335 | vaddr, size, ret); | ||
336 | } | ||
337 | |||
338 | /* | ||
339 | * Support for reserved memory regions defined in device tree | ||
340 | */ | ||
341 | #ifdef CONFIG_OF_RESERVED_MEM | ||
342 | #include <linux/of.h> | ||
343 | #include <linux/of_fdt.h> | ||
344 | #include <linux/of_reserved_mem.h> | ||
345 | |||
346 | static struct reserved_mem *dma_reserved_default_memory __initdata; | ||
347 | |||
348 | static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev) | ||
349 | { | ||
350 | struct dma_coherent_mem *mem = rmem->priv; | ||
351 | int ret; | ||
352 | |||
353 | if (!mem) { | ||
354 | ret = dma_init_coherent_memory(rmem->base, rmem->base, | ||
355 | rmem->size, | ||
356 | DMA_MEMORY_EXCLUSIVE, &mem); | ||
357 | if (ret) { | ||
358 | pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n", | ||
359 | &rmem->base, (unsigned long)rmem->size / SZ_1M); | ||
360 | return ret; | ||
361 | } | ||
362 | } | ||
363 | mem->use_dev_dma_pfn_offset = true; | ||
364 | rmem->priv = mem; | ||
365 | dma_assign_coherent_memory(dev, mem); | ||
366 | return 0; | ||
367 | } | ||
368 | |||
369 | static void rmem_dma_device_release(struct reserved_mem *rmem, | ||
370 | struct device *dev) | ||
371 | { | ||
372 | if (dev) | ||
373 | dev->dma_mem = NULL; | ||
374 | } | ||
375 | |||
376 | static const struct reserved_mem_ops rmem_dma_ops = { | ||
377 | .device_init = rmem_dma_device_init, | ||
378 | .device_release = rmem_dma_device_release, | ||
379 | }; | ||
380 | |||
381 | static int __init rmem_dma_setup(struct reserved_mem *rmem) | ||
382 | { | ||
383 | unsigned long node = rmem->fdt_node; | ||
384 | |||
385 | if (of_get_flat_dt_prop(node, "reusable", NULL)) | ||
386 | return -EINVAL; | ||
387 | |||
388 | #ifdef CONFIG_ARM | ||
389 | if (!of_get_flat_dt_prop(node, "no-map", NULL)) { | ||
390 | pr_err("Reserved memory: regions without no-map are not yet supported\n"); | ||
391 | return -EINVAL; | ||
392 | } | ||
393 | |||
394 | if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) { | ||
395 | WARN(dma_reserved_default_memory, | ||
396 | "Reserved memory: region for default DMA coherent area is redefined\n"); | ||
397 | dma_reserved_default_memory = rmem; | ||
398 | } | ||
399 | #endif | ||
400 | |||
401 | rmem->ops = &rmem_dma_ops; | ||
402 | pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n", | ||
403 | &rmem->base, (unsigned long)rmem->size / SZ_1M); | ||
404 | return 0; | ||
405 | } | ||
406 | |||
407 | static int __init dma_init_reserved_memory(void) | ||
408 | { | ||
409 | const struct reserved_mem_ops *ops; | ||
410 | int ret; | ||
411 | |||
412 | if (!dma_reserved_default_memory) | ||
413 | return -ENOMEM; | ||
414 | |||
415 | ops = dma_reserved_default_memory->ops; | ||
416 | |||
417 | /* | ||
418 | * We rely on rmem_dma_device_init() does not propagate error of | ||
419 | * dma_assign_coherent_memory() for "NULL" device. | ||
420 | */ | ||
421 | ret = ops->device_init(dma_reserved_default_memory, NULL); | ||
422 | |||
423 | if (!ret) { | ||
424 | dma_coherent_default_memory = dma_reserved_default_memory->priv; | ||
425 | pr_info("DMA: default coherent area is set\n"); | ||
426 | } | ||
427 | |||
428 | return ret; | ||
429 | } | ||
430 | |||
431 | core_initcall(dma_init_reserved_memory); | ||
432 | |||
433 | RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup); | ||
434 | #endif | ||
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c deleted file mode 100644 index d987dcd1bd56..000000000000 --- a/drivers/base/dma-contiguous.c +++ /dev/null | |||
@@ -1,278 +0,0 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
2 | /* | ||
3 | * Contiguous Memory Allocator for DMA mapping framework | ||
4 | * Copyright (c) 2010-2011 by Samsung Electronics. | ||
5 | * Written by: | ||
6 | * Marek Szyprowski <m.szyprowski@samsung.com> | ||
7 | * Michal Nazarewicz <mina86@mina86.com> | ||
8 | */ | ||
9 | |||
10 | #define pr_fmt(fmt) "cma: " fmt | ||
11 | |||
12 | #ifdef CONFIG_CMA_DEBUG | ||
13 | #ifndef DEBUG | ||
14 | # define DEBUG | ||
15 | #endif | ||
16 | #endif | ||
17 | |||
18 | #include <asm/page.h> | ||
19 | #include <asm/dma-contiguous.h> | ||
20 | |||
21 | #include <linux/memblock.h> | ||
22 | #include <linux/err.h> | ||
23 | #include <linux/sizes.h> | ||
24 | #include <linux/dma-contiguous.h> | ||
25 | #include <linux/cma.h> | ||
26 | |||
27 | #ifdef CONFIG_CMA_SIZE_MBYTES | ||
28 | #define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES | ||
29 | #else | ||
30 | #define CMA_SIZE_MBYTES 0 | ||
31 | #endif | ||
32 | |||
33 | struct cma *dma_contiguous_default_area; | ||
34 | |||
35 | /* | ||
36 | * Default global CMA area size can be defined in kernel's .config. | ||
37 | * This is useful mainly for distro maintainers to create a kernel | ||
38 | * that works correctly for most supported systems. | ||
39 | * The size can be set in bytes or as a percentage of the total memory | ||
40 | * in the system. | ||
41 | * | ||
42 | * Users, who want to set the size of global CMA area for their system | ||
43 | * should use cma= kernel parameter. | ||
44 | */ | ||
45 | static const phys_addr_t size_bytes = (phys_addr_t)CMA_SIZE_MBYTES * SZ_1M; | ||
46 | static phys_addr_t size_cmdline = -1; | ||
47 | static phys_addr_t base_cmdline; | ||
48 | static phys_addr_t limit_cmdline; | ||
49 | |||
50 | static int __init early_cma(char *p) | ||
51 | { | ||
52 | pr_debug("%s(%s)\n", __func__, p); | ||
53 | size_cmdline = memparse(p, &p); | ||
54 | if (*p != '@') | ||
55 | return 0; | ||
56 | base_cmdline = memparse(p + 1, &p); | ||
57 | if (*p != '-') { | ||
58 | limit_cmdline = base_cmdline + size_cmdline; | ||
59 | return 0; | ||
60 | } | ||
61 | limit_cmdline = memparse(p + 1, &p); | ||
62 | |||
63 | return 0; | ||
64 | } | ||
65 | early_param("cma", early_cma); | ||
66 | |||
67 | #ifdef CONFIG_CMA_SIZE_PERCENTAGE | ||
68 | |||
69 | static phys_addr_t __init __maybe_unused cma_early_percent_memory(void) | ||
70 | { | ||
71 | struct memblock_region *reg; | ||
72 | unsigned long total_pages = 0; | ||
73 | |||
74 | /* | ||
75 | * We cannot use memblock_phys_mem_size() here, because | ||
76 | * memblock_analyze() has not been called yet. | ||
77 | */ | ||
78 | for_each_memblock(memory, reg) | ||
79 | total_pages += memblock_region_memory_end_pfn(reg) - | ||
80 | memblock_region_memory_base_pfn(reg); | ||
81 | |||
82 | return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT; | ||
83 | } | ||
84 | |||
85 | #else | ||
86 | |||
87 | static inline __maybe_unused phys_addr_t cma_early_percent_memory(void) | ||
88 | { | ||
89 | return 0; | ||
90 | } | ||
91 | |||
92 | #endif | ||
93 | |||
94 | /** | ||
95 | * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling | ||
96 | * @limit: End address of the reserved memory (optional, 0 for any). | ||
97 | * | ||
98 | * This function reserves memory from early allocator. It should be | ||
99 | * called by arch specific code once the early allocator (memblock or bootmem) | ||
100 | * has been activated and all other subsystems have already allocated/reserved | ||
101 | * memory. | ||
102 | */ | ||
103 | void __init dma_contiguous_reserve(phys_addr_t limit) | ||
104 | { | ||
105 | phys_addr_t selected_size = 0; | ||
106 | phys_addr_t selected_base = 0; | ||
107 | phys_addr_t selected_limit = limit; | ||
108 | bool fixed = false; | ||
109 | |||
110 | pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit); | ||
111 | |||
112 | if (size_cmdline != -1) { | ||
113 | selected_size = size_cmdline; | ||
114 | selected_base = base_cmdline; | ||
115 | selected_limit = min_not_zero(limit_cmdline, limit); | ||
116 | if (base_cmdline + size_cmdline == limit_cmdline) | ||
117 | fixed = true; | ||
118 | } else { | ||
119 | #ifdef CONFIG_CMA_SIZE_SEL_MBYTES | ||
120 | selected_size = size_bytes; | ||
121 | #elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE) | ||
122 | selected_size = cma_early_percent_memory(); | ||
123 | #elif defined(CONFIG_CMA_SIZE_SEL_MIN) | ||
124 | selected_size = min(size_bytes, cma_early_percent_memory()); | ||
125 | #elif defined(CONFIG_CMA_SIZE_SEL_MAX) | ||
126 | selected_size = max(size_bytes, cma_early_percent_memory()); | ||
127 | #endif | ||
128 | } | ||
129 | |||
130 | if (selected_size && !dma_contiguous_default_area) { | ||
131 | pr_debug("%s: reserving %ld MiB for global area\n", __func__, | ||
132 | (unsigned long)selected_size / SZ_1M); | ||
133 | |||
134 | dma_contiguous_reserve_area(selected_size, selected_base, | ||
135 | selected_limit, | ||
136 | &dma_contiguous_default_area, | ||
137 | fixed); | ||
138 | } | ||
139 | } | ||
140 | |||
141 | /** | ||
142 | * dma_contiguous_reserve_area() - reserve custom contiguous area | ||
143 | * @size: Size of the reserved area (in bytes), | ||
144 | * @base: Base address of the reserved area optional, use 0 for any | ||
145 | * @limit: End address of the reserved memory (optional, 0 for any). | ||
146 | * @res_cma: Pointer to store the created cma region. | ||
147 | * @fixed: hint about where to place the reserved area | ||
148 | * | ||
149 | * This function reserves memory from early allocator. It should be | ||
150 | * called by arch specific code once the early allocator (memblock or bootmem) | ||
151 | * has been activated and all other subsystems have already allocated/reserved | ||
152 | * memory. This function allows to create custom reserved areas for specific | ||
153 | * devices. | ||
154 | * | ||
155 | * If @fixed is true, reserve contiguous area at exactly @base. If false, | ||
156 | * reserve in range from @base to @limit. | ||
157 | */ | ||
158 | int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, | ||
159 | phys_addr_t limit, struct cma **res_cma, | ||
160 | bool fixed) | ||
161 | { | ||
162 | int ret; | ||
163 | |||
164 | ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, | ||
165 | "reserved", res_cma); | ||
166 | if (ret) | ||
167 | return ret; | ||
168 | |||
169 | /* Architecture specific contiguous memory fixup. */ | ||
170 | dma_contiguous_early_fixup(cma_get_base(*res_cma), | ||
171 | cma_get_size(*res_cma)); | ||
172 | |||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | /** | ||
177 | * dma_alloc_from_contiguous() - allocate pages from contiguous area | ||
178 | * @dev: Pointer to device for which the allocation is performed. | ||
179 | * @count: Requested number of pages. | ||
180 | * @align: Requested alignment of pages (in PAGE_SIZE order). | ||
181 | * @gfp_mask: GFP flags to use for this allocation. | ||
182 | * | ||
183 | * This function allocates memory buffer for specified device. It uses | ||
184 | * device specific contiguous memory area if available or the default | ||
185 | * global one. Requires architecture specific dev_get_cma_area() helper | ||
186 | * function. | ||
187 | */ | ||
188 | struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, | ||
189 | unsigned int align, gfp_t gfp_mask) | ||
190 | { | ||
191 | if (align > CONFIG_CMA_ALIGNMENT) | ||
192 | align = CONFIG_CMA_ALIGNMENT; | ||
193 | |||
194 | return cma_alloc(dev_get_cma_area(dev), count, align, gfp_mask); | ||
195 | } | ||
196 | |||
197 | /** | ||
198 | * dma_release_from_contiguous() - release allocated pages | ||
199 | * @dev: Pointer to device for which the pages were allocated. | ||
200 | * @pages: Allocated pages. | ||
201 | * @count: Number of allocated pages. | ||
202 | * | ||
203 | * This function releases memory allocated by dma_alloc_from_contiguous(). | ||
204 | * It returns false when provided pages do not belong to contiguous area and | ||
205 | * true otherwise. | ||
206 | */ | ||
207 | bool dma_release_from_contiguous(struct device *dev, struct page *pages, | ||
208 | int count) | ||
209 | { | ||
210 | return cma_release(dev_get_cma_area(dev), pages, count); | ||
211 | } | ||
212 | |||
213 | /* | ||
214 | * Support for reserved memory regions defined in device tree | ||
215 | */ | ||
216 | #ifdef CONFIG_OF_RESERVED_MEM | ||
217 | #include <linux/of.h> | ||
218 | #include <linux/of_fdt.h> | ||
219 | #include <linux/of_reserved_mem.h> | ||
220 | |||
221 | #undef pr_fmt | ||
222 | #define pr_fmt(fmt) fmt | ||
223 | |||
224 | static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev) | ||
225 | { | ||
226 | dev_set_cma_area(dev, rmem->priv); | ||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | static void rmem_cma_device_release(struct reserved_mem *rmem, | ||
231 | struct device *dev) | ||
232 | { | ||
233 | dev_set_cma_area(dev, NULL); | ||
234 | } | ||
235 | |||
236 | static const struct reserved_mem_ops rmem_cma_ops = { | ||
237 | .device_init = rmem_cma_device_init, | ||
238 | .device_release = rmem_cma_device_release, | ||
239 | }; | ||
240 | |||
241 | static int __init rmem_cma_setup(struct reserved_mem *rmem) | ||
242 | { | ||
243 | phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order); | ||
244 | phys_addr_t mask = align - 1; | ||
245 | unsigned long node = rmem->fdt_node; | ||
246 | struct cma *cma; | ||
247 | int err; | ||
248 | |||
249 | if (!of_get_flat_dt_prop(node, "reusable", NULL) || | ||
250 | of_get_flat_dt_prop(node, "no-map", NULL)) | ||
251 | return -EINVAL; | ||
252 | |||
253 | if ((rmem->base & mask) || (rmem->size & mask)) { | ||
254 | pr_err("Reserved memory: incorrect alignment of CMA region\n"); | ||
255 | return -EINVAL; | ||
256 | } | ||
257 | |||
258 | err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma); | ||
259 | if (err) { | ||
260 | pr_err("Reserved memory: unable to setup CMA region\n"); | ||
261 | return err; | ||
262 | } | ||
263 | /* Architecture specific contiguous memory fixup. */ | ||
264 | dma_contiguous_early_fixup(rmem->base, rmem->size); | ||
265 | |||
266 | if (of_get_flat_dt_prop(node, "linux,cma-default", NULL)) | ||
267 | dma_contiguous_set_default(cma); | ||
268 | |||
269 | rmem->ops = &rmem_cma_ops; | ||
270 | rmem->priv = cma; | ||
271 | |||
272 | pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n", | ||
273 | &rmem->base, (unsigned long)rmem->size / SZ_1M); | ||
274 | |||
275 | return 0; | ||
276 | } | ||
277 | RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup); | ||
278 | #endif | ||
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c deleted file mode 100644 index f831a582209c..000000000000 --- a/drivers/base/dma-mapping.c +++ /dev/null | |||
@@ -1,345 +0,0 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * drivers/base/dma-mapping.c - arch-independent dma-mapping routines | ||
4 | * | ||
5 | * Copyright (c) 2006 SUSE Linux Products GmbH | ||
6 | * Copyright (c) 2006 Tejun Heo <teheo@suse.de> | ||
7 | */ | ||
8 | |||
9 | #include <linux/acpi.h> | ||
10 | #include <linux/dma-mapping.h> | ||
11 | #include <linux/export.h> | ||
12 | #include <linux/gfp.h> | ||
13 | #include <linux/of_device.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/vmalloc.h> | ||
16 | |||
/*
 * Managed DMA API
 */
/* Per-allocation devres record; released via dmam_release() on detach. */
struct dma_devres {
	size_t		size;		/* size passed to dma_alloc_*() */
	void		*vaddr;		/* CPU virtual address of the buffer */
	dma_addr_t	dma_handle;	/* device address of the buffer */
	unsigned long	attrs;		/* DMA_ATTR_* flags used at alloc time */
};
26 | |||
/* devres release callback: frees the managed DMA buffer on driver detach. */
static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	/* attrs is 0 for dmam_alloc_coherent() allocations (devres zeroes it). */
	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}
34 | |||
35 | static int dmam_match(struct device *dev, void *res, void *match_data) | ||
36 | { | ||
37 | struct dma_devres *this = res, *match = match_data; | ||
38 | |||
39 | if (this->vaddr == match->vaddr) { | ||
40 | WARN_ON(this->size != match->size || | ||
41 | this->dma_handle != match->dma_handle); | ||
42 | return 1; | ||
43 | } | ||
44 | return 0; | ||
45 | } | ||
46 | |||
47 | /** | ||
48 | * dmam_alloc_coherent - Managed dma_alloc_coherent() | ||
49 | * @dev: Device to allocate coherent memory for | ||
50 | * @size: Size of allocation | ||
51 | * @dma_handle: Out argument for allocated DMA handle | ||
52 | * @gfp: Allocation flags | ||
53 | * | ||
54 | * Managed dma_alloc_coherent(). Memory allocated using this function | ||
55 | * will be automatically released on driver detach. | ||
56 | * | ||
57 | * RETURNS: | ||
58 | * Pointer to allocated memory on success, NULL on failure. | ||
59 | */ | ||
60 | void *dmam_alloc_coherent(struct device *dev, size_t size, | ||
61 | dma_addr_t *dma_handle, gfp_t gfp) | ||
62 | { | ||
63 | struct dma_devres *dr; | ||
64 | void *vaddr; | ||
65 | |||
66 | dr = devres_alloc(dmam_release, sizeof(*dr), gfp); | ||
67 | if (!dr) | ||
68 | return NULL; | ||
69 | |||
70 | vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp); | ||
71 | if (!vaddr) { | ||
72 | devres_free(dr); | ||
73 | return NULL; | ||
74 | } | ||
75 | |||
76 | dr->vaddr = vaddr; | ||
77 | dr->dma_handle = *dma_handle; | ||
78 | dr->size = size; | ||
79 | |||
80 | devres_add(dev, dr); | ||
81 | |||
82 | return vaddr; | ||
83 | } | ||
84 | EXPORT_SYMBOL(dmam_alloc_coherent); | ||
85 | |||
86 | /** | ||
87 | * dmam_free_coherent - Managed dma_free_coherent() | ||
88 | * @dev: Device to free coherent memory for | ||
89 | * @size: Size of allocation | ||
90 | * @vaddr: Virtual address of the memory to free | ||
91 | * @dma_handle: DMA handle of the memory to free | ||
92 | * | ||
93 | * Managed dma_free_coherent(). | ||
94 | */ | ||
95 | void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, | ||
96 | dma_addr_t dma_handle) | ||
97 | { | ||
98 | struct dma_devres match_data = { size, vaddr, dma_handle }; | ||
99 | |||
100 | dma_free_coherent(dev, size, vaddr, dma_handle); | ||
101 | WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data)); | ||
102 | } | ||
103 | EXPORT_SYMBOL(dmam_free_coherent); | ||
104 | |||
105 | /** | ||
106 | * dmam_alloc_attrs - Managed dma_alloc_attrs() | ||
107 | * @dev: Device to allocate non_coherent memory for | ||
108 | * @size: Size of allocation | ||
109 | * @dma_handle: Out argument for allocated DMA handle | ||
110 | * @gfp: Allocation flags | ||
111 | * @attrs: Flags in the DMA_ATTR_* namespace. | ||
112 | * | ||
113 | * Managed dma_alloc_attrs(). Memory allocated using this function will be | ||
114 | * automatically released on driver detach. | ||
115 | * | ||
116 | * RETURNS: | ||
117 | * Pointer to allocated memory on success, NULL on failure. | ||
118 | */ | ||
119 | void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, | ||
120 | gfp_t gfp, unsigned long attrs) | ||
121 | { | ||
122 | struct dma_devres *dr; | ||
123 | void *vaddr; | ||
124 | |||
125 | dr = devres_alloc(dmam_release, sizeof(*dr), gfp); | ||
126 | if (!dr) | ||
127 | return NULL; | ||
128 | |||
129 | vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs); | ||
130 | if (!vaddr) { | ||
131 | devres_free(dr); | ||
132 | return NULL; | ||
133 | } | ||
134 | |||
135 | dr->vaddr = vaddr; | ||
136 | dr->dma_handle = *dma_handle; | ||
137 | dr->size = size; | ||
138 | dr->attrs = attrs; | ||
139 | |||
140 | devres_add(dev, dr); | ||
141 | |||
142 | return vaddr; | ||
143 | } | ||
144 | EXPORT_SYMBOL(dmam_alloc_attrs); | ||
145 | |||
146 | #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT | ||
147 | |||
/* devres release callback for dmam_declare_coherent_memory(). */
static void dmam_coherent_decl_release(struct device *dev, void *res)
{
	dma_release_declared_memory(dev);
}
152 | |||
153 | /** | ||
154 | * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory() | ||
155 | * @dev: Device to declare coherent memory for | ||
156 | * @phys_addr: Physical address of coherent memory to be declared | ||
157 | * @device_addr: Device address of coherent memory to be declared | ||
158 | * @size: Size of coherent memory to be declared | ||
159 | * @flags: Flags | ||
160 | * | ||
161 | * Managed dma_declare_coherent_memory(). | ||
162 | * | ||
163 | * RETURNS: | ||
164 | * 0 on success, -errno on failure. | ||
165 | */ | ||
166 | int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, | ||
167 | dma_addr_t device_addr, size_t size, int flags) | ||
168 | { | ||
169 | void *res; | ||
170 | int rc; | ||
171 | |||
172 | res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL); | ||
173 | if (!res) | ||
174 | return -ENOMEM; | ||
175 | |||
176 | rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size, | ||
177 | flags); | ||
178 | if (!rc) | ||
179 | devres_add(dev, res); | ||
180 | else | ||
181 | devres_free(res); | ||
182 | |||
183 | return rc; | ||
184 | } | ||
185 | EXPORT_SYMBOL(dmam_declare_coherent_memory); | ||
186 | |||
/**
 * dmam_release_declared_memory - Managed dma_release_declared_memory().
 * @dev: Device to release declared coherent memory for
 *
 * Managed dma_release_declared_memory(): releases the declaration now and
 * removes the devres record so detach does not release it a second time.
 */
void dmam_release_declared_memory(struct device *dev)
{
	/* Warns if no matching dmam_declare_coherent_memory() record exists. */
	WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
}
EXPORT_SYMBOL(dmam_release_declared_memory);
198 | |||
199 | #endif | ||
200 | |||
201 | /* | ||
202 | * Create scatter-list for the already allocated DMA buffer. | ||
203 | */ | ||
204 | int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
205 | void *cpu_addr, dma_addr_t handle, size_t size) | ||
206 | { | ||
207 | struct page *page = virt_to_page(cpu_addr); | ||
208 | int ret; | ||
209 | |||
210 | ret = sg_alloc_table(sgt, 1, GFP_KERNEL); | ||
211 | if (unlikely(ret)) | ||
212 | return ret; | ||
213 | |||
214 | sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); | ||
215 | return 0; | ||
216 | } | ||
217 | EXPORT_SYMBOL(dma_common_get_sgtable); | ||
218 | |||
/*
 * Create userspace mapping for the DMA-coherent memory.
 *
 * Returns 0 on success, -ENXIO if the requested range does not fit inside
 * the allocation or the architecture cannot mmap coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	/* vm_pgoff is the user's page offset *into* this allocation. */
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* Buffers from a per-device coherent pool are mapped by the helper. */
	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	/* Only map if the requested window lies entirely within the buffer. */
	if (off < count && user_count <= (count - off))
		ret = remap_pfn_range(vma, vma->vm_start,
				      page_to_pfn(virt_to_page(cpu_addr)) + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
#endif	/* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */

	return ret;
}
EXPORT_SYMBOL(dma_common_mmap);
246 | |||
247 | #ifdef CONFIG_MMU | ||
248 | static struct vm_struct *__dma_common_pages_remap(struct page **pages, | ||
249 | size_t size, unsigned long vm_flags, pgprot_t prot, | ||
250 | const void *caller) | ||
251 | { | ||
252 | struct vm_struct *area; | ||
253 | |||
254 | area = get_vm_area_caller(size, vm_flags, caller); | ||
255 | if (!area) | ||
256 | return NULL; | ||
257 | |||
258 | if (map_vm_area(area, prot, pages)) { | ||
259 | vunmap(area->addr); | ||
260 | return NULL; | ||
261 | } | ||
262 | |||
263 | return area; | ||
264 | } | ||
265 | |||
/*
 * remaps an array of PAGE_SIZE pages into another vm_area
 * Cannot be used in non-sleeping contexts
 *
 * Returns the new kernel virtual address, or NULL on failure.  The pages
 * array must stay valid for the lifetime of the mapping; it is recorded
 * in area->pages so dma_common_free_remap() callers can find it.
 */
void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller)
{
	struct vm_struct *area;

	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
	if (!area)
		return NULL;

	area->pages = pages;

	return area->addr;
}
284 | |||
285 | /* | ||
286 | * remaps an allocated contiguous region into another vm_area. | ||
287 | * Cannot be used in non-sleeping contexts | ||
288 | */ | ||
289 | |||
290 | void *dma_common_contiguous_remap(struct page *page, size_t size, | ||
291 | unsigned long vm_flags, | ||
292 | pgprot_t prot, const void *caller) | ||
293 | { | ||
294 | int i; | ||
295 | struct page **pages; | ||
296 | struct vm_struct *area; | ||
297 | |||
298 | pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL); | ||
299 | if (!pages) | ||
300 | return NULL; | ||
301 | |||
302 | for (i = 0; i < (size >> PAGE_SHIFT); i++) | ||
303 | pages[i] = nth_page(page, i); | ||
304 | |||
305 | area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller); | ||
306 | |||
307 | kfree(pages); | ||
308 | |||
309 | if (!area) | ||
310 | return NULL; | ||
311 | return area->addr; | ||
312 | } | ||
313 | |||
/*
 * unmaps a range previously mapped by dma_common_*_remap
 *
 * Verifies that cpu_addr really is a vmalloc-space area carrying the
 * expected vm_flags before tearing the mapping down; warns and bails out
 * otherwise to avoid unmapping unrelated memory.
 */
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || (area->flags & vm_flags) != vm_flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
	vunmap(cpu_addr);
}
329 | #endif | ||
330 | |||
/*
 * enables DMA API use for a device
 *
 * Delegates to the bus's dma_configure hook when one exists; buses without
 * the hook need no setup and 0 is returned.
 * NOTE(review): dev->bus is dereferenced unconditionally — callers are
 * assumed to pass only bus-attached devices; confirm against call sites.
 */
int dma_configure(struct device *dev)
{
	if (dev->bus->dma_configure)
		return dev->bus->dma_configure(dev);
	return 0;
}
340 | |||
/* Tear down firmware-described (OF and ACPI) DMA configuration. */
void dma_deconfigure(struct device *dev)
{
	of_dma_deconfigure(dev);
	acpi_dma_deconfigure(dev);
}