path: root/drivers/base/dma-coherent.c
author	Marek Szyprowski <m.szyprowski@samsung.com>	2014-10-13 18:51:07 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 20:18:12 -0400
commit	7bfa5ab6fa1b18f53fb94f922e107e6fbdc5e485 (patch)
tree	1bc3d95e53fa20bd903dbcb97a40bac247b7ce80 /drivers/base/dma-coherent.c
parent	71458cfc782eafe4b27656e078d379a34e472adf (diff)
drivers: dma-coherent: add initialization from device tree
The initialization procedure of the dma coherent pool has been split into
two parts, so a memory pool can now be initialized without being assigned
to a particular struct device. An initialized region can then be assigned
to more than one struct device. To protect against concurrent allocations
from different devices, a spinlock has been added to the dma_coherent_mem
structure. The last part of this patch adds support for handling
'shared-dma-pool' reserved-memory device tree nodes.

[akpm@linux-foundation.org: use more appropriate printk facility levels]
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Grant Likely <grant.likely@linaro.org>
Cc: Laura Abbott <lauraa@codeaurora.org>
Cc: Josh Cartwright <joshc@codeaurora.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
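For context, the 'shared-dma-pool' regions this patch consumes are declared
under the /reserved-memory node and attached to a consumer device through its
memory-region property. A minimal sketch of such a binding follows; the
labels, unit addresses, and sizes below are hypothetical, chosen only to
illustrate the shape of the nodes:

    /* illustrative DT fragment -- names and addresses are made up */
    reserved-memory {
            #address-cells = <1>;
            #size-cells = <1>;
            ranges;

            /* matched by rmem_dma_setup() via RESERVEDMEM_OF_DECLARE() below */
            multimedia_pool: multimedia@77000000 {
                    compatible = "shared-dma-pool";
                    no-map;                         /* required on ARM by this patch */
                    reg = <0x77000000 0x4000000>;   /* 64 MiB */
            };
    };

    video-codec@12300000 {
            /* device whose coherent allocations should come from the pool */
            memory-region = <&multimedia_pool>;
    };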
Diffstat (limited to 'drivers/base/dma-coherent.c')
-rw-r--r--	drivers/base/dma-coherent.c	| 151
1 file changed, 129 insertions(+), 22 deletions(-)
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 7d6e84a51424..55b83983a9c0 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -14,11 +14,14 @@ struct dma_coherent_mem {
 	int size;
 	int flags;
 	unsigned long *bitmap;
+	spinlock_t spinlock;
 };
 
-int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-				dma_addr_t device_addr, size_t size, int flags)
+static int dma_init_coherent_memory(phys_addr_t phys_addr, dma_addr_t device_addr,
+			     size_t size, int flags,
+			     struct dma_coherent_mem **mem)
 {
+	struct dma_coherent_mem *dma_mem = NULL;
 	void __iomem *mem_base = NULL;
 	int pages = size >> PAGE_SHIFT;
 	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
@@ -27,40 +30,77 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
 		goto out;
 	if (!size)
 		goto out;
-	if (dev->dma_mem)
-		goto out;
-
-	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
 
 	mem_base = ioremap(phys_addr, size);
 	if (!mem_base)
 		goto out;
 
-	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
-	if (!dev->dma_mem)
+	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+	if (!dma_mem)
 		goto out;
-	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-	if (!dev->dma_mem->bitmap)
-		goto free1_out;
+	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!dma_mem->bitmap)
+		goto out;
+
+	dma_mem->virt_base = mem_base;
+	dma_mem->device_base = device_addr;
+	dma_mem->pfn_base = PFN_DOWN(phys_addr);
+	dma_mem->size = pages;
+	dma_mem->flags = flags;
+	spin_lock_init(&dma_mem->spinlock);
 
-	dev->dma_mem->virt_base = mem_base;
-	dev->dma_mem->device_base = device_addr;
-	dev->dma_mem->pfn_base = PFN_DOWN(phys_addr);
-	dev->dma_mem->size = pages;
-	dev->dma_mem->flags = flags;
+	*mem = dma_mem;
 
 	if (flags & DMA_MEMORY_MAP)
 		return DMA_MEMORY_MAP;
 
 	return DMA_MEMORY_IO;
 
-free1_out:
-	kfree(dev->dma_mem);
-out:
+out:
+	kfree(dma_mem);
 	if (mem_base)
 		iounmap(mem_base);
 	return 0;
 }
+
+static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
+{
+	if (!mem)
+		return;
+	iounmap(mem->virt_base);
+	kfree(mem->bitmap);
+	kfree(mem);
+}
+
+static int dma_assign_coherent_memory(struct device *dev,
+				      struct dma_coherent_mem *mem)
+{
+	if (dev->dma_mem)
+		return -EBUSY;
+
+	dev->dma_mem = mem;
+	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
+
+	return 0;
+}
+
+int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
+				dma_addr_t device_addr, size_t size, int flags)
+{
+	struct dma_coherent_mem *mem;
+	int ret;
+
+	ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags,
+				       &mem);
+	if (ret == 0)
+		return 0;
+
+	if (dma_assign_coherent_memory(dev, mem) == 0)
+		return ret;
+
+	dma_release_coherent_memory(mem);
+	return 0;
+}
 EXPORT_SYMBOL(dma_declare_coherent_memory);
 
 void dma_release_declared_memory(struct device *dev)
@@ -69,10 +109,8 @@ void dma_release_declared_memory(struct device *dev)
 
 	if (!mem)
 		return;
+	dma_release_coherent_memory(mem);
 	dev->dma_mem = NULL;
-	iounmap(mem->virt_base);
-	kfree(mem->bitmap);
-	kfree(mem);
 }
 EXPORT_SYMBOL(dma_release_declared_memory);
 
@@ -80,6 +118,7 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
 					dma_addr_t device_addr, size_t size)
 {
 	struct dma_coherent_mem *mem = dev->dma_mem;
+	unsigned long flags;
 	int pos, err;
 
 	size += device_addr & ~PAGE_MASK;
@@ -87,8 +126,11 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
 	if (!mem)
 		return ERR_PTR(-EINVAL);
 
+	spin_lock_irqsave(&mem->spinlock, flags);
 	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
 	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
+	spin_unlock_irqrestore(&mem->spinlock, flags);
+
 	if (err != 0)
 		return ERR_PTR(err);
 	return mem->virt_base + (pos << PAGE_SHIFT);
@@ -115,6 +157,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 {
 	struct dma_coherent_mem *mem;
 	int order = get_order(size);
+	unsigned long flags;
 	int pageno;
 
 	if (!dev)
@@ -124,6 +167,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 		return 0;
 
 	*ret = NULL;
+	spin_lock_irqsave(&mem->spinlock, flags);
 
 	if (unlikely(size > (mem->size << PAGE_SHIFT)))
 		goto err;
@@ -138,10 +182,12 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
 	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
 	memset(*ret, 0, size);
+	spin_unlock_irqrestore(&mem->spinlock, flags);
 
 	return 1;
 
 err:
+	spin_unlock_irqrestore(&mem->spinlock, flags);
 	/*
 	 * In the case where the allocation can not be satisfied from the
 	 * per-device area, try to fall back to generic memory if the
@@ -171,8 +217,11 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
 	if (mem && vaddr >= mem->virt_base && vaddr <
 		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
 		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+		unsigned long flags;
 
+		spin_lock_irqsave(&mem->spinlock, flags);
 		bitmap_release_region(mem->bitmap, page, order);
+		spin_unlock_irqrestore(&mem->spinlock, flags);
 		return 1;
 	}
 	return 0;
@@ -218,3 +267,61 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
 	return 0;
 }
 EXPORT_SYMBOL(dma_mmap_from_coherent);
+
+/*
+ * Support for reserved memory regions defined in device tree
+ */
+#ifdef CONFIG_OF_RESERVED_MEM
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+
+static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
+{
+	struct dma_coherent_mem *mem = rmem->priv;
+
+	if (!mem &&
+	    dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
+				     DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
+				     &mem) != DMA_MEMORY_MAP) {
+		pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
+			&rmem->base, (unsigned long)rmem->size / SZ_1M);
+		return -ENODEV;
+	}
+	rmem->priv = mem;
+	dma_assign_coherent_memory(dev, mem);
+	return 0;
+}
+
+static void rmem_dma_device_release(struct reserved_mem *rmem,
+				    struct device *dev)
+{
+	dev->dma_mem = NULL;
+}
+
+static const struct reserved_mem_ops rmem_dma_ops = {
+	.device_init	= rmem_dma_device_init,
+	.device_release	= rmem_dma_device_release,
+};
+
+static int __init rmem_dma_setup(struct reserved_mem *rmem)
+{
+	unsigned long node = rmem->fdt_node;
+
+	if (of_get_flat_dt_prop(node, "reusable", NULL))
+		return -EINVAL;
+
+#ifdef CONFIG_ARM
+	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
+		pr_err("Reserved memory: regions without no-map are not yet supported\n");
+		return -EINVAL;
+	}
+#endif
+
+	rmem->ops = &rmem_dma_ops;
+	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
+		&rmem->base, (unsigned long)rmem->size / SZ_1M);
+	return 0;
+}
+RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
+#endif
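
For a sense of how a driver reaches rmem_dma_device_init() above: once the
reserved region is assigned to a device (the companion reserved-memory
patches in this series do it through of_reserved_mem_device_init()), ordinary
coherent allocations are served from the pool by the dma_alloc_from_coherent()
path. A minimal sketch, assuming a hypothetical platform driver "foo" and the
of_reserved_mem_device_init() helper from the same series:

    #include <linux/dma-mapping.h>
    #include <linux/of_reserved_mem.h>
    #include <linux/platform_device.h>
    #include <linux/sizes.h>

    static int foo_probe(struct platform_device *pdev)
    {
            dma_addr_t dma_handle;
            void *vaddr;
            int ret;

            /*
             * Look up the "memory-region" phandle and assign the
             * shared-dma-pool to this device; this eventually calls
             * rmem_dma_device_init() -> dma_assign_coherent_memory().
             */
            ret = of_reserved_mem_device_init(&pdev->dev);
            if (ret)
                    return ret;

            /*
             * With dev->dma_mem set, the allocation is satisfied from
             * the reserved pool via dma_alloc_from_coherent() instead
             * of generic memory.
             */
            vaddr = dma_alloc_coherent(&pdev->dev, SZ_64K, &dma_handle,
                                       GFP_KERNEL);
            if (!vaddr)
                    return -ENOMEM;

            /* ... program the hardware with dma_handle ... */
            return 0;
    }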