Diffstat (limited to 'drivers/gpu/ion/ion_iommu_heap.c')
-rw-r--r--	drivers/gpu/ion/ion_iommu_heap.c	382
1 file changed, 382 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
new file mode 100644
index 00000000000..a3d2d726bda
--- /dev/null
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -0,0 +1,382 @@
/*
 * drivers/gpu/ion/ion_iommu_heap.c
 *
 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define pr_fmt(fmt) "%s(): " fmt, __func__

#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/highmem.h>
#include <linux/platform_device.h>

#include <asm/cacheflush.h>

#include "ion_priv.h"

#define NUM_PAGES(buf) (PAGE_ALIGN((buf)->size) >> PAGE_SHIFT)

#define GFP_ION (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)

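/*
 * Per-heap private data: the IOVA allocator backing this heap, the IOMMU
 * domain buffers are mapped into, and the client device attached to that
 * domain.
 */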
struct ion_iommu_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	struct iommu_domain *domain;
	struct device *dev;
};

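/*
 * Map each page of the buffer into the heap's IOMMU domain, one page at a
 * time, starting at the IOVA reserved in buf->priv_virt at allocation time.
 * On failure, the mappings created so far are torn down.
 */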
static struct scatterlist *iommu_heap_map_dma(struct ion_heap *heap,
					      struct ion_buffer *buf)
{
	struct ion_iommu_heap *h =
		container_of(heap, struct ion_iommu_heap, heap);
	int err, npages = NUM_PAGES(buf);
	unsigned int i;
	struct scatterlist *sg;
	unsigned long da = (unsigned long)buf->priv_virt;

	for_each_sg(buf->sglist, sg, npages, i) {
		phys_addr_t pa;

		pa = sg_phys(sg);
		BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE));
		err = iommu_map(h->domain, da, pa, PAGE_SIZE, 0);
		if (err)
			goto err_out;

		sg->dma_address = da;
		da += PAGE_SIZE;
	}

	pr_debug("da:%p pa:%08x va:%p\n",
		 buf->priv_virt, sg_phys(buf->sglist), buf->vaddr);

	return buf->sglist;

err_out:
	if (i > 0) {
		unsigned int j;

		for_each_sg(buf->sglist, sg, i, j)
			iommu_unmap(h->domain, sg_dma_address(sg), PAGE_SIZE);
	}
	return ERR_PTR(err);
}

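/* Remove the buffer's per-page mappings from the IOMMU domain. */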
static void iommu_heap_unmap_dma(struct ion_heap *heap, struct ion_buffer *buf)
{
	struct ion_iommu_heap *h =
		container_of(heap, struct ion_iommu_heap, heap);
	unsigned int i;
	struct scatterlist *sg;
	int npages = NUM_PAGES(buf);

	for_each_sg(buf->sglist, sg, npages, i)
		iommu_unmap(h->domain, sg_dma_address(sg), PAGE_SIZE);

	pr_debug("da:%p\n", buf->priv_virt);
}

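/*
 * Move an already-allocated buffer to a caller-specified IOVA: reserve the
 * new address in the pool, drop the old mappings and IOVA reservation, then
 * map the buffer's pages at the new address.
 */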
struct scatterlist *iommu_heap_remap_dma(struct ion_heap *heap,
					 struct ion_buffer *buf,
					 unsigned long addr)
{
	struct ion_iommu_heap *h =
		container_of(heap, struct ion_iommu_heap, heap);
	int err;
	unsigned int i;
	unsigned long da, da_to_free = (unsigned long)buf->priv_virt;
	int npages = NUM_PAGES(buf);

	BUG_ON(!buf->priv_virt);

	da = gen_pool_alloc_addr(h->pool, buf->size, addr);
	if (da == 0) {
		pr_err("dma address alloc failed, addr=0x%lx\n", addr);
		return ERR_PTR(-ENOMEM);
	}

	pr_debug("remap passed, addr=0x%lx\n", addr);
	iommu_heap_unmap_dma(heap, buf);
	gen_pool_free(h->pool, da_to_free, buf->size);
	buf->priv_virt = (void *)da;

	for (i = 0; i < npages; i++) {
		phys_addr_t pa;

		pa = page_to_phys(buf->pages[i]);
		err = iommu_map(h->domain, da, pa, PAGE_SIZE, 0);
		if (err)
			goto err_out;
		da += PAGE_SIZE;
	}

	pr_debug("da:%p pa:%08x va:%p\n",
		 buf->priv_virt, page_to_phys(buf->pages[0]), buf->vaddr);

	return (struct scatterlist *)buf->pages;

err_out:
	da = (unsigned long)buf->priv_virt;
	while (i-- > 0)
		iommu_unmap(h->domain, da + (i << PAGE_SHIFT), PAGE_SIZE);
	return ERR_PTR(err);
}

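/*
 * Allocate the backing pages for a buffer one page at a time, build the
 * page array and scatterlist, and clean the caches so the pages can be
 * used as uncached memory.
 */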
static int ion_buffer_allocate(struct ion_buffer *buf)
{
	int i, npages = NUM_PAGES(buf);

	buf->pages = kmalloc(npages * sizeof(*buf->pages), GFP_KERNEL);
	if (!buf->pages)
		goto err_pages;

	buf->sglist = vzalloc(npages * sizeof(*buf->sglist));
	if (!buf->sglist)
		goto err_sgl;

	sg_init_table(buf->sglist, npages);

	for (i = 0; i < npages; i++) {
		struct page *page;
		phys_addr_t pa;

		page = alloc_page(GFP_ION);
		if (!page)
			goto err_pgalloc;
		pa = page_to_phys(page);

		sg_set_page(&buf->sglist[i], page, PAGE_SIZE, 0);

		flush_dcache_page(page);
		outer_flush_range(pa, pa + PAGE_SIZE);

		buf->pages[i] = page;

		pr_debug_once("pa:%08x\n", pa);
	}
	return 0;

err_pgalloc:
	while (i-- > 0)
		__free_page(buf->pages[i]);
	vfree(buf->sglist);
err_sgl:
	kfree(buf->pages);
err_pages:
	return -ENOMEM;
}

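/* Release the backing pages, scatterlist, and page array of a buffer. */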
static void ion_buffer_free(struct ion_buffer *buf)
{
	int i, npages = NUM_PAGES(buf);

	for (i = 0; i < npages; i++)
		__free_page(buf->pages[i]);
	vfree(buf->sglist);
	kfree(buf->pages);
}

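/*
 * Heap allocate callback: reserve an IOVA range from the heap's pool,
 * allocate backing pages, and map them into the IOMMU domain. The IOVA is
 * stashed in buf->priv_virt and doubles as the buffer's "physical" address.
 */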
static int iommu_heap_allocate(struct ion_heap *heap, struct ion_buffer *buf,
			       unsigned long len, unsigned long align,
			       unsigned long flags)
{
	int err;
	struct ion_iommu_heap *h =
		container_of(heap, struct ion_iommu_heap, heap);
	unsigned long da;
	struct scatterlist *sgl;

	len = round_up(len, PAGE_SIZE);

	da = gen_pool_alloc(h->pool, len);
	if (!da)
		return -ENOMEM;

	buf->priv_virt = (void *)da;
	buf->size = len;

	WARN_ON(!IS_ALIGNED(da, PAGE_SIZE));

	err = ion_buffer_allocate(buf);
	if (err)
		goto err_alloc_buf;

	sgl = iommu_heap_map_dma(heap, buf);
	if (IS_ERR_OR_NULL(sgl)) {
		err = IS_ERR(sgl) ? PTR_ERR(sgl) : -ENOMEM;
		goto err_heap_map_dma;
	}
	buf->vaddr = NULL;
	return 0;

err_heap_map_dma:
	ion_buffer_free(buf);
err_alloc_buf:
	gen_pool_free(h->pool, da, len);
	buf->size = 0;
	buf->pages = NULL;
	buf->priv_virt = NULL;
	return err;
}

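/* Heap free callback: undo the IOMMU mappings, pages, and IOVA reservation. */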
static void iommu_heap_free(struct ion_buffer *buf)
{
	struct ion_heap *heap = buf->heap;
	struct ion_iommu_heap *h =
		container_of(heap, struct ion_iommu_heap, heap);
	void *da = buf->priv_virt;

	iommu_heap_unmap_dma(heap, buf);
	ion_buffer_free(buf);
	gen_pool_free(h->pool, (unsigned long)da, buf->size);

	buf->pages = NULL;
	buf->priv_virt = NULL;
	pr_debug("da:%p\n", da);
}

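/* Report the buffer's IOVA and size as its "physical" address. */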
static int iommu_heap_phys(struct ion_heap *heap, struct ion_buffer *buf,
			   ion_phys_addr_t *addr, size_t *len)
{
	*addr = (unsigned long)buf->priv_virt;
	*len = buf->size;
	pr_debug("da:%08lx(%x)\n", *addr, *len);
	return 0;
}

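/* Map the buffer's pages into a contiguous, uncached kernel virtual range. */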
static void *iommu_heap_map_kernel(struct ion_heap *heap,
				   struct ion_buffer *buf)
{
	int npages = NUM_PAGES(buf);

	BUG_ON(!buf->pages);
	buf->vaddr = vm_map_ram(buf->pages, npages, -1,
				pgprot_noncached(pgprot_kernel));
	pr_debug("va:%p\n", buf->vaddr);
	WARN_ON(!buf->vaddr);
	return buf->vaddr;
}

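/* Tear down the kernel virtual mapping set up by iommu_heap_map_kernel(). */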
static void iommu_heap_unmap_kernel(struct ion_heap *heap,
				    struct ion_buffer *buf)
{
	int npages = NUM_PAGES(buf);

	BUG_ON(!buf->pages);
	WARN_ON(!buf->vaddr);
	vm_unmap_ram(buf->vaddr, npages);
	buf->vaddr = NULL;
	pr_debug("va:%p\n", buf->vaddr);
}

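/*
 * Map the buffer's pages into a user address space, uncached, one page at
 * a time starting at the page offset requested by the mmap() call.
 */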
static int iommu_heap_map_user(struct ion_heap *mapper,
			       struct ion_buffer *buf,
			       struct vm_area_struct *vma)
{
	int i = vma->vm_pgoff;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;

	pr_debug("vma:%08lx-%08lx\n", vma->vm_start, vma->vm_end);
	BUG_ON(!buf->pages);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	do {
		int ret;
		struct page *page = buf->pages[i++];

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	return 0;
}

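/* ION heap callbacks implemented by the IOMMU heap. */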
static struct ion_heap_ops iommu_heap_ops = {
	.allocate = iommu_heap_allocate,
	.free = iommu_heap_free,
	.phys = iommu_heap_phys,
	.map_dma = iommu_heap_map_dma,
	.unmap_dma = iommu_heap_unmap_dma,
	.map_kernel = iommu_heap_map_kernel,
	.unmap_kernel = iommu_heap_unmap_kernel,
	.map_user = iommu_heap_map_user,
};

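/*
 * Create an IOMMU heap: set up a page-granular IOVA pool covering the
 * heap's address window, allocate an IOMMU domain, and attach the device
 * supplied through the platform heap data.
 */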
struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *data)
{
	struct ion_iommu_heap *h;
	int err;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h) {
		err = -ENOMEM;
		goto err_heap;
	}

	h->pool = gen_pool_create(12, -1);
	if (!h->pool) {
		err = -ENOMEM;
		goto err_genpool;
	}
	gen_pool_add(h->pool, data->base, data->size, -1);

	h->heap.ops = &iommu_heap_ops;
	h->domain = iommu_domain_alloc(&platform_bus_type);
	h->dev = data->priv;
	if (!h->domain) {
		err = -ENOMEM;
		goto err_iommu_alloc;
	}

	err = iommu_attach_device(h->domain, h->dev);
	if (err)
		goto err_iommu_attach;

	return &h->heap;

err_iommu_attach:
	iommu_domain_free(h->domain);
err_iommu_alloc:
	gen_pool_destroy(h->pool);
err_genpool:
	kfree(h);
err_heap:
	return ERR_PTR(err);
}

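/* Detach the device and release the pool, domain, and heap structure. */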
void ion_iommu_heap_destroy(struct ion_heap *heap)
{
	struct ion_iommu_heap *h =
		container_of(heap, struct ion_iommu_heap, heap);

	iommu_detach_device(h->domain, h->dev);
	gen_pool_destroy(h->pool);
	iommu_domain_free(h->domain);
	kfree(h);
}