author     Christopher Kenna <cjk@cs.unc.edu>    2012-09-28 13:46:28 -0400
committer  Christopher Kenna <cjk@cs.unc.edu>    2012-09-28 14:50:15 -0400
commit     daa22703f14c007e93b464c45fa60019a36f546d (patch)
tree       a1a130b6e128dc9d57c35c026977e1b4953105e1 /drivers/media/video/videobuf2-ion.c
parent     5aa287dcf1b5879aa0150b0511833c52885f5b4c (diff)
Apply k4412 kernel from HardKernel for ODROID-X.
Diffstat (limited to 'drivers/media/video/videobuf2-ion.c')
-rw-r--r--  drivers/media/video/videobuf2-ion.c  841
1 file changed, 841 insertions, 0 deletions
diff --git a/drivers/media/video/videobuf2-ion.c b/drivers/media/video/videobuf2-ion.c
new file mode 100644
index 00000000000..75185e54cf6
--- /dev/null
+++ b/drivers/media/video/videobuf2-ion.c
@@ -0,0 +1,841 @@
/* linux/drivers/media/video/videobuf2-ion.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * Implementation of Android ION memory allocator for videobuf2
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/ion.h>
#include <linux/syscalls.h>

#include <asm/cacheflush.h>

#include <media/videobuf2-ion.h>
#include <plat/iovmm.h>
#include <plat/cpu.h>

static int vb2_ion_debug;
module_param(vb2_ion_debug, int, 0644);
#define dbg(level, fmt, arg...)						\
	do {								\
		if (vb2_ion_debug >= level)				\
			printk(KERN_DEBUG "vb2_ion: " fmt, ## arg);	\
	} while (0)
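
/*
 * Usage note (a sketch; the module name videobuf2_ion is an assumption
 * based on this file's name): because vb2_ion_debug is a 0644 module
 * parameter, verbosity can be raised at runtime with
 *
 *	echo 6 > /sys/module/videobuf2_ion/parameters/vb2_ion_debug
 *
 * which enables the dbg(6, ...) traces used throughout this allocator.
 */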

#define SIZE_THRESHOLD SZ_1M

struct vb2_ion_conf {
	struct device *dev;
	const char *name;

	struct ion_client *client;

	unsigned long align;
	bool contig;
	bool sharable;
	bool cacheable;
	bool use_mmu;
	atomic_t mmu_enable;

	spinlock_t slock;
};

struct vb2_ion_buf {
	struct vm_area_struct **vma;
	int vma_count;
	struct vb2_ion_conf *conf;
	struct vb2_vmarea_handler handler;

	struct ion_handle *handle;	/* Kernel space */

	dma_addr_t kva;
	dma_addr_t dva;
	unsigned long size;

	struct scatterlist *sg;
	int nents;

	atomic_t ref;

	bool cacheable;
};

static void vb2_ion_put(void *buf_priv);

static struct ion_client *vb2_ion_init_ion(struct vb2_ion *ion,
					   struct vb2_drv *drv)
{
	struct ion_client *client;
	int ret;
	int mask = ION_HEAP_EXYNOS_MASK | ION_HEAP_EXYNOS_CONTIG_MASK |
		   ION_HEAP_EXYNOS_USER_MASK;

	client = ion_client_create(ion_exynos, mask, ion->name);
	if (IS_ERR(client)) {
		pr_err("ion_client_create: ion_name(%s)\n", ion->name);
		return ERR_PTR(-EINVAL);
	}

	if (!drv->use_mmu)
		return client;

	ret = iovmm_setup(ion->dev);
	if (ret) {
		pr_err("iovmm_setup: ion_name(%s)\n", ion->name);
		ion_client_destroy(client);
		return ERR_PTR(-EINVAL);
	}

	return client;
}

static void vb2_ion_init_conf(struct vb2_ion_conf *conf,
			      struct ion_client *client,
			      struct vb2_ion *ion,
			      struct vb2_drv *drv)
{
	conf->dev = ion->dev;
	conf->name = ion->name;
	conf->client = client;
	conf->contig = ion->contig;
	conf->cacheable = ion->cacheable;
	conf->align = ion->align;
	conf->use_mmu = drv->use_mmu;

	spin_lock_init(&conf->slock);
}

void *vb2_ion_init(struct vb2_ion *ion,
		   struct vb2_drv *drv)
{
	struct ion_client *client;
	struct vb2_ion_conf *conf;

	conf = kzalloc(sizeof *conf, GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	client = vb2_ion_init_ion(ion, drv);
	if (IS_ERR(client)) {
		kfree(conf);
		return ERR_PTR(-EINVAL);
	}

	vb2_ion_init_conf(conf, client, ion, drv);

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_ion_init);
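
/*
 * Minimal driver-side sketch (hypothetical device; the field names below
 * follow what vb2_ion_init_conf() reads from struct vb2_ion and struct
 * vb2_drv, declared in media/videobuf2-ion.h):
 *
 *	struct vb2_ion ion = {
 *		.dev       = &pdev->dev,
 *		.name      = "mydev",
 *		.contig    = true,
 *		.cacheable = false,
 *		.align     = SZ_4K,
 *	};
 *	struct vb2_drv drv = { .use_mmu = true };
 *	void *alloc_ctx = vb2_ion_init(&ion, &drv);
 *
 *	if (IS_ERR(alloc_ctx))
 *		return PTR_ERR(alloc_ctx);
 */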

void vb2_ion_cleanup(void *alloc_ctx)
{
	struct vb2_ion_conf *conf = alloc_ctx;

	BUG_ON(!conf);

	if (conf->use_mmu) {
		if (atomic_read(&conf->mmu_enable)) {
			pr_warning("mmu_enable(%d)\n",
				   atomic_read(&conf->mmu_enable));
			iovmm_deactivate(conf->dev);
		}

		iovmm_cleanup(conf->dev);
	}

	ion_client_destroy(conf->client);

	kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_ion_cleanup);

void **vb2_ion_init_multi(unsigned int num_planes,
			  struct vb2_ion *ion,
			  struct vb2_drv *drv)
{
	struct ion_client *client;
	struct vb2_ion_conf *conf;
	void **alloc_ctxes;
	int i;

	/*
	 * Single allocation: num_planes context pointers followed by
	 * num_planes vb2_ion_conf structs.
	 */
	alloc_ctxes = kzalloc((sizeof *alloc_ctxes + sizeof *conf) * num_planes,
			      GFP_KERNEL);
	if (!alloc_ctxes)
		return ERR_PTR(-ENOMEM);

	client = vb2_ion_init_ion(ion, drv);
	if (IS_ERR(client)) {
		kfree(alloc_ctxes);
		return ERR_PTR(-EINVAL);
	}

	conf = (void *)(alloc_ctxes + num_planes);
	for (i = 0; i < num_planes; ++i, ++conf) {
		alloc_ctxes[i] = conf;
		vb2_ion_init_conf(conf, client, ion, drv);
	}

	return alloc_ctxes;
}
EXPORT_SYMBOL_GPL(vb2_ion_init_multi);
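
/*
 * The single-allocation layout above means vb2_ion_cleanup_multi() can
 * release everything with one kfree():
 *
 *	alloc_ctxes: [ctx0*][ctx1*]...[conf0][conf1]...
 *
 * where each alloc_ctxes[i] points at the corresponding conf slot, and
 * all planes share one ion_client.
 */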

void vb2_ion_cleanup_multi(void **alloc_ctxes)
{
	struct vb2_ion_conf *conf = alloc_ctxes[0];

	BUG_ON(!conf);

	if (conf->use_mmu) {
		if (atomic_read(&conf->mmu_enable)) {
			pr_warning("mmu_enable(%d)\n",
				   atomic_read(&conf->mmu_enable));
			iovmm_deactivate(conf->dev);
		}

		iovmm_cleanup(conf->dev);
	}

	ion_client_destroy(conf->client);

	kfree(alloc_ctxes);
}
EXPORT_SYMBOL_GPL(vb2_ion_cleanup_multi);

static void *vb2_ion_alloc(void *alloc_ctx, unsigned long size)
{
	struct vb2_ion_conf *conf = alloc_ctx;
	struct vb2_ion_buf *buf;
	struct scatterlist *sg;
	size_t len;
	u32 heap = 0;
	int ret = 0;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf) {
		pr_err("no memory for vb2_ion_buf\n");
		return ERR_PTR(-ENOMEM);
	}

	/* Set up vb2_ion_buf */
	buf->conf = conf;
	buf->size = size;
	buf->cacheable = conf->cacheable;

	/* Allocate physical memory */
	if (conf->contig)
		heap = ION_HEAP_EXYNOS_CONTIG_MASK;
	else
		heap = ION_HEAP_EXYNOS_MASK;

	buf->handle = ion_alloc(conf->client, size, conf->align, heap);
	if (IS_ERR(buf->handle)) {
		pr_err("ion_alloc failed (size %lu)\n", size);
		ret = -ENOMEM;
		goto err_alloc;
	}

	/* Get the scatterlist */
	buf->sg = ion_map_dma(conf->client, buf->handle);
	if (IS_ERR(buf->sg)) {
		pr_err("ion_map_dma conf->name(%s)\n", conf->name);
		ret = -ENOMEM;
		goto err_map_dma;
	}
	dbg(6, "PA(0x%x), SIZE(%x)\n", buf->sg->dma_address, buf->sg->length);

	sg = buf->sg;
	do {
		buf->nents++;
	} while ((sg = sg_next(sg)));
	dbg(6, "buf->nents(0x%x)\n", buf->nents);

	/* Map DVA */
	if (conf->use_mmu) {
		buf->dva = iovmm_map(conf->dev, buf->sg, 0, size);
		if (!buf->dva) {
			pr_err("iovmm_map: conf->name(%s)\n", conf->name);
			ret = -ENOMEM;	/* was missing: ret stayed 0 here */
			goto err_ion_map_dva;
		}
		dbg(6, "DVA(0x%x)\n", buf->dva);
	} else {
		ret = ion_phys(conf->client, buf->handle,
			       (unsigned long *)&buf->dva, &len);
		if (ret) {
			pr_err("ion_phys: conf->name(%s)\n", conf->name);
			goto err_ion_map_dva;
		}
	}

	/* Set struct vb2_vmarea_handler */
	buf->handler.refcount = &buf->ref;
	buf->handler.put = vb2_ion_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->ref);

	return buf;

err_ion_map_dva:
	ion_unmap_dma(conf->client, buf->handle);

err_map_dma:
	ion_free(conf->client, buf->handle);

err_alloc:
	kfree(buf);

	return ERR_PTR(ret);
}

static void vb2_ion_put(void *buf_priv)
{
	struct vb2_ion_buf *buf = buf_priv;
	struct vb2_ion_conf *conf = buf->conf;

	dbg(6, "released: buf_refcnt(%d)\n", atomic_read(&buf->ref) - 1);

	if (atomic_dec_and_test(&buf->ref)) {
		if (conf->use_mmu)
			iovmm_unmap(conf->dev, buf->dva);

		ion_unmap_dma(conf->client, buf->handle);

		if (buf->kva)
			ion_unmap_kernel(conf->client, buf->handle);

		ion_free(conf->client, buf->handle);

		kfree(buf);
	}
}

/**
 * _vb2_ion_get_vma() - lock userspace mapped memory
 * @vaddr:	starting virtual address of the area to be verified
 * @size:	size of the area
 * @vma_num:	returns the number of vm_areas spanned by the range
 *
 * This function walks the memory area of size @size mapped at @vaddr. If
 * the underlying vm_areas are virtually contiguous, each one is locked and
 * an array of locked copies is returned.
 *
 * Returns the vm_area array on success, or an ERR_PTR() value on failure.
 */
static struct vm_area_struct **_vb2_ion_get_vma(unsigned long vaddr,
						unsigned long size, int *vma_num)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *vma0;
	struct vm_area_struct **vmas;
	unsigned long prev_end = 0;
	unsigned long end;
	int i;

	end = vaddr + size;

	down_read(&mm->mmap_sem);
	vma0 = find_vma(mm, vaddr);
	if (!vma0) {
		vmas = ERR_PTR(-EINVAL);
		goto done;
	}

	for (*vma_num = 1, vma = vma0->vm_next, prev_end = vma0->vm_end;
	     vma && (end > vma->vm_start) && (prev_end == vma->vm_start);
	     prev_end = vma->vm_end, vma = vma->vm_next) {
		*vma_num += 1;
	}

	if (prev_end < end) {
		vmas = ERR_PTR(-EINVAL);
		goto done;
	}

	vmas = kmalloc(sizeof(*vmas) * *vma_num, GFP_KERNEL);
	if (!vmas) {
		vmas = ERR_PTR(-ENOMEM);
		goto done;
	}

	for (i = 0; i < *vma_num; i++, vma0 = vma0->vm_next) {
		vmas[i] = vb2_get_vma(vma0);
		if (!vmas[i])
			break;
	}

	if (i < *vma_num) {
		while (i-- > 0)
			vb2_put_vma(vmas[i]);

		kfree(vmas);
		vmas = ERR_PTR(-ENOMEM);
	}

done:
	up_read(&mm->mmap_sem);
	return vmas;
}

static void *vb2_ion_get_userptr(void *alloc_ctx, unsigned long vaddr,
				 unsigned long size, int write)
{
	struct vb2_ion_conf *conf = alloc_ctx;
	struct vb2_ion_buf *buf = NULL;
	size_t len;
	int ret = 0;
	struct scatterlist *sg;
	off_t offset;

	/* Create vb2_ion_buf */
	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf) {
		pr_err("kzalloc failed\n");
		return ERR_PTR(-ENOMEM);
	}

	/* Get handle and client from the user virtual address */
	buf->handle = ion_import_uva(conf->client, vaddr, &offset);
	if (IS_ERR(buf->handle)) {
		if ((PTR_ERR(buf->handle) == -ENXIO) && conf->use_mmu) {
			int flags = ION_HEAP_EXYNOS_USER_MASK;

			if (write)
				flags |= ION_EXYNOS_WRITE_MASK;

			buf->handle = ion_exynos_get_user_pages(conf->client,
							vaddr, size, flags);
			if (IS_ERR(buf->handle))
				ret = PTR_ERR(buf->handle);
		} else {
			ret = -EINVAL;
		}

		if (ret) {
			pr_err("%s: failed to retrieve non-ion user buffer @ "
			       "0x%lx (size:0x%lx, dev:%s, errno %ld)\n",
			       __func__, vaddr, size, dev_name(conf->dev),
			       PTR_ERR(buf->handle));
			goto err_import_uva;
		}

		offset = 0;
	}

	/* TODO: Need to check whether a DVA has already been created */

	buf->sg = ion_map_dma(conf->client, buf->handle);
	if (IS_ERR(buf->sg)) {
		ret = -ENOMEM;
		goto err_map_dma;
	}
	dbg(6, "PA(0x%x) size(%x)\n", buf->sg->dma_address, buf->sg->length);

	sg = buf->sg;
	do {
		buf->nents++;
	} while ((sg = sg_next(sg)));

	/* Map DVA */
	if (conf->use_mmu) {
		buf->dva = iovmm_map(conf->dev, buf->sg, offset, size);
		if (!buf->dva) {
			pr_err("iovmm_map: conf->name(%s)\n", conf->name);
			ret = -ENOMEM;	/* was missing: ret stayed 0 here */
			goto err_ion_map_dva;
		}
		dbg(6, "DVA(0x%x)\n", buf->dva);
	} else {
		ret = ion_phys(conf->client, buf->handle,
			       (unsigned long *)&buf->dva, &len);
		if (ret) {
			pr_err("ion_phys: conf->name(%s)\n", conf->name);
			goto err_ion_map_dva;
		}

		buf->dva += offset;
	}

	/* Set vb2_ion_buf */
	buf->vma = _vb2_ion_get_vma(vaddr, size, &buf->vma_count);
	if (IS_ERR(buf->vma)) {
		pr_err("Failed acquiring VMA 0x%08lx\n", vaddr);
		ret = PTR_ERR(buf->vma);	/* was missing: ret stayed 0 here */

		if (conf->use_mmu)
			iovmm_unmap(conf->dev, buf->dva);

		goto err_get_vma;
	}

	buf->conf = conf;
	buf->size = size;
	buf->cacheable = conf->cacheable;

	return buf;

err_get_vma:	/* fall through */
err_ion_map_dva:
	ion_unmap_dma(conf->client, buf->handle);

err_map_dma:
	ion_free(conf->client, buf->handle);

err_import_uva:
	kfree(buf);

	return ERR_PTR(ret);
}

static void vb2_ion_put_userptr(void *mem_priv)
{
	struct vb2_ion_buf *buf = mem_priv;
	struct vb2_ion_conf *conf;
	int i;

	/* check the buffer before dereferencing buf->conf */
	if (!buf) {
		pr_err("No buffer to put\n");
		return;
	}
	conf = buf->conf;

	/* Unmap DVA, KVA */
	if (conf->use_mmu)
		iovmm_unmap(conf->dev, buf->dva);

	ion_unmap_dma(conf->client, buf->handle);
	if (buf->kva)
		ion_unmap_kernel(conf->client, buf->handle);

	ion_free(conf->client, buf->handle);

	for (i = 0; i < buf->vma_count; i++)
		vb2_put_vma(buf->vma[i]);
	kfree(buf->vma);

	kfree(buf);
}

static void *vb2_ion_cookie(void *buf_priv)
{
	struct vb2_ion_buf *buf = buf_priv;

	if (!buf) {
		pr_err("failed to get buffer\n");
		return NULL;
	}

	return (void *)buf->dva;
}

static void *vb2_ion_vaddr(void *buf_priv)
{
	struct vb2_ion_buf *buf = buf_priv;
	struct vb2_ion_conf *conf;

	/* check the buffer before dereferencing buf->conf */
	if (!buf) {
		pr_err("failed to get buffer\n");
		return NULL;
	}
	conf = buf->conf;

	if (!buf->kva) {
		buf->kva = (dma_addr_t)ion_map_kernel(conf->client, buf->handle);
		if (IS_ERR_VALUE(buf->kva)) {
			pr_err("ion_map_kernel handle(%x)\n",
			       (u32)buf->handle);
			/* don't leave an error value behind for unmap paths */
			buf->kva = 0;
			return NULL;
		}
	}

	return (void *)buf->kva;
}

static unsigned int vb2_ion_num_users(void *buf_priv)
{
	struct vb2_ion_buf *buf = buf_priv;

	return atomic_read(&buf->ref);
}

/**
 * _vb2_ion_mmap_pfn_range() - map physical pages to userspace
 * @vma:	virtual memory region for the mapping
 * @sg:		scatterlist to be mapped
 * @nents:	number of entries in the scatterlist
 * @size:	size of the memory to be mapped
 * @vm_ops:	vm operations to be assigned to the created area
 * @priv:	private data to be associated with the area
 *
 * Returns 0 on success.
 */
static int _vb2_ion_mmap_pfn_range(struct vm_area_struct *vma,
				   struct scatterlist *sg,
				   int nents,
				   unsigned long size,
				   const struct vm_operations_struct *vm_ops,
				   void *priv)
{
	struct scatterlist *s;
	dma_addr_t addr;
	size_t len;
	unsigned long org_vm_start = vma->vm_start;
	int vma_size = vma->vm_end - vma->vm_start;
	resource_size_t remap_size;
	int mapped_size = 0;
	int remap_break = 0;
	int ret, i = 0;

	for_each_sg(sg, s, nents, i) {
		addr = sg_phys(s);
		len = sg_dma_len(s);
		if ((mapped_size + len) > vma_size) {
			remap_size = vma_size - mapped_size;
			remap_break = 1;
		} else {
			remap_size = len;
		}

		ret = remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
				      remap_size, vma->vm_page_prot);
		if (ret) {
			pr_err("Remapping failed, error: %d\n", ret);
			vma->vm_start = org_vm_start;
			return ret;
		}

		/*
		 * Do not increment 'i' here: for_each_sg() maintains the
		 * counter, and a side effect inside dbg() would only run
		 * when debugging is enabled.
		 */
		dbg(6, "%dth page vaddr(0x%08x), paddr(0x%08x), size(0x%08x)\n",
		    i, (u32)vma->vm_start, addr, len);

		mapped_size += remap_size;
		vma->vm_start += len;

		if (remap_break)
			break;
	}

	WARN_ON(size > mapped_size);

	/* re-assign initial start address */
	vma->vm_start = org_vm_start;
	vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
	vma->vm_private_data = priv;
	vma->vm_ops = vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

static int vb2_ion_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_ion_buf *buf = buf_priv;

	if (!buf) {
		pr_err("No buffer to map\n");
		return -EINVAL;
	}

	if (!buf->cacheable)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return _vb2_ion_mmap_pfn_range(vma, buf->sg, buf->nents, buf->size,
				       &vb2_common_vm_ops, &buf->handler);
}

const struct vb2_mem_ops vb2_ion_memops = {
	.alloc		= vb2_ion_alloc,
	.put		= vb2_ion_put,
	.cookie		= vb2_ion_cookie,
	.vaddr		= vb2_ion_vaddr,
	.mmap		= vb2_ion_mmap,
	.get_userptr	= vb2_ion_get_userptr,
	.put_userptr	= vb2_ion_put_userptr,
	.num_users	= vb2_ion_num_users,
};
EXPORT_SYMBOL_GPL(vb2_ion_memops);
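
/*
 * Sketch of wiring these memops into a vb2 queue (the standard videobuf2
 * pattern of this era; the mydev_* names are hypothetical):
 *
 *	struct vb2_queue *q = &ctx->vb2_q;
 *
 *	q->type     = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR;
 *	q->ops      = &mydev_qops;
 *	q->mem_ops  = &vb2_ion_memops;
 *	q->drv_priv = ctx;
 *	ret = vb2_queue_init(q);
 *
 * The allocator context from vb2_ion_init() is handed back to vb2 through
 * the alloc_ctxs[] array in the driver's queue_setup() callback.
 */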

void vb2_ion_set_sharable(void *alloc_ctx, bool sharable)
{
	((struct vb2_ion_conf *)alloc_ctx)->sharable = sharable;
}

void vb2_ion_set_cacheable(void *alloc_ctx, bool cacheable)
{
	((struct vb2_ion_conf *)alloc_ctx)->cacheable = cacheable;
}

bool vb2_ion_get_cacheable(void *alloc_ctx)
{
	return ((struct vb2_ion_conf *)alloc_ctx)->cacheable;
}

#if 0
int vb2_ion_cache_flush(struct vb2_buffer *vb, u32 num_planes)
{
	struct vb2_ion_conf *conf;
	struct vb2_ion_buf *buf;
	int i, ret;

	for (i = 0; i < num_planes; i++) {
		buf = vb->planes[i].mem_priv;
		conf = buf->conf;

		if (!buf->cacheable) {
			pr_warning("This is a non-cacheable buffer allocator\n");
			return -EINVAL;
		}

		/* dma_map_sg() returns 0 on failure, mapped nents otherwise */
		ret = dma_map_sg(conf->dev, buf->sg, buf->nents, DMA_TO_DEVICE);
		if (!ret) {
			pr_err("flush sg cnt(%d)\n", ret);
			return -EINVAL;
		}
	}

	return 0;
}
#else
static void _vb2_ion_cache_flush_all(void)
{
	flush_cache_all();	/* L1 */
	smp_call_function((void (*)(void *))__cpuc_flush_kern_all, NULL, 1);
	outer_flush_all();	/* L2 */
}

static void _vb2_ion_cache_flush_range(struct vb2_ion_buf *buf,
				       unsigned long size)
{
	struct scatterlist *s;
	phys_addr_t start, end;
	int i;

	/* traverse the physical ranges sequentially */
	if (size > SZ_64K) {
		flush_cache_all();	/* L1 */
		smp_call_function((void (*)(void *))__cpuc_flush_kern_all, NULL, 1);

		for_each_sg(buf->sg, s, buf->nents, i) {
			start = sg_phys(s);
			end = start + sg_dma_len(s) - 1;

			outer_flush_range(start, end);	/* L2 */
		}
	} else {
		dma_sync_sg_for_device(buf->conf->dev, buf->sg, buf->nents,
				       DMA_BIDIRECTIONAL);
		dma_sync_sg_for_cpu(buf->conf->dev, buf->sg, buf->nents,
				    DMA_BIDIRECTIONAL);
	}
}

int vb2_ion_cache_flush(struct vb2_buffer *vb, u32 num_planes)
{
	struct vb2_ion_conf *conf;
	struct vb2_ion_buf *buf;
	unsigned long size = 0;
	int i;

	for (i = 0; i < num_planes; i++) {
		buf = vb->planes[i].mem_priv;
		conf = buf->conf;

		if (!buf->cacheable) {
			pr_warning("This is a non-cacheable buffer allocator\n");
			return -EINVAL;
		}

		size += buf->size;
	}

	if (size > (unsigned long)SIZE_THRESHOLD) {
		_vb2_ion_cache_flush_all();
	} else {
		for (i = 0; i < num_planes; i++) {
			buf = vb->planes[i].mem_priv;
			_vb2_ion_cache_flush_range(buf, size);
		}
	}

	return 0;
}
#endif
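
/*
 * Usage note (a sketch, not part of this file): a driver with cacheable
 * buffers would typically flush before the device touches memory and
 * invalidate after DMA completes, e.g. from its vb2 callbacks:
 *
 *	static int mydev_buf_prepare(struct vb2_buffer *vb)
 *	{
 *		return vb2_ion_cache_flush(vb, vb->num_planes);
 *	}
 *
 *	static int mydev_buf_finish(struct vb2_buffer *vb)
 *	{
 *		return vb2_ion_cache_inv(vb, vb->num_planes);
 *	}
 */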

int vb2_ion_cache_inv(struct vb2_buffer *vb, u32 num_planes)
{
	struct vb2_ion_conf *conf;
	struct vb2_ion_buf *buf;
	int i;

	for (i = 0; i < num_planes; i++) {
		buf = vb->planes[i].mem_priv;
		conf = buf->conf;
		if (!buf->cacheable) {
			pr_warning("This is a non-cacheable buffer allocator\n");
			return -EINVAL;
		}

		dma_unmap_sg(conf->dev, buf->sg, buf->nents, DMA_FROM_DEVICE);
	}

	return 0;
}

void vb2_ion_suspend(void *alloc_ctx)
{
	struct vb2_ion_conf *conf = alloc_ctx;
	unsigned long flags;

	if (!conf->use_mmu)
		return;

	spin_lock_irqsave(&conf->slock, flags);
	if (!atomic_read(&conf->mmu_enable)) {
		pr_warning("Already suspended: device(%x)\n", (u32)conf->dev);
		/* was missing: do not return with the spinlock held */
		spin_unlock_irqrestore(&conf->slock, flags);
		return;
	}

	atomic_dec(&conf->mmu_enable);
	iovmm_deactivate(conf->dev);
	spin_unlock_irqrestore(&conf->slock, flags);
}

void vb2_ion_resume(void *alloc_ctx)
{
	struct vb2_ion_conf *conf = alloc_ctx;
	int ret;
	unsigned long flags;

	if (!conf->use_mmu)
		return;

	spin_lock_irqsave(&conf->slock, flags);
	if (atomic_read(&conf->mmu_enable)) {
		pr_warning("Already resumed: device(%x)\n", (u32)conf->dev);
		/* was missing: do not return with the spinlock held */
		spin_unlock_irqrestore(&conf->slock, flags);
		return;
	}

	atomic_inc(&conf->mmu_enable);
	ret = iovmm_activate(conf->dev);
	if (ret) {
		pr_err("iovmm_activate: dev(%x)\n", (u32)conf->dev);
		atomic_dec(&conf->mmu_enable);
	}
	spin_unlock_irqrestore(&conf->slock, flags);
}
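
/*
 * Sketch (hypothetical platform-driver PM callbacks) showing where a
 * driver would tie the IOMMU state above into suspend/resume:
 *
 *	static int mydev_suspend(struct device *dev)
 *	{
 *		struct mydev *md = dev_get_drvdata(dev);
 *
 *		vb2_ion_suspend(md->alloc_ctx);
 *		return 0;
 *	}
 *
 *	static int mydev_resume(struct device *dev)
 *	{
 *		struct mydev *md = dev_get_drvdata(dev);
 *
 *		vb2_ion_resume(md->alloc_ctx);
 *		return 0;
 *	}
 */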
MODULE_AUTHOR("Jonghun, Han <jonghun.han@samsung.com>");
MODULE_DESCRIPTION("Android ION allocator handling routines for videobuf2");
MODULE_LICENSE("GPL");