Diffstat (limited to 'drivers/media/v4l2-core')
-rw-r--r--  drivers/media/v4l2-core/Kconfig                   1
-rw-r--r--  drivers/media/v4l2-core/videobuf2-core.c          2
-rw-r--r--  drivers/media/v4l2-core/videobuf2-dma-contig.c  207
-rw-r--r--  drivers/media/v4l2-core/videobuf2-dma-sg.c       91
-rw-r--r--  drivers/media/v4l2-core/videobuf2-memops.c      148
-rw-r--r--  drivers/media/v4l2-core/videobuf2-vmalloc.c      90
6 files changed, 132 insertions(+), 407 deletions(-)
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index b4b022933e29..82876a67f144 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -84,6 +84,7 @@ config VIDEOBUF2_CORE
 
 config VIDEOBUF2_MEMOPS
 	tristate
+	select FRAME_VECTOR
 
 config VIDEOBUF2_DMA_CONTIG
 	tristate
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index f1022d810d22..4f59b7ec05d0 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1691,9 +1691,7 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 		ret = __qbuf_mmap(vb, b);
 		break;
 	case V4L2_MEMORY_USERPTR:
-		down_read(&current->mm->mmap_sem);
 		ret = __qbuf_userptr(vb, b);
-		up_read(&current->mm->mmap_sem);
 		break;
 	case V4L2_MEMORY_DMABUF:
 		ret = __qbuf_dmabuf(vb, b);
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 94c1e6455d36..2397ceb1dc6b 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -32,15 +32,13 @@ struct vb2_dc_buf {
 	dma_addr_t dma_addr;
 	enum dma_data_direction dma_dir;
 	struct sg_table *dma_sgt;
+	struct frame_vector *vec;
 
 	/* MMAP related */
 	struct vb2_vmarea_handler handler;
 	atomic_t refcount;
 	struct sg_table *sgt_base;
 
-	/* USERPTR related */
-	struct vm_area_struct *vma;
-
 	/* DMABUF related */
 	struct dma_buf_attachment *db_attach;
 };
@@ -49,24 +47,6 @@ struct vb2_dc_buf {
 /*        scatterlist table functions        */
 /*********************************************/
 
-
-static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
-	void (*cb)(struct page *pg))
-{
-	struct scatterlist *s;
-	unsigned int i;
-
-	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
-		struct page *page = sg_page(s);
-		unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
-			>> PAGE_SHIFT;
-		unsigned int j;
-
-		for (j = 0; j < n_pages; ++j, ++page)
-			cb(page);
-	}
-}
-
 static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
 {
 	struct scatterlist *s;
@@ -429,92 +409,12 @@ static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
 /*       callbacks for USERPTR buffers       */
 /*********************************************/
 
-static inline int vma_is_io(struct vm_area_struct *vma)
-{
-	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
-}
-
-static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
-	struct vm_area_struct *vma, unsigned long *res)
-{
-	unsigned long pfn, start_pfn, prev_pfn;
-	unsigned int i;
-	int ret;
-
-	if (!vma_is_io(vma))
-		return -EFAULT;
-
-	ret = follow_pfn(vma, start, &pfn);
-	if (ret)
-		return ret;
-
-	start_pfn = pfn;
-	start += PAGE_SIZE;
-
-	for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
-		prev_pfn = pfn;
-		ret = follow_pfn(vma, start, &pfn);
-
-		if (ret) {
-			pr_err("no page for address %lu\n", start);
-			return ret;
-		}
-		if (pfn != prev_pfn + 1)
-			return -EINVAL;
-	}
-
-	*res = start_pfn;
-	return 0;
-}
-
-static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
-	int n_pages, struct vm_area_struct *vma,
-	enum dma_data_direction dma_dir)
-{
-	if (vma_is_io(vma)) {
-		unsigned int i;
-
-		for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
-			unsigned long pfn;
-			int ret = follow_pfn(vma, start, &pfn);
-
-			if (!pfn_valid(pfn))
-				return -EINVAL;
-
-			if (ret) {
-				pr_err("no page for address %lu\n", start);
-				return ret;
-			}
-			pages[i] = pfn_to_page(pfn);
-		}
-	} else {
-		int n;
-
-		n = get_user_pages(current, current->mm, start & PAGE_MASK,
-			n_pages, dma_dir == DMA_FROM_DEVICE, 1, pages, NULL);
-		/* negative error means that no page was pinned */
-		n = max(n, 0);
-		if (n != n_pages) {
-			pr_err("got only %d of %d user pages\n", n, n_pages);
-			while (n)
-				put_page(pages[--n]);
-			return -EFAULT;
-		}
-	}
-
-	return 0;
-}
-
-static void vb2_dc_put_dirty_page(struct page *page)
-{
-	set_page_dirty_lock(page);
-	put_page(page);
-}
-
 static void vb2_dc_put_userptr(void *buf_priv)
 {
 	struct vb2_dc_buf *buf = buf_priv;
 	struct sg_table *sgt = buf->dma_sgt;
+	int i;
+	struct page **pages;
 
 	if (sgt) {
 		DEFINE_DMA_ATTRS(attrs);
@@ -526,13 +426,15 @@ static void vb2_dc_put_userptr(void *buf_priv)
 		 */
 		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
 				   buf->dma_dir, &attrs);
-		if (!vma_is_io(buf->vma))
-			vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
-
+		pages = frame_vector_pages(buf->vec);
+		/* sgt should exist only if vector contains pages... */
+		BUG_ON(IS_ERR(pages));
+		for (i = 0; i < frame_vector_count(buf->vec); i++)
+			set_page_dirty_lock(pages[i]);
 		sg_free_table(sgt);
 		kfree(sgt);
 	}
-	vb2_put_vma(buf->vma);
+	vb2_destroy_framevec(buf->vec);
 	kfree(buf);
 }
 
@@ -572,13 +474,10 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 {
 	struct vb2_dc_conf *conf = alloc_ctx;
 	struct vb2_dc_buf *buf;
-	unsigned long start;
-	unsigned long end;
+	struct frame_vector *vec;
 	unsigned long offset;
-	struct page **pages;
-	int n_pages;
+	int n_pages, i;
 	int ret = 0;
-	struct vm_area_struct *vma;
 	struct sg_table *sgt;
 	unsigned long contig_size;
 	unsigned long dma_align = dma_get_cache_alignment();
@@ -604,72 +503,43 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	buf->dev = conf->dev;
 	buf->dma_dir = dma_dir;
 
-	start = vaddr & PAGE_MASK;
 	offset = vaddr & ~PAGE_MASK;
-	end = PAGE_ALIGN(vaddr + size);
-	n_pages = (end - start) >> PAGE_SHIFT;
-
-	pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
-	if (!pages) {
-		ret = -ENOMEM;
-		pr_err("failed to allocate pages table\n");
+	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
+	if (IS_ERR(vec)) {
+		ret = PTR_ERR(vec);
 		goto fail_buf;
 	}
+	buf->vec = vec;
+	n_pages = frame_vector_count(vec);
+	ret = frame_vector_to_pages(vec);
+	if (ret < 0) {
+		unsigned long *nums = frame_vector_pfns(vec);
 
-	/* current->mm->mmap_sem is taken by videobuf2 core */
-	vma = find_vma(current->mm, vaddr);
-	if (!vma) {
-		pr_err("no vma for address %lu\n", vaddr);
-		ret = -EFAULT;
-		goto fail_pages;
-	}
-
-	if (vma->vm_end < vaddr + size) {
-		pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
-		ret = -EFAULT;
-		goto fail_pages;
-	}
-
-	buf->vma = vb2_get_vma(vma);
-	if (!buf->vma) {
-		pr_err("failed to copy vma\n");
-		ret = -ENOMEM;
-		goto fail_pages;
-	}
-
-	/* extract page list from userspace mapping */
-	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, dma_dir);
-	if (ret) {
-		unsigned long pfn;
-		if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
-			buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
-			buf->size = size;
-			kfree(pages);
-			return buf;
-		}
-
-		pr_err("failed to get user pages\n");
-		goto fail_vma;
+		/*
+		 * Failed to convert to pages... Check the memory is physically
+		 * contiguous and use direct mapping
+		 */
+		for (i = 1; i < n_pages; i++)
+			if (nums[i-1] + 1 != nums[i])
+				goto fail_pfnvec;
+		buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
+		goto out;
 	}
 
 	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 	if (!sgt) {
 		pr_err("failed to allocate sg table\n");
 		ret = -ENOMEM;
-		goto fail_get_user_pages;
+		goto fail_pfnvec;
 	}
 
-	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
+	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
 		offset, size, GFP_KERNEL);
 	if (ret) {
 		pr_err("failed to initialize sg table\n");
 		goto fail_sgt;
 	}
 
-	/* pages are no longer needed */
-	kfree(pages);
-	pages = NULL;
-
 	/*
 	 * No need to sync to the device, this will happen later when the
 	 * prepare() memop is called.
@@ -691,8 +561,9 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	}
 
 	buf->dma_addr = sg_dma_address(sgt->sgl);
-	buf->size = size;
 	buf->dma_sgt = sgt;
+out:
+	buf->size = size;
 
 	return buf;
 
@@ -701,23 +572,13 @@ fail_map_sg:
 			   buf->dma_dir, &attrs);
 
 fail_sgt_init:
-	if (!vma_is_io(buf->vma))
-		vb2_dc_sgt_foreach_page(sgt, put_page);
 	sg_free_table(sgt);
 
 fail_sgt:
 	kfree(sgt);
 
-fail_get_user_pages:
-	if (pages && !vma_is_io(buf->vma))
-		while (n_pages)
-			put_page(pages[--n_pages]);
-
-fail_vma:
-	vb2_put_vma(buf->vma);
-
-fail_pages:
-	kfree(pages); /* kfree is NULL-proof */
+fail_pfnvec:
+	vb2_destroy_framevec(vec);
 
 fail_buf:
 	kfree(buf);
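
The USERPTR path above falls back to a direct pfn mapping when the pinned range has no struct pages behind it (typically VM_IO/VM_PFNMAP mappings). Below is a condensed sketch of that fallback using only the frame-vector accessors visible in this patch; vb2_dc_map_contig_pfns() is a hypothetical helper name for illustration and error handling is trimmed, vb2_dc_pfn_to_dma() comes from the surrounding driver.

/* Hypothetical helper illustrating the pfn-contiguity fallback above. */
static int vb2_dc_map_contig_pfns(struct vb2_dc_buf *buf, struct frame_vector *vec)
{
	unsigned long *nums = frame_vector_pfns(vec);
	int n_pages = frame_vector_count(vec);
	int i;

	/* accept the buffer only if the pfns form one physically contiguous run */
	for (i = 1; i < n_pages; i++)
		if (nums[i - 1] + 1 != nums[i])
			return -EINVAL;

	buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
	return 0;
}
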
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 7289b81bd7b7..be7bd6535c9d 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -38,6 +38,7 @@ struct vb2_dma_sg_buf {
 	struct device			*dev;
 	void				*vaddr;
 	struct page			**pages;
+	struct frame_vector		*vec;
 	int				offset;
 	enum dma_data_direction	dma_dir;
 	struct sg_table			sg_table;
@@ -51,7 +52,6 @@ struct vb2_dma_sg_buf {
 	unsigned int			num_pages;
 	atomic_t			refcount;
 	struct vb2_vmarea_handler	handler;
-	struct vm_area_struct		*vma;
 
 	struct dma_buf_attachment	*db_attach;
 };
@@ -225,25 +225,17 @@ static void vb2_dma_sg_finish(void *buf_priv)
 	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
 }
 
-static inline int vma_is_io(struct vm_area_struct *vma)
-{
-	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
-}
-
 static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 				    unsigned long size,
 				    enum dma_data_direction dma_dir)
 {
 	struct vb2_dma_sg_conf *conf = alloc_ctx;
 	struct vb2_dma_sg_buf *buf;
-	unsigned long first, last;
-	int num_pages_from_user;
-	struct vm_area_struct *vma;
 	struct sg_table *sgt;
 	DEFINE_DMA_ATTRS(attrs);
+	struct frame_vector *vec;
 
 	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
-
 	buf = kzalloc(sizeof *buf, GFP_KERNEL);
 	if (!buf)
 		return NULL;
@@ -254,61 +246,19 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	buf->offset = vaddr & ~PAGE_MASK;
 	buf->size = size;
 	buf->dma_sgt = &buf->sg_table;
+	vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE);
+	if (IS_ERR(vec))
+		goto userptr_fail_pfnvec;
+	buf->vec = vec;
 
-	first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
-	last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
-	buf->num_pages = last - first + 1;
-
-	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
-			     GFP_KERNEL);
-	if (!buf->pages)
-		goto userptr_fail_alloc_pages;
-
-	vma = find_vma(current->mm, vaddr);
-	if (!vma) {
-		dprintk(1, "no vma for address %lu\n", vaddr);
-		goto userptr_fail_find_vma;
-	}
-
-	if (vma->vm_end < vaddr + size) {
-		dprintk(1, "vma at %lu is too small for %lu bytes\n",
-			vaddr, size);
-		goto userptr_fail_find_vma;
-	}
-
-	buf->vma = vb2_get_vma(vma);
-	if (!buf->vma) {
-		dprintk(1, "failed to copy vma\n");
-		goto userptr_fail_find_vma;
-	}
-
-	if (vma_is_io(buf->vma)) {
-		for (num_pages_from_user = 0;
-		     num_pages_from_user < buf->num_pages;
-		     ++num_pages_from_user, vaddr += PAGE_SIZE) {
-			unsigned long pfn;
-
-			if (follow_pfn(vma, vaddr, &pfn)) {
-				dprintk(1, "no page for address %lu\n", vaddr);
-				break;
-			}
-			buf->pages[num_pages_from_user] = pfn_to_page(pfn);
-		}
-	} else
-		num_pages_from_user = get_user_pages(current, current->mm,
-					     vaddr & PAGE_MASK,
-					     buf->num_pages,
-					     buf->dma_dir == DMA_FROM_DEVICE,
-					     1, /* force */
-					     buf->pages,
-					     NULL);
-
-	if (num_pages_from_user != buf->num_pages)
-		goto userptr_fail_get_user_pages;
+	buf->pages = frame_vector_pages(vec);
+	if (IS_ERR(buf->pages))
+		goto userptr_fail_sgtable;
+	buf->num_pages = frame_vector_count(vec);
 
 	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
 			buf->num_pages, buf->offset, size, 0))
-		goto userptr_fail_alloc_table_from_pages;
+		goto userptr_fail_sgtable;
 
 	sgt = &buf->sg_table;
 	/*
@@ -324,17 +274,9 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 
 userptr_fail_map:
 	sg_free_table(&buf->sg_table);
-userptr_fail_alloc_table_from_pages:
-userptr_fail_get_user_pages:
-	dprintk(1, "get_user_pages requested/got: %d/%d]\n",
-		buf->num_pages, num_pages_from_user);
-	if (!vma_is_io(buf->vma))
-		while (--num_pages_from_user >= 0)
-			put_page(buf->pages[num_pages_from_user]);
-	vb2_put_vma(buf->vma);
-userptr_fail_find_vma:
-	kfree(buf->pages);
-userptr_fail_alloc_pages:
+userptr_fail_sgtable:
+	vb2_destroy_framevec(vec);
+userptr_fail_pfnvec:
 	kfree(buf);
 	return NULL;
 }
@@ -362,11 +304,8 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
 	while (--i >= 0) {
 		if (buf->dma_dir == DMA_FROM_DEVICE)
 			set_page_dirty_lock(buf->pages[i]);
-		if (!vma_is_io(buf->vma))
-			put_page(buf->pages[i]);
 	}
-	kfree(buf->pages);
-	vb2_put_vma(buf->vma);
+	vb2_destroy_framevec(buf->vec);
 	kfree(buf);
 }
 
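
With the change above, the dma-sg USERPTR setup reduces to pinning the range once and handing the resulting page array to the scatterlist helper. The fragment below is condensed from vb2_dma_sg_get_userptr() in this patch as a reading aid only; error unwinding and the later DMA mapping are omitted.

/* Condensed from vb2_dma_sg_get_userptr() above; sketch, not a replacement. */
vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE);
if (IS_ERR(vec))
	goto fail;
buf->vec = vec;
buf->pages = frame_vector_pages(vec);	/* ERR_PTR if the mapping had no struct pages */
if (IS_ERR(buf->pages))
	goto fail;
buf->num_pages = frame_vector_count(vec);
if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages, buf->num_pages,
			      buf->offset, size, 0))
	goto fail;
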
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index 0d49b7951f84..48c6a49c4928 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -23,118 +23,62 @@
 #include <media/videobuf2-memops.h>
 
 /**
- * vb2_get_vma() - acquire and lock the virtual memory area
- * @vma: given virtual memory area
+ * vb2_create_framevec() - map virtual addresses to pfns
+ * @start:	Virtual user address where we start mapping
+ * @length:	Length of a range to map
+ * @write:	Should we map for writing into the area
  *
- * This function attempts to acquire an area mapped in the userspace for
- * the duration of a hardware operation. The area is "locked" by performing
- * the same set of operation that are done when process calls fork() and
- * memory areas are duplicated.
- *
- * Returns a copy of a virtual memory region on success or NULL.
- */
-struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma)
-{
-	struct vm_area_struct *vma_copy;
-
-	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
-	if (vma_copy == NULL)
-		return NULL;
-
-	if (vma->vm_ops && vma->vm_ops->open)
-		vma->vm_ops->open(vma);
-
-	if (vma->vm_file)
-		get_file(vma->vm_file);
-
-	memcpy(vma_copy, vma, sizeof(*vma));
-
-	vma_copy->vm_mm = NULL;
-	vma_copy->vm_next = NULL;
-	vma_copy->vm_prev = NULL;
-
-	return vma_copy;
-}
-EXPORT_SYMBOL_GPL(vb2_get_vma);
-
-/**
- * vb2_put_userptr() - release a userspace virtual memory area
- * @vma: virtual memory region associated with the area to be released
- *
- * This function releases the previously acquired memory area after a hardware
- * operation.
+ * This function allocates and fills in a vector with pfns corresponding to
+ * virtual address range passed in arguments. If pfns have corresponding pages,
+ * page references are also grabbed to pin pages in memory. The function
+ * returns pointer to the vector on success and error pointer in case of
+ * failure. Returned vector needs to be freed via vb2_destroy_framevec().
  */
-void vb2_put_vma(struct vm_area_struct *vma)
+struct frame_vector *vb2_create_framevec(unsigned long start,
+					 unsigned long length,
+					 bool write)
 {
-	if (!vma)
-		return;
-
-	if (vma->vm_ops && vma->vm_ops->close)
-		vma->vm_ops->close(vma);
-
-	if (vma->vm_file)
-		fput(vma->vm_file);
-
-	kfree(vma);
+	int ret;
+	unsigned long first, last;
+	unsigned long nr;
+	struct frame_vector *vec;
+
+	first = start >> PAGE_SHIFT;
+	last = (start + length - 1) >> PAGE_SHIFT;
+	nr = last - first + 1;
+	vec = frame_vector_create(nr);
+	if (!vec)
+		return ERR_PTR(-ENOMEM);
+	ret = get_vaddr_frames(start, nr, write, 1, vec);
+	if (ret < 0)
+		goto out_destroy;
+	/* We accept only complete set of PFNs */
+	if (ret != nr) {
+		ret = -EFAULT;
+		goto out_release;
+	}
+	return vec;
+out_release:
+	put_vaddr_frames(vec);
+out_destroy:
+	frame_vector_destroy(vec);
+	return ERR_PTR(ret);
 }
-EXPORT_SYMBOL_GPL(vb2_put_vma);
+EXPORT_SYMBOL(vb2_create_framevec);
 
 /**
- * vb2_get_contig_userptr() - lock physically contiguous userspace mapped memory
- * @vaddr: starting virtual address of the area to be verified
- * @size: size of the area
- * @res_paddr: will return physical address for the given vaddr
- * @res_vma: will return locked copy of struct vm_area for the given area
+ * vb2_destroy_framevec() - release vector of mapped pfns
+ * @vec:	vector of pfns / pages to release
  *
- * This function will go through memory area of size @size mapped at @vaddr and
- * verify that the underlying physical pages are contiguous. If they are
- * contiguous the virtual memory area is locked and a @res_vma is filled with
- * the copy and @res_pa set to the physical address of the buffer.
- *
- * Returns 0 on success.
+ * This releases references to all pages in the vector @vec (if corresponding
+ * pfns are backed by pages) and frees the passed vector.
  */
-int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
-			   struct vm_area_struct **res_vma, dma_addr_t *res_pa)
+void vb2_destroy_framevec(struct frame_vector *vec)
 {
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long offset, start, end;
-	unsigned long this_pfn, prev_pfn;
-	dma_addr_t pa = 0;
-
-	start = vaddr;
-	offset = start & ~PAGE_MASK;
-	end = start + size;
-
-	vma = find_vma(mm, start);
-
-	if (vma == NULL || vma->vm_end < end)
-		return -EFAULT;
-
-	for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
-		int ret = follow_pfn(vma, start, &this_pfn);
-		if (ret)
-			return ret;
-
-		if (prev_pfn == 0)
-			pa = this_pfn << PAGE_SHIFT;
-		else if (this_pfn != prev_pfn + 1)
-			return -EFAULT;
-
-		prev_pfn = this_pfn;
-	}
-
-	/*
-	 * Memory is contiguous, lock vma and return to the caller
-	 */
-	*res_vma = vb2_get_vma(vma);
-	if (*res_vma == NULL)
-		return -ENOMEM;
-
-	*res_pa = pa + offset;
-	return 0;
+	put_vaddr_frames(vec);
+	frame_vector_destroy(vec);
 }
-EXPORT_SYMBOL_GPL(vb2_get_contig_userptr);
+EXPORT_SYMBOL(vb2_destroy_framevec);
 
 /**
  * vb2_common_vm_open() - increase refcount of the vma
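
The two helpers added above are the whole of the new memops API. A minimal usage sketch follows; my_pin_user_buffer() is a hypothetical caller written for illustration only, the real callers are the dma-contig, dma-sg and vmalloc changes in this patch.

/* Hypothetical caller of the helpers above; error handling trimmed. */
static int my_pin_user_buffer(unsigned long vaddr, unsigned long size, bool write)
{
	struct frame_vector *vec;

	vec = vb2_create_framevec(vaddr, size, write);
	if (IS_ERR(vec))
		return PTR_ERR(vec);

	if (frame_vector_to_pages(vec) >= 0) {
		struct page **pages = frame_vector_pages(vec);
		/* hand pages to sg_alloc_table_from_pages(), vm_map_ram(), ... */
	} else {
		unsigned long *pfns = frame_vector_pfns(vec);
		/* only raw pfns (VM_IO/VM_PFNMAP); check contiguity, ioremap, ... */
	}

	/* when the hardware is done with the buffer: */
	vb2_destroy_framevec(vec);	/* drops page references, frees the vector */
	return 0;
}
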
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index 2fe4c27f524a..ecb8f0c7f025 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -23,11 +23,9 @@
 
 struct vb2_vmalloc_buf {
 	void				*vaddr;
-	struct page			**pages;
-	struct vm_area_struct		*vma;
+	struct frame_vector		*vec;
 	enum dma_data_direction	dma_dir;
 	unsigned long			size;
-	unsigned int			n_pages;
 	atomic_t			refcount;
 	struct vb2_vmarea_handler	handler;
 	struct dma_buf			*dbuf;
@@ -76,10 +74,8 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 					      enum dma_data_direction dma_dir)
 {
 	struct vb2_vmalloc_buf *buf;
-	unsigned long first, last;
-	int n_pages, offset;
-	struct vm_area_struct *vma;
-	dma_addr_t physp;
+	struct frame_vector *vec;
+	int n_pages, offset, i;
 
 	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 	if (!buf)
@@ -88,51 +84,36 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	buf->dma_dir = dma_dir;
 	offset = vaddr & ~PAGE_MASK;
 	buf->size = size;
-
-
-	vma = find_vma(current->mm, vaddr);
-	if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) {
-		if (vb2_get_contig_userptr(vaddr, size, &vma, &physp))
-			goto fail_pages_array_alloc;
-		buf->vma = vma;
-		buf->vaddr = (__force void *)ioremap_nocache(physp, size);
-		if (!buf->vaddr)
-			goto fail_pages_array_alloc;
+	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
+	if (IS_ERR(vec))
+		goto fail_pfnvec_create;
+	buf->vec = vec;
+	n_pages = frame_vector_count(vec);
+	if (frame_vector_to_pages(vec) < 0) {
+		unsigned long *nums = frame_vector_pfns(vec);
+
+		/*
+		 * We cannot get page pointers for these pfns. Check memory is
+		 * physically contiguous and use direct mapping.
+		 */
+		for (i = 1; i < n_pages; i++)
+			if (nums[i-1] + 1 != nums[i])
+				goto fail_map;
+		buf->vaddr = (__force void *)
+			ioremap_nocache(nums[0] << PAGE_SHIFT, size);
 	} else {
-		first = vaddr >> PAGE_SHIFT;
-		last = (vaddr + size - 1) >> PAGE_SHIFT;
-		buf->n_pages = last - first + 1;
-		buf->pages = kzalloc(buf->n_pages * sizeof(struct page *),
-				     GFP_KERNEL);
-		if (!buf->pages)
-			goto fail_pages_array_alloc;
-
-		/* current->mm->mmap_sem is taken by videobuf2 core */
-		n_pages = get_user_pages(current, current->mm,
-					 vaddr & PAGE_MASK, buf->n_pages,
-					 dma_dir == DMA_FROM_DEVICE,
-					 1, /* force */
-					 buf->pages, NULL);
-		if (n_pages != buf->n_pages)
-			goto fail_get_user_pages;
-
-		buf->vaddr = vm_map_ram(buf->pages, buf->n_pages, -1,
+		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
 					PAGE_KERNEL);
-		if (!buf->vaddr)
-			goto fail_get_user_pages;
 	}
 
+	if (!buf->vaddr)
+		goto fail_map;
 	buf->vaddr += offset;
 	return buf;
 
-fail_get_user_pages:
-	pr_debug("get_user_pages requested/got: %d/%d]\n", n_pages,
-		 buf->n_pages);
-	while (--n_pages >= 0)
-		put_page(buf->pages[n_pages]);
-	kfree(buf->pages);
-
-fail_pages_array_alloc:
+fail_map:
+	vb2_destroy_framevec(vec);
+fail_pfnvec_create:
 	kfree(buf);
 
 	return NULL;
@@ -143,20 +124,21 @@ static void vb2_vmalloc_put_userptr(void *buf_priv)
 	struct vb2_vmalloc_buf *buf = buf_priv;
 	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
 	unsigned int i;
+	struct page **pages;
+	unsigned int n_pages;
 
-	if (buf->pages) {
+	if (!buf->vec->is_pfns) {
+		n_pages = frame_vector_count(buf->vec);
+		pages = frame_vector_pages(buf->vec);
 		if (vaddr)
-			vm_unmap_ram((void *)vaddr, buf->n_pages);
-		for (i = 0; i < buf->n_pages; ++i) {
-			if (buf->dma_dir == DMA_FROM_DEVICE)
-				set_page_dirty_lock(buf->pages[i]);
-			put_page(buf->pages[i]);
-		}
-		kfree(buf->pages);
+			vm_unmap_ram((void *)vaddr, n_pages);
+		if (buf->dma_dir == DMA_FROM_DEVICE)
+			for (i = 0; i < n_pages; i++)
+				set_page_dirty_lock(pages[i]);
 	} else {
-		vb2_put_vma(buf->vma);
 		iounmap((__force void __iomem *)buf->vaddr);
 	}
+	vb2_destroy_framevec(buf->vec);
 	kfree(buf);
 }
 
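
One detail worth noting in the vmalloc conversion: the frame vector itself records whether it ended up holding struct pages or raw pfns (the is_pfns flag used above), so the release path can pick the matching teardown without the driver keeping separate state. A trimmed sketch of that branch, with field names as in the patch and the if (vaddr) guard omitted:

/* Trimmed from vb2_vmalloc_put_userptr() above. */
if (!buf->vec->is_pfns) {
	/* page-backed: undo vm_map_ram() and dirty pages the device wrote to */
	vm_unmap_ram((void *)vaddr, frame_vector_count(buf->vec));
	if (buf->dma_dir == DMA_FROM_DEVICE) {
		struct page **pages = frame_vector_pages(buf->vec);
		unsigned int i;

		for (i = 0; i < frame_vector_count(buf->vec); i++)
			set_page_dirty_lock(pages[i]);
	}
} else {
	/* pfn-only: the buffer was ioremapped, so just iounmap it */
	iounmap((__force void __iomem *)buf->vaddr);
}
vb2_destroy_framevec(buf->vec);	/* drops page references in either case */
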