author     Jan Kara <jack@suse.cz>                          2015-07-13 10:55:49 -0400
committer  Mauro Carvalho Chehab <mchehab@osg.samsung.com>  2015-08-16 12:13:46 -0400
commit     fb639eb39154312af8bf08c58cc0142179e0c224
tree       950f3e92fe7762b9f1eb966fc436aa1ae83ff4bc
parent     5a9e4dec393a2c5a01be6adc63065059b367d532
[media] media: vb2: Convert vb2_dc_get_userptr() to use frame vector
Convert vb2_dc_get_userptr() to use the frame vector infrastructure. With
that conversion there is no need to allocate a page array, and some of the
code can be simplified.
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
-rw-r--r--   drivers/media/v4l2-core/videobuf2-dma-contig.c   212
1 file changed, 34 insertions, 178 deletions
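The conversion leans on the frame vector helpers that the diff itself calls
(vb2_create_framevec(), frame_vector_count(), frame_vector_to_pages(),
frame_vector_pages(), frame_vector_pfns(), vb2_destroy_framevec()). As
orientation, here is a minimal sketch of that lifecycle, assuming a
hypothetical wrapper named pin_user_range(): pin the range once, prefer
struct pages, fall back to raw PFNs, then unpin. It is illustrative only,
not code from the patch.

/*
 * Illustrative sketch (not part of the patch): the frame vector
 * lifecycle followed by the converted code. pin_user_range() is a
 * hypothetical helper name.
 */
#include <linux/err.h>
#include <linux/mm.h>                   /* frame_vector_*() */
#include <linux/printk.h>
#include <media/videobuf2-memops.h>     /* vb2_create_framevec() */

static int pin_user_range(unsigned long vaddr, unsigned long size, bool write)
{
	struct frame_vector *vec;
	int n_pages, ret;

	/*
	 * Pin the whole range in one call; VM_IO/VM_PFNMAP mappings are
	 * recorded as raw PFNs instead of pinned pages.
	 */
	vec = vb2_create_framevec(vaddr, size, write);
	if (IS_ERR(vec))
		return PTR_ERR(vec);
	n_pages = frame_vector_count(vec);

	/* Ask for struct pages; this fails for PFN-only mappings. */
	ret = frame_vector_to_pages(vec);
	if (ret >= 0) {
		struct page **pages = frame_vector_pages(vec);

		pr_info("pinned %d pages, first pfn %lx\n",
			n_pages, page_to_pfn(pages[0]));
		/* ... e.g. build an sg_table from pages[0..n_pages-1] ... */
		ret = 0;
	} else {
		/* ... fall back to frame_vector_pfns(vec), as the diff does */
	}

	vb2_destroy_framevec(vec);      /* unpin pages, free the vector */
	return ret;
}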
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index c548ce425701..2397ceb1dc6b 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -32,15 +32,13 @@ struct vb2_dc_buf {
 	dma_addr_t			dma_addr;
 	enum dma_data_direction		dma_dir;
 	struct sg_table			*dma_sgt;
+	struct frame_vector		*vec;
 
 	/* MMAP related */
 	struct vb2_vmarea_handler	handler;
 	atomic_t			refcount;
 	struct sg_table			*sgt_base;
 
-	/* USERPTR related */
-	struct vm_area_struct		*vma;
-
 	/* DMABUF related */
 	struct dma_buf_attachment	*db_attach;
 };
@@ -49,24 +47,6 @@ struct vb2_dc_buf {
 /* scatterlist table functions */
 /*********************************************/
 
-
-static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
-	void (*cb)(struct page *pg))
-{
-	struct scatterlist *s;
-	unsigned int i;
-
-	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
-		struct page *page = sg_page(s);
-		unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
-			>> PAGE_SHIFT;
-		unsigned int j;
-
-		for (j = 0; j < n_pages; ++j, ++page)
-			cb(page);
-	}
-}
-
 static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
 {
 	struct scatterlist *s;
@@ -429,92 +409,12 @@ static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
 /* callbacks for USERPTR buffers */
 /*********************************************/
 
-static inline int vma_is_io(struct vm_area_struct *vma)
-{
-	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
-}
-
-static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
-	struct vm_area_struct *vma, unsigned long *res)
-{
-	unsigned long pfn, start_pfn, prev_pfn;
-	unsigned int i;
-	int ret;
-
-	if (!vma_is_io(vma))
-		return -EFAULT;
-
-	ret = follow_pfn(vma, start, &pfn);
-	if (ret)
-		return ret;
-
-	start_pfn = pfn;
-	start += PAGE_SIZE;
-
-	for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
-		prev_pfn = pfn;
-		ret = follow_pfn(vma, start, &pfn);
-
-		if (ret) {
-			pr_err("no page for address %lu\n", start);
-			return ret;
-		}
-		if (pfn != prev_pfn + 1)
-			return -EINVAL;
-	}
-
-	*res = start_pfn;
-	return 0;
-}
-
-static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
-	int n_pages, struct vm_area_struct *vma,
-	enum dma_data_direction dma_dir)
-{
-	if (vma_is_io(vma)) {
-		unsigned int i;
-
-		for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
-			unsigned long pfn;
-			int ret = follow_pfn(vma, start, &pfn);
-
-			if (!pfn_valid(pfn))
-				return -EINVAL;
-
-			if (ret) {
-				pr_err("no page for address %lu\n", start);
-				return ret;
-			}
-			pages[i] = pfn_to_page(pfn);
-		}
-	} else {
-		int n;
-
-		n = get_user_pages(current, current->mm, start & PAGE_MASK,
-			n_pages, dma_dir == DMA_FROM_DEVICE, 1, pages, NULL);
-		/* negative error means that no page was pinned */
-		n = max(n, 0);
-		if (n != n_pages) {
-			pr_err("got only %d of %d user pages\n", n, n_pages);
-			while (n)
-				put_page(pages[--n]);
-			return -EFAULT;
-		}
-	}
-
-	return 0;
-}
-
-static void vb2_dc_put_dirty_page(struct page *page)
-{
-	set_page_dirty_lock(page);
-	put_page(page);
-}
-
 static void vb2_dc_put_userptr(void *buf_priv)
 {
 	struct vb2_dc_buf *buf = buf_priv;
 	struct sg_table *sgt = buf->dma_sgt;
+	int i;
+	struct page **pages;
 
 	if (sgt) {
 		DEFINE_DMA_ATTRS(attrs);
@@ -526,13 +426,15 @@ static void vb2_dc_put_userptr(void *buf_priv)
 		 */
 		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
 				   buf->dma_dir, &attrs);
-		if (!vma_is_io(buf->vma))
-			vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
-
+		pages = frame_vector_pages(buf->vec);
+		/* sgt should exist only if vector contains pages... */
+		BUG_ON(IS_ERR(pages));
+		for (i = 0; i < frame_vector_count(buf->vec); i++)
+			set_page_dirty_lock(pages[i]);
 		sg_free_table(sgt);
 		kfree(sgt);
 	}
-	vb2_put_vma(buf->vma);
+	vb2_destroy_framevec(buf->vec);
 	kfree(buf);
 }
 
@@ -572,13 +474,10 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 {
 	struct vb2_dc_conf *conf = alloc_ctx;
 	struct vb2_dc_buf *buf;
-	unsigned long start;
-	unsigned long end;
+	struct frame_vector *vec;
 	unsigned long offset;
-	struct page **pages;
-	int n_pages;
+	int n_pages, i;
 	int ret = 0;
-	struct vm_area_struct *vma;
 	struct sg_table *sgt;
 	unsigned long contig_size;
 	unsigned long dma_align = dma_get_cache_alignment();
@@ -604,75 +503,43 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	buf->dev = conf->dev;
 	buf->dma_dir = dma_dir;
 
-	start = vaddr & PAGE_MASK;
 	offset = vaddr & ~PAGE_MASK;
-	end = PAGE_ALIGN(vaddr + size);
-	n_pages = (end - start) >> PAGE_SHIFT;
-
-	pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
-	if (!pages) {
-		ret = -ENOMEM;
-		pr_err("failed to allocate pages table\n");
+	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
+	if (IS_ERR(vec)) {
+		ret = PTR_ERR(vec);
 		goto fail_buf;
 	}
+	buf->vec = vec;
+	n_pages = frame_vector_count(vec);
+	ret = frame_vector_to_pages(vec);
+	if (ret < 0) {
+		unsigned long *nums = frame_vector_pfns(vec);
 
-	down_read(&current->mm->mmap_sem);
-	/* current->mm->mmap_sem is taken by videobuf2 core */
-	vma = find_vma(current->mm, vaddr);
-	if (!vma) {
-		pr_err("no vma for address %lu\n", vaddr);
-		ret = -EFAULT;
-		goto fail_pages;
-	}
-
-	if (vma->vm_end < vaddr + size) {
-		pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
-		ret = -EFAULT;
-		goto fail_pages;
-	}
-
-	buf->vma = vb2_get_vma(vma);
-	if (!buf->vma) {
-		pr_err("failed to copy vma\n");
-		ret = -ENOMEM;
-		goto fail_pages;
-	}
-
-	/* extract page list from userspace mapping */
-	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, dma_dir);
-	if (ret) {
-		unsigned long pfn;
-		if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
-			up_read(&current->mm->mmap_sem);
-			buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
-			buf->size = size;
-			kfree(pages);
-			return buf;
-		}
-
-		pr_err("failed to get user pages\n");
-		goto fail_vma;
+		/*
+		 * Failed to convert to pages... Check the memory is physically
+		 * contiguous and use direct mapping
+		 */
+		for (i = 1; i < n_pages; i++)
+			if (nums[i-1] + 1 != nums[i])
+				goto fail_pfnvec;
+		buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
+		goto out;
 	}
-	up_read(&current->mm->mmap_sem);
 
 	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 	if (!sgt) {
 		pr_err("failed to allocate sg table\n");
 		ret = -ENOMEM;
-		goto fail_get_user_pages;
+		goto fail_pfnvec;
 	}
 
-	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
-		offset, size, GFP_KERNEL);
+	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
+		offset, size, GFP_KERNEL);
 	if (ret) {
 		pr_err("failed to initialize sg table\n");
 		goto fail_sgt;
 	}
 
-	/* pages are no longer needed */
-	kfree(pages);
-	pages = NULL;
-
 	/*
 	 * No need to sync to the device, this will happen later when the
 	 * prepare() memop is called.
@@ -694,8 +561,9 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	}
 
 	buf->dma_addr = sg_dma_address(sgt->sgl);
-	buf->size = size;
 	buf->dma_sgt = sgt;
+out:
+	buf->size = size;
 
 	return buf;
 
@@ -704,25 +572,13 @@ fail_map_sg:
 			   buf->dma_dir, &attrs);
 
 fail_sgt_init:
-	if (!vma_is_io(buf->vma))
-		vb2_dc_sgt_foreach_page(sgt, put_page);
 	sg_free_table(sgt);
 
 fail_sgt:
 	kfree(sgt);
 
-fail_get_user_pages:
-	if (pages && !vma_is_io(buf->vma))
-		while (n_pages)
-			put_page(pages[--n_pages]);
-
-	down_read(&current->mm->mmap_sem);
-fail_vma:
-	vb2_put_vma(buf->vma);
-
-fail_pages:
-	up_read(&current->mm->mmap_sem);
-	kfree(pages); /* kfree is NULL-proof */
+fail_pfnvec:
+	vb2_destroy_framevec(vec);
 
 fail_buf:
 	kfree(buf);
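A note on the new fallback path above: when frame_vector_to_pages() fails,
the buffer is usable by dma-contig only if its PFNs form one physically
contiguous run, which the patch verifies before calling vb2_dc_pfn_to_dma().
Below is a standalone, userspace-compilable sketch of that check; the
function name and the example PFN values are hypothetical, but the loop is
equivalent to the patch's walk over frame_vector_pfns().

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical standalone version of the patch's contiguity test. */
static bool pfns_are_contiguous(const unsigned long *pfns, int n_pages)
{
	int i;

	for (i = 1; i < n_pages; i++)
		if (pfns[i - 1] + 1 != pfns[i])
			return false;	/* gap: not mappable as one chunk */
	return true;
}

int main(void)
{
	/* Example PFNs: the second array has a hole after the second frame. */
	unsigned long contig[] = { 0x1000, 0x1001, 0x1002 };
	unsigned long sparse[] = { 0x1000, 0x1001, 0x2000 };

	printf("contig: %d\n", pfns_are_contiguous(contig, 3));	/* 1 */
	printf("sparse: %d\n", pfns_are_contiguous(sparse, 3));	/* 0 */
	return 0;
}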