author     Marek Szyprowski <m.szyprowski@samsung.com>      2013-06-19 07:56:46 -0400
committer  Mauro Carvalho Chehab <m.chehab@samsung.com>     2013-09-24 12:41:38 -0400
commit     774d23010f6a7d7d299552f0b1edfbe1ec82c5e7 (patch)
tree       bc7a8ad585e0254b27cee8b21e66dd873bd0f82e
parent     3c5c23c57717bf134a3c3f4af5886c7e08500e34 (diff)
[media] videobuf2-dc: Fix support for mappings without struct page in userptr mode
An earlier version of the dma-contig allocator in userptr mode assumed that
the DMA address always equals the physical address; that was just a special
case. Commit e15dab752d4c588544ccabdbe020a7cc092e23c8 introduced correct
support for converting a user page to a DMA address, but it unfortunately
broke the simple dma address == physical address path for the case where the
given physical frame has no struct page associated with it (this happens,
for example, when one uses the dma_declare_coherent API or another
reserved-memory approach). This commit restores support for such cases.
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
[s.nawrocki@samsung.com: replaced #elsif with #elif]
Signed-off-by: Sylwester Nawrocki <s.nawrocki@samsung.com>
Signed-off-by: Mauro Carvalho Chehab <m.chehab@samsung.com>
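
For context, the reserved-memory scenario the commit message describes can be
set up roughly as in the following sketch. This is not part of the commit; it
assumes a hypothetical driver with a made-up reserved region (MY_RESERVED_BASE,
MY_RESERVED_SIZE) and uses the dma_declare_coherent_memory() API as it existed
around this kernel version.

/*
 * Hypothetical sketch, not part of this commit: a driver declaring a
 * reserved physical region as device-coherent memory. Buffers handed out
 * from this pool are not backed by struct page, which is exactly the case
 * the userptr fallback in this patch handles. MY_RESERVED_BASE and
 * MY_RESERVED_SIZE are made-up placeholders.
 */
#include <linux/dma-mapping.h>

static int my_drv_declare_reserved(struct device *dev)
{
        int ret;

        ret = dma_declare_coherent_memory(dev,
                                          MY_RESERVED_BASE, /* bus address */
                                          MY_RESERVED_BASE, /* device address */
                                          MY_RESERVED_SIZE,
                                          DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE);
        if (!ret)       /* this era of the API returns 0 on failure */
                return -ENOMEM;

        return 0;
}

Memory mapped to userspace from such a pool shows up as a VM_PFNMAP vma,
which is the mapping type the new vb2_dc_get_user_pfn() below accepts.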
-rw-r--r--   drivers/media/v4l2-core/videobuf2-dma-contig.c   87
1 file changed, 82 insertions(+), 5 deletions(-)
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index fd56f2563201..646f08f4f504 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -423,6 +423,39 @@ static inline int vma_is_io(struct vm_area_struct *vma)
 	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
 }
 
+static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
+	struct vm_area_struct *vma, unsigned long *res)
+{
+	unsigned long pfn, start_pfn, prev_pfn;
+	unsigned int i;
+	int ret;
+
+	if (!vma_is_io(vma))
+		return -EFAULT;
+
+	ret = follow_pfn(vma, start, &pfn);
+	if (ret)
+		return ret;
+
+	start_pfn = pfn;
+	start += PAGE_SIZE;
+
+	for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
+		prev_pfn = pfn;
+		ret = follow_pfn(vma, start, &pfn);
+
+		if (ret) {
+			pr_err("no page for address %lu\n", start);
+			return ret;
+		}
+		if (pfn != prev_pfn + 1)
+			return -EINVAL;
+	}
+
+	*res = start_pfn;
+	return 0;
+}
+
 static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
 	int n_pages, struct vm_area_struct *vma, int write)
 {
@@ -433,6 +466,9 @@ static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
 		unsigned long pfn;
 		int ret = follow_pfn(vma, start, &pfn);
 
+		if (!pfn_valid(pfn))
+			return -EINVAL;
+
 		if (ret) {
 			pr_err("no page for address %lu\n", start);
 			return ret;
@@ -468,16 +504,49 @@ static void vb2_dc_put_userptr(void *buf_priv)
 	struct vb2_dc_buf *buf = buf_priv;
 	struct sg_table *sgt = buf->dma_sgt;
 
-	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
-	if (!vma_is_io(buf->vma))
-		vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
+	if (sgt) {
+		dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+		if (!vma_is_io(buf->vma))
+			vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
 
-	sg_free_table(sgt);
-	kfree(sgt);
+		sg_free_table(sgt);
+		kfree(sgt);
+	}
 	vb2_put_vma(buf->vma);
 	kfree(buf);
 }
 
+/*
+ * For some kinds of reserved memory there might be no struct page available,
+ * so all that can be done to support such 'pages' is to try to convert
+ * the pfn to a dma address, or as a last resort just assume that
+ * dma address == physical address (as was assumed in earlier versions
+ * of videobuf2-dma-contig).
+ */
+
+#ifdef __arch_pfn_to_dma
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
+}
+#elif defined(__pfn_to_bus)
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+	return (dma_addr_t)__pfn_to_bus(pfn);
+}
+#elif defined(__pfn_to_phys)
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+	return (dma_addr_t)__pfn_to_phys(pfn);
+}
+#else
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+	/* really, we cannot do anything better at this point */
+	return (dma_addr_t)(pfn) << PAGE_SHIFT;
+}
+#endif
+
 static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	unsigned long size, int write)
 {
@@ -548,6 +617,14 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	/* extract page list from userspace mapping */
 	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
 	if (ret) {
+		unsigned long pfn;
+		if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
+			buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
+			buf->size = size;
+			kfree(pages);
+			return buf;
+		}
+
 		pr_err("failed to get user pages\n");
 		goto fail_vma;
 	}
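
From the application side, the path restored by this patch is exercised by
queueing a USERPTR buffer whose pages come from such a reserved region. Below
is a sketch under stated assumptions: /dev/my-reserved-mem is a made-up device
node that mmap()s the reserved area; the V4L2 calls themselves are standard.

/*
 * Hypothetical userspace sketch: queue a USERPTR buffer backed by reserved
 * memory. The kernel side of this mapping is VM_PFNMAP, with no struct page,
 * so vb2_dc_get_userptr() resolves it via the new pfn fallback.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

static int queue_reserved_buffer(int video_fd)
{
        size_t size = 1 << 20;  /* 1 MiB, illustrative */
        int mem_fd;
        void *addr;
        struct v4l2_buffer buf;

        /* made-up device node exporting the reserved region */
        mem_fd = open("/dev/my-reserved-mem", O_RDWR);
        if (mem_fd < 0)
                return -1;

        addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, mem_fd, 0);
        if (addr == MAP_FAILED)
                return -1;

        memset(&buf, 0, sizeof(buf));
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_USERPTR;
        buf.index = 0;
        buf.m.userptr = (unsigned long)addr;
        buf.length = size;

        return ioctl(video_fd, VIDIOC_QBUF, &buf);
}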