author		Linus Torvalds <torvalds@linux-foundation.org>	2015-09-11 19:42:39 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-11 19:42:39 -0400
commit		06a660ada2064bbdcd09aeb8173f2ad128c71978 (patch)
tree		1d0172ca83bd0a8c2186fcb41ae750420d7acb14
parent		d9b44fe30fb8637b23f804eab2e7afbce129d714 (diff)
parent		63540f01917c0d8b03b9813a0d6539469b163139 (diff)
Merge tag 'media/v4.3-2' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media
Pull media updates from Mauro Carvalho Chehab:
 "A series of patches that move part of the code used to allocate
  memory from the media subsystem to the mm subsystem"

[ The mm parts have been acked by VM people, and the series was
  apparently in -mm for a while - Linus ]

* tag 'media/v4.3-2' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media:
  [media] drm/exynos: Convert g2d_userptr_get_dma_addr() to use get_vaddr_frames()
  [media] media: vb2: Remove unused functions
  [media] media: vb2: Convert vb2_dc_get_userptr() to use frame vector
  [media] media: vb2: Convert vb2_vmalloc_get_userptr() to use frame vector
  [media] media: vb2: Convert vb2_dma_sg_get_userptr() to use frame vector
  [media] vb2: Provide helpers for mapping virtual addresses
  [media] media: omap_vout: Convert omap_vout_uservirt_to_phys() to use get_vaddr_pfns()
  [media] mm: Provide new get_vaddr_frames() helper
  [media] vb2: Push mmap_sem down to memops
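The heart of the series is the frame_vector API added to include/linux/mm.h and mm/frame_vector.c below. As a minimal sketch of the consumer pattern the converted drivers follow (illustrative only, not part of the patch: pin_user_buffer() is an invented name; the frame_vector_*() and get_vaddr_frames() calls are the ones this merge introduces):

/* Hypothetical driver helper: pin @nr user pages starting at @start. */
static int pin_user_buffer(unsigned long start, unsigned int nr)
{
	struct frame_vector *vec;
	struct page **pages;
	int ret;

	vec = frame_vector_create(nr);
	if (!vec)
		return -ENOMEM;

	/* write = true, force = false: same semantics as for get_user_pages() */
	ret = get_vaddr_frames(start, nr, true, false, vec);
	if (ret < 0)
		goto destroy;
	if (ret != nr) {	/* accept only a complete mapping */
		ret = -EFAULT;
		goto release;
	}

	/*
	 * For a normal vma this yields pinned struct page pointers; for a
	 * VM_IO / VM_PFNMAP vma the conversion fails and frame_vector_pfns()
	 * must be used instead.
	 */
	pages = frame_vector_pages(vec);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto release;
	}

	/* ... hand pages[0..nr-1] to the hardware, then tear down ... */
	ret = 0;
release:
	put_vaddr_frames(vec);
destroy:
	frame_vector_destroy(vec);
	return ret;
}

The put/destroy pairing at the end is exactly what the vb2_destroy_framevec() helper below wraps.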
-rw-r--r--	drivers/gpu/drm/exynos/Kconfig                  |    1
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_g2d.c         |   89
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_gem.c         |   97
-rw-r--r--	drivers/media/platform/omap/Kconfig             |    1
-rw-r--r--	drivers/media/platform/omap/omap_vout.c         |   69
-rw-r--r--	drivers/media/v4l2-core/Kconfig                 |    1
-rw-r--r--	drivers/media/v4l2-core/videobuf2-core.c        |    2
-rw-r--r--	drivers/media/v4l2-core/videobuf2-dma-contig.c  |  207
-rw-r--r--	drivers/media/v4l2-core/videobuf2-dma-sg.c      |   91
-rw-r--r--	drivers/media/v4l2-core/videobuf2-memops.c      |  148
-rw-r--r--	drivers/media/v4l2-core/videobuf2-vmalloc.c     |   90
-rw-r--r--	include/linux/mm.h                              |   44
-rw-r--r--	include/media/videobuf2-memops.h                |   11
-rw-r--r--	mm/Kconfig                                      |    3
-rw-r--r--	mm/Makefile                                     |    1
-rw-r--r--	mm/frame_vector.c                               |  230
16 files changed, 477 insertions(+), 608 deletions(-)
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index df0b61a60501..bd1a4156f647 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -77,6 +77,7 @@ config DRM_EXYNOS_VIDI
 config DRM_EXYNOS_G2D
 	bool "Exynos DRM G2D"
 	depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
+	select FRAME_VECTOR
 	help
 	  Choose this option if you want to use Exynos G2D for DRM.

diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 535b4ad6c4b1..3734c34aed16 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -194,10 +194,8 @@ struct g2d_cmdlist_userptr {
 	dma_addr_t		dma_addr;
 	unsigned long		userptr;
 	unsigned long		size;
-	struct page		**pages;
-	unsigned int		npages;
+	struct frame_vector	*vec;
 	struct sg_table		*sgt;
-	struct vm_area_struct	*vma;
 	atomic_t		refcount;
 	bool			in_pool;
 	bool			out_of_list;
@@ -367,6 +365,7 @@ static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
 {
 	struct g2d_cmdlist_userptr *g2d_userptr =
 					(struct g2d_cmdlist_userptr *)obj;
+	struct page **pages;

 	if (!obj)
 		return;
@@ -386,19 +385,21 @@ out:
 	exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
 					DMA_BIDIRECTIONAL);

-	exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
-					g2d_userptr->npages,
-					g2d_userptr->vma);
-
-	exynos_gem_put_vma(g2d_userptr->vma);
+	pages = frame_vector_pages(g2d_userptr->vec);
+	if (!IS_ERR(pages)) {
+		int i;
+
+		for (i = 0; i < frame_vector_count(g2d_userptr->vec); i++)
+			set_page_dirty_lock(pages[i]);
+	}
+	put_vaddr_frames(g2d_userptr->vec);
+	frame_vector_destroy(g2d_userptr->vec);

 	if (!g2d_userptr->out_of_list)
 		list_del_init(&g2d_userptr->list);

 	sg_free_table(g2d_userptr->sgt);
 	kfree(g2d_userptr->sgt);
-
-	drm_free_large(g2d_userptr->pages);
 	kfree(g2d_userptr);
 }

@@ -412,9 +413,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
 	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
 	struct g2d_cmdlist_userptr *g2d_userptr;
 	struct g2d_data *g2d;
-	struct page **pages;
 	struct sg_table	*sgt;
-	struct vm_area_struct *vma;
 	unsigned long start, end;
 	unsigned int npages, offset;
 	int ret;
@@ -460,65 +459,40 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
 		return ERR_PTR(-ENOMEM);

 	atomic_set(&g2d_userptr->refcount, 1);
+	g2d_userptr->size = size;

 	start = userptr & PAGE_MASK;
 	offset = userptr & ~PAGE_MASK;
 	end = PAGE_ALIGN(userptr + size);
 	npages = (end - start) >> PAGE_SHIFT;
-	g2d_userptr->npages = npages;
-
-	pages = drm_calloc_large(npages, sizeof(struct page *));
-	if (!pages) {
-		DRM_ERROR("failed to allocate pages.\n");
+	g2d_userptr->vec = frame_vector_create(npages);
+	if (!g2d_userptr->vec) {
 		ret = -ENOMEM;
 		goto err_free;
 	}

-	down_read(&current->mm->mmap_sem);
-	vma = find_vma(current->mm, userptr);
-	if (!vma) {
-		up_read(&current->mm->mmap_sem);
-		DRM_ERROR("failed to get vm region.\n");
+	ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec);
+	if (ret != npages) {
+		DRM_ERROR("failed to get user pages from userptr.\n");
+		if (ret < 0)
+			goto err_destroy_framevec;
 		ret = -EFAULT;
-		goto err_free_pages;
+		goto err_put_framevec;
 	}
-
-	if (vma->vm_end < userptr + size) {
-		up_read(&current->mm->mmap_sem);
-		DRM_ERROR("vma is too small.\n");
+	if (frame_vector_to_pages(g2d_userptr->vec) < 0) {
 		ret = -EFAULT;
-		goto err_free_pages;
-	}
-
-	g2d_userptr->vma = exynos_gem_get_vma(vma);
-	if (!g2d_userptr->vma) {
-		up_read(&current->mm->mmap_sem);
-		DRM_ERROR("failed to copy vma.\n");
-		ret = -ENOMEM;
-		goto err_free_pages;
-	}
-
-	g2d_userptr->size = size;
-
-	ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
-						npages, pages, vma);
-	if (ret < 0) {
-		up_read(&current->mm->mmap_sem);
-		DRM_ERROR("failed to get user pages from userptr.\n");
-		goto err_put_vma;
+		goto err_put_framevec;
 	}

-	up_read(&current->mm->mmap_sem);
-	g2d_userptr->pages = pages;
-
 	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 	if (!sgt) {
 		ret = -ENOMEM;
-		goto err_free_userptr;
+		goto err_put_framevec;
 	}

-	ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
-					size, GFP_KERNEL);
+	ret = sg_alloc_table_from_pages(sgt,
+					frame_vector_pages(g2d_userptr->vec),
+					npages, offset, size, GFP_KERNEL);
 	if (ret < 0) {
 		DRM_ERROR("failed to get sgt from pages.\n");
 		goto err_free_sgt;
@@ -553,16 +527,11 @@ err_sg_free_table:
 err_free_sgt:
 	kfree(sgt);

-err_free_userptr:
-	exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
-					g2d_userptr->npages,
-					g2d_userptr->vma);
-
-err_put_vma:
-	exynos_gem_put_vma(g2d_userptr->vma);
+err_put_framevec:
+	put_vaddr_frames(g2d_userptr->vec);

-err_free_pages:
-	drm_free_large(pages);
+err_destroy_framevec:
+	frame_vector_destroy(g2d_userptr->vec);

 err_free:
 	kfree(g2d_userptr);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 62b9ea1b07fb..f12fbc36b120 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -366,103 +366,6 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }

-struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
-{
-	struct vm_area_struct *vma_copy;
-
-	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
-	if (!vma_copy)
-		return NULL;
-
-	if (vma->vm_ops && vma->vm_ops->open)
-		vma->vm_ops->open(vma);
-
-	if (vma->vm_file)
-		get_file(vma->vm_file);
-
-	memcpy(vma_copy, vma, sizeof(*vma));
-
-	vma_copy->vm_mm = NULL;
-	vma_copy->vm_next = NULL;
-	vma_copy->vm_prev = NULL;
-
-	return vma_copy;
-}
-
-void exynos_gem_put_vma(struct vm_area_struct *vma)
-{
-	if (!vma)
-		return;
-
-	if (vma->vm_ops && vma->vm_ops->close)
-		vma->vm_ops->close(vma);
-
-	if (vma->vm_file)
-		fput(vma->vm_file);
-
-	kfree(vma);
-}
-
-int exynos_gem_get_pages_from_userptr(unsigned long start,
-						unsigned int npages,
-						struct page **pages,
-						struct vm_area_struct *vma)
-{
-	int get_npages;
-
-	/* the memory region mmaped with VM_PFNMAP. */
-	if (vma_is_io(vma)) {
-		unsigned int i;
-
-		for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
-			unsigned long pfn;
-			int ret = follow_pfn(vma, start, &pfn);
-			if (ret)
-				return ret;
-
-			pages[i] = pfn_to_page(pfn);
-		}
-
-		if (i != npages) {
-			DRM_ERROR("failed to get user_pages.\n");
-			return -EINVAL;
-		}
-
-		return 0;
-	}
-
-	get_npages = get_user_pages(current, current->mm, start,
-					npages, 1, 1, pages, NULL);
-	get_npages = max(get_npages, 0);
-	if (get_npages != npages) {
-		DRM_ERROR("failed to get user_pages.\n");
-		while (get_npages)
-			put_page(pages[--get_npages]);
-		return -EFAULT;
-	}
-
-	return 0;
-}
-
-void exynos_gem_put_pages_to_userptr(struct page **pages,
-					unsigned int npages,
-					struct vm_area_struct *vma)
-{
-	if (!vma_is_io(vma)) {
-		unsigned int i;
-
-		for (i = 0; i < npages; i++) {
-			set_page_dirty_lock(pages[i]);
-
-			/*
-			 * undo the reference we took when populating
-			 * the table.
-			 */
-			put_page(pages[i]);
-		}
-	}
-}
-
 int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
 				struct sg_table *sgt,
 				enum dma_data_direction dir)
diff --git a/drivers/media/platform/omap/Kconfig b/drivers/media/platform/omap/Kconfig
index dc2aaab54aef..217d613b0fe7 100644
--- a/drivers/media/platform/omap/Kconfig
+++ b/drivers/media/platform/omap/Kconfig
@@ -10,6 +10,7 @@ config VIDEO_OMAP2_VOUT
 	select OMAP2_DSS if HAS_IOMEM && ARCH_OMAP2PLUS
 	select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
 	select VIDEO_OMAP2_VOUT_VRFB if VIDEO_OMAP2_VOUT && OMAP2_VRFB
+	select FRAME_VECTOR
 	default n
 	---help---
 	  V4L2 Display driver support for OMAP2/3 based boards.
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index de2474e1132d..70c28d19ea04 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -195,46 +195,34 @@ static int omap_vout_try_format(struct v4l2_pix_format *pix)
 }

 /*
- * omap_vout_uservirt_to_phys: This inline function is used to convert user
- * space virtual address to physical address.
+ * omap_vout_get_userptr: Convert user space virtual address to physical
+ * address.
  */
-static unsigned long omap_vout_uservirt_to_phys(unsigned long virtp)
+static int omap_vout_get_userptr(struct videobuf_buffer *vb, u32 virtp,
+				 u32 *physp)
 {
-	unsigned long physp = 0;
-	struct vm_area_struct *vma;
-	struct mm_struct *mm = current->mm;
+	struct frame_vector *vec;
+	int ret;

 	/* For kernel direct-mapped memory, take the easy way */
-	if (virtp >= PAGE_OFFSET)
-		return virt_to_phys((void *) virtp);
-
-	down_read(&current->mm->mmap_sem);
-	vma = find_vma(mm, virtp);
-	if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) {
-		/* this will catch, kernel-allocated, mmaped-to-usermode
-		   addresses */
-		physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
-		up_read(&current->mm->mmap_sem);
-	} else {
-		/* otherwise, use get_user_pages() for general userland pages */
-		int res, nr_pages = 1;
-		struct page *pages;
+	if (virtp >= PAGE_OFFSET) {
+		*physp = virt_to_phys((void *)virtp);
+		return 0;
+	}

-		res = get_user_pages(current, current->mm, virtp, nr_pages, 1,
-				0, &pages, NULL);
-		up_read(&current->mm->mmap_sem);
+	vec = frame_vector_create(1);
+	if (!vec)
+		return -ENOMEM;

-		if (res == nr_pages) {
-			physp = __pa(page_address(&pages[0]) +
-					(virtp & ~PAGE_MASK));
-		} else {
-			printk(KERN_WARNING VOUT_NAME
-					"get_user_pages failed\n");
-			return 0;
-		}
+	ret = get_vaddr_frames(virtp, 1, true, false, vec);
+	if (ret != 1) {
+		frame_vector_destroy(vec);
+		return -EINVAL;
 	}
+	*physp = __pfn_to_phys(frame_vector_pfns(vec)[0]);
+	vb->priv = vec;

-	return physp;
+	return 0;
 }

 /*
@@ -784,11 +772,15 @@ static int omap_vout_buffer_prepare(struct videobuf_queue *q,
 	 * address of the buffer
 	 */
 	if (V4L2_MEMORY_USERPTR == vb->memory) {
+		int ret;
+
 		if (0 == vb->baddr)
 			return -EINVAL;
 		/* Physical address */
-		vout->queued_buf_addr[vb->i] = (u8 *)
-			omap_vout_uservirt_to_phys(vb->baddr);
+		ret = omap_vout_get_userptr(vb, vb->baddr,
+				(u32 *)&vout->queued_buf_addr[vb->i]);
+		if (ret < 0)
+			return ret;
 	} else {
 		unsigned long addr, dma_addr;
 		unsigned long size;
@@ -834,12 +826,13 @@ static void omap_vout_buffer_queue(struct videobuf_queue *q,
 static void omap_vout_buffer_release(struct videobuf_queue *q,
 			    struct videobuf_buffer *vb)
 {
-	struct omap_vout_device *vout = q->priv_data;
-
 	vb->state = VIDEOBUF_NEEDS_INIT;
+	if (vb->memory == V4L2_MEMORY_USERPTR && vb->priv) {
+		struct frame_vector *vec = vb->priv;

-	if (V4L2_MEMORY_MMAP != vout->memory)
-		return;
+		put_vaddr_frames(vec);
+		frame_vector_destroy(vec);
+	}
 }

 /*
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index b4b022933e29..82876a67f144 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -84,6 +84,7 @@ config VIDEOBUF2_CORE

 config VIDEOBUF2_MEMOPS
 	tristate
+	select FRAME_VECTOR

 config VIDEOBUF2_DMA_CONTIG
 	tristate
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index f1022d810d22..4f59b7ec05d0 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1691,9 +1691,7 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 		ret = __qbuf_mmap(vb, b);
 		break;
 	case V4L2_MEMORY_USERPTR:
-		down_read(&current->mm->mmap_sem);
 		ret = __qbuf_userptr(vb, b);
-		up_read(&current->mm->mmap_sem);
 		break;
 	case V4L2_MEMORY_DMABUF:
 		ret = __qbuf_dmabuf(vb, b);
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 94c1e6455d36..2397ceb1dc6b 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -32,15 +32,13 @@ struct vb2_dc_buf {
 	dma_addr_t			dma_addr;
 	enum dma_data_direction		dma_dir;
 	struct sg_table			*dma_sgt;
+	struct frame_vector		*vec;

 	/* MMAP related */
 	struct vb2_vmarea_handler	handler;
 	atomic_t			refcount;
 	struct sg_table			*sgt_base;

-	/* USERPTR related */
-	struct vm_area_struct		*vma;
-
 	/* DMABUF related */
 	struct dma_buf_attachment	*db_attach;
 };
@@ -49,24 +47,6 @@ struct vb2_dc_buf {
 /*        scatterlist table functions        */
 /*********************************************/

-
-static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
-	void (*cb)(struct page *pg))
-{
-	struct scatterlist *s;
-	unsigned int i;
-
-	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
-		struct page *page = sg_page(s);
-		unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
-			>> PAGE_SHIFT;
-		unsigned int j;
-
-		for (j = 0; j < n_pages; ++j, ++page)
-			cb(page);
-	}
-}
-
 static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
 {
 	struct scatterlist *s;
@@ -429,92 +409,12 @@ static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
 /*       callbacks for USERPTR buffers      */
 /*********************************************/

-static inline int vma_is_io(struct vm_area_struct *vma)
-{
-	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
-}
-
-static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
-	struct vm_area_struct *vma, unsigned long *res)
-{
-	unsigned long pfn, start_pfn, prev_pfn;
-	unsigned int i;
-	int ret;
-
-	if (!vma_is_io(vma))
-		return -EFAULT;
-
-	ret = follow_pfn(vma, start, &pfn);
-	if (ret)
-		return ret;
-
-	start_pfn = pfn;
-	start += PAGE_SIZE;
-
-	for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
-		prev_pfn = pfn;
-		ret = follow_pfn(vma, start, &pfn);
-
-		if (ret) {
-			pr_err("no page for address %lu\n", start);
-			return ret;
-		}
-		if (pfn != prev_pfn + 1)
-			return -EINVAL;
-	}
-
-	*res = start_pfn;
-	return 0;
-}
-
-static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
-	int n_pages, struct vm_area_struct *vma,
-	enum dma_data_direction dma_dir)
-{
-	if (vma_is_io(vma)) {
-		unsigned int i;
-
-		for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
-			unsigned long pfn;
-			int ret = follow_pfn(vma, start, &pfn);
-
-			if (!pfn_valid(pfn))
-				return -EINVAL;
-
-			if (ret) {
-				pr_err("no page for address %lu\n", start);
-				return ret;
-			}
-			pages[i] = pfn_to_page(pfn);
-		}
-	} else {
-		int n;
-
-		n = get_user_pages(current, current->mm, start & PAGE_MASK,
-			n_pages, dma_dir == DMA_FROM_DEVICE, 1, pages, NULL);
-		/* negative error means that no page was pinned */
-		n = max(n, 0);
-		if (n != n_pages) {
-			pr_err("got only %d of %d user pages\n", n, n_pages);
-			while (n)
-				put_page(pages[--n]);
-			return -EFAULT;
-		}
-	}
-
-	return 0;
-}
-
-static void vb2_dc_put_dirty_page(struct page *page)
-{
-	set_page_dirty_lock(page);
-	put_page(page);
-}
-
 static void vb2_dc_put_userptr(void *buf_priv)
 {
 	struct vb2_dc_buf *buf = buf_priv;
 	struct sg_table *sgt = buf->dma_sgt;
+	int i;
+	struct page **pages;

 	if (sgt) {
 		DEFINE_DMA_ATTRS(attrs);
@@ -526,13 +426,15 @@ static void vb2_dc_put_userptr(void *buf_priv)
 		 */
 		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
 				   buf->dma_dir, &attrs);
-		if (!vma_is_io(buf->vma))
-			vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
-
+		pages = frame_vector_pages(buf->vec);
+		/* sgt should exist only if vector contains pages... */
+		BUG_ON(IS_ERR(pages));
+		for (i = 0; i < frame_vector_count(buf->vec); i++)
+			set_page_dirty_lock(pages[i]);
 		sg_free_table(sgt);
 		kfree(sgt);
 	}
-	vb2_put_vma(buf->vma);
+	vb2_destroy_framevec(buf->vec);
 	kfree(buf);
 }

@@ -572,13 +474,10 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 {
 	struct vb2_dc_conf *conf = alloc_ctx;
 	struct vb2_dc_buf *buf;
-	unsigned long start;
-	unsigned long end;
+	struct frame_vector *vec;
 	unsigned long offset;
-	struct page **pages;
-	int n_pages;
+	int n_pages, i;
 	int ret = 0;
-	struct vm_area_struct *vma;
 	struct sg_table *sgt;
 	unsigned long contig_size;
 	unsigned long dma_align = dma_get_cache_alignment();
@@ -604,72 +503,43 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	buf->dev = conf->dev;
 	buf->dma_dir = dma_dir;

-	start = vaddr & PAGE_MASK;
 	offset = vaddr & ~PAGE_MASK;
-	end = PAGE_ALIGN(vaddr + size);
-	n_pages = (end - start) >> PAGE_SHIFT;
-
-	pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
-	if (!pages) {
-		ret = -ENOMEM;
-		pr_err("failed to allocate pages table\n");
+	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
+	if (IS_ERR(vec)) {
+		ret = PTR_ERR(vec);
 		goto fail_buf;
 	}
+	buf->vec = vec;
+	n_pages = frame_vector_count(vec);
+	ret = frame_vector_to_pages(vec);
+	if (ret < 0) {
+		unsigned long *nums = frame_vector_pfns(vec);

-	/* current->mm->mmap_sem is taken by videobuf2 core */
-	vma = find_vma(current->mm, vaddr);
-	if (!vma) {
-		pr_err("no vma for address %lu\n", vaddr);
-		ret = -EFAULT;
-		goto fail_pages;
-	}
-
-	if (vma->vm_end < vaddr + size) {
-		pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
-		ret = -EFAULT;
-		goto fail_pages;
-	}
-
-	buf->vma = vb2_get_vma(vma);
-	if (!buf->vma) {
-		pr_err("failed to copy vma\n");
-		ret = -ENOMEM;
-		goto fail_pages;
-	}
-
-	/* extract page list from userspace mapping */
-	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, dma_dir);
-	if (ret) {
-		unsigned long pfn;
-		if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
-			buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
-			buf->size = size;
-			kfree(pages);
-			return buf;
-		}
-
-		pr_err("failed to get user pages\n");
-		goto fail_vma;
+		/*
+		 * Failed to convert to pages... Check the memory is physically
+		 * contiguous and use direct mapping
+		 */
+		for (i = 1; i < n_pages; i++)
+			if (nums[i-1] + 1 != nums[i])
+				goto fail_pfnvec;
+		buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
+		goto out;
 	}

 	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 	if (!sgt) {
 		pr_err("failed to allocate sg table\n");
 		ret = -ENOMEM;
-		goto fail_get_user_pages;
+		goto fail_pfnvec;
 	}

-	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
+	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
 		offset, size, GFP_KERNEL);
 	if (ret) {
 		pr_err("failed to initialize sg table\n");
 		goto fail_sgt;
 	}

-	/* pages are no longer needed */
-	kfree(pages);
-	pages = NULL;
-
 	/*
 	 * No need to sync to the device, this will happen later when the
 	 * prepare() memop is called.
@@ -691,8 +561,9 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	}

 	buf->dma_addr = sg_dma_address(sgt->sgl);
-	buf->size = size;
 	buf->dma_sgt = sgt;
+out:
+	buf->size = size;

 	return buf;

@@ -701,23 +572,13 @@ fail_map_sg:
 			   buf->dma_dir, &attrs);

 fail_sgt_init:
-	if (!vma_is_io(buf->vma))
-		vb2_dc_sgt_foreach_page(sgt, put_page);
 	sg_free_table(sgt);

 fail_sgt:
 	kfree(sgt);

-fail_get_user_pages:
-	if (pages && !vma_is_io(buf->vma))
-		while (n_pages)
-			put_page(pages[--n_pages]);
-
-fail_vma:
-	vb2_put_vma(buf->vma);
-
-fail_pages:
-	kfree(pages); /* kfree is NULL-proof */
+fail_pfnvec:
+	vb2_destroy_framevec(vec);

 fail_buf:
 	kfree(buf);
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 7289b81bd7b7..be7bd6535c9d 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -38,6 +38,7 @@ struct vb2_dma_sg_buf {
 	struct device			*dev;
 	void				*vaddr;
 	struct page			**pages;
+	struct frame_vector		*vec;
 	int				offset;
 	enum dma_data_direction		dma_dir;
 	struct sg_table			sg_table;
@@ -51,7 +52,6 @@ struct vb2_dma_sg_buf {
 	unsigned int			num_pages;
 	atomic_t			refcount;
 	struct vb2_vmarea_handler	handler;
-	struct vm_area_struct		*vma;

 	struct dma_buf_attachment	*db_attach;
 };
@@ -225,25 +225,17 @@ static void vb2_dma_sg_finish(void *buf_priv)
 	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
 }

-static inline int vma_is_io(struct vm_area_struct *vma)
-{
-	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
-}
-
 static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 				    unsigned long size,
 				    enum dma_data_direction dma_dir)
 {
 	struct vb2_dma_sg_conf *conf = alloc_ctx;
 	struct vb2_dma_sg_buf *buf;
-	unsigned long first, last;
-	int num_pages_from_user;
-	struct vm_area_struct *vma;
 	struct sg_table *sgt;
 	DEFINE_DMA_ATTRS(attrs);
+	struct frame_vector *vec;

 	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
-
 	buf = kzalloc(sizeof *buf, GFP_KERNEL);
 	if (!buf)
 		return NULL;
@@ -254,61 +246,19 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	buf->offset = vaddr & ~PAGE_MASK;
 	buf->size = size;
 	buf->dma_sgt = &buf->sg_table;
+	vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE);
+	if (IS_ERR(vec))
+		goto userptr_fail_pfnvec;
+	buf->vec = vec;

-	first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
-	last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
-	buf->num_pages = last - first + 1;
-
-	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
-			     GFP_KERNEL);
-	if (!buf->pages)
-		goto userptr_fail_alloc_pages;
-
-	vma = find_vma(current->mm, vaddr);
-	if (!vma) {
-		dprintk(1, "no vma for address %lu\n", vaddr);
-		goto userptr_fail_find_vma;
-	}
-
-	if (vma->vm_end < vaddr + size) {
-		dprintk(1, "vma at %lu is too small for %lu bytes\n",
-			vaddr, size);
-		goto userptr_fail_find_vma;
-	}
-
-	buf->vma = vb2_get_vma(vma);
-	if (!buf->vma) {
-		dprintk(1, "failed to copy vma\n");
-		goto userptr_fail_find_vma;
-	}
-
-	if (vma_is_io(buf->vma)) {
-		for (num_pages_from_user = 0;
-		     num_pages_from_user < buf->num_pages;
-		     ++num_pages_from_user, vaddr += PAGE_SIZE) {
-			unsigned long pfn;
-
-			if (follow_pfn(vma, vaddr, &pfn)) {
-				dprintk(1, "no page for address %lu\n", vaddr);
-				break;
-			}
-			buf->pages[num_pages_from_user] = pfn_to_page(pfn);
-		}
-	} else
-		num_pages_from_user = get_user_pages(current, current->mm,
-					     vaddr & PAGE_MASK,
-					     buf->num_pages,
-					     buf->dma_dir == DMA_FROM_DEVICE,
-					     1, /* force */
-					     buf->pages,
-					     NULL);
-
-	if (num_pages_from_user != buf->num_pages)
-		goto userptr_fail_get_user_pages;
+	buf->pages = frame_vector_pages(vec);
+	if (IS_ERR(buf->pages))
+		goto userptr_fail_sgtable;
+	buf->num_pages = frame_vector_count(vec);

 	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
 			buf->num_pages, buf->offset, size, 0))
-		goto userptr_fail_alloc_table_from_pages;
+		goto userptr_fail_sgtable;

 	sgt = &buf->sg_table;
 	/*
@@ -324,17 +274,9 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,

 userptr_fail_map:
 	sg_free_table(&buf->sg_table);
-userptr_fail_alloc_table_from_pages:
-userptr_fail_get_user_pages:
-	dprintk(1, "get_user_pages requested/got: %d/%d]\n",
-		buf->num_pages, num_pages_from_user);
-	if (!vma_is_io(buf->vma))
-		while (--num_pages_from_user >= 0)
-			put_page(buf->pages[num_pages_from_user]);
-	vb2_put_vma(buf->vma);
-userptr_fail_find_vma:
-	kfree(buf->pages);
-userptr_fail_alloc_pages:
+userptr_fail_sgtable:
+	vb2_destroy_framevec(vec);
+userptr_fail_pfnvec:
 	kfree(buf);
 	return NULL;
 }
@@ -362,11 +304,8 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
 	while (--i >= 0) {
 		if (buf->dma_dir == DMA_FROM_DEVICE)
 			set_page_dirty_lock(buf->pages[i]);
-		if (!vma_is_io(buf->vma))
-			put_page(buf->pages[i]);
 	}
-	kfree(buf->pages);
-	vb2_put_vma(buf->vma);
+	vb2_destroy_framevec(buf->vec);
 	kfree(buf);
 }

diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index 0d49b7951f84..48c6a49c4928 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -23,118 +23,62 @@
 #include <media/videobuf2-memops.h>

 /**
- * vb2_get_vma() - acquire and lock the virtual memory area
- * @vma:	given virtual memory area
+ * vb2_create_framevec() - map virtual addresses to pfns
+ * @start:	Virtual user address where we start mapping
+ * @length:	Length of a range to map
+ * @write:	Should we map for writing into the area
  *
- * This function attempts to acquire an area mapped in the userspace for
- * the duration of a hardware operation. The area is "locked" by performing
- * the same set of operation that are done when process calls fork() and
- * memory areas are duplicated.
- *
- * Returns a copy of a virtual memory region on success or NULL.
+ * This function allocates and fills in a vector with pfns corresponding to
+ * virtual address range passed in arguments. If pfns have corresponding pages,
+ * page references are also grabbed to pin pages in memory. The function
+ * returns pointer to the vector on success and error pointer in case of
+ * failure. Returned vector needs to be freed via vb2_destroy_pfnvec().
  */
-struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma)
+struct frame_vector *vb2_create_framevec(unsigned long start,
+					 unsigned long length,
+					 bool write)
 {
-	struct vm_area_struct *vma_copy;
+	int ret;
+	unsigned long first, last;
+	unsigned long nr;
+	struct frame_vector *vec;

-	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
-	if (vma_copy == NULL)
-		return NULL;
-
-	if (vma->vm_ops && vma->vm_ops->open)
-		vma->vm_ops->open(vma);
-
-	if (vma->vm_file)
-		get_file(vma->vm_file);
-
-	memcpy(vma_copy, vma, sizeof(*vma));
-
-	vma_copy->vm_mm = NULL;
-	vma_copy->vm_next = NULL;
-	vma_copy->vm_prev = NULL;
-
-	return vma_copy;
-}
-EXPORT_SYMBOL_GPL(vb2_get_vma);
-
-/**
- * vb2_put_userptr() - release a userspace virtual memory area
- * @vma: virtual memory region associated with the area to be released
- *
- * This function releases the previously acquired memory area after a hardware
- * operation.
- */
-void vb2_put_vma(struct vm_area_struct *vma)
-{
-	if (!vma)
-		return;
-
-	if (vma->vm_ops && vma->vm_ops->close)
-		vma->vm_ops->close(vma);
-
-	if (vma->vm_file)
-		fput(vma->vm_file);
-
-	kfree(vma);
+	first = start >> PAGE_SHIFT;
+	last = (start + length - 1) >> PAGE_SHIFT;
+	nr = last - first + 1;
+	vec = frame_vector_create(nr);
+	if (!vec)
+		return ERR_PTR(-ENOMEM);
+	ret = get_vaddr_frames(start, nr, write, 1, vec);
+	if (ret < 0)
+		goto out_destroy;
+	/* We accept only complete set of PFNs */
+	if (ret != nr) {
+		ret = -EFAULT;
+		goto out_release;
+	}
+	return vec;
+out_release:
+	put_vaddr_frames(vec);
+out_destroy:
+	frame_vector_destroy(vec);
+	return ERR_PTR(ret);
 }
-EXPORT_SYMBOL_GPL(vb2_put_vma);
+EXPORT_SYMBOL(vb2_create_framevec);

 /**
- * vb2_get_contig_userptr() - lock physically contiguous userspace mapped memory
- * @vaddr:	starting virtual address of the area to be verified
- * @size:	size of the area
- * @res_paddr:	will return physical address for the given vaddr
- * @res_vma:	will return locked copy of struct vm_area for the given area
- *
- * This function will go through memory area of size @size mapped at @vaddr and
- * verify that the underlying physical pages are contiguous. If they are
- * contiguous the virtual memory area is locked and a @res_vma is filled with
- * the copy and @res_pa set to the physical address of the buffer.
+ * vb2_destroy_framevec() - release vector of mapped pfns
+ * @vec:	vector of pfns / pages to release
  *
- * Returns 0 on success.
+ * This releases references to all pages in the vector @vec (if corresponding
+ * pfns are backed by pages) and frees the passed vector.
  */
-int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
-			   struct vm_area_struct **res_vma, dma_addr_t *res_pa)
+void vb2_destroy_framevec(struct frame_vector *vec)
 {
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long offset, start, end;
-	unsigned long this_pfn, prev_pfn;
-	dma_addr_t pa = 0;
-
-	start = vaddr;
-	offset = start & ~PAGE_MASK;
-	end = start + size;
-
-	vma = find_vma(mm, start);
-
-	if (vma == NULL || vma->vm_end < end)
-		return -EFAULT;
-
-	for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
-		int ret = follow_pfn(vma, start, &this_pfn);
-		if (ret)
-			return ret;
-
-		if (prev_pfn == 0)
-			pa = this_pfn << PAGE_SHIFT;
-		else if (this_pfn != prev_pfn + 1)
-			return -EFAULT;
-
-		prev_pfn = this_pfn;
-	}
-
-	/*
-	 * Memory is contiguous, lock vma and return to the caller
-	 */
-	*res_vma = vb2_get_vma(vma);
-	if (*res_vma == NULL)
-		return -ENOMEM;
-
-	*res_pa = pa + offset;
-	return 0;
+	put_vaddr_frames(vec);
+	frame_vector_destroy(vec);
 }
-EXPORT_SYMBOL_GPL(vb2_get_contig_userptr);
+EXPORT_SYMBOL(vb2_destroy_framevec);

 /**
  * vb2_common_vm_open() - increase refcount of the vma
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index 2fe4c27f524a..ecb8f0c7f025 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -23,11 +23,9 @@

 struct vb2_vmalloc_buf {
 	void				*vaddr;
-	struct page			**pages;
-	struct vm_area_struct		*vma;
+	struct frame_vector		*vec;
 	enum dma_data_direction		dma_dir;
 	unsigned long			size;
-	unsigned int			n_pages;
 	atomic_t			refcount;
 	struct vb2_vmarea_handler	handler;
 	struct dma_buf			*dbuf;
@@ -76,10 +74,8 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 				     enum dma_data_direction dma_dir)
 {
 	struct vb2_vmalloc_buf *buf;
-	unsigned long first, last;
-	int n_pages, offset;
-	struct vm_area_struct *vma;
-	dma_addr_t physp;
+	struct frame_vector *vec;
+	int n_pages, offset, i;

 	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 	if (!buf)
@@ -88,51 +84,36 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	buf->dma_dir = dma_dir;
 	offset = vaddr & ~PAGE_MASK;
 	buf->size = size;
-
-
-	vma = find_vma(current->mm, vaddr);
-	if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) {
-		if (vb2_get_contig_userptr(vaddr, size, &vma, &physp))
-			goto fail_pages_array_alloc;
-		buf->vma = vma;
-		buf->vaddr = (__force void *)ioremap_nocache(physp, size);
-		if (!buf->vaddr)
-			goto fail_pages_array_alloc;
+	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
+	if (IS_ERR(vec))
+		goto fail_pfnvec_create;
+	buf->vec = vec;
+	n_pages = frame_vector_count(vec);
+	if (frame_vector_to_pages(vec) < 0) {
+		unsigned long *nums = frame_vector_pfns(vec);
+
+		/*
+		 * We cannot get page pointers for these pfns. Check memory is
+		 * physically contiguous and use direct mapping.
+		 */
+		for (i = 1; i < n_pages; i++)
+			if (nums[i-1] + 1 != nums[i])
+				goto fail_map;
+		buf->vaddr = (__force void *)
+				ioremap_nocache(nums[0] << PAGE_SHIFT, size);
 	} else {
-		first = vaddr >> PAGE_SHIFT;
-		last  = (vaddr + size - 1) >> PAGE_SHIFT;
-		buf->n_pages = last - first + 1;
-		buf->pages = kzalloc(buf->n_pages * sizeof(struct page *),
-				     GFP_KERNEL);
-		if (!buf->pages)
-			goto fail_pages_array_alloc;
-
-		/* current->mm->mmap_sem is taken by videobuf2 core */
-		n_pages = get_user_pages(current, current->mm,
-					 vaddr & PAGE_MASK, buf->n_pages,
-					 dma_dir == DMA_FROM_DEVICE,
-					 1, /* force */
-					 buf->pages, NULL);
-		if (n_pages != buf->n_pages)
-			goto fail_get_user_pages;
-
-		buf->vaddr = vm_map_ram(buf->pages, buf->n_pages, -1,
+		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
 					PAGE_KERNEL);
-		if (!buf->vaddr)
-			goto fail_get_user_pages;
 	}

+	if (!buf->vaddr)
+		goto fail_map;
 	buf->vaddr += offset;
 	return buf;

-fail_get_user_pages:
-	pr_debug("get_user_pages requested/got: %d/%d]\n", n_pages,
-		 buf->n_pages);
-	while (--n_pages >= 0)
-		put_page(buf->pages[n_pages]);
-	kfree(buf->pages);
-
-fail_pages_array_alloc:
+fail_map:
+	vb2_destroy_framevec(vec);
+fail_pfnvec_create:
 	kfree(buf);

 	return NULL;
@@ -143,20 +124,21 @@ static void vb2_vmalloc_put_userptr(void *buf_priv)
 	struct vb2_vmalloc_buf *buf = buf_priv;
 	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
 	unsigned int i;
+	struct page **pages;
+	unsigned int n_pages;

-	if (buf->pages) {
+	if (!buf->vec->is_pfns) {
+		n_pages = frame_vector_count(buf->vec);
+		pages = frame_vector_pages(buf->vec);
 		if (vaddr)
-			vm_unmap_ram((void *)vaddr, buf->n_pages);
-		for (i = 0; i < buf->n_pages; ++i) {
-			if (buf->dma_dir == DMA_FROM_DEVICE)
-				set_page_dirty_lock(buf->pages[i]);
-			put_page(buf->pages[i]);
-		}
-		kfree(buf->pages);
+			vm_unmap_ram((void *)vaddr, n_pages);
+		if (buf->dma_dir == DMA_FROM_DEVICE)
+			for (i = 0; i < n_pages; i++)
+				set_page_dirty_lock(pages[i]);
 	} else {
-		vb2_put_vma(buf->vma);
 		iounmap((__force void __iomem *)buf->vaddr);
 	}
+	vb2_destroy_framevec(buf->vec);
 	kfree(buf);
 }

diff --git a/include/linux/mm.h b/include/linux/mm.h
index fda728e3c27d..91c08f6f0dc9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -20,6 +20,7 @@
 #include <linux/shrinker.h>
 #include <linux/resource.h>
 #include <linux/page_ext.h>
+#include <linux/err.h>

 struct mempolicy;
 struct anon_vma;
@@ -1214,6 +1215,49 @@ long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 		    int write, int force, struct page **pages);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages);
+
+/* Container for pinned pfns / pages */
+struct frame_vector {
+	unsigned int nr_allocated;	/* Number of frames we have space for */
+	unsigned int nr_frames;	/* Number of frames stored in ptrs array */
+	bool got_ref;		/* Did we pin pages by getting page ref? */
+	bool is_pfns;		/* Does array contain pages or pfns? */
+	void *ptrs[0];		/* Array of pinned pfns / pages. Use
+				 * pfns_vector_pages() or pfns_vector_pfns()
+				 * for access */
+};
+
+struct frame_vector *frame_vector_create(unsigned int nr_frames);
+void frame_vector_destroy(struct frame_vector *vec);
+int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
+		     bool write, bool force, struct frame_vector *vec);
+void put_vaddr_frames(struct frame_vector *vec);
+int frame_vector_to_pages(struct frame_vector *vec);
+void frame_vector_to_pfns(struct frame_vector *vec);
+
+static inline unsigned int frame_vector_count(struct frame_vector *vec)
+{
+	return vec->nr_frames;
+}
+
+static inline struct page **frame_vector_pages(struct frame_vector *vec)
+{
+	if (vec->is_pfns) {
+		int err = frame_vector_to_pages(vec);
+
+		if (err)
+			return ERR_PTR(err);
+	}
+	return (struct page **)(vec->ptrs);
+}
+
+static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
+{
+	if (!vec->is_pfns)
+		frame_vector_to_pfns(vec);
+	return (unsigned long *)(vec->ptrs);
+}
+
 struct kvec;
 int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
 		     struct page **pages);
diff --git a/include/media/videobuf2-memops.h b/include/media/videobuf2-memops.h
index 9f36641a6781..6513c7ec3116 100644
--- a/include/media/videobuf2-memops.h
+++ b/include/media/videobuf2-memops.h
@@ -15,6 +15,7 @@
 #define _MEDIA_VIDEOBUF2_MEMOPS_H

 #include <media/videobuf2-core.h>
+#include <linux/mm.h>

 /**
  * struct vb2_vmarea_handler - common vma refcount tracking handler
@@ -31,11 +32,9 @@ struct vb2_vmarea_handler {

 extern const struct vm_operations_struct vb2_common_vm_ops;

-int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
-			   struct vm_area_struct **res_vma, dma_addr_t *res_pa);
-
-struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma);
-void vb2_put_vma(struct vm_area_struct *vma);
-
+struct frame_vector *vb2_create_framevec(unsigned long start,
+					 unsigned long length,
+					 bool write);
+void vb2_destroy_framevec(struct frame_vector *vec);

 #endif
diff --git a/mm/Kconfig b/mm/Kconfig
index 6413d027c0b2..0d9fdcd01e47 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -677,3 +677,6 @@ config ZONE_DEVICE
 	  mapping in an O_DIRECT operation, among other things.

 	  If FS_DAX is enabled, then say Y.
+
+config FRAME_VECTOR
+	bool
diff --git a/mm/Makefile b/mm/Makefile
index 56f8eed73f1a..2ed43191fc3b 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -80,3 +80,4 @@ obj-$(CONFIG_PAGE_EXTENSION) += page_ext.o
 obj-$(CONFIG_CMA_DEBUGFS) += cma_debug.o
 obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
 obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o
+obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
diff --git a/mm/frame_vector.c b/mm/frame_vector.c
new file mode 100644
index 000000000000..cdabcb93c6a6
--- /dev/null
+++ b/mm/frame_vector.c
@@ -0,0 +1,230 @@
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+
+/*
+ * get_vaddr_frames() - map virtual addresses to pfns
+ * @start:	starting user address
+ * @nr_frames:	number of pages / pfns from start to map
+ * @write:	whether pages will be written to by the caller
+ * @force:	whether to force write access even if user mapping is
+ *		readonly. See description of the same argument of
+ *		get_user_pages().
+ * @vec:	structure which receives pages / pfns of the addresses mapped.
+ *		It should have space for at least nr_frames entries.
+ *
+ * This function maps virtual addresses from @start and fills @vec structure
+ * with page frame numbers or page pointers to corresponding pages (choice
+ * depends on the type of the vma underlying the virtual address). If @start
+ * belongs to a normal vma, the function grabs reference to each of the pages
+ * to pin them in memory. If @start belongs to VM_IO | VM_PFNMAP vma, we don't
+ * touch page structures and the caller must make sure pfns aren't reused for
+ * anything else while he is using them.
+ *
+ * The function returns number of pages mapped which may be less than
+ * @nr_frames. In particular we stop mapping if there are more vmas of
+ * different type underlying the specified range of virtual addresses.
+ * When the function isn't able to map a single page, it returns error.
+ *
+ * This function takes care of grabbing mmap_sem as necessary.
+ */
+int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
+		     bool write, bool force, struct frame_vector *vec)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	int ret = 0;
+	int err;
+	int locked;
+
+	if (nr_frames == 0)
+		return 0;
+
+	if (WARN_ON_ONCE(nr_frames > vec->nr_allocated))
+		nr_frames = vec->nr_allocated;
+
+	down_read(&mm->mmap_sem);
+	locked = 1;
+	vma = find_vma_intersection(mm, start, start + 1);
+	if (!vma) {
+		ret = -EFAULT;
+		goto out;
+	}
+	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
+		vec->got_ref = true;
+		vec->is_pfns = false;
+		ret = get_user_pages_locked(current, mm, start, nr_frames,
+			write, force, (struct page **)(vec->ptrs), &locked);
+		goto out;
+	}
+
+	vec->got_ref = false;
+	vec->is_pfns = true;
+	do {
+		unsigned long *nums = frame_vector_pfns(vec);
+
+		while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) {
+			err = follow_pfn(vma, start, &nums[ret]);
+			if (err) {
+				if (ret == 0)
+					ret = err;
+				goto out;
+			}
+			start += PAGE_SIZE;
+			ret++;
+		}
+		/*
+		 * We stop if we have enough pages or if VMA doesn't completely
+		 * cover the tail page.
+		 */
+		if (ret >= nr_frames || start < vma->vm_end)
+			break;
+		vma = find_vma_intersection(mm, start, start + 1);
+	} while (vma && vma->vm_flags & (VM_IO | VM_PFNMAP));
+out:
+	if (locked)
+		up_read(&mm->mmap_sem);
+	if (!ret)
+		ret = -EFAULT;
+	if (ret > 0)
+		vec->nr_frames = ret;
+	return ret;
+}
+EXPORT_SYMBOL(get_vaddr_frames);
+
+/**
+ * put_vaddr_frames() - drop references to pages if get_vaddr_frames() acquired
+ *			them
+ * @vec:	frame vector to put
+ *
+ * Drop references to pages if get_vaddr_frames() acquired them. We also
+ * invalidate the frame vector so that it is prepared for the next call into
+ * get_vaddr_frames().
+ */
+void put_vaddr_frames(struct frame_vector *vec)
+{
+	int i;
+	struct page **pages;
+
+	if (!vec->got_ref)
+		goto out;
+	pages = frame_vector_pages(vec);
+	/*
+	 * frame_vector_pages() might needed to do a conversion when
+	 * get_vaddr_frames() got pages but vec was later converted to pfns.
+	 * But it shouldn't really fail to convert pfns back...
+	 */
+	if (WARN_ON(IS_ERR(pages)))
+		goto out;
+	for (i = 0; i < vec->nr_frames; i++)
+		put_page(pages[i]);
+	vec->got_ref = false;
+out:
+	vec->nr_frames = 0;
+}
+EXPORT_SYMBOL(put_vaddr_frames);
+
+/**
+ * frame_vector_to_pages - convert frame vector to contain page pointers
+ * @vec:	frame vector to convert
+ *
+ * Convert @vec to contain array of page pointers. If the conversion is
+ * successful, return 0. Otherwise return an error. Note that we do not grab
+ * page references for the page structures.
+ */
+int frame_vector_to_pages(struct frame_vector *vec)
+{
+	int i;
+	unsigned long *nums;
+	struct page **pages;
+
+	if (!vec->is_pfns)
+		return 0;
+	nums = frame_vector_pfns(vec);
+	for (i = 0; i < vec->nr_frames; i++)
+		if (!pfn_valid(nums[i]))
+			return -EINVAL;
+	pages = (struct page **)nums;
+	for (i = 0; i < vec->nr_frames; i++)
+		pages[i] = pfn_to_page(nums[i]);
+	vec->is_pfns = false;
+	return 0;
+}
+EXPORT_SYMBOL(frame_vector_to_pages);
+
+/**
+ * frame_vector_to_pfns - convert frame vector to contain pfns
+ * @vec:	frame vector to convert
+ *
+ * Convert @vec to contain array of pfns.
+ */
+void frame_vector_to_pfns(struct frame_vector *vec)
+{
+	int i;
+	unsigned long *nums;
+	struct page **pages;
+
+	if (vec->is_pfns)
+		return;
+	pages = (struct page **)(vec->ptrs);
+	nums = (unsigned long *)pages;
+	for (i = 0; i < vec->nr_frames; i++)
+		nums[i] = page_to_pfn(pages[i]);
+	vec->is_pfns = true;
+}
+EXPORT_SYMBOL(frame_vector_to_pfns);
+
+/**
+ * frame_vector_create() - allocate & initialize structure for pinned pfns
+ * @nr_frames:	number of pfns slots we should reserve
+ *
+ * Allocate and initialize struct pinned_pfns to be able to hold @nr_pfns
+ * pfns.
+ */
+struct frame_vector *frame_vector_create(unsigned int nr_frames)
+{
+	struct frame_vector *vec;
+	int size = sizeof(struct frame_vector) + sizeof(void *) * nr_frames;
+
+	if (WARN_ON_ONCE(nr_frames == 0))
+		return NULL;
+	/*
+	 * This is absurdly high. It's here just to avoid strange effects when
+	 * arithmetics overflows.
+	 */
+	if (WARN_ON_ONCE(nr_frames > INT_MAX / sizeof(void *) / 2))
+		return NULL;
+	/*
+	 * Avoid higher order allocations, use vmalloc instead. It should
+	 * be rare anyway.
+	 */
+	if (size <= PAGE_SIZE)
+		vec = kmalloc(size, GFP_KERNEL);
+	else
+		vec = vmalloc(size);
+	if (!vec)
+		return NULL;
+	vec->nr_allocated = nr_frames;
+	vec->nr_frames = 0;
+	return vec;
+}
+EXPORT_SYMBOL(frame_vector_create);
+
+/**
+ * frame_vector_destroy() - free memory allocated to carry frame vector
+ * @vec:	Frame vector to free
+ *
+ * Free structure allocated by frame_vector_create() to carry frames.
+ */
+void frame_vector_destroy(struct frame_vector *vec)
+{
+	/* Make sure put_vaddr_frames() got called properly... */
+	VM_BUG_ON(vec->nr_frames > 0);
+	kvfree(vec);
+}
+EXPORT_SYMBOL(frame_vector_destroy);
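A companion sketch for the VM_PFNMAP side, which videobuf2-dma-contig and videobuf2-vmalloc exercise above (again illustrative, not part of the patch: map_contiguous_pfns() is an invented name; on this path no page references are taken, so the caller must keep the mapping valid while the device uses it):

static int map_contiguous_pfns(struct frame_vector *vec)
{
	unsigned long *nums;
	unsigned int i, nr = frame_vector_count(vec);

	/* Ordinary pages: nothing to check, frame_vector_pages() works. */
	if (frame_vector_to_pages(vec) == 0)
		return 0;

	/*
	 * pfns without struct page (VM_IO / VM_PFNMAP): accept them only if
	 * they are physically contiguous, mirroring the checks above.
	 */
	nums = frame_vector_pfns(vec);
	for (i = 1; i < nr; i++)
		if (nums[i - 1] + 1 != nums[i])
			return -EINVAL;

	/* ... program the device with base address nums[0] << PAGE_SHIFT ... */
	return 0;
}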