Diffstat (limited to 'drivers/media/v4l2-core')
-rw-r--r--	drivers/media/v4l2-core/Kconfig                 |   3
-rw-r--r--	drivers/media/v4l2-core/v4l2-common.c           |   3
-rw-r--r--	drivers/media/v4l2-core/v4l2-compat-ioctl32.c   |  19
-rw-r--r--	drivers/media/v4l2-core/v4l2-dev.c              |   1
-rw-r--r--	drivers/media/v4l2-core/v4l2-event.c            |   2
-rw-r--r--	drivers/media/v4l2-core/v4l2-fh.c               |   2
-rw-r--r--	drivers/media/v4l2-core/v4l2-ioctl.c            |  11
-rw-r--r--	drivers/media/v4l2-core/v4l2-mem2mem.c          |  19
-rw-r--r--	drivers/media/v4l2-core/v4l2-subdev.c           |  22
-rw-r--r--	drivers/media/v4l2-core/videobuf-core.c         |   4
-rw-r--r--	drivers/media/v4l2-core/videobuf2-core.c        | 300
-rw-r--r--	drivers/media/v4l2-core/videobuf2-dma-contig.c  | 700
-rw-r--r--	drivers/media/v4l2-core/videobuf2-memops.c      |  40
-rw-r--r--	drivers/media/v4l2-core/videobuf2-vmalloc.c     |  56
14 files changed, 1065 insertions(+), 117 deletions(-)
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 0c54e19d9944..65875c3aba1b 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -59,6 +59,7 @@ config VIDEOBUF_DVB
 
 # Used by drivers that need Videobuf2 modules
 config VIDEOBUF2_CORE
+	select DMA_SHARED_BUFFER
 	tristate
 
 config VIDEOBUF2_MEMOPS
@@ -68,11 +69,13 @@ config VIDEOBUF2_DMA_CONTIG
 	tristate
 	select VIDEOBUF2_CORE
 	select VIDEOBUF2_MEMOPS
+	select DMA_SHARED_BUFFER
 
 config VIDEOBUF2_VMALLOC
 	tristate
 	select VIDEOBUF2_CORE
 	select VIDEOBUF2_MEMOPS
+	select DMA_SHARED_BUFFER
 
 config VIDEOBUF2_DMA_SG
 	tristate
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index f995dd31151d..380ddd89fa4c 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -837,7 +837,7 @@ bool v4l2_detect_gtf(unsigned frame_height,
 		struct v4l2_dv_timings *fmt)
 {
 	int pix_clk;
-	int v_fp, v_bp, h_fp, h_bp, hsync;
+	int v_fp, v_bp, h_fp, hsync;
 	int frame_width, image_height, image_width;
 	bool default_gtf;
 	int h_blank;
@@ -885,7 +885,6 @@ bool v4l2_detect_gtf(unsigned frame_height,
 	hsync = hsync - hsync % GTF_CELL_GRAN;
 
 	h_fp = h_blank / 2 - hsync;
-	h_bp = h_blank / 2;
 
 	fmt->bt.polarities = polarities;
 	fmt->bt.width = image_width;
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 83ffb6436baf..7157af301b14 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -297,6 +297,7 @@ struct v4l2_plane32 {
 	union {
 		__u32		mem_offset;
 		compat_long_t	userptr;
+		__s32		fd;
 	} m;
 	__u32			data_offset;
 	__u32			reserved[11];
@@ -318,6 +319,7 @@ struct v4l2_buffer32 {
 		__u32		offset;
 		compat_long_t	userptr;
 		compat_caddr_t	planes;
+		__s32		fd;
 	} m;
 	__u32			length;
 	__u32			reserved2;
@@ -341,6 +343,9 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
 		up_pln = compat_ptr(p);
 		if (put_user((unsigned long)up_pln, &up->m.userptr))
 			return -EFAULT;
+	} else if (memory == V4L2_MEMORY_DMABUF) {
+		if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(int)))
+			return -EFAULT;
 	} else {
 		if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset,
 				 sizeof(__u32)))
@@ -364,6 +369,11 @@ static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
 	if (copy_in_user(&up32->m.mem_offset, &up->m.mem_offset,
 			 sizeof(__u32)))
 		return -EFAULT;
+	/* For DMABUF, driver might've set up the fd, so copy it back. */
+	if (memory == V4L2_MEMORY_DMABUF)
+		if (copy_in_user(&up32->m.fd, &up->m.fd,
+				 sizeof(int)))
+			return -EFAULT;
 
 	return 0;
 }
@@ -446,6 +456,10 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
 			if (get_user(kp->m.offset, &up->m.offset))
 				return -EFAULT;
 			break;
+		case V4L2_MEMORY_DMABUF:
+			if (get_user(kp->m.fd, &up->m.fd))
+				return -EFAULT;
+			break;
 		}
 	}
 
@@ -510,6 +524,10 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
 			if (put_user(kp->m.offset, &up->m.offset))
 				return -EFAULT;
 			break;
+		case V4L2_MEMORY_DMABUF:
+			if (put_user(kp->m.fd, &up->m.fd))
+				return -EFAULT;
+			break;
 		}
 	}
 
@@ -1000,6 +1018,7 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
 	case VIDIOC_S_FBUF32:
 	case VIDIOC_OVERLAY32:
 	case VIDIOC_QBUF32:
+	case VIDIOC_EXPBUF:
 	case VIDIOC_DQBUF32:
 	case VIDIOC_STREAMON32:
 	case VIDIOC_STREAMOFF32:
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index a2df842e5100..98dcad9c8a3b 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -571,6 +571,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
 	SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
 	SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
 	SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf);
+	SET_VALID_IOCTL(ops, VIDIOC_EXPBUF, vidioc_expbuf);
 	SET_VALID_IOCTL(ops, VIDIOC_DQBUF, vidioc_dqbuf);
 	SET_VALID_IOCTL(ops, VIDIOC_STREAMON, vidioc_streamon);
 	SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff);
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index 18a040b935a3..c72009218152 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2009--2010 Nokia Corporation.
  *
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
index 9e3fc040ea20..e57c002b4150 100644
--- a/drivers/media/v4l2-core/v4l2-fh.c
+++ b/drivers/media/v4l2-core/v4l2-fh.c
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2009--2010 Nokia Corporation.
  *
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 8f388ff31ebb..aa6e7c788db2 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -155,6 +155,7 @@ static const char *v4l2_memory_names[] = {
 	[V4L2_MEMORY_MMAP]    = "mmap",
 	[V4L2_MEMORY_USERPTR] = "userptr",
 	[V4L2_MEMORY_OVERLAY] = "overlay",
+	[V4L2_MEMORY_DMABUF]  = "dmabuf",
 };
 
 #define prt_names(a, arr) (((unsigned)(a)) < ARRAY_SIZE(arr) ? arr[a] : "unknown")
@@ -453,6 +454,15 @@ static void v4l_print_buffer(const void *arg, bool write_only)
 			tc->type, tc->flags, tc->frames, *(__u32 *)tc->userbits);
 }
 
+static void v4l_print_exportbuffer(const void *arg, bool write_only)
+{
+	const struct v4l2_exportbuffer *p = arg;
+
+	pr_cont("fd=%d, type=%s, index=%u, plane=%u, flags=0x%08x\n",
+		p->fd, prt_names(p->type, v4l2_type_names),
+		p->index, p->plane, p->flags);
+}
+
 static void v4l_print_create_buffers(const void *arg, bool write_only)
 {
 	const struct v4l2_create_buffers *p = arg;
@@ -1960,6 +1970,7 @@ static struct v4l2_ioctl_info v4l2_ioctls[] = {
 	IOCTL_INFO_STD(VIDIOC_S_FBUF, vidioc_s_fbuf, v4l_print_framebuffer, INFO_FL_PRIO),
 	IOCTL_INFO_FNC(VIDIOC_OVERLAY, v4l_overlay, v4l_print_u32, INFO_FL_PRIO),
 	IOCTL_INFO_FNC(VIDIOC_QBUF, v4l_qbuf, v4l_print_buffer, INFO_FL_QUEUE),
+	IOCTL_INFO_STD(VIDIOC_EXPBUF, vidioc_expbuf, v4l_print_exportbuffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_exportbuffer, flags)),
 	IOCTL_INFO_FNC(VIDIOC_DQBUF, v4l_dqbuf, v4l_print_buffer, INFO_FL_QUEUE),
 	IOCTL_INFO_FNC(VIDIOC_STREAMON, v4l_streamon, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
 	IOCTL_INFO_FNC(VIDIOC_STREAMOFF, v4l_streamoff, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
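
Note: the VIDIOC_EXPBUF ioctl registered in the table above is driven from userspace roughly as in the following sketch. It is illustrative only, not part of this patch; the fields used are those of the struct v4l2_exportbuffer introduced by this series, and error handling is trimmed.

/* Sketch: export plane 0 of an MMAP capture buffer as a DMABUF fd. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int export_buffer(int video_fd, unsigned int index)
{
	struct v4l2_exportbuffer eb;

	memset(&eb, 0, sizeof(eb));
	eb.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	eb.index = index;
	eb.plane = 0;		/* plane of the buffer to export */
	eb.flags = O_CLOEXEC;	/* the only flag vb2_expbuf() accepts */

	if (ioctl(video_fd, VIDIOC_EXPBUF, &eb) < 0)
		return -1;	/* errno describes the failure */

	return eb.fd;		/* DMABUF fd; close() drops the reference */
}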
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 3ac83583ad7a..438ea45d1074 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -369,6 +369,19 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
 
 /**
+ * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
+ * the type
+ */
+int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+		    struct v4l2_exportbuffer *eb)
+{
+	struct vb2_queue *vq;
+
+	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
+	return vb2_expbuf(vq, eb);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);
+/**
  * v4l2_m2m_streamon() - turn on streaming for a video queue
  */
 int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
@@ -510,12 +523,10 @@ struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops)
 {
 	struct v4l2_m2m_dev *m2m_dev;
 
-	if (!m2m_ops)
+	if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
+	    WARN_ON(!m2m_ops->job_abort))
 		return ERR_PTR(-EINVAL);
 
-	BUG_ON(!m2m_ops->device_run);
-	BUG_ON(!m2m_ops->job_abort);
-
 	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
 	if (!m2m_dev)
 		return ERR_PTR(-ENOMEM);
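
Note: a mem2mem driver hooks the new helper into its v4l2_ioctl_ops; a minimal sketch under assumed driver conventions (the my_fh layout and function name are hypothetical, not part of this patch):

/* Sketch: routing VIDIOC_EXPBUF to v4l2_m2m_expbuf() in an m2m driver. */
struct my_fh {				/* hypothetical per-open state */
	struct v4l2_fh fh;
	struct v4l2_m2m_ctx *m2m_ctx;	/* created in the driver's open() */
};

static int my_vidioc_expbuf(struct file *file, void *priv,
			    struct v4l2_exportbuffer *eb)
{
	struct my_fh *fh = priv;

	/* eb->type selects the source or destination vb2 queue via
	 * v4l2_m2m_get_vq(), as in the helper above. */
	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}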
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index dced41c1d993..996c248dea42 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -412,20 +412,20 @@ static int
 v4l2_subdev_link_validate_get_format(struct media_pad *pad,
 				     struct v4l2_subdev_format *fmt)
 {
-	switch (media_entity_type(pad->entity)) {
-	case MEDIA_ENT_T_V4L2_SUBDEV:
+	if (media_entity_type(pad->entity) == MEDIA_ENT_T_V4L2_SUBDEV) {
+		struct v4l2_subdev *sd =
+			media_entity_to_v4l2_subdev(pad->entity);
+
 		fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
 		fmt->pad = pad->index;
-		return v4l2_subdev_call(media_entity_to_v4l2_subdev(
-						pad->entity),
-					pad, get_fmt, NULL, fmt);
-	default:
-		WARN(1, "Driver bug! Wrong media entity type %d, entity %s\n",
-		     media_entity_type(pad->entity), pad->entity->name);
-		/* Fall through */
-	case MEDIA_ENT_T_DEVNODE_V4L:
-		return -EINVAL;
+		return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
 	}
+
+	WARN(pad->entity->type != MEDIA_ENT_T_DEVNODE_V4L,
+	     "Driver bug! Wrong media entity type 0x%08x, entity %s\n",
+	     pad->entity->type, pad->entity->name);
+
+	return -EINVAL;
 }
 
 int v4l2_subdev_link_validate(struct media_link *link)
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index bf7a326b1cdc..5449e8aa984a 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -335,6 +335,9 @@ static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
 	case V4L2_MEMORY_OVERLAY:
 		b->m.offset = vb->boff;
 		break;
+	case V4L2_MEMORY_DMABUF:
+		/* DMABUF is not handled in videobuf framework */
+		break;
 	}
 
 	b->flags = 0;
@@ -405,6 +408,7 @@ int __videobuf_mmap_setup(struct videobuf_queue *q,
 		break;
 	case V4L2_MEMORY_USERPTR:
 	case V4L2_MEMORY_OVERLAY:
+	case V4L2_MEMORY_DMABUF:
 		/* nothing */
 		break;
 	}
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 432df119af27..9f81be23a81f 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -109,6 +109,36 @@ static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
 }
 
 /**
+ * __vb2_plane_dmabuf_put() - release memory associated with
+ * a DMABUF shared plane
+ */
+static void __vb2_plane_dmabuf_put(struct vb2_queue *q, struct vb2_plane *p)
+{
+	if (!p->mem_priv)
+		return;
+
+	if (p->dbuf_mapped)
+		call_memop(q, unmap_dmabuf, p->mem_priv);
+
+	call_memop(q, detach_dmabuf, p->mem_priv);
+	dma_buf_put(p->dbuf);
+	memset(p, 0, sizeof(*p));
+}
+
+/**
+ * __vb2_buf_dmabuf_put() - release memory associated with
+ * a DMABUF shared buffer
+ */
+static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
+{
+	struct vb2_queue *q = vb->vb2_queue;
+	unsigned int plane;
+
+	for (plane = 0; plane < vb->num_planes; ++plane)
+		__vb2_plane_dmabuf_put(q, &vb->planes[plane]);
+}
+
+/**
  * __setup_offsets() - setup unique offsets ("cookies") for every plane in
  * every buffer on the queue
  */
@@ -230,6 +260,8 @@ static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
 		/* Free MMAP buffers or release USERPTR buffers */
 		if (q->memory == V4L2_MEMORY_MMAP)
 			__vb2_buf_mem_free(vb);
+		else if (q->memory == V4L2_MEMORY_DMABUF)
+			__vb2_buf_dmabuf_put(vb);
 		else
 			__vb2_buf_userptr_put(vb);
 	}
@@ -362,6 +394,8 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
 			b->m.offset = vb->v4l2_planes[0].m.mem_offset;
 		else if (q->memory == V4L2_MEMORY_USERPTR)
 			b->m.userptr = vb->v4l2_planes[0].m.userptr;
+		else if (q->memory == V4L2_MEMORY_DMABUF)
+			b->m.fd = vb->v4l2_planes[0].m.fd;
 	}
 
 	/*
@@ -454,13 +488,28 @@ static int __verify_mmap_ops(struct vb2_queue *q)
 }
 
 /**
+ * __verify_dmabuf_ops() - verify that all memory operations required for
+ * DMABUF queue type have been provided
+ */
+static int __verify_dmabuf_ops(struct vb2_queue *q)
+{
+	if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
+	    !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
+	    !q->mem_ops->unmap_dmabuf)
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
  * __verify_memory_type() - Check whether the memory type and buffer type
  * passed to a buffer operation are compatible with the queue.
  */
 static int __verify_memory_type(struct vb2_queue *q,
 		enum v4l2_memory memory, enum v4l2_buf_type type)
 {
-	if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR) {
+	if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR &&
+	    memory != V4L2_MEMORY_DMABUF) {
 		dprintk(1, "reqbufs: unsupported memory type\n");
 		return -EINVAL;
 	}
@@ -484,6 +533,11 @@ static int __verify_memory_type(struct vb2_queue *q,
 		return -EINVAL;
 	}
 
+	if (memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
+		dprintk(1, "reqbufs: DMABUF for current setup unsupported\n");
+		return -EINVAL;
+	}
+
 	/*
 	 * Place the busy tests at the end: -EBUSY can be ignored when
 	 * create_bufs is called with count == 0, but count == 0 should still
@@ -790,6 +844,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
 {
 	struct vb2_queue *q = vb->vb2_queue;
 	unsigned long flags;
+	unsigned int plane;
 
 	if (vb->state != VB2_BUF_STATE_ACTIVE)
 		return;
@@ -800,6 +855,10 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
 	dprintk(4, "Done processing on buffer %d, state: %d\n",
 			vb->v4l2_buf.index, vb->state);
 
+	/* sync buffers */
+	for (plane = 0; plane < vb->num_planes; ++plane)
+		call_memop(q, finish, vb->planes[plane].mem_priv);
+
 	/* Add the buffer to the done buffers list */
 	spin_lock_irqsave(&q->done_lock, flags);
 	vb->state = state;
@@ -845,6 +904,16 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
 					b->m.planes[plane].length;
 			}
 		}
+		if (b->memory == V4L2_MEMORY_DMABUF) {
+			for (plane = 0; plane < vb->num_planes; ++plane) {
+				v4l2_planes[plane].m.fd =
+					b->m.planes[plane].m.fd;
+				v4l2_planes[plane].length =
+					b->m.planes[plane].length;
+				v4l2_planes[plane].data_offset =
+					b->m.planes[plane].data_offset;
+			}
+		}
 	} else {
 		/*
 		 * Single-planar buffers do not use planes array,
@@ -859,6 +928,13 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
 			v4l2_planes[0].m.userptr = b->m.userptr;
 			v4l2_planes[0].length = b->length;
 		}
+
+		if (b->memory == V4L2_MEMORY_DMABUF) {
+			v4l2_planes[0].m.fd = b->m.fd;
+			v4l2_planes[0].length = b->length;
+			v4l2_planes[0].data_offset = 0;
+		}
+
 	}
 
 	vb->v4l2_buf.field = b->field;
@@ -959,14 +1035,121 @@ static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 }
 
 /**
+ * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer
+ */
+static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+	struct v4l2_plane planes[VIDEO_MAX_PLANES];
+	struct vb2_queue *q = vb->vb2_queue;
+	void *mem_priv;
+	unsigned int plane;
+	int ret;
+	int write = !V4L2_TYPE_IS_OUTPUT(q->type);
+
+	/* Verify and copy relevant information provided by the userspace */
+	__fill_vb2_buffer(vb, b, planes);
+
+	for (plane = 0; plane < vb->num_planes; ++plane) {
+		struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
+
+		if (IS_ERR_OR_NULL(dbuf)) {
+			dprintk(1, "qbuf: invalid dmabuf fd for plane %d\n",
+				plane);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		/* use DMABUF size if length is not provided */
+		if (planes[plane].length == 0)
+			planes[plane].length = dbuf->size;
+
+		if (planes[plane].length < planes[plane].data_offset +
+		    q->plane_sizes[plane]) {
+			ret = -EINVAL;
+			goto err;
+		}
+
+		/* Skip the plane if already verified */
+		if (dbuf == vb->planes[plane].dbuf &&
+		    vb->v4l2_planes[plane].length == planes[plane].length) {
+			dma_buf_put(dbuf);
+			continue;
+		}
+
+		dprintk(1, "qbuf: buffer for plane %d changed\n", plane);
+
+		/* Release previously acquired memory if present */
+		__vb2_plane_dmabuf_put(q, &vb->planes[plane]);
+		memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
+
+		/* Acquire each plane's memory */
+		mem_priv = call_memop(q, attach_dmabuf, q->alloc_ctx[plane],
+			dbuf, planes[plane].length, write);
+		if (IS_ERR(mem_priv)) {
+			dprintk(1, "qbuf: failed to attach dmabuf\n");
+			ret = PTR_ERR(mem_priv);
+			dma_buf_put(dbuf);
+			goto err;
+		}
+
+		vb->planes[plane].dbuf = dbuf;
+		vb->planes[plane].mem_priv = mem_priv;
+	}
+
+	/* TODO: This pins the buffer(s) with dma_buf_map_attachment()).. but
+	 * really we want to do this just before the DMA, not while queueing
+	 * the buffer(s)..
+	 */
+	for (plane = 0; plane < vb->num_planes; ++plane) {
+		ret = call_memop(q, map_dmabuf, vb->planes[plane].mem_priv);
+		if (ret) {
+			dprintk(1, "qbuf: failed to map dmabuf for plane %d\n",
+				plane);
+			goto err;
+		}
+		vb->planes[plane].dbuf_mapped = 1;
+	}
+
+	/*
+	 * Call driver-specific initialization on the newly acquired buffer,
+	 * if provided.
+	 */
+	ret = call_qop(q, buf_init, vb);
+	if (ret) {
+		dprintk(1, "qbuf: buffer initialization failed\n");
+		goto err;
+	}
+
+	/*
+	 * Now that everything is in order, copy relevant information
+	 * provided by userspace.
+	 */
+	for (plane = 0; plane < vb->num_planes; ++plane)
+		vb->v4l2_planes[plane] = planes[plane];
+
+	return 0;
+err:
+	/* In case of errors, release planes that were already acquired */
+	__vb2_buf_dmabuf_put(vb);
+
+	return ret;
+}
+
+/**
  * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
  */
 static void __enqueue_in_driver(struct vb2_buffer *vb)
 {
 	struct vb2_queue *q = vb->vb2_queue;
+	unsigned int plane;
 
 	vb->state = VB2_BUF_STATE_ACTIVE;
 	atomic_inc(&q->queued_count);
+
+	/* sync buffers */
+	for (plane = 0; plane < vb->num_planes; ++plane)
+		call_memop(q, prepare, vb->planes[plane].mem_priv);
+
 	q->ops->buf_queue(vb);
 }
 
@@ -982,6 +1165,9 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 	case V4L2_MEMORY_USERPTR:
 		ret = __qbuf_userptr(vb, b);
 		break;
+	case V4L2_MEMORY_DMABUF:
+		ret = __qbuf_dmabuf(vb, b);
+		break;
 	default:
 		WARN(1, "Invalid queue type\n");
 		ret = -EINVAL;
@@ -1303,6 +1489,30 @@ int vb2_wait_for_all_buffers(struct vb2_queue *q)
 EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
 
 /**
+ * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
+ */
+static void __vb2_dqbuf(struct vb2_buffer *vb)
+{
+	struct vb2_queue *q = vb->vb2_queue;
+	unsigned int i;
+
+	/* nothing to do if the buffer is already dequeued */
+	if (vb->state == VB2_BUF_STATE_DEQUEUED)
+		return;
+
+	vb->state = VB2_BUF_STATE_DEQUEUED;
+
+	/* unmap DMABUF buffer */
+	if (q->memory == V4L2_MEMORY_DMABUF)
+		for (i = 0; i < vb->num_planes; ++i) {
+			if (!vb->planes[i].dbuf_mapped)
+				continue;
+			call_memop(q, unmap_dmabuf, vb->planes[i].mem_priv);
+			vb->planes[i].dbuf_mapped = 0;
+		}
+}
+
+/**
  * vb2_dqbuf() - Dequeue a buffer to the userspace
  * @q:		videobuf2 queue
  * @b:		buffer structure passed from userspace to vidioc_dqbuf handler
@@ -1363,11 +1573,12 @@ int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
 	__fill_v4l2_buffer(vb, b);
 	/* Remove from videobuf queue */
 	list_del(&vb->queued_entry);
+	/* go back to dequeued state */
+	__vb2_dqbuf(vb);
 
 	dprintk(1, "dqbuf of buffer %d, with state %d\n",
 			vb->v4l2_buf.index, vb->state);
 
-	vb->state = VB2_BUF_STATE_DEQUEUED;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(vb2_dqbuf);
@@ -1406,7 +1617,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
 	 * Reinitialize all buffers for next use.
 	 */
 	for (i = 0; i < q->num_buffers; ++i)
-		q->bufs[i]->state = VB2_BUF_STATE_DEQUEUED;
+		__vb2_dqbuf(q->bufs[i]);
 }
 
 /**
@@ -1540,6 +1751,79 @@ static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
 }
 
 /**
+ * vb2_expbuf() - Export a buffer as a file descriptor
+ * @q:		videobuf2 queue
+ * @eb:		export buffer structure passed from userspace to vidioc_expbuf
+ *		handler in driver
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_expbuf handler in driver.
+ */
+int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
+{
+	struct vb2_buffer *vb = NULL;
+	struct vb2_plane *vb_plane;
+	int ret;
+	struct dma_buf *dbuf;
+
+	if (q->memory != V4L2_MEMORY_MMAP) {
+		dprintk(1, "Queue is not currently set up for mmap\n");
+		return -EINVAL;
+	}
+
+	if (!q->mem_ops->get_dmabuf) {
+		dprintk(1, "Queue does not support DMA buffer exporting\n");
+		return -EINVAL;
+	}
+
+	if (eb->flags & ~O_CLOEXEC) {
+		dprintk(1, "Queue does support only O_CLOEXEC flag\n");
+		return -EINVAL;
+	}
+
+	if (eb->type != q->type) {
+		dprintk(1, "qbuf: invalid buffer type\n");
+		return -EINVAL;
+	}
+
+	if (eb->index >= q->num_buffers) {
+		dprintk(1, "buffer index out of range\n");
+		return -EINVAL;
+	}
+
+	vb = q->bufs[eb->index];
+
+	if (eb->plane >= vb->num_planes) {
+		dprintk(1, "buffer plane out of range\n");
+		return -EINVAL;
+	}
+
+	vb_plane = &vb->planes[eb->plane];
+
+	dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv);
+	if (IS_ERR_OR_NULL(dbuf)) {
+		dprintk(1, "Failed to export buffer %d, plane %d\n",
+			eb->index, eb->plane);
+		return -EINVAL;
+	}
+
+	ret = dma_buf_fd(dbuf, eb->flags);
+	if (ret < 0) {
+		dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
			eb->index, eb->plane, ret);
+		dma_buf_put(dbuf);
+		return ret;
+	}
+
+	dprintk(3, "buffer %d, plane %d exported as %d descriptor\n",
+		eb->index, eb->plane, ret);
+	eb->fd = ret;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_expbuf);
+
+/**
 * vb2_mmap() - map video buffers into application address space
 * @q:		videobuf2 queue
 * @vma:	vma passed to the mmap file operation handler in the driver
@@ -2245,6 +2529,16 @@ int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
 }
 EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);
 
+int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	if (vb2_queue_is_busy(vdev, file))
+		return -EBUSY;
+	return vb2_expbuf(vdev->queue, p);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
+
 /* v4l2_file_operations helpers */
 
 int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
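
Note: the importer path added above (__qbuf_dmabuf() and the V4L2_MEMORY_DMABUF plumbing) is driven from userspace roughly as follows. This is an illustrative sketch, not part of the patch; the DMABUF fds are assumed to come from some exporter (a GPU allocator, or VIDIOC_EXPBUF on another device).

/* Sketch: set up a capture queue for DMABUF import and queue one fd. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int setup_dmabuf_queue(int video_fd, unsigned int count)
{
	struct v4l2_requestbuffers req;

	memset(&req, 0, sizeof(req));
	req.count = count;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_DMABUF;	/* checked by __verify_memory_type() */

	return ioctl(video_fd, VIDIOC_REQBUFS, &req);
}

static int queue_dmabuf(int video_fd, unsigned int index, int dmabuf_fd)
{
	struct v4l2_buffer buf;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_DMABUF;
	buf.index = index;
	buf.m.fd = dmabuf_fd;	/* looked up with dma_buf_get() in __qbuf_dmabuf() */

	return ioctl(video_fd, VIDIOC_QBUF, &buf);
}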
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 4b7132660a93..10beaee7f0ae 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -10,7 +10,10 @@
  * the Free Software Foundation.
  */
 
+#include <linux/dma-buf.h>
 #include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/dma-mapping.h>
 
@@ -23,40 +26,158 @@ struct vb2_dc_conf {
 };
 
 struct vb2_dc_buf {
-	struct vb2_dc_conf		*conf;
+	struct device			*dev;
 	void				*vaddr;
-	dma_addr_t			dma_addr;
 	unsigned long			size;
-	struct vm_area_struct		*vma;
-	atomic_t			refcount;
+	dma_addr_t			dma_addr;
+	enum dma_data_direction		dma_dir;
+	struct sg_table			*dma_sgt;
+
+	/* MMAP related */
 	struct vb2_vmarea_handler	handler;
+	atomic_t			refcount;
+	struct sg_table			*sgt_base;
+
+	/* USERPTR related */
+	struct vm_area_struct		*vma;
+
+	/* DMABUF related */
+	struct dma_buf_attachment	*db_attach;
 };
 
-static void vb2_dma_contig_put(void *buf_priv);
+/*********************************************/
+/*        scatterlist table functions        */
+/*********************************************/
+
+
+static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
+	void (*cb)(struct page *pg))
+{
+	struct scatterlist *s;
+	unsigned int i;
+
+	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
+		struct page *page = sg_page(s);
+		unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
+			>> PAGE_SHIFT;
+		unsigned int j;
+
+		for (j = 0; j < n_pages; ++j, ++page)
+			cb(page);
+	}
+}
+
+static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
+{
+	struct scatterlist *s;
+	dma_addr_t expected = sg_dma_address(sgt->sgl);
+	unsigned int i;
+	unsigned long size = 0;
+
+	for_each_sg(sgt->sgl, s, sgt->nents, i) {
+		if (sg_dma_address(s) != expected)
+			break;
+		expected = sg_dma_address(s) + sg_dma_len(s);
+		size += sg_dma_len(s);
+	}
+	return size;
+}
+
+/*********************************************/
+/*        callbacks for all buffers          */
+/*********************************************/
+
+static void *vb2_dc_cookie(void *buf_priv)
+{
+	struct vb2_dc_buf *buf = buf_priv;
+
+	return &buf->dma_addr;
+}
+
+static void *vb2_dc_vaddr(void *buf_priv)
+{
+	struct vb2_dc_buf *buf = buf_priv;
+
+	return buf->vaddr;
+}
+
+static unsigned int vb2_dc_num_users(void *buf_priv)
+{
+	struct vb2_dc_buf *buf = buf_priv;
+
+	return atomic_read(&buf->refcount);
+}
+
+static void vb2_dc_prepare(void *buf_priv)
+{
+	struct vb2_dc_buf *buf = buf_priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	/* DMABUF exporter will flush the cache for us */
+	if (!sgt || buf->db_attach)
+		return;
+
+	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
+static void vb2_dc_finish(void *buf_priv)
+{
+	struct vb2_dc_buf *buf = buf_priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	/* DMABUF exporter will flush the cache for us */
+	if (!sgt || buf->db_attach)
+		return;
+
+	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
+/*********************************************/
+/*        callbacks for MMAP buffers         */
+/*********************************************/
+
+static void vb2_dc_put(void *buf_priv)
+{
+	struct vb2_dc_buf *buf = buf_priv;
+
+	if (!atomic_dec_and_test(&buf->refcount))
+		return;
+
+	if (buf->sgt_base) {
+		sg_free_table(buf->sgt_base);
+		kfree(buf->sgt_base);
+	}
+	dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
+	put_device(buf->dev);
+	kfree(buf);
+}
 
-static void *vb2_dma_contig_alloc(void *alloc_ctx, unsigned long size)
+static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
 {
 	struct vb2_dc_conf *conf = alloc_ctx;
+	struct device *dev = conf->dev;
 	struct vb2_dc_buf *buf;
 
 	buf = kzalloc(sizeof *buf, GFP_KERNEL);
 	if (!buf)
 		return ERR_PTR(-ENOMEM);
 
-	buf->vaddr = dma_alloc_coherent(conf->dev, size, &buf->dma_addr,
-					GFP_KERNEL);
+	/* align image size to PAGE_SIZE */
+	size = PAGE_ALIGN(size);
+
+	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL);
 	if (!buf->vaddr) {
-		dev_err(conf->dev, "dma_alloc_coherent of size %ld failed\n",
-			size);
+		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
 		kfree(buf);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	buf->conf = conf;
+	/* Prevent the device from being released while the buffer is used */
+	buf->dev = get_device(dev);
 	buf->size = size;
 
 	buf->handler.refcount = &buf->refcount;
-	buf->handler.put = vb2_dma_contig_put;
+	buf->handler.put = vb2_dc_put;
 	buf->handler.arg = buf;
 
 	atomic_inc(&buf->refcount);
@@ -64,100 +185,569 @@ static void *vb2_dma_contig_alloc(void *alloc_ctx, unsigned long size)
 	return buf;
 }
 
-static void vb2_dma_contig_put(void *buf_priv)
+static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
 {
 	struct vb2_dc_buf *buf = buf_priv;
+	int ret;
 
-	if (atomic_dec_and_test(&buf->refcount)) {
-		dma_free_coherent(buf->conf->dev, buf->size, buf->vaddr,
-				  buf->dma_addr);
-		kfree(buf);
+	if (!buf) {
+		printk(KERN_ERR "No buffer to map\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
+	 * map whole buffer
+	 */
+	vma->vm_pgoff = 0;
+
+	ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
+		buf->dma_addr, buf->size);
+
+	if (ret) {
+		pr_err("Remapping memory failed, error: %d\n", ret);
+		return ret;
 	}
+
+	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_private_data	= &buf->handler;
+	vma->vm_ops		= &vb2_common_vm_ops;
+
+	vma->vm_ops->open(vma);
+
+	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
+		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
+		buf->size);
+
+	return 0;
 }
 
-static void *vb2_dma_contig_cookie(void *buf_priv)
+/*********************************************/
+/*         DMABUF ops for exporters          */
+/*********************************************/
+
+struct vb2_dc_attachment {
+	struct sg_table sgt;
+	enum dma_data_direction dir;
+};
+
+static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+	struct dma_buf_attachment *dbuf_attach)
 {
-	struct vb2_dc_buf *buf = buf_priv;
+	struct vb2_dc_attachment *attach;
+	unsigned int i;
+	struct scatterlist *rd, *wr;
+	struct sg_table *sgt;
+	struct vb2_dc_buf *buf = dbuf->priv;
+	int ret;
 
-	return &buf->dma_addr;
+	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+	if (!attach)
+		return -ENOMEM;
+
+	sgt = &attach->sgt;
+	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
+	 * map the same scatter list to multiple attachments at the same time.
+	 */
+	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
+	if (ret) {
+		kfree(attach);
+		return -ENOMEM;
+	}
+
+	rd = buf->sgt_base->sgl;
+	wr = sgt->sgl;
+	for (i = 0; i < sgt->orig_nents; ++i) {
+		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+		rd = sg_next(rd);
+		wr = sg_next(wr);
+	}
+
+	attach->dir = DMA_NONE;
+	dbuf_attach->priv = attach;
+
+	return 0;
 }
 
-static void *vb2_dma_contig_vaddr(void *buf_priv)
+static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
+	struct dma_buf_attachment *db_attach)
 {
-	struct vb2_dc_buf *buf = buf_priv;
-	if (!buf)
-		return NULL;
+	struct vb2_dc_attachment *attach = db_attach->priv;
+	struct sg_table *sgt;
+
+	if (!attach)
+		return;
+
+	sgt = &attach->sgt;
+
+	/* release the scatterlist cache */
+	if (attach->dir != DMA_NONE)
+		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+			attach->dir);
+	sg_free_table(sgt);
+	kfree(attach);
+	db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_dc_dmabuf_ops_map(
+	struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
+{
+	struct vb2_dc_attachment *attach = db_attach->priv;
+	/* stealing dmabuf mutex to serialize map/unmap operations */
+	struct mutex *lock = &db_attach->dmabuf->lock;
+	struct sg_table *sgt;
+	int ret;
+
+	mutex_lock(lock);
+
+	sgt = &attach->sgt;
+	/* return previously mapped sg table */
+	if (attach->dir == dir) {
+		mutex_unlock(lock);
+		return sgt;
+	}
+
+	/* release any previous cache */
+	if (attach->dir != DMA_NONE) {
+		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+			attach->dir);
+		attach->dir = DMA_NONE;
+	}
+
+	/* mapping to the client with new direction */
+	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
+	if (ret <= 0) {
+		pr_err("failed to map scatterlist\n");
+		mutex_unlock(lock);
+		return ERR_PTR(-EIO);
+	}
+
+	attach->dir = dir;
+
+	mutex_unlock(lock);
+
+	return sgt;
+}
+
+static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+	struct sg_table *sgt, enum dma_data_direction dir)
+{
+	/* nothing to be done here */
+}
+
+static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+	/* drop reference obtained in vb2_dc_get_dmabuf */
+	vb2_dc_put(dbuf->priv);
+}
+
+static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+{
+	struct vb2_dc_buf *buf = dbuf->priv;
+
+	return buf->vaddr + pgnum * PAGE_SIZE;
+}
+
+static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+{
+	struct vb2_dc_buf *buf = dbuf->priv;
 
 	return buf->vaddr;
 }
 
-static unsigned int vb2_dma_contig_num_users(void *buf_priv)
+static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
+	struct vm_area_struct *vma)
 {
-	struct vb2_dc_buf *buf = buf_priv;
+	return vb2_dc_mmap(dbuf->priv, vma);
+}
 
-	return atomic_read(&buf->refcount);
+static struct dma_buf_ops vb2_dc_dmabuf_ops = {
+	.attach = vb2_dc_dmabuf_ops_attach,
+	.detach = vb2_dc_dmabuf_ops_detach,
+	.map_dma_buf = vb2_dc_dmabuf_ops_map,
+	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
+	.kmap = vb2_dc_dmabuf_ops_kmap,
+	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
+	.vmap = vb2_dc_dmabuf_ops_vmap,
+	.mmap = vb2_dc_dmabuf_ops_mmap,
+	.release = vb2_dc_dmabuf_ops_release,
+};
+
+static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
+{
+	int ret;
+	struct sg_table *sgt;
+
+	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt) {
+		dev_err(buf->dev, "failed to alloc sg table\n");
+		return NULL;
+	}
+
+	ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
+		buf->size);
+	if (ret < 0) {
+		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
+		kfree(sgt);
+		return NULL;
+	}
+
+	return sgt;
 }
 
-static int vb2_dma_contig_mmap(void *buf_priv, struct vm_area_struct *vma)
+static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
 {
 	struct vb2_dc_buf *buf = buf_priv;
+	struct dma_buf *dbuf;
 
-	if (!buf) {
-		printk(KERN_ERR "No buffer to map\n");
-		return -EINVAL;
+	if (!buf->sgt_base)
+		buf->sgt_base = vb2_dc_get_base_sgt(buf);
+
+	if (WARN_ON(!buf->sgt_base))
+		return NULL;
+
+	dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
+	if (IS_ERR(dbuf))
+		return NULL;
+
+	/* dmabuf keeps reference to vb2 buffer */
+	atomic_inc(&buf->refcount);
+
+	return dbuf;
+}
+
+/*********************************************/
+/*       callbacks for USERPTR buffers       */
+/*********************************************/
+
+static inline int vma_is_io(struct vm_area_struct *vma)
+{
+	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
+
+static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
+	int n_pages, struct vm_area_struct *vma, int write)
+{
+	if (vma_is_io(vma)) {
+		unsigned int i;
+
+		for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
+			unsigned long pfn;
+			int ret = follow_pfn(vma, start, &pfn);
+
+			if (ret) {
+				pr_err("no page for address %lu\n", start);
+				return ret;
+			}
+			pages[i] = pfn_to_page(pfn);
+		}
+	} else {
+		int n;
+
+		n = get_user_pages(current, current->mm, start & PAGE_MASK,
+			n_pages, write, 1, pages, NULL);
+		/* negative error means that no page was pinned */
+		n = max(n, 0);
+		if (n != n_pages) {
+			pr_err("got only %d of %d user pages\n", n, n_pages);
+			while (n)
+				put_page(pages[--n]);
+			return -EFAULT;
+		}
 	}
 
-	return vb2_mmap_pfn_range(vma, buf->dma_addr, buf->size,
-				  &vb2_common_vm_ops, &buf->handler);
+	return 0;
 }
 
-static void *vb2_dma_contig_get_userptr(void *alloc_ctx, unsigned long vaddr,
-					unsigned long size, int write)
+static void vb2_dc_put_dirty_page(struct page *page)
 {
+	set_page_dirty_lock(page);
+	put_page(page);
+}
+
+static void vb2_dc_put_userptr(void *buf_priv)
+{
+	struct vb2_dc_buf *buf = buf_priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+	if (!vma_is_io(buf->vma))
+		vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
+
+	sg_free_table(sgt);
+	kfree(sgt);
+	vb2_put_vma(buf->vma);
+	kfree(buf);
+}
+
+static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
+	unsigned long size, int write)
+{
+	struct vb2_dc_conf *conf = alloc_ctx;
 	struct vb2_dc_buf *buf;
+	unsigned long start;
+	unsigned long end;
+	unsigned long offset;
+	struct page **pages;
+	int n_pages;
+	int ret = 0;
 	struct vm_area_struct *vma;
-	dma_addr_t dma_addr = 0;
-	int ret;
+	struct sg_table *sgt;
+	unsigned long contig_size;
+	unsigned long dma_align = dma_get_cache_alignment();
+
+	/* Only cache aligned DMA transfers are reliable */
+	if (!IS_ALIGNED(vaddr | size, dma_align)) {
+		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (!size) {
+		pr_debug("size is zero\n");
+		return ERR_PTR(-EINVAL);
+	}
 
 	buf = kzalloc(sizeof *buf, GFP_KERNEL);
 	if (!buf)
 		return ERR_PTR(-ENOMEM);
 
-	ret = vb2_get_contig_userptr(vaddr, size, &vma, &dma_addr);
+	buf->dev = conf->dev;
+	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+	start = vaddr & PAGE_MASK;
+	offset = vaddr & ~PAGE_MASK;
+	end = PAGE_ALIGN(vaddr + size);
+	n_pages = (end - start) >> PAGE_SHIFT;
+
+	pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
+	if (!pages) {
+		ret = -ENOMEM;
+		pr_err("failed to allocate pages table\n");
+		goto fail_buf;
+	}
+
+	/* current->mm->mmap_sem is taken by videobuf2 core */
+	vma = find_vma(current->mm, vaddr);
+	if (!vma) {
+		pr_err("no vma for address %lu\n", vaddr);
+		ret = -EFAULT;
+		goto fail_pages;
+	}
+
+	if (vma->vm_end < vaddr + size) {
+		pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
+		ret = -EFAULT;
+		goto fail_pages;
+	}
+
+	buf->vma = vb2_get_vma(vma);
+	if (!buf->vma) {
+		pr_err("failed to copy vma\n");
+		ret = -ENOMEM;
+		goto fail_pages;
+	}
+
+	/* extract page list from userspace mapping */
+	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
 	if (ret) {
-		printk(KERN_ERR "Failed acquiring VMA for vaddr 0x%08lx\n",
-				vaddr);
-		kfree(buf);
-		return ERR_PTR(ret);
+		pr_err("failed to get user pages\n");
+		goto fail_vma;
+	}
+
+	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt) {
+		pr_err("failed to allocate sg table\n");
+		ret = -ENOMEM;
+		goto fail_get_user_pages;
+	}
+
+	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
+		offset, size, GFP_KERNEL);
+	if (ret) {
+		pr_err("failed to initialize sg table\n");
+		goto fail_sgt;
 	}
 
+	/* pages are no longer needed */
+	kfree(pages);
+	pages = NULL;
+
+	sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
+		buf->dma_dir);
+	if (sgt->nents <= 0) {
+		pr_err("failed to map scatterlist\n");
+		ret = -EIO;
+		goto fail_sgt_init;
+	}
+
+	contig_size = vb2_dc_get_contiguous_size(sgt);
+	if (contig_size < size) {
+		pr_err("contiguous mapping is too small %lu/%lu\n",
+			contig_size, size);
+		ret = -EFAULT;
+		goto fail_map_sg;
+	}
+
+	buf->dma_addr = sg_dma_address(sgt->sgl);
 	buf->size = size;
-	buf->dma_addr = dma_addr;
-	buf->vma = vma;
+	buf->dma_sgt = sgt;
 
 	return buf;
+
+fail_map_sg:
+	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+
+fail_sgt_init:
+	if (!vma_is_io(buf->vma))
+		vb2_dc_sgt_foreach_page(sgt, put_page);
+	sg_free_table(sgt);
+
+fail_sgt:
+	kfree(sgt);
+
+fail_get_user_pages:
+	if (pages && !vma_is_io(buf->vma))
+		while (n_pages)
+			put_page(pages[--n_pages]);
+
+fail_vma:
+	vb2_put_vma(buf->vma);
+
+fail_pages:
+	kfree(pages); /* kfree is NULL-proof */
+
+fail_buf:
+	kfree(buf);
+
+	return ERR_PTR(ret);
 }
 
-static void vb2_dma_contig_put_userptr(void *mem_priv)
+/*********************************************/
+/*       callbacks for DMABUF buffers        */
+/*********************************************/
+
+static int vb2_dc_map_dmabuf(void *mem_priv)
 {
 	struct vb2_dc_buf *buf = mem_priv;
+	struct sg_table *sgt;
+	unsigned long contig_size;
 
-	if (!buf)
+	if (WARN_ON(!buf->db_attach)) {
+		pr_err("trying to pin a non attached buffer\n");
+		return -EINVAL;
+	}
+
+	if (WARN_ON(buf->dma_sgt)) {
+		pr_err("dmabuf buffer is already pinned\n");
+		return 0;
+	}
+
+	/* get the associated scatterlist for this buffer */
+	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
+	if (IS_ERR_OR_NULL(sgt)) {
+		pr_err("Error getting dmabuf scatterlist\n");
+		return -EINVAL;
+	}
+
+	/* checking if dmabuf is big enough to store contiguous chunk */
+	contig_size = vb2_dc_get_contiguous_size(sgt);
+	if (contig_size < buf->size) {
+		pr_err("contiguous chunk is too small %lu/%lu b\n",
+			contig_size, buf->size);
+		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+		return -EFAULT;
+	}
+
+	buf->dma_addr = sg_dma_address(sgt->sgl);
+	buf->dma_sgt = sgt;
+
+	return 0;
+}
+
+static void vb2_dc_unmap_dmabuf(void *mem_priv)
+{
+	struct vb2_dc_buf *buf = mem_priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	if (WARN_ON(!buf->db_attach)) {
+		pr_err("trying to unpin a not attached buffer\n");
 		return;
+	}
 
-	vb2_put_vma(buf->vma);
+	if (WARN_ON(!sgt)) {
+		pr_err("dmabuf buffer is already unpinned\n");
+		return;
+	}
+
+	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+
+	buf->dma_addr = 0;
+	buf->dma_sgt = NULL;
+}
+
+static void vb2_dc_detach_dmabuf(void *mem_priv)
+{
+	struct vb2_dc_buf *buf = mem_priv;
+
+	/* if vb2 works correctly you should never detach mapped buffer */
+	if (WARN_ON(buf->dma_addr))
+		vb2_dc_unmap_dmabuf(buf);
+
+	/* detach this attachment */
+	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
 	kfree(buf);
 }
 
+static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
+	unsigned long size, int write)
+{
+	struct vb2_dc_conf *conf = alloc_ctx;
+	struct vb2_dc_buf *buf;
+	struct dma_buf_attachment *dba;
+
+	if (dbuf->size < size)
+		return ERR_PTR(-EFAULT);
+
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	buf->dev = conf->dev;
+	/* create attachment for the dmabuf with the user device */
+	dba = dma_buf_attach(dbuf, buf->dev);
+	if (IS_ERR(dba)) {
+		pr_err("failed to attach dmabuf\n");
+		kfree(buf);
+		return dba;
+	}
+
+	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+	buf->size = size;
+	buf->db_attach = dba;
+
+	return buf;
+}
+
+/*********************************************/
+/*       DMA CONTIG exported functions       */
+/*********************************************/
+
 const struct vb2_mem_ops vb2_dma_contig_memops = {
-	.alloc		= vb2_dma_contig_alloc,
-	.put		= vb2_dma_contig_put,
-	.cookie		= vb2_dma_contig_cookie,
-	.vaddr		= vb2_dma_contig_vaddr,
-	.mmap		= vb2_dma_contig_mmap,
-	.get_userptr	= vb2_dma_contig_get_userptr,
-	.put_userptr	= vb2_dma_contig_put_userptr,
-	.num_users	= vb2_dma_contig_num_users,
+	.alloc		= vb2_dc_alloc,
+	.put		= vb2_dc_put,
+	.get_dmabuf	= vb2_dc_get_dmabuf,
+	.cookie		= vb2_dc_cookie,
+	.vaddr		= vb2_dc_vaddr,
+	.mmap		= vb2_dc_mmap,
+	.get_userptr	= vb2_dc_get_userptr,
+	.put_userptr	= vb2_dc_put_userptr,
+	.prepare	= vb2_dc_prepare,
+	.finish		= vb2_dc_finish,
+	.map_dmabuf	= vb2_dc_map_dmabuf,
+	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
+	.attach_dmabuf	= vb2_dc_attach_dmabuf,
+	.detach_dmabuf	= vb2_dc_detach_dmabuf,
+	.num_users	= vb2_dc_num_users,
 };
 EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
 
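Note: a driver opts in to these dma-contig DMABUF callbacks through its vb2_queue setup. A minimal sketch follows (illustrative only, not part of this patch; my_vb2_ops and the function name are hypothetical driver code):

/* Sketch: enabling DMABUF import on a dma-contig capture queue. VB2_DMABUF
 * in io_modes is what __verify_dmabuf_ops() requires, and
 * vb2_dma_contig_memops now supplies the attach/detach/map/unmap_dmabuf
 * callbacks. */
#include <media/videobuf2-dma-contig.h>

static const struct vb2_ops my_vb2_ops;		/* queue_setup, buf_queue, ... */

static int my_init_queue(struct vb2_queue *q, void *drv_priv)
{
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	q->drv_priv = drv_priv;
	q->ops = &my_vb2_ops;
	q->mem_ops = &vb2_dma_contig_memops;

	return vb2_queue_init(q);
}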
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index 051ea3571b20..81c1ad8b2cf1 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -137,46 +137,6 @@ int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
 EXPORT_SYMBOL_GPL(vb2_get_contig_userptr);
 
 /**
- * vb2_mmap_pfn_range() - map physical pages to userspace
- * @vma:	virtual memory region for the mapping
- * @paddr:	starting physical address of the memory to be mapped
- * @size:	size of the memory to be mapped
- * @vm_ops:	vm operations to be assigned to the created area
- * @priv:	private data to be associated with the area
- *
- * Returns 0 on success.
- */
-int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr,
-				unsigned long size,
-				const struct vm_operations_struct *vm_ops,
-				void *priv)
-{
-	int ret;
-
-	size = min_t(unsigned long, vma->vm_end - vma->vm_start, size);
-
-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-	ret = remap_pfn_range(vma, vma->vm_start, paddr >> PAGE_SHIFT,
-				size, vma->vm_page_prot);
-	if (ret) {
-		printk(KERN_ERR "Remapping memory failed, error: %d\n", ret);
-		return ret;
-	}
-
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-	vma->vm_private_data = priv;
-	vma->vm_ops = vm_ops;
-
-	vma->vm_ops->open(vma);
-
-	pr_debug("%s: mapped paddr 0x%08lx at 0x%08lx, size %ld\n",
-			__func__, paddr, vma->vm_start, size);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(vb2_mmap_pfn_range);
-
-/**
  * vb2_common_vm_open() - increase refcount of the vma
  * @vma:	virtual memory region for the mapping
  *
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index 94efa04d8d55..a47fd4f589a1 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -30,6 +30,7 @@ struct vb2_vmalloc_buf {
 	unsigned int			n_pages;
 	atomic_t			refcount;
 	struct vb2_vmarea_handler	handler;
+	struct dma_buf			*dbuf;
 };
 
 static void vb2_vmalloc_put(void *buf_priv);
@@ -207,11 +208,66 @@ static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
 	return 0;
 }
 
+/*********************************************/
+/*       callbacks for DMABUF buffers        */
+/*********************************************/
+
+static int vb2_vmalloc_map_dmabuf(void *mem_priv)
+{
+	struct vb2_vmalloc_buf *buf = mem_priv;
+
+	buf->vaddr = dma_buf_vmap(buf->dbuf);
+
+	return buf->vaddr ? 0 : -EFAULT;
+}
+
+static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
+{
+	struct vb2_vmalloc_buf *buf = mem_priv;
+
+	dma_buf_vunmap(buf->dbuf, buf->vaddr);
+	buf->vaddr = NULL;
+}
+
+static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
+{
+	struct vb2_vmalloc_buf *buf = mem_priv;
+
+	if (buf->vaddr)
+		dma_buf_vunmap(buf->dbuf, buf->vaddr);
+
+	kfree(buf);
+}
+
+static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
+	unsigned long size, int write)
+{
+	struct vb2_vmalloc_buf *buf;
+
+	if (dbuf->size < size)
+		return ERR_PTR(-EFAULT);
+
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	buf->dbuf = dbuf;
+	buf->write = write;
+	buf->size = size;
+
+	return buf;
+}
+
+
 const struct vb2_mem_ops vb2_vmalloc_memops = {
 	.alloc		= vb2_vmalloc_alloc,
 	.put		= vb2_vmalloc_put,
 	.get_userptr	= vb2_vmalloc_get_userptr,
 	.put_userptr	= vb2_vmalloc_put_userptr,
+	.map_dmabuf	= vb2_vmalloc_map_dmabuf,
+	.unmap_dmabuf	= vb2_vmalloc_unmap_dmabuf,
+	.attach_dmabuf	= vb2_vmalloc_attach_dmabuf,
+	.detach_dmabuf	= vb2_vmalloc_detach_dmabuf,
 	.vaddr		= vb2_vmalloc_vaddr,
 	.mmap		= vb2_vmalloc_mmap,
 	.num_users	= vb2_vmalloc_num_users,