aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJunghak Sung <jh1009.sung@samsung.com>2015-09-22 09:30:30 -0400
committerMauro Carvalho Chehab <mchehab@osg.samsung.com>2015-10-01 08:04:43 -0400
commit2d7007153f0c9b1dd00c01894df7d26ddc32b79f (patch)
tree8320f9d22f45dd7dcea64088b50ff706bb0082b2
parentc139990e842d550db2f59bd4f5993bba90f140e0 (diff)
[media] media: videobuf2: Restructure vb2_buffer
Remove v4l2 stuff - v4l2_buf, v4l2_plane - from struct vb2_buffer.

Add new member variables - bytesused, length, offset, userptr, fd,
data_offset - to struct vb2_plane in order to cover all information
of v4l2_plane.

struct vb2_plane {
	<snip>
	unsigned int		bytesused;
	unsigned int		length;
	union {
		unsigned int	offset;
		unsigned long	userptr;
		int		fd;
	} m;
	unsigned int		data_offset;
}

Replace v4l2_buf with new member variables - index, type, memory - which
are common fields for buffer management.

struct vb2_buffer {
	<snip>
	unsigned int		index;
	unsigned int		type;
	unsigned int		memory;
	unsigned int		num_planes;
	struct vb2_plane	planes[VIDEO_MAX_PLANES];
	<snip>
};

v4l2 specific fields - flags, field, timestamp, timecode, sequence -
are moved to vb2_v4l2_buffer in videobuf2-v4l2.c

struct vb2_v4l2_buffer {
	struct vb2_buffer	vb2_buf;
	__u32			flags;
	__u32			field;
	struct timeval		timestamp;
	struct v4l2_timecode	timecode;
	__u32			sequence;
};

Signed-off-by: Junghak Sung <jh1009.sung@samsung.com>
Signed-off-by: Geunyoung Kim <nenggun.kim@samsung.com>
Acked-by: Seung-Woo Kim <sw0312.kim@samsung.com>
Acked-by: Inki Dae <inki.dae@samsung.com>
Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
-rw-r--r--drivers/input/touchscreen/sur40.c17
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c21
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.h6
-rw-r--r--drivers/media/pci/cobalt/cobalt-irq.c7
-rw-r--r--drivers/media/pci/cobalt/cobalt-v4l2.c20
-rw-r--r--drivers/media/pci/cx23885/cx23885-417.c11
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c24
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c9
-rw-r--r--drivers/media/pci/cx23885/cx23885-vbi.c16
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c27
-rw-r--r--drivers/media/pci/cx23885/cx23885.h2
-rw-r--r--drivers/media/pci/cx25821/cx25821-video.c21
-rw-r--r--drivers/media/pci/cx25821/cx25821.h3
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c13
-rw-r--r--drivers/media/pci/cx88/cx88-core.c8
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c11
-rw-r--r--drivers/media/pci/cx88/cx88-mpeg.c14
-rw-r--r--drivers/media/pci/cx88/cx88-vbi.c17
-rw-r--r--drivers/media/pci/cx88/cx88-video.c19
-rw-r--r--drivers/media/pci/cx88/cx88.h2
-rw-r--r--drivers/media/pci/dt3155/dt3155.c17
-rw-r--r--drivers/media/pci/dt3155/dt3155.h3
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c19
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c14
-rw-r--r--drivers/media/pci/saa7134/saa7134-ts.c14
-rw-r--r--drivers/media/pci/saa7134/saa7134-vbi.c10
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c21
-rw-r--r--drivers/media/pci/saa7134/saa7134.h2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c46
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2.c19
-rw-r--r--drivers/media/pci/solo6x10/solo6x10.h2
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c26
-rw-r--r--drivers/media/pci/tw68/tw68-video.c19
-rw-r--r--drivers/media/pci/tw68/tw68.h3
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c35
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.h3
-rw-r--r--drivers/media/platform/blackfin/bfin_capture.c34
-rw-r--r--drivers/media/platform/coda/coda-bit.c133
-rw-r--r--drivers/media/platform/coda/coda-common.c21
-rw-r--r--drivers/media/platform/coda/coda-jpeg.c6
-rw-r--r--drivers/media/platform/coda/coda.h6
-rw-r--r--drivers/media/platform/coda/trace.h16
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c31
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c30
-rw-r--r--drivers/media/platform/davinci/vpif_capture.h2
-rw-r--r--drivers/media/platform/davinci/vpif_display.c39
-rw-r--r--drivers/media/platform/davinci/vpif_display.h2
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.h2
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c23
-rw-r--r--drivers/media/platform/exynos4-is/fimc-capture.c22
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.h2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.c13
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.h2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c15
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.h2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-m2m.c19
-rw-r--r--drivers/media/platform/m2m-deinterlace.c23
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c43
-rw-r--r--drivers/media/platform/mx2_emmaprp.c15
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c25
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.h2
-rw-r--r--drivers/media/platform/rcar_jpu.c59
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c15
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.h2
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c15
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c30
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c78
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_common.h2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c15
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c58
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c46
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c33
-rw-r--r--drivers/media/platform/s5p-tv/mixer.h2
-rw-r--r--drivers/media/platform/s5p-tv/mixer_grp_layer.c2
-rw-r--r--drivers/media/platform/s5p-tv/mixer_reg.c2
-rw-r--r--drivers/media/platform/s5p-tv/mixer_video.c11
-rw-r--r--drivers/media/platform/s5p-tv/mixer_vp_layer.c5
-rw-r--r--drivers/media/platform/sh_veu.c19
-rw-r--r--drivers/media/platform/sh_vou.c26
-rw-r--r--drivers/media/platform/soc_camera/atmel-isi.c26
-rw-r--r--drivers/media/platform/soc_camera/mx2_camera.c19
-rw-r--r--drivers/media/platform/soc_camera/mx3_camera.c27
-rw-r--r--drivers/media/platform/soc_camera/rcar_vin.c45
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c57
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c23
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c40
-rw-r--r--drivers/media/platform/vim2m.c51
-rw-r--r--drivers/media/platform/vivid/vivid-core.h2
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-cap.c73
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-out.c34
-rw-r--r--drivers/media/platform/vivid/vivid-sdr-cap.c44
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-cap.c45
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-out.c18
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c15
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c15
-rw-r--r--drivers/media/platform/vsp1/vsp1_rpf.c4
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c18
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.h6
-rw-r--r--drivers/media/platform/vsp1/vsp1_wpf.c4
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.c24
-rw-r--r--drivers/media/usb/airspy/airspy.c24
-rw-r--r--drivers/media/usb/au0828/au0828-vbi.c7
-rw-r--r--drivers/media/usb/au0828/au0828-video.c45
-rw-r--r--drivers/media/usb/au0828/au0828.h3
-rw-r--r--drivers/media/usb/em28xx/em28xx-vbi.c7
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c34
-rw-r--r--drivers/media/usb/em28xx/em28xx.h3
-rw-r--r--drivers/media/usb/go7007/go7007-driver.c29
-rw-r--r--drivers/media/usb/go7007/go7007-priv.h2
-rw-r--r--drivers/media/usb/go7007/go7007-v4l2.c20
-rw-r--r--drivers/media/usb/hackrf/hackrf.c22
-rw-r--r--drivers/media/usb/msi2500/msi2500.c17
-rw-r--r--drivers/media/usb/pwc/pwc-if.c33
-rw-r--r--drivers/media/usb/pwc/pwc-uncompress.c6
-rw-r--r--drivers/media/usb/pwc/pwc.h4
-rw-r--r--drivers/media/usb/s2255/s2255drv.c27
-rw-r--r--drivers/media/usb/stk1160/stk1160-v4l.c15
-rw-r--r--drivers/media/usb/stk1160/stk1160-video.c12
-rw-r--r--drivers/media/usb/stk1160/stk1160.h2
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c19
-rw-r--r--drivers/media/usb/usbtv/usbtv.h3
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c26
-rw-r--r--drivers/media/usb/uvc/uvc_video.c20
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h4
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c8
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c219
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c43
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.h3
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c21
-rw-r--r--drivers/staging/media/omap4iss/iss_video.h4
-rw-r--r--drivers/usb/gadget/function/uvc_queue.c26
-rw-r--r--drivers/usb/gadget/function/uvc_queue.h2
-rw-r--r--include/media/davinci/vpbe_display.h3
-rw-r--r--include/media/v4l2-mem2mem.h9
-rw-r--r--include/media/videobuf2-core.h66
-rw-r--r--include/media/videobuf2-v4l2.h28
-rw-r--r--include/trace/events/v4l2.h35
137 files changed, 1647 insertions, 1263 deletions
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 3f117637e832..98d094587e85 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -38,6 +38,7 @@
38#include <media/v4l2-device.h> 38#include <media/v4l2-device.h>
39#include <media/v4l2-dev.h> 39#include <media/v4l2-dev.h>
40#include <media/v4l2-ioctl.h> 40#include <media/v4l2-ioctl.h>
41#include <media/videobuf2-v4l2.h>
41#include <media/videobuf2-dma-sg.h> 42#include <media/videobuf2-dma-sg.h>
42 43
43/* read 512 bytes from endpoint 0x86 -> get header + blobs */ 44/* read 512 bytes from endpoint 0x86 -> get header + blobs */
@@ -163,7 +164,7 @@ struct sur40_state {
163}; 164};
164 165
165struct sur40_buffer { 166struct sur40_buffer {
166 struct vb2_buffer vb; 167 struct vb2_v4l2_buffer vb;
167 struct list_head list; 168 struct list_head list;
168}; 169};
169 170
@@ -420,7 +421,7 @@ static void sur40_process_video(struct sur40_state *sur40)
420 421
421 dev_dbg(sur40->dev, "header acquired\n"); 422 dev_dbg(sur40->dev, "header acquired\n");
422 423
423 sgt = vb2_dma_sg_plane_desc(&new_buf->vb, 0); 424 sgt = vb2_dma_sg_plane_desc(&new_buf->vb.vb2_buf, 0);
424 425
425 result = usb_sg_init(&sgr, sur40->usbdev, 426 result = usb_sg_init(&sgr, sur40->usbdev,
426 usb_rcvbulkpipe(sur40->usbdev, VIDEO_ENDPOINT), 0, 427 usb_rcvbulkpipe(sur40->usbdev, VIDEO_ENDPOINT), 0,
@@ -443,15 +444,15 @@ static void sur40_process_video(struct sur40_state *sur40)
443 goto err_poll; 444 goto err_poll;
444 445
445 /* mark as finished */ 446 /* mark as finished */
446 v4l2_get_timestamp(&new_buf->vb.v4l2_buf.timestamp); 447 v4l2_get_timestamp(&new_buf->vb.timestamp);
447 new_buf->vb.v4l2_buf.sequence = sur40->sequence++; 448 new_buf->vb.sequence = sur40->sequence++;
448 new_buf->vb.v4l2_buf.field = V4L2_FIELD_NONE; 449 new_buf->vb.field = V4L2_FIELD_NONE;
449 vb2_buffer_done(&new_buf->vb, VB2_BUF_STATE_DONE); 450 vb2_buffer_done(&new_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
450 dev_dbg(sur40->dev, "buffer marked done\n"); 451 dev_dbg(sur40->dev, "buffer marked done\n");
451 return; 452 return;
452 453
453err_poll: 454err_poll:
454 vb2_buffer_done(&new_buf->vb, VB2_BUF_STATE_ERROR); 455 vb2_buffer_done(&new_buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
455} 456}
456 457
457/* Initialize input device parameters. */ 458/* Initialize input device parameters. */
@@ -701,7 +702,7 @@ static void return_all_buffers(struct sur40_state *sur40,
701 702
702 spin_lock(&sur40->qlock); 703 spin_lock(&sur40->qlock);
703 list_for_each_entry_safe(buf, node, &sur40->buf_list, list) { 704 list_for_each_entry_safe(buf, node, &sur40->buf_list, list) {
704 vb2_buffer_done(&buf->vb, state); 705 vb2_buffer_done(&buf->vb.vb2_buf, state);
705 list_del(&buf->list); 706 list_del(&buf->list);
706 } 707 }
707 spin_unlock(&sur40->qlock); 708 spin_unlock(&sur40->qlock);
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index d5b994f17612..bf306a230eb0 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -29,6 +29,7 @@
29#include <media/v4l2-ioctl.h> 29#include <media/v4l2-ioctl.h>
30#include <media/v4l2-ctrls.h> 30#include <media/v4l2-ctrls.h>
31#include <media/v4l2-event.h> 31#include <media/v4l2-event.h>
32#include <media/videobuf2-v4l2.h>
32#include <media/videobuf2-vmalloc.h> 33#include <media/videobuf2-vmalloc.h>
33 34
34#include <linux/platform_device.h> 35#include <linux/platform_device.h>
@@ -107,7 +108,8 @@ static const unsigned int NUM_FORMATS = ARRAY_SIZE(formats);
107 108
108/* intermediate buffers with raw data from the USB device */ 109/* intermediate buffers with raw data from the USB device */
109struct rtl2832_sdr_frame_buf { 110struct rtl2832_sdr_frame_buf {
110 struct vb2_buffer vb; /* common v4l buffer stuff -- must be first */ 111 /* common v4l buffer stuff -- must be first */
112 struct vb2_v4l2_buffer vb;
111 struct list_head list; 113 struct list_head list;
112}; 114};
113 115
@@ -304,13 +306,13 @@ static void rtl2832_sdr_urb_complete(struct urb *urb)
304 } 306 }
305 307
306 /* fill framebuffer */ 308 /* fill framebuffer */
307 ptr = vb2_plane_vaddr(&fbuf->vb, 0); 309 ptr = vb2_plane_vaddr(&fbuf->vb.vb2_buf, 0);
308 len = rtl2832_sdr_convert_stream(dev, ptr, urb->transfer_buffer, 310 len = rtl2832_sdr_convert_stream(dev, ptr, urb->transfer_buffer,
309 urb->actual_length); 311 urb->actual_length);
310 vb2_set_plane_payload(&fbuf->vb, 0, len); 312 vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0, len);
311 v4l2_get_timestamp(&fbuf->vb.v4l2_buf.timestamp); 313 v4l2_get_timestamp(&fbuf->vb.timestamp);
312 fbuf->vb.v4l2_buf.sequence = dev->sequence++; 314 fbuf->vb.sequence = dev->sequence++;
313 vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE); 315 vb2_buffer_done(&fbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
314 } 316 }
315skip: 317skip:
316 usb_submit_urb(urb, GFP_ATOMIC); 318 usb_submit_urb(urb, GFP_ATOMIC);
@@ -464,7 +466,7 @@ static void rtl2832_sdr_cleanup_queued_bufs(struct rtl2832_sdr_dev *dev)
464 buf = list_entry(dev->queued_bufs.next, 466 buf = list_entry(dev->queued_bufs.next,
465 struct rtl2832_sdr_frame_buf, list); 467 struct rtl2832_sdr_frame_buf, list);
466 list_del(&buf->list); 468 list_del(&buf->list);
467 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 469 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
468 } 470 }
469 spin_unlock_irqrestore(&dev->queued_bufs_lock, flags); 471 spin_unlock_irqrestore(&dev->queued_bufs_lock, flags);
470} 472}
@@ -518,14 +520,15 @@ static int rtl2832_sdr_buf_prepare(struct vb2_buffer *vb)
518 520
519static void rtl2832_sdr_buf_queue(struct vb2_buffer *vb) 521static void rtl2832_sdr_buf_queue(struct vb2_buffer *vb)
520{ 522{
523 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
521 struct rtl2832_sdr_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 524 struct rtl2832_sdr_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
522 struct rtl2832_sdr_frame_buf *buf = 525 struct rtl2832_sdr_frame_buf *buf =
523 container_of(vb, struct rtl2832_sdr_frame_buf, vb); 526 container_of(vbuf, struct rtl2832_sdr_frame_buf, vb);
524 unsigned long flags; 527 unsigned long flags;
525 528
526 /* Check the device has not disconnected between prep and queuing */ 529 /* Check the device has not disconnected between prep and queuing */
527 if (!dev->udev) { 530 if (!dev->udev) {
528 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 531 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
529 return; 532 return;
530 } 533 }
531 534
diff --git a/drivers/media/pci/cobalt/cobalt-driver.h b/drivers/media/pci/cobalt/cobalt-driver.h
index c206df930669..b2f08e4a68bf 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.h
+++ b/drivers/media/pci/cobalt/cobalt-driver.h
@@ -35,6 +35,7 @@
35#include <media/v4l2-ioctl.h> 35#include <media/v4l2-ioctl.h>
36#include <media/v4l2-device.h> 36#include <media/v4l2-device.h>
37#include <media/v4l2-fh.h> 37#include <media/v4l2-fh.h>
38#include <media/videobuf2-v4l2.h>
38#include <media/videobuf2-dma-sg.h> 39#include <media/videobuf2-dma-sg.h>
39 40
40#include "m00233_video_measure_memmap_package.h" 41#include "m00233_video_measure_memmap_package.h"
@@ -206,11 +207,12 @@ struct sg_dma_desc_info {
206#define COBALT_STREAM_FL_ADV_IRQ 1 207#define COBALT_STREAM_FL_ADV_IRQ 1
207 208
208struct cobalt_buffer { 209struct cobalt_buffer {
209 struct vb2_buffer vb; 210 struct vb2_v4l2_buffer vb;
210 struct list_head list; 211 struct list_head list;
211}; 212};
212 213
213static inline struct cobalt_buffer *to_cobalt_buffer(struct vb2_buffer *vb2) 214static inline
215struct cobalt_buffer *to_cobalt_buffer(struct vb2_v4l2_buffer *vb2)
214{ 216{
215 return container_of(vb2, struct cobalt_buffer, vb); 217 return container_of(vb2, struct cobalt_buffer, vb);
216} 218}
diff --git a/drivers/media/pci/cobalt/cobalt-irq.c b/drivers/media/pci/cobalt/cobalt-irq.c
index d1f5898d11ba..3de26d0714b5 100644
--- a/drivers/media/pci/cobalt/cobalt-irq.c
+++ b/drivers/media/pci/cobalt/cobalt-irq.c
@@ -134,11 +134,12 @@ done:
134 skip = true; 134 skip = true;
135 s->skip_first_frames--; 135 s->skip_first_frames--;
136 } 136 }
137 v4l2_get_timestamp(&cb->vb.v4l2_buf.timestamp); 137 v4l2_get_timestamp(&cb->vb.timestamp);
138 /* TODO: the sequence number should be read from the FPGA so we 138 /* TODO: the sequence number should be read from the FPGA so we
139 also know about dropped frames. */ 139 also know about dropped frames. */
140 cb->vb.v4l2_buf.sequence = s->sequence++; 140 cb->vb.sequence = s->sequence++;
141 vb2_buffer_done(&cb->vb, (skip || s->unstable_frame) ? 141 vb2_buffer_done(&cb->vb.vb2_buf,
142 (skip || s->unstable_frame) ?
142 VB2_BUF_STATE_REQUEUEING : VB2_BUF_STATE_DONE); 143 VB2_BUF_STATE_REQUEUEING : VB2_BUF_STATE_DONE);
143} 144}
144 145
diff --git a/drivers/media/pci/cobalt/cobalt-v4l2.c b/drivers/media/pci/cobalt/cobalt-v4l2.c
index 9756fd3e8af5..7d331a4c33a2 100644
--- a/drivers/media/pci/cobalt/cobalt-v4l2.c
+++ b/drivers/media/pci/cobalt/cobalt-v4l2.c
@@ -75,7 +75,7 @@ static int cobalt_buf_init(struct vb2_buffer *vb)
75 const size_t bytes = 75 const size_t bytes =
76 COBALT_MAX_HEIGHT * max_pages_per_line * 0x20; 76 COBALT_MAX_HEIGHT * max_pages_per_line * 0x20;
77 const size_t audio_bytes = ((1920 * 4) / PAGE_SIZE + 1) * 0x20; 77 const size_t audio_bytes = ((1920 * 4) / PAGE_SIZE + 1) * 0x20;
78 struct sg_dma_desc_info *desc = &s->dma_desc_info[vb->v4l2_buf.index]; 78 struct sg_dma_desc_info *desc = &s->dma_desc_info[vb->index];
79 struct sg_table *sg_desc = vb2_dma_sg_plane_desc(vb, 0); 79 struct sg_table *sg_desc = vb2_dma_sg_plane_desc(vb, 0);
80 unsigned size; 80 unsigned size;
81 int ret; 81 int ret;
@@ -105,17 +105,18 @@ static int cobalt_buf_init(struct vb2_buffer *vb)
105static void cobalt_buf_cleanup(struct vb2_buffer *vb) 105static void cobalt_buf_cleanup(struct vb2_buffer *vb)
106{ 106{
107 struct cobalt_stream *s = vb->vb2_queue->drv_priv; 107 struct cobalt_stream *s = vb->vb2_queue->drv_priv;
108 struct sg_dma_desc_info *desc = &s->dma_desc_info[vb->v4l2_buf.index]; 108 struct sg_dma_desc_info *desc = &s->dma_desc_info[vb->index];
109 109
110 descriptor_list_free(desc); 110 descriptor_list_free(desc);
111} 111}
112 112
113static int cobalt_buf_prepare(struct vb2_buffer *vb) 113static int cobalt_buf_prepare(struct vb2_buffer *vb)
114{ 114{
115 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
115 struct cobalt_stream *s = vb->vb2_queue->drv_priv; 116 struct cobalt_stream *s = vb->vb2_queue->drv_priv;
116 117
117 vb2_set_plane_payload(vb, 0, s->stride * s->height); 118 vb2_set_plane_payload(vb, 0, s->stride * s->height);
118 vb->v4l2_buf.field = V4L2_FIELD_NONE; 119 vbuf->field = V4L2_FIELD_NONE;
119 return 0; 120 return 0;
120} 121}
121 122
@@ -128,7 +129,7 @@ static void chain_all_buffers(struct cobalt_stream *s)
128 129
129 list_for_each(p, &s->bufs) { 130 list_for_each(p, &s->bufs) {
130 cb = list_entry(p, struct cobalt_buffer, list); 131 cb = list_entry(p, struct cobalt_buffer, list);
131 desc[i] = &s->dma_desc_info[cb->vb.v4l2_buf.index]; 132 desc[i] = &s->dma_desc_info[cb->vb.vb2_buf.index];
132 if (i > 0) 133 if (i > 0)
133 descriptor_list_chain(desc[i-1], desc[i]); 134 descriptor_list_chain(desc[i-1], desc[i]);
134 i++; 135 i++;
@@ -137,10 +138,11 @@ static void chain_all_buffers(struct cobalt_stream *s)
137 138
138static void cobalt_buf_queue(struct vb2_buffer *vb) 139static void cobalt_buf_queue(struct vb2_buffer *vb)
139{ 140{
141 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
140 struct vb2_queue *q = vb->vb2_queue; 142 struct vb2_queue *q = vb->vb2_queue;
141 struct cobalt_stream *s = q->drv_priv; 143 struct cobalt_stream *s = q->drv_priv;
142 struct cobalt_buffer *cb = to_cobalt_buffer(vb); 144 struct cobalt_buffer *cb = to_cobalt_buffer(vbuf);
143 struct sg_dma_desc_info *desc = &s->dma_desc_info[vb->v4l2_buf.index]; 145 struct sg_dma_desc_info *desc = &s->dma_desc_info[vb->index];
144 unsigned long flags; 146 unsigned long flags;
145 147
146 /* Prepare new buffer */ 148 /* Prepare new buffer */
@@ -284,7 +286,7 @@ static void cobalt_dma_start_streaming(struct cobalt_stream *s)
284 &vo->control); 286 &vo->control);
285 } 287 }
286 cb = list_first_entry(&s->bufs, struct cobalt_buffer, list); 288 cb = list_first_entry(&s->bufs, struct cobalt_buffer, list);
287 omni_sg_dma_start(s, &s->dma_desc_info[cb->vb.v4l2_buf.index]); 289 omni_sg_dma_start(s, &s->dma_desc_info[cb->vb.vb2_buf.index]);
288 spin_unlock_irqrestore(&s->irqlock, flags); 290 spin_unlock_irqrestore(&s->irqlock, flags);
289} 291}
290 292
@@ -381,7 +383,7 @@ static void cobalt_dma_stop_streaming(struct cobalt_stream *s)
381 spin_lock_irqsave(&s->irqlock, flags); 383 spin_lock_irqsave(&s->irqlock, flags);
382 list_for_each(p, &s->bufs) { 384 list_for_each(p, &s->bufs) {
383 cb = list_entry(p, struct cobalt_buffer, list); 385 cb = list_entry(p, struct cobalt_buffer, list);
384 desc = &s->dma_desc_info[cb->vb.v4l2_buf.index]; 386 desc = &s->dma_desc_info[cb->vb.vb2_buf.index];
385 /* Stop DMA after this descriptor chain */ 387 /* Stop DMA after this descriptor chain */
386 descriptor_list_end_of_chain(desc); 388 descriptor_list_end_of_chain(desc);
387 } 389 }
@@ -416,7 +418,7 @@ static void cobalt_stop_streaming(struct vb2_queue *q)
416 list_for_each_safe(p, safe, &s->bufs) { 418 list_for_each_safe(p, safe, &s->bufs) {
417 cb = list_entry(p, struct cobalt_buffer, list); 419 cb = list_entry(p, struct cobalt_buffer, list);
418 list_del(&cb->list); 420 list_del(&cb->list);
419 vb2_buffer_done(&cb->vb, VB2_BUF_STATE_ERROR); 421 vb2_buffer_done(&cb->vb.vb2_buf, VB2_BUF_STATE_ERROR);
420 } 422 }
421 spin_unlock_irqrestore(&s->irqlock, flags); 423 spin_unlock_irqrestore(&s->irqlock, flags);
422 424
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index 63c0ee5d0bf5..316a32213eff 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -1155,17 +1155,19 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
1155 1155
1156static int buffer_prepare(struct vb2_buffer *vb) 1156static int buffer_prepare(struct vb2_buffer *vb)
1157{ 1157{
1158 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1158 struct cx23885_dev *dev = vb->vb2_queue->drv_priv; 1159 struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
1159 struct cx23885_buffer *buf = 1160 struct cx23885_buffer *buf =
1160 container_of(vb, struct cx23885_buffer, vb); 1161 container_of(vbuf, struct cx23885_buffer, vb);
1161 1162
1162 return cx23885_buf_prepare(buf, &dev->ts1); 1163 return cx23885_buf_prepare(buf, &dev->ts1);
1163} 1164}
1164 1165
1165static void buffer_finish(struct vb2_buffer *vb) 1166static void buffer_finish(struct vb2_buffer *vb)
1166{ 1167{
1168 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1167 struct cx23885_dev *dev = vb->vb2_queue->drv_priv; 1169 struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
1168 struct cx23885_buffer *buf = container_of(vb, 1170 struct cx23885_buffer *buf = container_of(vbuf,
1169 struct cx23885_buffer, vb); 1171 struct cx23885_buffer, vb);
1170 1172
1171 cx23885_free_buffer(dev, buf); 1173 cx23885_free_buffer(dev, buf);
@@ -1173,8 +1175,9 @@ static void buffer_finish(struct vb2_buffer *vb)
1173 1175
1174static void buffer_queue(struct vb2_buffer *vb) 1176static void buffer_queue(struct vb2_buffer *vb)
1175{ 1177{
1178 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1176 struct cx23885_dev *dev = vb->vb2_queue->drv_priv; 1179 struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
1177 struct cx23885_buffer *buf = container_of(vb, 1180 struct cx23885_buffer *buf = container_of(vbuf,
1178 struct cx23885_buffer, vb); 1181 struct cx23885_buffer, vb);
1179 1182
1180 cx23885_buf_queue(&dev->ts1, buf); 1183 cx23885_buf_queue(&dev->ts1, buf);
@@ -1201,7 +1204,7 @@ static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
1201 struct cx23885_buffer, queue); 1204 struct cx23885_buffer, queue);
1202 1205
1203 list_del(&buf->queue); 1206 list_del(&buf->queue);
1204 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); 1207 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
1205 } 1208 }
1206 spin_unlock_irqrestore(&dev->slock, flags); 1209 spin_unlock_irqrestore(&dev->slock, flags);
1207 return ret; 1210 return ret;
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 7aee76af7a85..bc1c9602f435 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -427,12 +427,13 @@ static void cx23885_wakeup(struct cx23885_tsport *port,
427 buf = list_entry(q->active.next, 427 buf = list_entry(q->active.next,
428 struct cx23885_buffer, queue); 428 struct cx23885_buffer, queue);
429 429
430 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); 430 v4l2_get_timestamp(&buf->vb.timestamp);
431 buf->vb.v4l2_buf.sequence = q->count++; 431 buf->vb.sequence = q->count++;
432 dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.v4l2_buf.index, 432 dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
433 buf->vb.vb2_buf.index,
433 count, q->count); 434 count, q->count);
434 list_del(&buf->queue); 435 list_del(&buf->queue);
435 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE); 436 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
436} 437}
437 438
438int cx23885_sram_channel_setup(struct cx23885_dev *dev, 439int cx23885_sram_channel_setup(struct cx23885_dev *dev,
@@ -1453,12 +1454,12 @@ int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
1453{ 1454{
1454 struct cx23885_dev *dev = port->dev; 1455 struct cx23885_dev *dev = port->dev;
1455 int size = port->ts_packet_size * port->ts_packet_count; 1456 int size = port->ts_packet_size * port->ts_packet_count;
1456 struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb, 0); 1457 struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
1457 1458
1458 dprintk(1, "%s: %p\n", __func__, buf); 1459 dprintk(1, "%s: %p\n", __func__, buf);
1459 if (vb2_plane_size(&buf->vb, 0) < size) 1460 if (vb2_plane_size(&buf->vb.vb2_buf, 0) < size)
1460 return -EINVAL; 1461 return -EINVAL;
1461 vb2_set_plane_payload(&buf->vb, 0, size); 1462 vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
1462 1463
1463 cx23885_risc_databuffer(dev->pci, &buf->risc, 1464 cx23885_risc_databuffer(dev->pci, &buf->risc,
1464 sgt->sgl, 1465 sgt->sgl,
@@ -1503,7 +1504,7 @@ void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
1503 if (list_empty(&cx88q->active)) { 1504 if (list_empty(&cx88q->active)) {
1504 list_add_tail(&buf->queue, &cx88q->active); 1505 list_add_tail(&buf->queue, &cx88q->active);
1505 dprintk(1, "[%p/%d] %s - first active\n", 1506 dprintk(1, "[%p/%d] %s - first active\n",
1506 buf, buf->vb.v4l2_buf.index, __func__); 1507 buf, buf->vb.vb2_buf.index, __func__);
1507 } else { 1508 } else {
1508 buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1); 1509 buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
1509 prev = list_entry(cx88q->active.prev, struct cx23885_buffer, 1510 prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
@@ -1511,7 +1512,7 @@ void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
1511 list_add_tail(&buf->queue, &cx88q->active); 1512 list_add_tail(&buf->queue, &cx88q->active);
1512 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma); 1513 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
1513 dprintk(1, "[%p/%d] %s - append to active\n", 1514 dprintk(1, "[%p/%d] %s - append to active\n",
1514 buf, buf->vb.v4l2_buf.index, __func__); 1515 buf, buf->vb.vb2_buf.index, __func__);
1515 } 1516 }
1516 spin_unlock_irqrestore(&dev->slock, flags); 1517 spin_unlock_irqrestore(&dev->slock, flags);
1517} 1518}
@@ -1530,9 +1531,10 @@ static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
1530 buf = list_entry(q->active.next, struct cx23885_buffer, 1531 buf = list_entry(q->active.next, struct cx23885_buffer,
1531 queue); 1532 queue);
1532 list_del(&buf->queue); 1533 list_del(&buf->queue);
1533 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 1534 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1534 dprintk(1, "[%p/%d] %s - dma=0x%08lx\n", 1535 dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
1535 buf, buf->vb.v4l2_buf.index, reason, (unsigned long)buf->risc.dma); 1536 buf, buf->vb.vb2_buf.index, reason,
1537 (unsigned long)buf->risc.dma);
1536 } 1538 }
1537 spin_unlock_irqrestore(&port->slock, flags); 1539 spin_unlock_irqrestore(&port->slock, flags);
1538} 1540}
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 6e8c24cdb2cd..09ad51280295 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -110,18 +110,20 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
110 110
111static int buffer_prepare(struct vb2_buffer *vb) 111static int buffer_prepare(struct vb2_buffer *vb)
112{ 112{
113 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
113 struct cx23885_tsport *port = vb->vb2_queue->drv_priv; 114 struct cx23885_tsport *port = vb->vb2_queue->drv_priv;
114 struct cx23885_buffer *buf = 115 struct cx23885_buffer *buf =
115 container_of(vb, struct cx23885_buffer, vb); 116 container_of(vbuf, struct cx23885_buffer, vb);
116 117
117 return cx23885_buf_prepare(buf, port); 118 return cx23885_buf_prepare(buf, port);
118} 119}
119 120
120static void buffer_finish(struct vb2_buffer *vb) 121static void buffer_finish(struct vb2_buffer *vb)
121{ 122{
123 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
122 struct cx23885_tsport *port = vb->vb2_queue->drv_priv; 124 struct cx23885_tsport *port = vb->vb2_queue->drv_priv;
123 struct cx23885_dev *dev = port->dev; 125 struct cx23885_dev *dev = port->dev;
124 struct cx23885_buffer *buf = container_of(vb, 126 struct cx23885_buffer *buf = container_of(vbuf,
125 struct cx23885_buffer, vb); 127 struct cx23885_buffer, vb);
126 128
127 cx23885_free_buffer(dev, buf); 129 cx23885_free_buffer(dev, buf);
@@ -129,8 +131,9 @@ static void buffer_finish(struct vb2_buffer *vb)
129 131
130static void buffer_queue(struct vb2_buffer *vb) 132static void buffer_queue(struct vb2_buffer *vb)
131{ 133{
134 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
132 struct cx23885_tsport *port = vb->vb2_queue->drv_priv; 135 struct cx23885_tsport *port = vb->vb2_queue->drv_priv;
133 struct cx23885_buffer *buf = container_of(vb, 136 struct cx23885_buffer *buf = container_of(vbuf,
134 struct cx23885_buffer, vb); 137 struct cx23885_buffer, vb);
135 138
136 cx23885_buf_queue(port, buf); 139 cx23885_buf_queue(port, buf);
diff --git a/drivers/media/pci/cx23885/cx23885-vbi.c b/drivers/media/pci/cx23885/cx23885-vbi.c
index d362d3838c84..6c9bb0316aba 100644
--- a/drivers/media/pci/cx23885/cx23885-vbi.c
+++ b/drivers/media/pci/cx23885/cx23885-vbi.c
@@ -138,8 +138,9 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
138 138
139static int buffer_prepare(struct vb2_buffer *vb) 139static int buffer_prepare(struct vb2_buffer *vb)
140{ 140{
141 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
141 struct cx23885_dev *dev = vb->vb2_queue->drv_priv; 142 struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
142 struct cx23885_buffer *buf = container_of(vb, 143 struct cx23885_buffer *buf = container_of(vbuf,
143 struct cx23885_buffer, vb); 144 struct cx23885_buffer, vb);
144 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0); 145 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
145 unsigned lines = VBI_PAL_LINE_COUNT; 146 unsigned lines = VBI_PAL_LINE_COUNT;
@@ -161,7 +162,8 @@ static int buffer_prepare(struct vb2_buffer *vb)
161 162
162static void buffer_finish(struct vb2_buffer *vb) 163static void buffer_finish(struct vb2_buffer *vb)
163{ 164{
164 struct cx23885_buffer *buf = container_of(vb, 165 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
166 struct cx23885_buffer *buf = container_of(vbuf,
165 struct cx23885_buffer, vb); 167 struct cx23885_buffer, vb);
166 168
167 cx23885_free_buffer(vb->vb2_queue->drv_priv, buf); 169 cx23885_free_buffer(vb->vb2_queue->drv_priv, buf);
@@ -190,8 +192,10 @@ static void buffer_finish(struct vb2_buffer *vb)
190 */ 192 */
191static void buffer_queue(struct vb2_buffer *vb) 193static void buffer_queue(struct vb2_buffer *vb)
192{ 194{
195 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
193 struct cx23885_dev *dev = vb->vb2_queue->drv_priv; 196 struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
194 struct cx23885_buffer *buf = container_of(vb, struct cx23885_buffer, vb); 197 struct cx23885_buffer *buf = container_of(vbuf,
198 struct cx23885_buffer, vb);
195 struct cx23885_buffer *prev; 199 struct cx23885_buffer *prev;
196 struct cx23885_dmaqueue *q = &dev->vbiq; 200 struct cx23885_dmaqueue *q = &dev->vbiq;
197 unsigned long flags; 201 unsigned long flags;
@@ -206,7 +210,7 @@ static void buffer_queue(struct vb2_buffer *vb)
206 list_add_tail(&buf->queue, &q->active); 210 list_add_tail(&buf->queue, &q->active);
207 spin_unlock_irqrestore(&dev->slock, flags); 211 spin_unlock_irqrestore(&dev->slock, flags);
208 dprintk(2, "[%p/%d] vbi_queue - first active\n", 212 dprintk(2, "[%p/%d] vbi_queue - first active\n",
209 buf, buf->vb.v4l2_buf.index); 213 buf, buf->vb.vb2_buf.index);
210 214
211 } else { 215 } else {
212 buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1); 216 buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
@@ -217,7 +221,7 @@ static void buffer_queue(struct vb2_buffer *vb)
217 spin_unlock_irqrestore(&dev->slock, flags); 221 spin_unlock_irqrestore(&dev->slock, flags);
218 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma); 222 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
219 dprintk(2, "[%p/%d] buffer_queue - append to active\n", 223 dprintk(2, "[%p/%d] buffer_queue - append to active\n",
220 buf, buf->vb.v4l2_buf.index); 224 buf, buf->vb.vb2_buf.index);
221 } 225 }
222} 226}
223 227
@@ -245,7 +249,7 @@ static void cx23885_stop_streaming(struct vb2_queue *q)
245 struct cx23885_buffer, queue); 249 struct cx23885_buffer, queue);
246 250
247 list_del(&buf->queue); 251 list_del(&buf->queue);
248 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 252 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
249 } 253 }
250 spin_unlock_irqrestore(&dev->slock, flags); 254 spin_unlock_irqrestore(&dev->slock, flags);
251} 255}
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index ec76470d12a4..b6a193df618a 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -104,12 +104,12 @@ void cx23885_video_wakeup(struct cx23885_dev *dev,
104 buf = list_entry(q->active.next, 104 buf = list_entry(q->active.next,
105 struct cx23885_buffer, queue); 105 struct cx23885_buffer, queue);
106 106
107 buf->vb.v4l2_buf.sequence = q->count++; 107 buf->vb.sequence = q->count++;
108 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); 108 v4l2_get_timestamp(&buf->vb.timestamp);
109 dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.v4l2_buf.index, 109 dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
110 count, q->count); 110 buf->vb.vb2_buf.index, count, q->count);
111 list_del(&buf->queue); 111 list_del(&buf->queue);
112 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE); 112 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
113} 113}
114 114
115int cx23885_set_tvnorm(struct cx23885_dev *dev, v4l2_std_id norm) 115int cx23885_set_tvnorm(struct cx23885_dev *dev, v4l2_std_id norm)
@@ -329,9 +329,10 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
329 329
330static int buffer_prepare(struct vb2_buffer *vb) 330static int buffer_prepare(struct vb2_buffer *vb)
331{ 331{
332 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
332 struct cx23885_dev *dev = vb->vb2_queue->drv_priv; 333 struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
333 struct cx23885_buffer *buf = 334 struct cx23885_buffer *buf =
334 container_of(vb, struct cx23885_buffer, vb); 335 container_of(vbuf, struct cx23885_buffer, vb);
335 u32 line0_offset, line1_offset; 336 u32 line0_offset, line1_offset;
336 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0); 337 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
337 int field_tff; 338 int field_tff;
@@ -401,7 +402,7 @@ static int buffer_prepare(struct vb2_buffer *vb)
401 BUG(); 402 BUG();
402 } 403 }
403 dprintk(2, "[%p/%d] buffer_init - %dx%d %dbpp \"%s\" - dma=0x%08lx\n", 404 dprintk(2, "[%p/%d] buffer_init - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
404 buf, buf->vb.v4l2_buf.index, 405 buf, buf->vb.vb2_buf.index,
405 dev->width, dev->height, dev->fmt->depth, dev->fmt->name, 406 dev->width, dev->height, dev->fmt->depth, dev->fmt->name,
406 (unsigned long)buf->risc.dma); 407 (unsigned long)buf->risc.dma);
407 return 0; 408 return 0;
@@ -409,7 +410,8 @@ static int buffer_prepare(struct vb2_buffer *vb)
409 410
410static void buffer_finish(struct vb2_buffer *vb) 411static void buffer_finish(struct vb2_buffer *vb)
411{ 412{
412 struct cx23885_buffer *buf = container_of(vb, 413 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
414 struct cx23885_buffer *buf = container_of(vbuf,
413 struct cx23885_buffer, vb); 415 struct cx23885_buffer, vb);
414 416
415 cx23885_free_buffer(vb->vb2_queue->drv_priv, buf); 417 cx23885_free_buffer(vb->vb2_queue->drv_priv, buf);
@@ -438,8 +440,9 @@ static void buffer_finish(struct vb2_buffer *vb)
438 */ 440 */
439static void buffer_queue(struct vb2_buffer *vb) 441static void buffer_queue(struct vb2_buffer *vb)
440{ 442{
443 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
441 struct cx23885_dev *dev = vb->vb2_queue->drv_priv; 444 struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
442 struct cx23885_buffer *buf = container_of(vb, 445 struct cx23885_buffer *buf = container_of(vbuf,
443 struct cx23885_buffer, vb); 446 struct cx23885_buffer, vb);
444 struct cx23885_buffer *prev; 447 struct cx23885_buffer *prev;
445 struct cx23885_dmaqueue *q = &dev->vidq; 448 struct cx23885_dmaqueue *q = &dev->vidq;
@@ -455,7 +458,7 @@ static void buffer_queue(struct vb2_buffer *vb)
455 if (list_empty(&q->active)) { 458 if (list_empty(&q->active)) {
456 list_add_tail(&buf->queue, &q->active); 459 list_add_tail(&buf->queue, &q->active);
457 dprintk(2, "[%p/%d] buffer_queue - first active\n", 460 dprintk(2, "[%p/%d] buffer_queue - first active\n",
458 buf, buf->vb.v4l2_buf.index); 461 buf, buf->vb.vb2_buf.index);
459 } else { 462 } else {
460 buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1); 463 buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
461 prev = list_entry(q->active.prev, struct cx23885_buffer, 464 prev = list_entry(q->active.prev, struct cx23885_buffer,
@@ -463,7 +466,7 @@ static void buffer_queue(struct vb2_buffer *vb)
463 list_add_tail(&buf->queue, &q->active); 466 list_add_tail(&buf->queue, &q->active);
464 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma); 467 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
465 dprintk(2, "[%p/%d] buffer_queue - append to active\n", 468 dprintk(2, "[%p/%d] buffer_queue - append to active\n",
466 buf, buf->vb.v4l2_buf.index); 469 buf, buf->vb.vb2_buf.index);
467 } 470 }
468 spin_unlock_irqrestore(&dev->slock, flags); 471 spin_unlock_irqrestore(&dev->slock, flags);
469} 472}
@@ -492,7 +495,7 @@ static void cx23885_stop_streaming(struct vb2_queue *q)
492 struct cx23885_buffer, queue); 495 struct cx23885_buffer, queue);
493 496
494 list_del(&buf->queue); 497 list_del(&buf->queue);
495 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 498 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
496 } 499 }
497 spin_unlock_irqrestore(&dev->slock, flags); 500 spin_unlock_irqrestore(&dev->slock, flags);
498} 501}
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index 027ead438194..c5ba0833f47a 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -170,7 +170,7 @@ struct cx23885_riscmem {
170/* buffer for one video frame */ 170/* buffer for one video frame */
171struct cx23885_buffer { 171struct cx23885_buffer {
172 /* common v4l buffer stuff -- must be first */ 172 /* common v4l buffer stuff -- must be first */
173 struct vb2_buffer vb; 173 struct vb2_v4l2_buffer vb;
174 struct list_head queue; 174 struct list_head queue;
175 175
176 /* cx23885 specific */ 176 /* cx23885 specific */
diff --git a/drivers/media/pci/cx25821/cx25821-video.c b/drivers/media/pci/cx25821/cx25821-video.c
index 7bc495e4ece2..f1deb8fd271c 100644
--- a/drivers/media/pci/cx25821/cx25821-video.c
+++ b/drivers/media/pci/cx25821/cx25821-video.c
@@ -130,10 +130,10 @@ int cx25821_video_irq(struct cx25821_dev *dev, int chan_num, u32 status)
130 buf = list_entry(dmaq->active.next, 130 buf = list_entry(dmaq->active.next,
131 struct cx25821_buffer, queue); 131 struct cx25821_buffer, queue);
132 132
133 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); 133 v4l2_get_timestamp(&buf->vb.timestamp);
134 buf->vb.v4l2_buf.sequence = dmaq->count++; 134 buf->vb.sequence = dmaq->count++;
135 list_del(&buf->queue); 135 list_del(&buf->queue);
136 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE); 136 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
137 } 137 }
138 spin_unlock(&dev->slock); 138 spin_unlock(&dev->slock);
139 handled++; 139 handled++;
@@ -159,10 +159,11 @@ static int cx25821_queue_setup(struct vb2_queue *q, const struct v4l2_format *fm
159 159
160static int cx25821_buffer_prepare(struct vb2_buffer *vb) 160static int cx25821_buffer_prepare(struct vb2_buffer *vb)
161{ 161{
162 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
162 struct cx25821_channel *chan = vb->vb2_queue->drv_priv; 163 struct cx25821_channel *chan = vb->vb2_queue->drv_priv;
163 struct cx25821_dev *dev = chan->dev; 164 struct cx25821_dev *dev = chan->dev;
164 struct cx25821_buffer *buf = 165 struct cx25821_buffer *buf =
165 container_of(vb, struct cx25821_buffer, vb); 166 container_of(vbuf, struct cx25821_buffer, vb);
166 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0); 167 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
167 u32 line0_offset; 168 u32 line0_offset;
168 int bpl_local = LINE_SIZE_D1; 169 int bpl_local = LINE_SIZE_D1;
@@ -176,7 +177,7 @@ static int cx25821_buffer_prepare(struct vb2_buffer *vb)
176 if (vb2_plane_size(vb, 0) < chan->height * buf->bpl) 177 if (vb2_plane_size(vb, 0) < chan->height * buf->bpl)
177 return -EINVAL; 178 return -EINVAL;
178 vb2_set_plane_payload(vb, 0, chan->height * buf->bpl); 179 vb2_set_plane_payload(vb, 0, chan->height * buf->bpl);
179 buf->vb.v4l2_buf.field = chan->field; 180 buf->vb.field = chan->field;
180 181
181 if (chan->pixel_formats == PIXEL_FRMT_411) { 182 if (chan->pixel_formats == PIXEL_FRMT_411) {
182 bpl_local = buf->bpl; 183 bpl_local = buf->bpl;
@@ -231,7 +232,7 @@ static int cx25821_buffer_prepare(struct vb2_buffer *vb)
231 } 232 }
232 233
233 dprintk(2, "[%p/%d] buffer_prep - %dx%d %dbpp \"%s\" - dma=0x%08lx\n", 234 dprintk(2, "[%p/%d] buffer_prep - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
234 buf, buf->vb.v4l2_buf.index, chan->width, chan->height, 235 buf, buf->vb.vb2_buf.index, chan->width, chan->height,
235 chan->fmt->depth, chan->fmt->name, 236 chan->fmt->depth, chan->fmt->name,
236 (unsigned long)buf->risc.dma); 237 (unsigned long)buf->risc.dma);
237 238
@@ -240,8 +241,9 @@ static int cx25821_buffer_prepare(struct vb2_buffer *vb)
240 241
241static void cx25821_buffer_finish(struct vb2_buffer *vb) 242static void cx25821_buffer_finish(struct vb2_buffer *vb)
242{ 243{
244 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
243 struct cx25821_buffer *buf = 245 struct cx25821_buffer *buf =
244 container_of(vb, struct cx25821_buffer, vb); 246 container_of(vbuf, struct cx25821_buffer, vb);
245 struct cx25821_channel *chan = vb->vb2_queue->drv_priv; 247 struct cx25821_channel *chan = vb->vb2_queue->drv_priv;
246 struct cx25821_dev *dev = chan->dev; 248 struct cx25821_dev *dev = chan->dev;
247 249
@@ -250,8 +252,9 @@ static void cx25821_buffer_finish(struct vb2_buffer *vb)
250 252
251static void cx25821_buffer_queue(struct vb2_buffer *vb) 253static void cx25821_buffer_queue(struct vb2_buffer *vb)
252{ 254{
255 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
253 struct cx25821_buffer *buf = 256 struct cx25821_buffer *buf =
254 container_of(vb, struct cx25821_buffer, vb); 257 container_of(vbuf, struct cx25821_buffer, vb);
255 struct cx25821_channel *chan = vb->vb2_queue->drv_priv; 258 struct cx25821_channel *chan = vb->vb2_queue->drv_priv;
256 struct cx25821_dev *dev = chan->dev; 259 struct cx25821_dev *dev = chan->dev;
257 struct cx25821_buffer *prev; 260 struct cx25821_buffer *prev;
@@ -300,7 +303,7 @@ static void cx25821_stop_streaming(struct vb2_queue *q)
300 struct cx25821_buffer, queue); 303 struct cx25821_buffer, queue);
301 304
302 list_del(&buf->queue); 305 list_del(&buf->queue);
303 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 306 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
304 } 307 }
305 spin_unlock_irqrestore(&dev->slock, flags); 308 spin_unlock_irqrestore(&dev->slock, flags);
306} 309}
diff --git a/drivers/media/pci/cx25821/cx25821.h b/drivers/media/pci/cx25821/cx25821.h
index d81a08a2df4f..a513b68be0fa 100644
--- a/drivers/media/pci/cx25821/cx25821.h
+++ b/drivers/media/pci/cx25821/cx25821.h
@@ -34,6 +34,7 @@
34#include <media/v4l2-common.h> 34#include <media/v4l2-common.h>
35#include <media/v4l2-device.h> 35#include <media/v4l2-device.h>
36#include <media/v4l2-ctrls.h> 36#include <media/v4l2-ctrls.h>
37#include <media/videobuf2-v4l2.h>
37#include <media/videobuf2-dma-sg.h> 38#include <media/videobuf2-dma-sg.h>
38 39
39#include "cx25821-reg.h" 40#include "cx25821-reg.h"
@@ -127,7 +128,7 @@ struct cx25821_riscmem {
127/* buffer for one video frame */ 128/* buffer for one video frame */
128struct cx25821_buffer { 129struct cx25821_buffer {
129 /* common v4l buffer stuff -- must be first */ 130 /* common v4l buffer stuff -- must be first */
130 struct vb2_buffer vb; 131 struct vb2_v4l2_buffer vb;
131 struct list_head queue; 132 struct list_head queue;
132 133
133 /* cx25821 specific */ 134 /* cx25821 specific */
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index 24216efa56e7..49d0b7c5271b 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -653,16 +653,18 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
653 653
654static int buffer_prepare(struct vb2_buffer *vb) 654static int buffer_prepare(struct vb2_buffer *vb)
655{ 655{
656 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
656 struct cx8802_dev *dev = vb->vb2_queue->drv_priv; 657 struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
657 struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb); 658 struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
658 659
659 return cx8802_buf_prepare(vb->vb2_queue, dev, buf); 660 return cx8802_buf_prepare(vb->vb2_queue, dev, buf);
660} 661}
661 662
662static void buffer_finish(struct vb2_buffer *vb) 663static void buffer_finish(struct vb2_buffer *vb)
663{ 664{
665 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
664 struct cx8802_dev *dev = vb->vb2_queue->drv_priv; 666 struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
665 struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb); 667 struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
666 struct cx88_riscmem *risc = &buf->risc; 668 struct cx88_riscmem *risc = &buf->risc;
667 669
668 if (risc->cpu) 670 if (risc->cpu)
@@ -672,8 +674,9 @@ static void buffer_finish(struct vb2_buffer *vb)
672 674
673static void buffer_queue(struct vb2_buffer *vb) 675static void buffer_queue(struct vb2_buffer *vb)
674{ 676{
677 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
675 struct cx8802_dev *dev = vb->vb2_queue->drv_priv; 678 struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
676 struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb); 679 struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
677 680
678 cx8802_buf_queue(dev, buf); 681 cx8802_buf_queue(dev, buf);
679} 682}
@@ -721,7 +724,7 @@ fail:
721 struct cx88_buffer, list); 724 struct cx88_buffer, list);
722 725
723 list_del(&buf->list); 726 list_del(&buf->list);
724 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); 727 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
725 } 728 }
726 spin_unlock_irqrestore(&dev->slock, flags); 729 spin_unlock_irqrestore(&dev->slock, flags);
727 return err; 730 return err;
@@ -749,7 +752,7 @@ static void stop_streaming(struct vb2_queue *q)
749 struct cx88_buffer, list); 752 struct cx88_buffer, list);
750 753
751 list_del(&buf->list); 754 list_del(&buf->list);
752 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 755 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
753 } 756 }
754 spin_unlock_irqrestore(&dev->slock, flags); 757 spin_unlock_irqrestore(&dev->slock, flags);
755} 758}
diff --git a/drivers/media/pci/cx88/cx88-core.c b/drivers/media/pci/cx88/cx88-core.c
index aab7cf4c9825..9a43c7826b60 100644
--- a/drivers/media/pci/cx88/cx88-core.c
+++ b/drivers/media/pci/cx88/cx88-core.c
@@ -518,11 +518,11 @@ void cx88_wakeup(struct cx88_core *core,
518 518
519 buf = list_entry(q->active.next, 519 buf = list_entry(q->active.next,
520 struct cx88_buffer, list); 520 struct cx88_buffer, list);
521 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); 521 v4l2_get_timestamp(&buf->vb.timestamp);
522 buf->vb.v4l2_buf.field = core->field; 522 buf->vb.field = core->field;
523 buf->vb.v4l2_buf.sequence = q->count++; 523 buf->vb.sequence = q->count++;
524 list_del(&buf->list); 524 list_del(&buf->list);
525 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE); 525 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
526} 526}
527 527
528void cx88_shutdown(struct cx88_core *core) 528void cx88_shutdown(struct cx88_core *core)
diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c
index 9dfa5ee32a8f..f0923fb5a35d 100644
--- a/drivers/media/pci/cx88/cx88-dvb.c
+++ b/drivers/media/pci/cx88/cx88-dvb.c
@@ -99,16 +99,18 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
99 99
100static int buffer_prepare(struct vb2_buffer *vb) 100static int buffer_prepare(struct vb2_buffer *vb)
101{ 101{
102 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
102 struct cx8802_dev *dev = vb->vb2_queue->drv_priv; 103 struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
103 struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb); 104 struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
104 105
105 return cx8802_buf_prepare(vb->vb2_queue, dev, buf); 106 return cx8802_buf_prepare(vb->vb2_queue, dev, buf);
106} 107}
107 108
108static void buffer_finish(struct vb2_buffer *vb) 109static void buffer_finish(struct vb2_buffer *vb)
109{ 110{
111 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
110 struct cx8802_dev *dev = vb->vb2_queue->drv_priv; 112 struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
111 struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb); 113 struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
112 struct cx88_riscmem *risc = &buf->risc; 114 struct cx88_riscmem *risc = &buf->risc;
113 115
114 if (risc->cpu) 116 if (risc->cpu)
@@ -118,8 +120,9 @@ static void buffer_finish(struct vb2_buffer *vb)
118 120
119static void buffer_queue(struct vb2_buffer *vb) 121static void buffer_queue(struct vb2_buffer *vb)
120{ 122{
123 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
121 struct cx8802_dev *dev = vb->vb2_queue->drv_priv; 124 struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
122 struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb); 125 struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
123 126
124 cx8802_buf_queue(dev, buf); 127 cx8802_buf_queue(dev, buf);
125} 128}
@@ -149,7 +152,7 @@ static void stop_streaming(struct vb2_queue *q)
149 struct cx88_buffer, list); 152 struct cx88_buffer, list);
150 153
151 list_del(&buf->list); 154 list_del(&buf->list);
152 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 155 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
153 } 156 }
154 spin_unlock_irqrestore(&dev->slock, flags); 157 spin_unlock_irqrestore(&dev->slock, flags);
155} 158}
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
index 34f505744477..9961b2232b97 100644
--- a/drivers/media/pci/cx88/cx88-mpeg.c
+++ b/drivers/media/pci/cx88/cx88-mpeg.c
@@ -214,7 +214,7 @@ static int cx8802_restart_queue(struct cx8802_dev *dev,
214 214
215 buf = list_entry(q->active.next, struct cx88_buffer, list); 215 buf = list_entry(q->active.next, struct cx88_buffer, list);
216 dprintk(2,"restart_queue [%p/%d]: restart dma\n", 216 dprintk(2,"restart_queue [%p/%d]: restart dma\n",
217 buf, buf->vb.v4l2_buf.index); 217 buf, buf->vb.vb2_buf.index);
218 cx8802_start_dma(dev, q, buf); 218 cx8802_start_dma(dev, q, buf);
219 return 0; 219 return 0;
220} 220}
@@ -225,13 +225,13 @@ int cx8802_buf_prepare(struct vb2_queue *q, struct cx8802_dev *dev,
225 struct cx88_buffer *buf) 225 struct cx88_buffer *buf)
226{ 226{
227 int size = dev->ts_packet_size * dev->ts_packet_count; 227 int size = dev->ts_packet_size * dev->ts_packet_count;
228 struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb, 0); 228 struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
229 struct cx88_riscmem *risc = &buf->risc; 229 struct cx88_riscmem *risc = &buf->risc;
230 int rc; 230 int rc;
231 231
232 if (vb2_plane_size(&buf->vb, 0) < size) 232 if (vb2_plane_size(&buf->vb.vb2_buf, 0) < size)
233 return -EINVAL; 233 return -EINVAL;
234 vb2_set_plane_payload(&buf->vb, 0, size); 234 vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
235 235
236 rc = cx88_risc_databuffer(dev->pci, risc, sgt->sgl, 236 rc = cx88_risc_databuffer(dev->pci, risc, sgt->sgl,
237 dev->ts_packet_size, dev->ts_packet_count, 0); 237 dev->ts_packet_size, dev->ts_packet_count, 0);
@@ -259,7 +259,7 @@ void cx8802_buf_queue(struct cx8802_dev *dev, struct cx88_buffer *buf)
259 dprintk( 1, "queue is empty - first active\n" ); 259 dprintk( 1, "queue is empty - first active\n" );
260 list_add_tail(&buf->list, &cx88q->active); 260 list_add_tail(&buf->list, &cx88q->active);
261 dprintk(1,"[%p/%d] %s - first active\n", 261 dprintk(1,"[%p/%d] %s - first active\n",
262 buf, buf->vb.v4l2_buf.index, __func__); 262 buf, buf->vb.vb2_buf.index, __func__);
263 263
264 } else { 264 } else {
265 buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1); 265 buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
@@ -268,7 +268,7 @@ void cx8802_buf_queue(struct cx8802_dev *dev, struct cx88_buffer *buf)
268 list_add_tail(&buf->list, &cx88q->active); 268 list_add_tail(&buf->list, &cx88q->active);
269 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma); 269 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
270 dprintk( 1, "[%p/%d] %s - append to active\n", 270 dprintk( 1, "[%p/%d] %s - append to active\n",
271 buf, buf->vb.v4l2_buf.index, __func__); 271 buf, buf->vb.vb2_buf.index, __func__);
272 } 272 }
273} 273}
274 274
@@ -284,7 +284,7 @@ static void do_cancel_buffers(struct cx8802_dev *dev)
284 while (!list_empty(&q->active)) { 284 while (!list_empty(&q->active)) {
285 buf = list_entry(q->active.next, struct cx88_buffer, list); 285 buf = list_entry(q->active.next, struct cx88_buffer, list);
286 list_del(&buf->list); 286 list_del(&buf->list);
287 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 287 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
288 } 288 }
289 spin_unlock_irqrestore(&dev->slock,flags); 289 spin_unlock_irqrestore(&dev->slock,flags);
290} 290}
diff --git a/drivers/media/pci/cx88/cx88-vbi.c b/drivers/media/pci/cx88/cx88-vbi.c
index 7510e80eb2ff..1d65543003b4 100644
--- a/drivers/media/pci/cx88/cx88-vbi.c
+++ b/drivers/media/pci/cx88/cx88-vbi.c
@@ -100,7 +100,7 @@ int cx8800_restart_vbi_queue(struct cx8800_dev *dev,
100 100
101 buf = list_entry(q->active.next, struct cx88_buffer, list); 101 buf = list_entry(q->active.next, struct cx88_buffer, list);
102 dprintk(2,"restart_queue [%p/%d]: restart dma\n", 102 dprintk(2,"restart_queue [%p/%d]: restart dma\n",
103 buf, buf->vb.v4l2_buf.index); 103 buf, buf->vb.vb2_buf.index);
104 cx8800_start_vbi_dma(dev, q, buf); 104 cx8800_start_vbi_dma(dev, q, buf);
105 return 0; 105 return 0;
106} 106}
@@ -125,8 +125,9 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
125 125
126static int buffer_prepare(struct vb2_buffer *vb) 126static int buffer_prepare(struct vb2_buffer *vb)
127{ 127{
128 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
128 struct cx8800_dev *dev = vb->vb2_queue->drv_priv; 129 struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
129 struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb); 130 struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
130 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0); 131 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
131 unsigned int lines; 132 unsigned int lines;
132 unsigned int size; 133 unsigned int size;
@@ -149,8 +150,9 @@ static int buffer_prepare(struct vb2_buffer *vb)
149 150
150static void buffer_finish(struct vb2_buffer *vb) 151static void buffer_finish(struct vb2_buffer *vb)
151{ 152{
153 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
152 struct cx8800_dev *dev = vb->vb2_queue->drv_priv; 154 struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
153 struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb); 155 struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
154 struct cx88_riscmem *risc = &buf->risc; 156 struct cx88_riscmem *risc = &buf->risc;
155 157
156 if (risc->cpu) 158 if (risc->cpu)
@@ -160,8 +162,9 @@ static void buffer_finish(struct vb2_buffer *vb)
160 162
161static void buffer_queue(struct vb2_buffer *vb) 163static void buffer_queue(struct vb2_buffer *vb)
162{ 164{
165 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
163 struct cx8800_dev *dev = vb->vb2_queue->drv_priv; 166 struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
164 struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb); 167 struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
165 struct cx88_buffer *prev; 168 struct cx88_buffer *prev;
166 struct cx88_dmaqueue *q = &dev->vbiq; 169 struct cx88_dmaqueue *q = &dev->vbiq;
167 170
@@ -174,7 +177,7 @@ static void buffer_queue(struct vb2_buffer *vb)
174 list_add_tail(&buf->list, &q->active); 177 list_add_tail(&buf->list, &q->active);
175 cx8800_start_vbi_dma(dev, q, buf); 178 cx8800_start_vbi_dma(dev, q, buf);
176 dprintk(2,"[%p/%d] vbi_queue - first active\n", 179 dprintk(2,"[%p/%d] vbi_queue - first active\n",
177 buf, buf->vb.v4l2_buf.index); 180 buf, buf->vb.vb2_buf.index);
178 181
179 } else { 182 } else {
180 buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1); 183 buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
@@ -182,7 +185,7 @@ static void buffer_queue(struct vb2_buffer *vb)
182 list_add_tail(&buf->list, &q->active); 185 list_add_tail(&buf->list, &q->active);
183 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma); 186 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
184 dprintk(2,"[%p/%d] buffer_queue - append to active\n", 187 dprintk(2,"[%p/%d] buffer_queue - append to active\n",
185 buf, buf->vb.v4l2_buf.index); 188 buf, buf->vb.vb2_buf.index);
186 } 189 }
187} 190}
188 191
@@ -213,7 +216,7 @@ static void stop_streaming(struct vb2_queue *q)
213 struct cx88_buffer, list); 216 struct cx88_buffer, list);
214 217
215 list_del(&buf->list); 218 list_del(&buf->list);
216 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 219 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
217 } 220 }
218 spin_unlock_irqrestore(&dev->slock, flags); 221 spin_unlock_irqrestore(&dev->slock, flags);
219} 222}
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index 400e5caefd58..c6a337abdbb3 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -420,7 +420,7 @@ static int restart_video_queue(struct cx8800_dev *dev,
420 if (!list_empty(&q->active)) { 420 if (!list_empty(&q->active)) {
421 buf = list_entry(q->active.next, struct cx88_buffer, list); 421 buf = list_entry(q->active.next, struct cx88_buffer, list);
422 dprintk(2,"restart_queue [%p/%d]: restart dma\n", 422 dprintk(2,"restart_queue [%p/%d]: restart dma\n",
423 buf, buf->vb.v4l2_buf.index); 423 buf, buf->vb.vb2_buf.index);
424 start_video_dma(dev, q, buf); 424 start_video_dma(dev, q, buf);
425 } 425 }
426 return 0; 426 return 0;
@@ -444,9 +444,10 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
444 444
445static int buffer_prepare(struct vb2_buffer *vb) 445static int buffer_prepare(struct vb2_buffer *vb)
446{ 446{
447 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
447 struct cx8800_dev *dev = vb->vb2_queue->drv_priv; 448 struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
448 struct cx88_core *core = dev->core; 449 struct cx88_core *core = dev->core;
449 struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb); 450 struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
450 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0); 451 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
451 452
452 buf->bpl = core->width * dev->fmt->depth >> 3; 453 buf->bpl = core->width * dev->fmt->depth >> 3;
@@ -489,7 +490,7 @@ static int buffer_prepare(struct vb2_buffer *vb)
489 break; 490 break;
490 } 491 }
491 dprintk(2,"[%p/%d] buffer_prepare - %dx%d %dbpp \"%s\" - dma=0x%08lx\n", 492 dprintk(2,"[%p/%d] buffer_prepare - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
492 buf, buf->vb.v4l2_buf.index, 493 buf, buf->vb.vb2_buf.index,
493 core->width, core->height, dev->fmt->depth, dev->fmt->name, 494 core->width, core->height, dev->fmt->depth, dev->fmt->name,
494 (unsigned long)buf->risc.dma); 495 (unsigned long)buf->risc.dma);
495 return 0; 496 return 0;
@@ -497,8 +498,9 @@ static int buffer_prepare(struct vb2_buffer *vb)
497 498
498static void buffer_finish(struct vb2_buffer *vb) 499static void buffer_finish(struct vb2_buffer *vb)
499{ 500{
501 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
500 struct cx8800_dev *dev = vb->vb2_queue->drv_priv; 502 struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
501 struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb); 503 struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
502 struct cx88_riscmem *risc = &buf->risc; 504 struct cx88_riscmem *risc = &buf->risc;
503 505
504 if (risc->cpu) 506 if (risc->cpu)
@@ -508,8 +510,9 @@ static void buffer_finish(struct vb2_buffer *vb)
508 510
509static void buffer_queue(struct vb2_buffer *vb) 511static void buffer_queue(struct vb2_buffer *vb)
510{ 512{
513 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
511 struct cx8800_dev *dev = vb->vb2_queue->drv_priv; 514 struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
512 struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb); 515 struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
513 struct cx88_buffer *prev; 516 struct cx88_buffer *prev;
514 struct cx88_core *core = dev->core; 517 struct cx88_core *core = dev->core;
515 struct cx88_dmaqueue *q = &dev->vidq; 518 struct cx88_dmaqueue *q = &dev->vidq;
@@ -522,7 +525,7 @@ static void buffer_queue(struct vb2_buffer *vb)
522 if (list_empty(&q->active)) { 525 if (list_empty(&q->active)) {
523 list_add_tail(&buf->list, &q->active); 526 list_add_tail(&buf->list, &q->active);
524 dprintk(2,"[%p/%d] buffer_queue - first active\n", 527 dprintk(2,"[%p/%d] buffer_queue - first active\n",
525 buf, buf->vb.v4l2_buf.index); 528 buf, buf->vb.vb2_buf.index);
526 529
527 } else { 530 } else {
528 buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1); 531 buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
@@ -530,7 +533,7 @@ static void buffer_queue(struct vb2_buffer *vb)
530 list_add_tail(&buf->list, &q->active); 533 list_add_tail(&buf->list, &q->active);
531 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma); 534 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
532 dprintk(2, "[%p/%d] buffer_queue - append to active\n", 535 dprintk(2, "[%p/%d] buffer_queue - append to active\n",
533 buf, buf->vb.v4l2_buf.index); 536 buf, buf->vb.vb2_buf.index);
534 } 537 }
535} 538}
536 539
@@ -560,7 +563,7 @@ static void stop_streaming(struct vb2_queue *q)
560 struct cx88_buffer, list); 563 struct cx88_buffer, list);
561 564
562 list_del(&buf->list); 565 list_del(&buf->list);
563 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 566 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
564 } 567 }
565 spin_unlock_irqrestore(&dev->slock, flags); 568 spin_unlock_irqrestore(&dev->slock, flags);
566} 569}
diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
index 785fe2e0d702..2996eb3ea1fc 100644
--- a/drivers/media/pci/cx88/cx88.h
+++ b/drivers/media/pci/cx88/cx88.h
@@ -321,7 +321,7 @@ struct cx88_riscmem {
321/* buffer for one video frame */ 321/* buffer for one video frame */
322struct cx88_buffer { 322struct cx88_buffer {
323 /* common v4l buffer stuff -- must be first */ 323 /* common v4l buffer stuff -- must be first */
324 struct vb2_buffer vb; 324 struct vb2_v4l2_buffer vb;
325 struct list_head list; 325 struct list_head list;
326 326
327 /* cx88 specific */ 327 /* cx88 specific */
diff --git a/drivers/media/pci/dt3155/dt3155.c b/drivers/media/pci/dt3155/dt3155.c
index 8df634518927..f27a8582d179 100644
--- a/drivers/media/pci/dt3155/dt3155.c
+++ b/drivers/media/pci/dt3155/dt3155.c
@@ -160,7 +160,7 @@ static int dt3155_buf_prepare(struct vb2_buffer *vb)
160static int dt3155_start_streaming(struct vb2_queue *q, unsigned count) 160static int dt3155_start_streaming(struct vb2_queue *q, unsigned count)
161{ 161{
162 struct dt3155_priv *pd = vb2_get_drv_priv(q); 162 struct dt3155_priv *pd = vb2_get_drv_priv(q);
163 struct vb2_buffer *vb = pd->curr_buf; 163 struct vb2_buffer *vb = &pd->curr_buf->vb2_buf;
164 dma_addr_t dma_addr; 164 dma_addr_t dma_addr;
165 165
166 pd->sequence = 0; 166 pd->sequence = 0;
@@ -208,7 +208,7 @@ static void dt3155_stop_streaming(struct vb2_queue *q)
208 208
209 spin_lock_irq(&pd->lock); 209 spin_lock_irq(&pd->lock);
210 if (pd->curr_buf) { 210 if (pd->curr_buf) {
211 vb2_buffer_done(pd->curr_buf, VB2_BUF_STATE_ERROR); 211 vb2_buffer_done(&pd->curr_buf->vb2_buf, VB2_BUF_STATE_ERROR);
212 pd->curr_buf = NULL; 212 pd->curr_buf = NULL;
213 } 213 }
214 214
@@ -222,6 +222,7 @@ static void dt3155_stop_streaming(struct vb2_queue *q)
222 222
223static void dt3155_buf_queue(struct vb2_buffer *vb) 223static void dt3155_buf_queue(struct vb2_buffer *vb)
224{ 224{
225 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
225 struct dt3155_priv *pd = vb2_get_drv_priv(vb->vb2_queue); 226 struct dt3155_priv *pd = vb2_get_drv_priv(vb->vb2_queue);
226 227
227 /* pd->vidq.streaming = 1 when dt3155_buf_queue() is invoked */ 228 /* pd->vidq.streaming = 1 when dt3155_buf_queue() is invoked */
@@ -229,7 +230,7 @@ static void dt3155_buf_queue(struct vb2_buffer *vb)
229 if (pd->curr_buf) 230 if (pd->curr_buf)
230 list_add_tail(&vb->done_entry, &pd->dmaq); 231 list_add_tail(&vb->done_entry, &pd->dmaq);
231 else 232 else
232 pd->curr_buf = vb; 233 pd->curr_buf = vbuf;
233 spin_unlock_irq(&pd->lock); 234 spin_unlock_irq(&pd->lock);
234} 235}
235 236
@@ -269,14 +270,14 @@ static irqreturn_t dt3155_irq_handler_even(int irq, void *dev_id)
269 270
270 spin_lock(&ipd->lock); 271 spin_lock(&ipd->lock);
271 if (ipd->curr_buf && !list_empty(&ipd->dmaq)) { 272 if (ipd->curr_buf && !list_empty(&ipd->dmaq)) {
272 v4l2_get_timestamp(&ipd->curr_buf->v4l2_buf.timestamp); 273 v4l2_get_timestamp(&ipd->curr_buf->timestamp);
273 ipd->curr_buf->v4l2_buf.sequence = ipd->sequence++; 274 ipd->curr_buf->sequence = ipd->sequence++;
274 ipd->curr_buf->v4l2_buf.field = V4L2_FIELD_NONE; 275 ipd->curr_buf->field = V4L2_FIELD_NONE;
275 vb2_buffer_done(ipd->curr_buf, VB2_BUF_STATE_DONE); 276 vb2_buffer_done(&ipd->curr_buf->vb2_buf, VB2_BUF_STATE_DONE);
276 277
277 ivb = list_first_entry(&ipd->dmaq, typeof(*ivb), done_entry); 278 ivb = list_first_entry(&ipd->dmaq, typeof(*ivb), done_entry);
278 list_del(&ivb->done_entry); 279 list_del(&ivb->done_entry);
279 ipd->curr_buf = ivb; 280 ipd->curr_buf = to_vb2_v4l2_buffer(ivb);
280 dma_addr = vb2_dma_contig_plane_dma_addr(ivb, 0); 281 dma_addr = vb2_dma_contig_plane_dma_addr(ivb, 0);
281 iowrite32(dma_addr, ipd->regs + EVEN_DMA_START); 282 iowrite32(dma_addr, ipd->regs + EVEN_DMA_START);
282 iowrite32(dma_addr + ipd->width, ipd->regs + ODD_DMA_START); 283 iowrite32(dma_addr + ipd->width, ipd->regs + ODD_DMA_START);
diff --git a/drivers/media/pci/dt3155/dt3155.h b/drivers/media/pci/dt3155/dt3155.h
index 4e1f4d598d57..b3531e0bc733 100644
--- a/drivers/media/pci/dt3155/dt3155.h
+++ b/drivers/media/pci/dt3155/dt3155.h
@@ -22,6 +22,7 @@
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <media/v4l2-device.h> 23#include <media/v4l2-device.h>
24#include <media/v4l2-dev.h> 24#include <media/v4l2-dev.h>
25#include <media/videobuf2-v4l2.h>
25 26
26#define DT3155_NAME "dt3155" 27#define DT3155_NAME "dt3155"
27#define DT3155_VER_MAJ 2 28#define DT3155_VER_MAJ 2
@@ -181,7 +182,7 @@ struct dt3155_priv {
181 struct pci_dev *pdev; 182 struct pci_dev *pdev;
182 struct vb2_queue vidq; 183 struct vb2_queue vidq;
183 struct vb2_alloc_ctx *alloc_ctx; 184 struct vb2_alloc_ctx *alloc_ctx;
184 struct vb2_buffer *curr_buf; 185 struct vb2_v4l2_buffer *curr_buf;
185 struct mutex mux; 186 struct mutex mux;
186 struct list_head dmaq; 187 struct list_head dmaq;
187 spinlock_t lock; 188 spinlock_t lock;
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index 6d8bf6277647..b012aa658a54 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -27,6 +27,7 @@
27#include <linux/interrupt.h> 27#include <linux/interrupt.h>
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/list.h> 29#include <linux/list.h>
30#include <media/videobuf2-v4l2.h>
30#include <media/videobuf2-vmalloc.h> 31#include <media/videobuf2-vmalloc.h>
31 32
32#include "netup_unidvb.h" 33#include "netup_unidvb.h"
@@ -110,7 +111,7 @@ struct netup_dma_regs {
110} __packed __aligned(1); 111} __packed __aligned(1);
111 112
112struct netup_unidvb_buffer { 113struct netup_unidvb_buffer {
113 struct vb2_buffer vb; 114 struct vb2_v4l2_buffer vb;
114 struct list_head list; 115 struct list_head list;
115 u32 size; 116 u32 size;
116}; 117};
@@ -300,7 +301,8 @@ static int netup_unidvb_queue_setup(struct vb2_queue *vq,
300static int netup_unidvb_buf_prepare(struct vb2_buffer *vb) 301static int netup_unidvb_buf_prepare(struct vb2_buffer *vb)
301{ 302{
302 struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue); 303 struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
303 struct netup_unidvb_buffer *buf = container_of(vb, 304 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
305 struct netup_unidvb_buffer *buf = container_of(vbuf,
304 struct netup_unidvb_buffer, vb); 306 struct netup_unidvb_buffer, vb);
305 307
306 dev_dbg(&dma->ndev->pci_dev->dev, "%s(): buf 0x%p\n", __func__, buf); 308 dev_dbg(&dma->ndev->pci_dev->dev, "%s(): buf 0x%p\n", __func__, buf);
@@ -312,7 +314,8 @@ static void netup_unidvb_buf_queue(struct vb2_buffer *vb)
312{ 314{
313 unsigned long flags; 315 unsigned long flags;
314 struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue); 316 struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
315 struct netup_unidvb_buffer *buf = container_of(vb, 317 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
318 struct netup_unidvb_buffer *buf = container_of(vbuf,
316 struct netup_unidvb_buffer, vb); 319 struct netup_unidvb_buffer, vb);
317 320
318 dev_dbg(&dma->ndev->pci_dev->dev, "%s(): %p\n", __func__, buf); 321 dev_dbg(&dma->ndev->pci_dev->dev, "%s(): %p\n", __func__, buf);
@@ -509,7 +512,7 @@ static int netup_unidvb_ring_copy(struct netup_dma *dma,
509{ 512{
510 u32 copy_bytes, ring_bytes; 513 u32 copy_bytes, ring_bytes;
511 u32 buff_bytes = NETUP_DMA_PACKETS_COUNT * 188 - buf->size; 514 u32 buff_bytes = NETUP_DMA_PACKETS_COUNT * 188 - buf->size;
512 u8 *p = vb2_plane_vaddr(&buf->vb, 0); 515 u8 *p = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
513 struct netup_unidvb_dev *ndev = dma->ndev; 516 struct netup_unidvb_dev *ndev = dma->ndev;
514 517
515 if (p == NULL) { 518 if (p == NULL) {
@@ -579,9 +582,9 @@ static void netup_unidvb_dma_worker(struct work_struct *work)
579 dev_dbg(&ndev->pci_dev->dev, 582 dev_dbg(&ndev->pci_dev->dev,
580 "%s(): buffer %p done, size %d\n", 583 "%s(): buffer %p done, size %d\n",
581 __func__, buf, buf->size); 584 __func__, buf, buf->size);
582 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); 585 v4l2_get_timestamp(&buf->vb.timestamp);
583 vb2_set_plane_payload(&buf->vb, 0, buf->size); 586 vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);
584 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE); 587 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
585 } 588 }
586 } 589 }
587work_done: 590work_done:
@@ -599,7 +602,7 @@ static void netup_unidvb_queue_cleanup(struct netup_dma *dma)
599 buf = list_first_entry(&dma->free_buffers, 602 buf = list_first_entry(&dma->free_buffers,
600 struct netup_unidvb_buffer, list); 603 struct netup_unidvb_buffer, list);
601 list_del(&buf->list); 604 list_del(&buf->list);
602 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 605 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
603 } 606 }
604 spin_unlock_irqrestore(&dma->lock, flags); 607 spin_unlock_irqrestore(&dma->lock, flags);
605} 608}
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index 72d7f992375e..87f39f97a79f 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -216,13 +216,14 @@ int saa7134_buffer_count(unsigned int size, unsigned int count)
216 216
217int saa7134_buffer_startpage(struct saa7134_buf *buf) 217int saa7134_buffer_startpage(struct saa7134_buf *buf)
218{ 218{
219 return saa7134_buffer_pages(vb2_plane_size(&buf->vb2, 0)) * buf->vb2.v4l2_buf.index; 219 return saa7134_buffer_pages(vb2_plane_size(&buf->vb2.vb2_buf, 0))
220 * buf->vb2.vb2_buf.index;
220} 221}
221 222
222unsigned long saa7134_buffer_base(struct saa7134_buf *buf) 223unsigned long saa7134_buffer_base(struct saa7134_buf *buf)
223{ 224{
224 unsigned long base; 225 unsigned long base;
225 struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0); 226 struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2.vb2_buf, 0);
226 227
227 base = saa7134_buffer_startpage(buf) * 4096; 228 base = saa7134_buffer_startpage(buf) * 4096;
228 base += dma->sgl[0].offset; 229 base += dma->sgl[0].offset;
@@ -308,9 +309,9 @@ void saa7134_buffer_finish(struct saa7134_dev *dev,
308 core_dbg("buffer_finish %p\n", q->curr); 309 core_dbg("buffer_finish %p\n", q->curr);
309 310
310 /* finish current buffer */ 311 /* finish current buffer */
311 v4l2_get_timestamp(&q->curr->vb2.v4l2_buf.timestamp); 312 v4l2_get_timestamp(&q->curr->vb2.timestamp);
312 q->curr->vb2.v4l2_buf.sequence = q->seq_nr++; 313 q->curr->vb2.sequence = q->seq_nr++;
313 vb2_buffer_done(&q->curr->vb2, state); 314 vb2_buffer_done(&q->curr->vb2.vb2_buf, state);
314 q->curr = NULL; 315 q->curr = NULL;
315} 316}
316 317
@@ -375,7 +376,8 @@ void saa7134_stop_streaming(struct saa7134_dev *dev, struct saa7134_dmaqueue *q)
375 if (!list_empty(&q->queue)) { 376 if (!list_empty(&q->queue)) {
376 list_for_each_safe(pos, n, &q->queue) { 377 list_for_each_safe(pos, n, &q->queue) {
377 tmp = list_entry(pos, struct saa7134_buf, entry); 378 tmp = list_entry(pos, struct saa7134_buf, entry);
378 vb2_buffer_done(&tmp->vb2, VB2_BUF_STATE_ERROR); 379 vb2_buffer_done(&tmp->vb2.vb2_buf,
380 VB2_BUF_STATE_ERROR);
379 list_del(pos); 381 list_del(pos);
380 tmp = NULL; 382 tmp = NULL;
381 } 383 }
diff --git a/drivers/media/pci/saa7134/saa7134-ts.c b/drivers/media/pci/saa7134/saa7134-ts.c
index 4b202fa5fbc4..b0ef37dc770a 100644
--- a/drivers/media/pci/saa7134/saa7134-ts.c
+++ b/drivers/media/pci/saa7134/saa7134-ts.c
@@ -79,8 +79,9 @@ static int buffer_activate(struct saa7134_dev *dev,
79 79
80int saa7134_ts_buffer_init(struct vb2_buffer *vb2) 80int saa7134_ts_buffer_init(struct vb2_buffer *vb2)
81{ 81{
82 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
82 struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv; 83 struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
83 struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2); 84 struct saa7134_buf *buf = container_of(vbuf, struct saa7134_buf, vb2);
84 85
85 dmaq->curr = NULL; 86 dmaq->curr = NULL;
86 buf->activate = buffer_activate; 87 buf->activate = buffer_activate;
@@ -91,9 +92,10 @@ EXPORT_SYMBOL_GPL(saa7134_ts_buffer_init);
91 92
92int saa7134_ts_buffer_prepare(struct vb2_buffer *vb2) 93int saa7134_ts_buffer_prepare(struct vb2_buffer *vb2)
93{ 94{
95 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
94 struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv; 96 struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
95 struct saa7134_dev *dev = dmaq->dev; 97 struct saa7134_dev *dev = dmaq->dev;
96 struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2); 98 struct saa7134_buf *buf = container_of(vbuf, struct saa7134_buf, vb2);
97 struct sg_table *dma = vb2_dma_sg_plane_desc(vb2, 0); 99 struct sg_table *dma = vb2_dma_sg_plane_desc(vb2, 0);
98 unsigned int lines, llength, size; 100 unsigned int lines, llength, size;
99 101
@@ -107,7 +109,7 @@ int saa7134_ts_buffer_prepare(struct vb2_buffer *vb2)
107 return -EINVAL; 109 return -EINVAL;
108 110
109 vb2_set_plane_payload(vb2, 0, size); 111 vb2_set_plane_payload(vb2, 0, size);
110 vb2->v4l2_buf.field = dev->field; 112 vbuf->field = dev->field;
111 113
112 return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents, 114 return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents,
113 saa7134_buffer_startpage(buf)); 115 saa7134_buffer_startpage(buf));
@@ -148,10 +150,12 @@ int saa7134_ts_start_streaming(struct vb2_queue *vq, unsigned int count)
148 150
149 list_for_each_entry_safe(buf, tmp, &dmaq->queue, entry) { 151 list_for_each_entry_safe(buf, tmp, &dmaq->queue, entry) {
150 list_del(&buf->entry); 152 list_del(&buf->entry);
151 vb2_buffer_done(&buf->vb2, VB2_BUF_STATE_QUEUED); 153 vb2_buffer_done(&buf->vb2.vb2_buf,
154 VB2_BUF_STATE_QUEUED);
152 } 155 }
153 if (dmaq->curr) { 156 if (dmaq->curr) {
154 vb2_buffer_done(&dmaq->curr->vb2, VB2_BUF_STATE_QUEUED); 157 vb2_buffer_done(&dmaq->curr->vb2.vb2_buf,
158 VB2_BUF_STATE_QUEUED);
155 dmaq->curr = NULL; 159 dmaq->curr = NULL;
156 } 160 }
157 return -EBUSY; 161 return -EBUSY;
diff --git a/drivers/media/pci/saa7134/saa7134-vbi.c b/drivers/media/pci/saa7134/saa7134-vbi.c
index 4d36586ad752..fb1605e25e52 100644
--- a/drivers/media/pci/saa7134/saa7134-vbi.c
+++ b/drivers/media/pci/saa7134/saa7134-vbi.c
@@ -83,7 +83,7 @@ static int buffer_activate(struct saa7134_dev *dev,
83 struct saa7134_buf *buf, 83 struct saa7134_buf *buf,
84 struct saa7134_buf *next) 84 struct saa7134_buf *next)
85{ 85{
86 struct saa7134_dmaqueue *dmaq = buf->vb2.vb2_queue->drv_priv; 86 struct saa7134_dmaqueue *dmaq = buf->vb2.vb2_buf.vb2_queue->drv_priv;
87 unsigned long control, base; 87 unsigned long control, base;
88 88
89 vbi_dbg("buffer_activate [%p]\n", buf); 89 vbi_dbg("buffer_activate [%p]\n", buf);
@@ -119,8 +119,9 @@ static int buffer_prepare(struct vb2_buffer *vb2)
119{ 119{
120 struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv; 120 struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
121 struct saa7134_dev *dev = dmaq->dev; 121 struct saa7134_dev *dev = dmaq->dev;
122 struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2); 122 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
123 struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0); 123 struct saa7134_buf *buf = container_of(vbuf, struct saa7134_buf, vb2);
124 struct sg_table *dma = vb2_dma_sg_plane_desc(vb2, 0);
124 unsigned int size; 125 unsigned int size;
125 126
126 if (dma->sgl->offset) { 127 if (dma->sgl->offset) {
@@ -161,7 +162,8 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
161static int buffer_init(struct vb2_buffer *vb2) 162static int buffer_init(struct vb2_buffer *vb2)
162{ 163{
163 struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv; 164 struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
164 struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2); 165 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
166 struct saa7134_buf *buf = container_of(vbuf, struct saa7134_buf, vb2);
165 167
166 dmaq->curr = NULL; 168 dmaq->curr = NULL;
167 buf->activate = buffer_activate; 169 buf->activate = buffer_activate;
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 035039cfae6d..602d53d6122c 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -791,7 +791,7 @@ static int buffer_activate(struct saa7134_dev *dev,
791 struct saa7134_buf *buf, 791 struct saa7134_buf *buf,
792 struct saa7134_buf *next) 792 struct saa7134_buf *next)
793{ 793{
794 struct saa7134_dmaqueue *dmaq = buf->vb2.vb2_queue->drv_priv; 794 struct saa7134_dmaqueue *dmaq = buf->vb2.vb2_buf.vb2_queue->drv_priv;
795 unsigned long base,control,bpl; 795 unsigned long base,control,bpl;
796 unsigned long bpl_uv,lines_uv,base2,base3,tmp; /* planar */ 796 unsigned long bpl_uv,lines_uv,base2,base3,tmp; /* planar */
797 797
@@ -872,7 +872,8 @@ static int buffer_activate(struct saa7134_dev *dev,
872static int buffer_init(struct vb2_buffer *vb2) 872static int buffer_init(struct vb2_buffer *vb2)
873{ 873{
874 struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv; 874 struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
875 struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2); 875 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
876 struct saa7134_buf *buf = container_of(vbuf, struct saa7134_buf, vb2);
876 877
877 dmaq->curr = NULL; 878 dmaq->curr = NULL;
878 buf->activate = buffer_activate; 879 buf->activate = buffer_activate;
@@ -883,8 +884,9 @@ static int buffer_prepare(struct vb2_buffer *vb2)
883{ 884{
884 struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv; 885 struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
885 struct saa7134_dev *dev = dmaq->dev; 886 struct saa7134_dev *dev = dmaq->dev;
886 struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2); 887 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
887 struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0); 888 struct saa7134_buf *buf = container_of(vbuf, struct saa7134_buf, vb2);
889 struct sg_table *dma = vb2_dma_sg_plane_desc(vb2, 0);
888 unsigned int size; 890 unsigned int size;
889 891
890 if (dma->sgl->offset) { 892 if (dma->sgl->offset) {
@@ -896,7 +898,7 @@ static int buffer_prepare(struct vb2_buffer *vb2)
896 return -EINVAL; 898 return -EINVAL;
897 899
898 vb2_set_plane_payload(vb2, 0, size); 900 vb2_set_plane_payload(vb2, 0, size);
899 vb2->v4l2_buf.field = dev->field; 901 vbuf->field = dev->field;
900 902
901 return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents, 903 return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents,
902 saa7134_buffer_startpage(buf)); 904 saa7134_buffer_startpage(buf));
@@ -932,7 +934,8 @@ void saa7134_vb2_buffer_queue(struct vb2_buffer *vb)
932{ 934{
933 struct saa7134_dmaqueue *dmaq = vb->vb2_queue->drv_priv; 935 struct saa7134_dmaqueue *dmaq = vb->vb2_queue->drv_priv;
934 struct saa7134_dev *dev = dmaq->dev; 936 struct saa7134_dev *dev = dmaq->dev;
935 struct saa7134_buf *buf = container_of(vb, struct saa7134_buf, vb2); 937 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
938 struct saa7134_buf *buf = container_of(vbuf, struct saa7134_buf, vb2);
936 939
937 saa7134_buffer_queue(dev, dmaq, buf); 940 saa7134_buffer_queue(dev, dmaq, buf);
938} 941}
@@ -953,10 +956,12 @@ int saa7134_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
953 956
954 list_for_each_entry_safe(buf, tmp, &dmaq->queue, entry) { 957 list_for_each_entry_safe(buf, tmp, &dmaq->queue, entry) {
955 list_del(&buf->entry); 958 list_del(&buf->entry);
956 vb2_buffer_done(&buf->vb2, VB2_BUF_STATE_QUEUED); 959 vb2_buffer_done(&buf->vb2.vb2_buf,
960 VB2_BUF_STATE_QUEUED);
957 } 961 }
958 if (dmaq->curr) { 962 if (dmaq->curr) {
959 vb2_buffer_done(&dmaq->curr->vb2, VB2_BUF_STATE_QUEUED); 963 vb2_buffer_done(&dmaq->curr->vb2.vb2_buf,
964 VB2_BUF_STATE_QUEUED);
960 dmaq->curr = NULL; 965 dmaq->curr = NULL;
961 } 966 }
962 return -EBUSY; 967 return -EBUSY;
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index 14c2b4e0ee40..fea0514dfa4a 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -460,7 +460,7 @@ struct saa7134_thread {
460/* buffer for one video/vbi/ts frame */ 460/* buffer for one video/vbi/ts frame */
461struct saa7134_buf { 461struct saa7134_buf {
462 /* common v4l buffer stuff -- must be first */ 462 /* common v4l buffer stuff -- must be first */
463 struct vb2_buffer vb2; 463 struct vb2_v4l2_buffer vb2;
464 464
465 /* saa7134 specific */ 465 /* saa7134 specific */
466 unsigned int top_seen; 466 unsigned int top_seen;
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
index 53fff5425c13..78ac3fe22b45 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
@@ -458,11 +458,12 @@ static inline u32 vop_usec(const vop_header *vh)
458static int solo_fill_jpeg(struct solo_enc_dev *solo_enc, 458static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
459 struct vb2_buffer *vb, const vop_header *vh) 459 struct vb2_buffer *vb, const vop_header *vh)
460{ 460{
461 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
461 struct solo_dev *solo_dev = solo_enc->solo_dev; 462 struct solo_dev *solo_dev = solo_enc->solo_dev;
462 struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0); 463 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
463 int frame_size; 464 int frame_size;
464 465
465 vb->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME; 466 vbuf->flags |= V4L2_BUF_FLAG_KEYFRAME;
466 467
467 if (vb2_plane_size(vb, 0) < vop_jpeg_size(vh) + solo_enc->jpeg_len) 468 if (vb2_plane_size(vb, 0) < vop_jpeg_size(vh) + solo_enc->jpeg_len)
468 return -EIO; 469 return -EIO;
@@ -470,7 +471,7 @@ static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
470 frame_size = ALIGN(vop_jpeg_size(vh) + solo_enc->jpeg_len, DMA_ALIGN); 471 frame_size = ALIGN(vop_jpeg_size(vh) + solo_enc->jpeg_len, DMA_ALIGN);
471 vb2_set_plane_payload(vb, 0, vop_jpeg_size(vh) + solo_enc->jpeg_len); 472 vb2_set_plane_payload(vb, 0, vop_jpeg_size(vh) + solo_enc->jpeg_len);
472 473
473 return solo_send_desc(solo_enc, solo_enc->jpeg_len, vbuf, 474 return solo_send_desc(solo_enc, solo_enc->jpeg_len, sgt,
474 vop_jpeg_offset(vh) - SOLO_JPEG_EXT_ADDR(solo_dev), 475 vop_jpeg_offset(vh) - SOLO_JPEG_EXT_ADDR(solo_dev),
475 frame_size, SOLO_JPEG_EXT_ADDR(solo_dev), 476 frame_size, SOLO_JPEG_EXT_ADDR(solo_dev),
476 SOLO_JPEG_EXT_SIZE(solo_dev)); 477 SOLO_JPEG_EXT_SIZE(solo_dev));
@@ -479,8 +480,9 @@ static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
479static int solo_fill_mpeg(struct solo_enc_dev *solo_enc, 480static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
480 struct vb2_buffer *vb, const vop_header *vh) 481 struct vb2_buffer *vb, const vop_header *vh)
481{ 482{
483 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
482 struct solo_dev *solo_dev = solo_enc->solo_dev; 484 struct solo_dev *solo_dev = solo_enc->solo_dev;
483 struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0); 485 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
484 int frame_off, frame_size; 486 int frame_off, frame_size;
485 int skip = 0; 487 int skip = 0;
486 488
@@ -488,15 +490,15 @@ static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
488 return -EIO; 490 return -EIO;
489 491
490 /* If this is a key frame, add extra header */ 492 /* If this is a key frame, add extra header */
491 vb->v4l2_buf.flags &= ~(V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME | 493 vbuf->flags &= ~(V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
492 V4L2_BUF_FLAG_BFRAME); 494 V4L2_BUF_FLAG_BFRAME);
493 if (!vop_type(vh)) { 495 if (!vop_type(vh)) {
494 skip = solo_enc->vop_len; 496 skip = solo_enc->vop_len;
495 vb->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME; 497 vbuf->flags |= V4L2_BUF_FLAG_KEYFRAME;
496 vb2_set_plane_payload(vb, 0, vop_mpeg_size(vh) + 498 vb2_set_plane_payload(vb, 0, vop_mpeg_size(vh) +
497 solo_enc->vop_len); 499 solo_enc->vop_len);
498 } else { 500 } else {
499 vb->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME; 501 vbuf->flags |= V4L2_BUF_FLAG_PFRAME;
500 vb2_set_plane_payload(vb, 0, vop_mpeg_size(vh)); 502 vb2_set_plane_payload(vb, 0, vop_mpeg_size(vh));
501 } 503 }
502 504
@@ -505,7 +507,7 @@ static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
505 sizeof(*vh)) % SOLO_MP4E_EXT_SIZE(solo_dev); 507 sizeof(*vh)) % SOLO_MP4E_EXT_SIZE(solo_dev);
506 frame_size = ALIGN(vop_mpeg_size(vh) + skip, DMA_ALIGN); 508 frame_size = ALIGN(vop_mpeg_size(vh) + skip, DMA_ALIGN);
507 509
508 return solo_send_desc(solo_enc, skip, vbuf, frame_off, frame_size, 510 return solo_send_desc(solo_enc, skip, sgt, frame_off, frame_size,
509 SOLO_MP4E_EXT_ADDR(solo_dev), 511 SOLO_MP4E_EXT_ADDR(solo_dev),
510 SOLO_MP4E_EXT_SIZE(solo_dev)); 512 SOLO_MP4E_EXT_SIZE(solo_dev));
511} 513}
@@ -513,6 +515,7 @@ static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
513static int solo_enc_fillbuf(struct solo_enc_dev *solo_enc, 515static int solo_enc_fillbuf(struct solo_enc_dev *solo_enc,
514 struct vb2_buffer *vb, struct solo_enc_buf *enc_buf) 516 struct vb2_buffer *vb, struct solo_enc_buf *enc_buf)
515{ 517{
518 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
516 const vop_header *vh = enc_buf->vh; 519 const vop_header *vh = enc_buf->vh;
517 int ret; 520 int ret;
518 521
@@ -527,17 +530,18 @@ static int solo_enc_fillbuf(struct solo_enc_dev *solo_enc,
527 } 530 }
528 531
529 if (!ret) { 532 if (!ret) {
530 vb->v4l2_buf.sequence = solo_enc->sequence++; 533 vbuf->sequence = solo_enc->sequence++;
531 vb->v4l2_buf.timestamp.tv_sec = vop_sec(vh); 534 vbuf->timestamp.tv_sec = vop_sec(vh);
532 vb->v4l2_buf.timestamp.tv_usec = vop_usec(vh); 535 vbuf->timestamp.tv_usec = vop_usec(vh);
533 536
534 /* Check for motion flags */ 537 /* Check for motion flags */
535 if (solo_is_motion_on(solo_enc) && enc_buf->motion) { 538 if (solo_is_motion_on(solo_enc) && enc_buf->motion) {
536 struct v4l2_event ev = { 539 struct v4l2_event ev = {
537 .type = V4L2_EVENT_MOTION_DET, 540 .type = V4L2_EVENT_MOTION_DET,
538 .u.motion_det = { 541 .u.motion_det = {
539 .flags = V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ, 542 .flags
540 .frame_sequence = vb->v4l2_buf.sequence, 543 = V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ,
544 .frame_sequence = vbuf->sequence,
541 .region_mask = enc_buf->motion ? 1 : 0, 545 .region_mask = enc_buf->motion ? 1 : 0,
542 }, 546 },
543 }; 547 };
@@ -571,7 +575,7 @@ static void solo_enc_handle_one(struct solo_enc_dev *solo_enc,
571 list_del(&vb->list); 575 list_del(&vb->list);
572 spin_unlock_irqrestore(&solo_enc->av_lock, flags); 576 spin_unlock_irqrestore(&solo_enc->av_lock, flags);
573 577
574 solo_enc_fillbuf(solo_enc, &vb->vb, enc_buf); 578 solo_enc_fillbuf(solo_enc, &vb->vb.vb2_buf, enc_buf);
575unlock: 579unlock:
576 mutex_unlock(&solo_enc->lock); 580 mutex_unlock(&solo_enc->lock);
577} 581}
@@ -678,10 +682,11 @@ static int solo_enc_queue_setup(struct vb2_queue *q,
678 682
679static void solo_enc_buf_queue(struct vb2_buffer *vb) 683static void solo_enc_buf_queue(struct vb2_buffer *vb)
680{ 684{
685 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
681 struct vb2_queue *vq = vb->vb2_queue; 686 struct vb2_queue *vq = vb->vb2_queue;
682 struct solo_enc_dev *solo_enc = vb2_get_drv_priv(vq); 687 struct solo_enc_dev *solo_enc = vb2_get_drv_priv(vq);
683 struct solo_vb2_buf *solo_vb = 688 struct solo_vb2_buf *solo_vb =
684 container_of(vb, struct solo_vb2_buf, vb); 689 container_of(vbuf, struct solo_vb2_buf, vb);
685 690
686 spin_lock(&solo_enc->av_lock); 691 spin_lock(&solo_enc->av_lock);
687 list_add_tail(&solo_vb->list, &solo_enc->vidq_active); 692 list_add_tail(&solo_vb->list, &solo_enc->vidq_active);
@@ -734,25 +739,26 @@ static void solo_enc_stop_streaming(struct vb2_queue *q)
734 struct solo_vb2_buf, list); 739 struct solo_vb2_buf, list);
735 740
736 list_del(&buf->list); 741 list_del(&buf->list);
737 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 742 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
738 } 743 }
739 spin_unlock_irqrestore(&solo_enc->av_lock, flags); 744 spin_unlock_irqrestore(&solo_enc->av_lock, flags);
740} 745}
741 746
742static void solo_enc_buf_finish(struct vb2_buffer *vb) 747static void solo_enc_buf_finish(struct vb2_buffer *vb)
743{ 748{
749 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
744 struct solo_enc_dev *solo_enc = vb2_get_drv_priv(vb->vb2_queue); 750 struct solo_enc_dev *solo_enc = vb2_get_drv_priv(vb->vb2_queue);
745 struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0); 751 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
746 752
747 switch (solo_enc->fmt) { 753 switch (solo_enc->fmt) {
748 case V4L2_PIX_FMT_MPEG4: 754 case V4L2_PIX_FMT_MPEG4:
749 case V4L2_PIX_FMT_H264: 755 case V4L2_PIX_FMT_H264:
750 if (vb->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) 756 if (vbuf->flags & V4L2_BUF_FLAG_KEYFRAME)
751 sg_copy_from_buffer(vbuf->sgl, vbuf->nents, 757 sg_copy_from_buffer(sgt->sgl, sgt->nents,
752 solo_enc->vop, solo_enc->vop_len); 758 solo_enc->vop, solo_enc->vop_len);
753 break; 759 break;
754 default: /* V4L2_PIX_FMT_MJPEG */ 760 default: /* V4L2_PIX_FMT_MJPEG */
755 sg_copy_from_buffer(vbuf->sgl, vbuf->nents, 761 sg_copy_from_buffer(sgt->sgl, sgt->nents,
756 solo_enc->jpeg_header, solo_enc->jpeg_len); 762 solo_enc->jpeg_header, solo_enc->jpeg_len);
757 break; 763 break;
758 } 764 }
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2.c b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
index 63ae8a61f603..57d0d9cf190e 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
@@ -26,6 +26,7 @@
26#include <media/v4l2-ioctl.h> 26#include <media/v4l2-ioctl.h>
27#include <media/v4l2-common.h> 27#include <media/v4l2-common.h>
28#include <media/v4l2-event.h> 28#include <media/v4l2-event.h>
29#include <media/videobuf2-v4l2.h>
29#include <media/videobuf2-dma-contig.h> 30#include <media/videobuf2-dma-contig.h>
30 31
31#include "solo6x10.h" 32#include "solo6x10.h"
@@ -191,13 +192,14 @@ static int solo_v4l2_set_ch(struct solo_dev *solo_dev, u8 ch)
191static void solo_fillbuf(struct solo_dev *solo_dev, 192static void solo_fillbuf(struct solo_dev *solo_dev,
192 struct vb2_buffer *vb) 193 struct vb2_buffer *vb)
193{ 194{
194 dma_addr_t vbuf; 195 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
196 dma_addr_t addr;
195 unsigned int fdma_addr; 197 unsigned int fdma_addr;
196 int error = -1; 198 int error = -1;
197 int i; 199 int i;
198 200
199 vbuf = vb2_dma_contig_plane_dma_addr(vb, 0); 201 addr = vb2_dma_contig_plane_dma_addr(vb, 0);
200 if (!vbuf) 202 if (!addr)
201 goto finish_buf; 203 goto finish_buf;
202 204
203 if (erase_off(solo_dev)) { 205 if (erase_off(solo_dev)) {
@@ -213,7 +215,7 @@ static void solo_fillbuf(struct solo_dev *solo_dev,
213 fdma_addr = SOLO_DISP_EXT_ADDR + (solo_dev->old_write * 215 fdma_addr = SOLO_DISP_EXT_ADDR + (solo_dev->old_write *
214 (SOLO_HW_BPL * solo_vlines(solo_dev))); 216 (SOLO_HW_BPL * solo_vlines(solo_dev)));
215 217
216 error = solo_p2m_dma_t(solo_dev, 0, vbuf, fdma_addr, 218 error = solo_p2m_dma_t(solo_dev, 0, addr, fdma_addr,
217 solo_bytesperline(solo_dev), 219 solo_bytesperline(solo_dev),
218 solo_vlines(solo_dev), SOLO_HW_BPL); 220 solo_vlines(solo_dev), SOLO_HW_BPL);
219 } 221 }
@@ -222,8 +224,8 @@ finish_buf:
222 if (!error) { 224 if (!error) {
223 vb2_set_plane_payload(vb, 0, 225 vb2_set_plane_payload(vb, 0,
224 solo_vlines(solo_dev) * solo_bytesperline(solo_dev)); 226 solo_vlines(solo_dev) * solo_bytesperline(solo_dev));
225 vb->v4l2_buf.sequence = solo_dev->sequence++; 227 vbuf->sequence = solo_dev->sequence++;
226 v4l2_get_timestamp(&vb->v4l2_buf.timestamp); 228 v4l2_get_timestamp(&vbuf->timestamp);
227 } 229 }
228 230
229 vb2_buffer_done(vb, error ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); 231 vb2_buffer_done(vb, error ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
@@ -256,7 +258,7 @@ static void solo_thread_try(struct solo_dev *solo_dev)
256 258
257 spin_unlock(&solo_dev->slock); 259 spin_unlock(&solo_dev->slock);
258 260
259 solo_fillbuf(solo_dev, &vb->vb); 261 solo_fillbuf(solo_dev, &vb->vb.vb2_buf);
260 } 262 }
261 263
262 assert_spin_locked(&solo_dev->slock); 264 assert_spin_locked(&solo_dev->slock);
@@ -345,10 +347,11 @@ static void solo_stop_streaming(struct vb2_queue *q)
345 347
346static void solo_buf_queue(struct vb2_buffer *vb) 348static void solo_buf_queue(struct vb2_buffer *vb)
347{ 349{
350 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
348 struct vb2_queue *vq = vb->vb2_queue; 351 struct vb2_queue *vq = vb->vb2_queue;
349 struct solo_dev *solo_dev = vb2_get_drv_priv(vq); 352 struct solo_dev *solo_dev = vb2_get_drv_priv(vq);
350 struct solo_vb2_buf *solo_vb = 353 struct solo_vb2_buf *solo_vb =
351 container_of(vb, struct solo_vb2_buf, vb); 354 container_of(vbuf, struct solo_vb2_buf, vb);
352 355
353 spin_lock(&solo_dev->slock); 356 spin_lock(&solo_dev->slock);
354 list_add_tail(&solo_vb->list, &solo_dev->vidq_active); 357 list_add_tail(&solo_vb->list, &solo_dev->vidq_active);
diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
index 5cc9e9dc61e4..4ab6586c0467 100644
--- a/drivers/media/pci/solo6x10/solo6x10.h
+++ b/drivers/media/pci/solo6x10/solo6x10.h
@@ -135,7 +135,7 @@ struct solo_p2m_dev {
135#define OSD_TEXT_MAX 44 135#define OSD_TEXT_MAX 44
136 136
137struct solo_vb2_buf { 137struct solo_vb2_buf {
138 struct vb2_buffer vb; 138 struct vb2_v4l2_buffer vb;
139 struct list_head list; 139 struct list_head list;
140}; 140};
141 141
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index 59b3a36a3639..fc99f33d65e9 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -88,11 +88,11 @@
88 88
89 89
90struct vip_buffer { 90struct vip_buffer {
91 struct vb2_buffer vb; 91 struct vb2_v4l2_buffer vb;
92 struct list_head list; 92 struct list_head list;
93 dma_addr_t dma; 93 dma_addr_t dma;
94}; 94};
95static inline struct vip_buffer *to_vip_buffer(struct vb2_buffer *vb2) 95static inline struct vip_buffer *to_vip_buffer(struct vb2_v4l2_buffer *vb2)
96{ 96{
97 return container_of(vb2, struct vip_buffer, vb); 97 return container_of(vb2, struct vip_buffer, vb);
98} 98}
@@ -287,7 +287,8 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
287}; 287};
288static int buffer_init(struct vb2_buffer *vb) 288static int buffer_init(struct vb2_buffer *vb)
289{ 289{
290 struct vip_buffer *vip_buf = to_vip_buffer(vb); 290 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
291 struct vip_buffer *vip_buf = to_vip_buffer(vbuf);
291 292
292 vip_buf->dma = vb2_dma_contig_plane_dma_addr(vb, 0); 293 vip_buf->dma = vb2_dma_contig_plane_dma_addr(vb, 0);
293 INIT_LIST_HEAD(&vip_buf->list); 294 INIT_LIST_HEAD(&vip_buf->list);
@@ -296,8 +297,9 @@ static int buffer_init(struct vb2_buffer *vb)
296 297
297static int buffer_prepare(struct vb2_buffer *vb) 298static int buffer_prepare(struct vb2_buffer *vb)
298{ 299{
300 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
299 struct sta2x11_vip *vip = vb2_get_drv_priv(vb->vb2_queue); 301 struct sta2x11_vip *vip = vb2_get_drv_priv(vb->vb2_queue);
300 struct vip_buffer *vip_buf = to_vip_buffer(vb); 302 struct vip_buffer *vip_buf = to_vip_buffer(vbuf);
301 unsigned long size; 303 unsigned long size;
302 304
303 size = vip->format.sizeimage; 305 size = vip->format.sizeimage;
@@ -307,14 +309,15 @@ static int buffer_prepare(struct vb2_buffer *vb)
307 return -EINVAL; 309 return -EINVAL;
308 } 310 }
309 311
310 vb2_set_plane_payload(&vip_buf->vb, 0, size); 312 vb2_set_plane_payload(&vip_buf->vb.vb2_buf, 0, size);
311 313
312 return 0; 314 return 0;
313} 315}
314static void buffer_queue(struct vb2_buffer *vb) 316static void buffer_queue(struct vb2_buffer *vb)
315{ 317{
318 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
316 struct sta2x11_vip *vip = vb2_get_drv_priv(vb->vb2_queue); 319 struct sta2x11_vip *vip = vb2_get_drv_priv(vb->vb2_queue);
317 struct vip_buffer *vip_buf = to_vip_buffer(vb); 320 struct vip_buffer *vip_buf = to_vip_buffer(vbuf);
318 321
319 spin_lock(&vip->lock); 322 spin_lock(&vip->lock);
320 list_add_tail(&vip_buf->list, &vip->buffer_list); 323 list_add_tail(&vip_buf->list, &vip->buffer_list);
@@ -329,8 +332,9 @@ static void buffer_queue(struct vb2_buffer *vb)
329} 332}
330static void buffer_finish(struct vb2_buffer *vb) 333static void buffer_finish(struct vb2_buffer *vb)
331{ 334{
335 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
332 struct sta2x11_vip *vip = vb2_get_drv_priv(vb->vb2_queue); 336 struct sta2x11_vip *vip = vb2_get_drv_priv(vb->vb2_queue);
333 struct vip_buffer *vip_buf = to_vip_buffer(vb); 337 struct vip_buffer *vip_buf = to_vip_buffer(vbuf);
334 338
335 /* Buffer handled, remove it from the list */ 339 /* Buffer handled, remove it from the list */
336 spin_lock(&vip->lock); 340 spin_lock(&vip->lock);
@@ -370,7 +374,7 @@ static void stop_streaming(struct vb2_queue *vq)
370 /* Release all active buffers */ 374 /* Release all active buffers */
371 spin_lock(&vip->lock); 375 spin_lock(&vip->lock);
372 list_for_each_entry_safe(vip_buf, node, &vip->buffer_list, list) { 376 list_for_each_entry_safe(vip_buf, node, &vip->buffer_list, list) {
373 vb2_buffer_done(&vip_buf->vb, VB2_BUF_STATE_ERROR); 377 vb2_buffer_done(&vip_buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
374 list_del(&vip_buf->list); 378 list_del(&vip_buf->list);
375 } 379 }
376 spin_unlock(&vip->lock); 380 spin_unlock(&vip->lock);
@@ -813,9 +817,9 @@ static irqreturn_t vip_irq(int irq, struct sta2x11_vip *vip)
813 /* Disable acquisition */ 817 /* Disable acquisition */
814 reg_write(vip, DVP_CTL, reg_read(vip, DVP_CTL) & ~DVP_CTL_ENA); 818 reg_write(vip, DVP_CTL, reg_read(vip, DVP_CTL) & ~DVP_CTL_ENA);
815 /* Remove the active buffer from the list */ 819 /* Remove the active buffer from the list */
816 v4l2_get_timestamp(&vip->active->vb.v4l2_buf.timestamp); 820 v4l2_get_timestamp(&vip->active->vb.timestamp);
817 vip->active->vb.v4l2_buf.sequence = vip->sequence++; 821 vip->active->vb.sequence = vip->sequence++;
818 vb2_buffer_done(&vip->active->vb, VB2_BUF_STATE_DONE); 822 vb2_buffer_done(&vip->active->vb.vb2_buf, VB2_BUF_STATE_DONE);
819 } 823 }
820 824
821 return IRQ_HANDLED; 825 return IRQ_HANDLED;
diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c
index 8355e55b4e8e..323721439b8e 100644
--- a/drivers/media/pci/tw68/tw68-video.c
+++ b/drivers/media/pci/tw68/tw68-video.c
@@ -423,9 +423,10 @@ static int tw68_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
423 */ 423 */
424static void tw68_buf_queue(struct vb2_buffer *vb) 424static void tw68_buf_queue(struct vb2_buffer *vb)
425{ 425{
426 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
426 struct vb2_queue *vq = vb->vb2_queue; 427 struct vb2_queue *vq = vb->vb2_queue;
427 struct tw68_dev *dev = vb2_get_drv_priv(vq); 428 struct tw68_dev *dev = vb2_get_drv_priv(vq);
428 struct tw68_buf *buf = container_of(vb, struct tw68_buf, vb); 429 struct tw68_buf *buf = container_of(vbuf, struct tw68_buf, vb);
429 struct tw68_buf *prev; 430 struct tw68_buf *prev;
430 unsigned long flags; 431 unsigned long flags;
431 432
@@ -457,9 +458,10 @@ static void tw68_buf_queue(struct vb2_buffer *vb)
457 */ 458 */
458static int tw68_buf_prepare(struct vb2_buffer *vb) 459static int tw68_buf_prepare(struct vb2_buffer *vb)
459{ 460{
461 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
460 struct vb2_queue *vq = vb->vb2_queue; 462 struct vb2_queue *vq = vb->vb2_queue;
461 struct tw68_dev *dev = vb2_get_drv_priv(vq); 463 struct tw68_dev *dev = vb2_get_drv_priv(vq);
462 struct tw68_buf *buf = container_of(vb, struct tw68_buf, vb); 464 struct tw68_buf *buf = container_of(vbuf, struct tw68_buf, vb);
463 struct sg_table *dma = vb2_dma_sg_plane_desc(vb, 0); 465 struct sg_table *dma = vb2_dma_sg_plane_desc(vb, 0);
464 unsigned size, bpl; 466 unsigned size, bpl;
465 467
@@ -499,9 +501,10 @@ static int tw68_buf_prepare(struct vb2_buffer *vb)
499 501
500static void tw68_buf_finish(struct vb2_buffer *vb) 502static void tw68_buf_finish(struct vb2_buffer *vb)
501{ 503{
504 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
502 struct vb2_queue *vq = vb->vb2_queue; 505 struct vb2_queue *vq = vb->vb2_queue;
503 struct tw68_dev *dev = vb2_get_drv_priv(vq); 506 struct tw68_dev *dev = vb2_get_drv_priv(vq);
504 struct tw68_buf *buf = container_of(vb, struct tw68_buf, vb); 507 struct tw68_buf *buf = container_of(vbuf, struct tw68_buf, vb);
505 508
506 pci_free_consistent(dev->pci, buf->size, buf->cpu, buf->dma); 509 pci_free_consistent(dev->pci, buf->size, buf->cpu, buf->dma);
507} 510}
@@ -528,7 +531,7 @@ static void tw68_stop_streaming(struct vb2_queue *q)
528 container_of(dev->active.next, struct tw68_buf, list); 531 container_of(dev->active.next, struct tw68_buf, list);
529 532
530 list_del(&buf->list); 533 list_del(&buf->list);
531 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 534 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
532 } 535 }
533} 536}
534 537
@@ -1012,10 +1015,10 @@ void tw68_irq_video_done(struct tw68_dev *dev, unsigned long status)
1012 buf = list_entry(dev->active.next, struct tw68_buf, list); 1015 buf = list_entry(dev->active.next, struct tw68_buf, list);
1013 list_del(&buf->list); 1016 list_del(&buf->list);
1014 spin_unlock(&dev->slock); 1017 spin_unlock(&dev->slock);
1015 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); 1018 v4l2_get_timestamp(&buf->vb.timestamp);
1016 buf->vb.v4l2_buf.field = dev->field; 1019 buf->vb.field = dev->field;
1017 buf->vb.v4l2_buf.sequence = dev->seqnr++; 1020 buf->vb.sequence = dev->seqnr++;
1018 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE); 1021 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
1019 status &= ~(TW68_DMAPI); 1022 status &= ~(TW68_DMAPI);
1020 if (0 == status) 1023 if (0 == status)
1021 return; 1024 return;
diff --git a/drivers/media/pci/tw68/tw68.h b/drivers/media/pci/tw68/tw68.h
index ef51e4d48866..6c7dcb300f34 100644
--- a/drivers/media/pci/tw68/tw68.h
+++ b/drivers/media/pci/tw68/tw68.h
@@ -36,6 +36,7 @@
36#include <media/v4l2-ioctl.h> 36#include <media/v4l2-ioctl.h>
37#include <media/v4l2-ctrls.h> 37#include <media/v4l2-ctrls.h>
38#include <media/v4l2-device.h> 38#include <media/v4l2-device.h>
39#include <media/videobuf2-v4l2.h>
39#include <media/videobuf2-dma-sg.h> 40#include <media/videobuf2-dma-sg.h>
40 41
41#include "tw68-reg.h" 42#include "tw68-reg.h"
@@ -118,7 +119,7 @@ struct tw68_dev; /* forward delclaration */
118 119
119/* buffer for one video/vbi/ts frame */ 120/* buffer for one video/vbi/ts frame */
120struct tw68_buf { 121struct tw68_buf {
121 struct vb2_buffer vb; 122 struct vb2_v4l2_buffer vb;
122 struct list_head list; 123 struct list_head list;
123 124
124 unsigned int size; 125 unsigned int size;
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index c8447fa3fd91..488d2754c27c 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -307,7 +307,8 @@ static inline struct vpfe_device *to_vpfe(struct vpfe_ccdc *ccdc)
307 return container_of(ccdc, struct vpfe_device, ccdc); 307 return container_of(ccdc, struct vpfe_device, ccdc);
308} 308}
309 309
310static inline struct vpfe_cap_buffer *to_vpfe_buffer(struct vb2_buffer *vb) 310static inline
311struct vpfe_cap_buffer *to_vpfe_buffer(struct vb2_v4l2_buffer *vb)
311{ 312{
312 return container_of(vb, struct vpfe_cap_buffer, vb); 313 return container_of(vb, struct vpfe_cap_buffer, vb);
313} 314}
@@ -1257,14 +1258,14 @@ static inline void vpfe_schedule_next_buffer(struct vpfe_device *vpfe)
1257 list_del(&vpfe->next_frm->list); 1258 list_del(&vpfe->next_frm->list);
1258 1259
1259 vpfe_set_sdr_addr(&vpfe->ccdc, 1260 vpfe_set_sdr_addr(&vpfe->ccdc,
1260 vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb, 0)); 1261 vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0));
1261} 1262}
1262 1263
1263static inline void vpfe_schedule_bottom_field(struct vpfe_device *vpfe) 1264static inline void vpfe_schedule_bottom_field(struct vpfe_device *vpfe)
1264{ 1265{
1265 unsigned long addr; 1266 unsigned long addr;
1266 1267
1267 addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb, 0) + 1268 addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0) +
1268 vpfe->field_off; 1269 vpfe->field_off;
1269 1270
1270 vpfe_set_sdr_addr(&vpfe->ccdc, addr); 1271 vpfe_set_sdr_addr(&vpfe->ccdc, addr);
@@ -1280,10 +1281,10 @@ static inline void vpfe_schedule_bottom_field(struct vpfe_device *vpfe)
1280 */ 1281 */
1281static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe) 1282static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe)
1282{ 1283{
1283 v4l2_get_timestamp(&vpfe->cur_frm->vb.v4l2_buf.timestamp); 1284 v4l2_get_timestamp(&vpfe->cur_frm->vb.timestamp);
1284 vpfe->cur_frm->vb.v4l2_buf.field = vpfe->fmt.fmt.pix.field; 1285 vpfe->cur_frm->vb.field = vpfe->fmt.fmt.pix.field;
1285 vpfe->cur_frm->vb.v4l2_buf.sequence = vpfe->sequence++; 1286 vpfe->cur_frm->vb.sequence = vpfe->sequence++;
1286 vb2_buffer_done(&vpfe->cur_frm->vb, VB2_BUF_STATE_DONE); 1287 vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
1287 vpfe->cur_frm = vpfe->next_frm; 1288 vpfe->cur_frm = vpfe->next_frm;
1288} 1289}
1289 1290
@@ -1942,6 +1943,7 @@ static int vpfe_queue_setup(struct vb2_queue *vq,
1942 */ 1943 */
1943static int vpfe_buffer_prepare(struct vb2_buffer *vb) 1944static int vpfe_buffer_prepare(struct vb2_buffer *vb)
1944{ 1945{
1946 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1945 struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue); 1947 struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue);
1946 1948
1947 vb2_set_plane_payload(vb, 0, vpfe->fmt.fmt.pix.sizeimage); 1949 vb2_set_plane_payload(vb, 0, vpfe->fmt.fmt.pix.sizeimage);
@@ -1949,7 +1951,7 @@ static int vpfe_buffer_prepare(struct vb2_buffer *vb)
1949 if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) 1951 if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
1950 return -EINVAL; 1952 return -EINVAL;
1951 1953
1952 vb->v4l2_buf.field = vpfe->fmt.fmt.pix.field; 1954 vbuf->field = vpfe->fmt.fmt.pix.field;
1953 1955
1954 return 0; 1956 return 0;
1955} 1957}
@@ -1960,8 +1962,9 @@ static int vpfe_buffer_prepare(struct vb2_buffer *vb)
1960 */ 1962 */
1961static void vpfe_buffer_queue(struct vb2_buffer *vb) 1963static void vpfe_buffer_queue(struct vb2_buffer *vb)
1962{ 1964{
1965 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1963 struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue); 1966 struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue);
1964 struct vpfe_cap_buffer *buf = to_vpfe_buffer(vb); 1967 struct vpfe_cap_buffer *buf = to_vpfe_buffer(vbuf);
1965 unsigned long flags = 0; 1968 unsigned long flags = 0;
1966 1969
1967 /* add the buffer to the DMA queue */ 1970 /* add the buffer to the DMA queue */
@@ -2006,7 +2009,7 @@ static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
2006 list_del(&vpfe->cur_frm->list); 2009 list_del(&vpfe->cur_frm->list);
2007 spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags); 2010 spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
2008 2011
2009 addr = vb2_dma_contig_plane_dma_addr(&vpfe->cur_frm->vb, 0); 2012 addr = vb2_dma_contig_plane_dma_addr(&vpfe->cur_frm->vb.vb2_buf, 0);
2010 2013
2011 vpfe_set_sdr_addr(&vpfe->ccdc, (unsigned long)(addr)); 2014 vpfe_set_sdr_addr(&vpfe->ccdc, (unsigned long)(addr));
2012 2015
@@ -2023,7 +2026,7 @@ static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
2023err: 2026err:
2024 list_for_each_entry_safe(buf, tmp, &vpfe->dma_queue, list) { 2027 list_for_each_entry_safe(buf, tmp, &vpfe->dma_queue, list) {
2025 list_del(&buf->list); 2028 list_del(&buf->list);
2026 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); 2029 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
2027 } 2030 }
2028 2031
2029 return ret; 2032 return ret;
@@ -2055,13 +2058,14 @@ static void vpfe_stop_streaming(struct vb2_queue *vq)
2055 /* release all active buffers */ 2058 /* release all active buffers */
2056 spin_lock_irqsave(&vpfe->dma_queue_lock, flags); 2059 spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
2057 if (vpfe->cur_frm == vpfe->next_frm) { 2060 if (vpfe->cur_frm == vpfe->next_frm) {
2058 vb2_buffer_done(&vpfe->cur_frm->vb, VB2_BUF_STATE_ERROR); 2061 vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf,
2062 VB2_BUF_STATE_ERROR);
2059 } else { 2063 } else {
2060 if (vpfe->cur_frm != NULL) 2064 if (vpfe->cur_frm != NULL)
2061 vb2_buffer_done(&vpfe->cur_frm->vb, 2065 vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf,
2062 VB2_BUF_STATE_ERROR); 2066 VB2_BUF_STATE_ERROR);
2063 if (vpfe->next_frm != NULL) 2067 if (vpfe->next_frm != NULL)
2064 vb2_buffer_done(&vpfe->next_frm->vb, 2068 vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf,
2065 VB2_BUF_STATE_ERROR); 2069 VB2_BUF_STATE_ERROR);
2066 } 2070 }
2067 2071
@@ -2069,7 +2073,8 @@ static void vpfe_stop_streaming(struct vb2_queue *vq)
2069 vpfe->next_frm = list_entry(vpfe->dma_queue.next, 2073 vpfe->next_frm = list_entry(vpfe->dma_queue.next,
2070 struct vpfe_cap_buffer, list); 2074 struct vpfe_cap_buffer, list);
2071 list_del(&vpfe->next_frm->list); 2075 list_del(&vpfe->next_frm->list);
2072 vb2_buffer_done(&vpfe->next_frm->vb, VB2_BUF_STATE_ERROR); 2076 vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf,
2077 VB2_BUF_STATE_ERROR);
2073 } 2078 }
2074 spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags); 2079 spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
2075} 2080}
diff --git a/drivers/media/platform/am437x/am437x-vpfe.h b/drivers/media/platform/am437x/am437x-vpfe.h
index 5bfb35649a39..777bf97fea57 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.h
+++ b/drivers/media/platform/am437x/am437x-vpfe.h
@@ -31,6 +31,7 @@
31#include <media/v4l2-dev.h> 31#include <media/v4l2-dev.h>
32#include <media/v4l2-device.h> 32#include <media/v4l2-device.h>
33#include <media/v4l2-ioctl.h> 33#include <media/v4l2-ioctl.h>
34#include <media/videobuf2-v4l2.h>
34#include <media/videobuf2-dma-contig.h> 35#include <media/videobuf2-dma-contig.h>
35 36
36#include "am437x-vpfe_regs.h" 37#include "am437x-vpfe_regs.h"
@@ -104,7 +105,7 @@ struct vpfe_config {
104}; 105};
105 106
106struct vpfe_cap_buffer { 107struct vpfe_cap_buffer {
107 struct vb2_buffer vb; 108 struct vb2_v4l2_buffer vb;
108 struct list_head list; 109 struct list_head list;
109}; 110};
110 111
diff --git a/drivers/media/platform/blackfin/bfin_capture.c b/drivers/media/platform/blackfin/bfin_capture.c
index b7e70fb05eb8..db059eb0ff19 100644
--- a/drivers/media/platform/blackfin/bfin_capture.c
+++ b/drivers/media/platform/blackfin/bfin_capture.c
@@ -54,7 +54,7 @@ struct bcap_format {
54}; 54};
55 55
56struct bcap_buffer { 56struct bcap_buffer {
57 struct vb2_buffer vb; 57 struct vb2_v4l2_buffer vb;
58 struct list_head list; 58 struct list_head list;
59}; 59};
60 60
@@ -149,7 +149,7 @@ static const struct bcap_format bcap_formats[] = {
149 149
150static irqreturn_t bcap_isr(int irq, void *dev_id); 150static irqreturn_t bcap_isr(int irq, void *dev_id);
151 151
152static struct bcap_buffer *to_bcap_vb(struct vb2_buffer *vb) 152static struct bcap_buffer *to_bcap_vb(struct vb2_v4l2_buffer *vb)
153{ 153{
154 return container_of(vb, struct bcap_buffer, vb); 154 return container_of(vb, struct bcap_buffer, vb);
155} 155}
@@ -223,6 +223,7 @@ static int bcap_queue_setup(struct vb2_queue *vq,
223 223
224static int bcap_buffer_prepare(struct vb2_buffer *vb) 224static int bcap_buffer_prepare(struct vb2_buffer *vb)
225{ 225{
226 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
226 struct bcap_device *bcap_dev = vb2_get_drv_priv(vb->vb2_queue); 227 struct bcap_device *bcap_dev = vb2_get_drv_priv(vb->vb2_queue);
227 unsigned long size = bcap_dev->fmt.sizeimage; 228 unsigned long size = bcap_dev->fmt.sizeimage;
228 229
@@ -233,15 +234,16 @@ static int bcap_buffer_prepare(struct vb2_buffer *vb)
233 } 234 }
234 vb2_set_plane_payload(vb, 0, size); 235 vb2_set_plane_payload(vb, 0, size);
235 236
236 vb->v4l2_buf.field = bcap_dev->fmt.field; 237 vbuf->field = bcap_dev->fmt.field;
237 238
238 return 0; 239 return 0;
239} 240}
240 241
241static void bcap_buffer_queue(struct vb2_buffer *vb) 242static void bcap_buffer_queue(struct vb2_buffer *vb)
242{ 243{
244 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
243 struct bcap_device *bcap_dev = vb2_get_drv_priv(vb->vb2_queue); 245 struct bcap_device *bcap_dev = vb2_get_drv_priv(vb->vb2_queue);
244 struct bcap_buffer *buf = to_bcap_vb(vb); 246 struct bcap_buffer *buf = to_bcap_vb(vbuf);
245 unsigned long flags; 247 unsigned long flags;
246 248
247 spin_lock_irqsave(&bcap_dev->lock, flags); 249 spin_lock_irqsave(&bcap_dev->lock, flags);
@@ -251,8 +253,9 @@ static void bcap_buffer_queue(struct vb2_buffer *vb)
251 253
252static void bcap_buffer_cleanup(struct vb2_buffer *vb) 254static void bcap_buffer_cleanup(struct vb2_buffer *vb)
253{ 255{
256 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
254 struct bcap_device *bcap_dev = vb2_get_drv_priv(vb->vb2_queue); 257 struct bcap_device *bcap_dev = vb2_get_drv_priv(vb->vb2_queue);
255 struct bcap_buffer *buf = to_bcap_vb(vb); 258 struct bcap_buffer *buf = to_bcap_vb(vbuf);
256 unsigned long flags; 259 unsigned long flags;
257 260
258 spin_lock_irqsave(&bcap_dev->lock, flags); 261 spin_lock_irqsave(&bcap_dev->lock, flags);
@@ -333,7 +336,8 @@ static int bcap_start_streaming(struct vb2_queue *vq, unsigned int count)
333 struct bcap_buffer, list); 336 struct bcap_buffer, list);
334 /* remove buffer from the dma queue */ 337 /* remove buffer from the dma queue */
335 list_del_init(&bcap_dev->cur_frm->list); 338 list_del_init(&bcap_dev->cur_frm->list);
336 addr = vb2_dma_contig_plane_dma_addr(&bcap_dev->cur_frm->vb, 0); 339 addr = vb2_dma_contig_plane_dma_addr(&bcap_dev->cur_frm->vb.vb2_buf,
340 0);
337 /* update DMA address */ 341 /* update DMA address */
338 ppi->ops->update_addr(ppi, (unsigned long)addr); 342 ppi->ops->update_addr(ppi, (unsigned long)addr);
339 /* enable ppi */ 343 /* enable ppi */
@@ -344,7 +348,7 @@ static int bcap_start_streaming(struct vb2_queue *vq, unsigned int count)
344err: 348err:
345 list_for_each_entry_safe(buf, tmp, &bcap_dev->dma_queue, list) { 349 list_for_each_entry_safe(buf, tmp, &bcap_dev->dma_queue, list) {
346 list_del(&buf->list); 350 list_del(&buf->list);
347 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); 351 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
348 } 352 }
349 353
350 return ret; 354 return ret;
@@ -367,13 +371,15 @@ static void bcap_stop_streaming(struct vb2_queue *vq)
367 371
368 /* release all active buffers */ 372 /* release all active buffers */
369 if (bcap_dev->cur_frm) 373 if (bcap_dev->cur_frm)
370 vb2_buffer_done(&bcap_dev->cur_frm->vb, VB2_BUF_STATE_ERROR); 374 vb2_buffer_done(&bcap_dev->cur_frm->vb.vb2_buf,
375 VB2_BUF_STATE_ERROR);
371 376
372 while (!list_empty(&bcap_dev->dma_queue)) { 377 while (!list_empty(&bcap_dev->dma_queue)) {
373 bcap_dev->cur_frm = list_entry(bcap_dev->dma_queue.next, 378 bcap_dev->cur_frm = list_entry(bcap_dev->dma_queue.next,
374 struct bcap_buffer, list); 379 struct bcap_buffer, list);
375 list_del_init(&bcap_dev->cur_frm->list); 380 list_del_init(&bcap_dev->cur_frm->list);
376 vb2_buffer_done(&bcap_dev->cur_frm->vb, VB2_BUF_STATE_ERROR); 381 vb2_buffer_done(&bcap_dev->cur_frm->vb.vb2_buf,
382 VB2_BUF_STATE_ERROR);
377 } 383 }
378} 384}
379 385
@@ -392,18 +398,19 @@ static irqreturn_t bcap_isr(int irq, void *dev_id)
392{ 398{
393 struct ppi_if *ppi = dev_id; 399 struct ppi_if *ppi = dev_id;
394 struct bcap_device *bcap_dev = ppi->priv; 400 struct bcap_device *bcap_dev = ppi->priv;
395 struct vb2_buffer *vb = &bcap_dev->cur_frm->vb; 401 struct vb2_v4l2_buffer *vbuf = &bcap_dev->cur_frm->vb;
402 struct vb2_buffer *vb = &vbuf->vb2_buf;
396 dma_addr_t addr; 403 dma_addr_t addr;
397 404
398 spin_lock(&bcap_dev->lock); 405 spin_lock(&bcap_dev->lock);
399 406
400 if (!list_empty(&bcap_dev->dma_queue)) { 407 if (!list_empty(&bcap_dev->dma_queue)) {
401 v4l2_get_timestamp(&vb->v4l2_buf.timestamp); 408 v4l2_get_timestamp(&vbuf->timestamp);
402 if (ppi->err) { 409 if (ppi->err) {
403 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); 410 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
404 ppi->err = false; 411 ppi->err = false;
405 } else { 412 } else {
406 vb->v4l2_buf.sequence = bcap_dev->sequence++; 413 vbuf->sequence = bcap_dev->sequence++;
407 vb2_buffer_done(vb, VB2_BUF_STATE_DONE); 414 vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
408 } 415 }
409 bcap_dev->cur_frm = list_entry(bcap_dev->dma_queue.next, 416 bcap_dev->cur_frm = list_entry(bcap_dev->dma_queue.next,
@@ -420,7 +427,8 @@ static irqreturn_t bcap_isr(int irq, void *dev_id)
420 if (bcap_dev->stop) { 427 if (bcap_dev->stop) {
421 complete(&bcap_dev->comp); 428 complete(&bcap_dev->comp);
422 } else { 429 } else {
423 addr = vb2_dma_contig_plane_dma_addr(&bcap_dev->cur_frm->vb, 0); 430 addr = vb2_dma_contig_plane_dma_addr(
431 &bcap_dev->cur_frm->vb.vb2_buf, 0);
424 ppi->ops->update_addr(ppi, (unsigned long)addr); 432 ppi->ops->update_addr(ppi, (unsigned long)addr);
425 ppi->ops->start(ppi); 433 ppi->ops->start(ppi);
426 } 434 }
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index cd41d49b206d..654e964f84a2 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -179,31 +179,32 @@ static void coda_kfifo_sync_to_device_write(struct coda_ctx *ctx)
179} 179}
180 180
181static int coda_bitstream_queue(struct coda_ctx *ctx, 181static int coda_bitstream_queue(struct coda_ctx *ctx,
182 struct vb2_buffer *src_buf) 182 struct vb2_v4l2_buffer *src_buf)
183{ 183{
184 u32 src_size = vb2_get_plane_payload(src_buf, 0); 184 u32 src_size = vb2_get_plane_payload(&src_buf->vb2_buf, 0);
185 u32 n; 185 u32 n;
186 186
187 n = kfifo_in(&ctx->bitstream_fifo, vb2_plane_vaddr(src_buf, 0), 187 n = kfifo_in(&ctx->bitstream_fifo,
188 src_size); 188 vb2_plane_vaddr(&src_buf->vb2_buf, 0), src_size);
189 if (n < src_size) 189 if (n < src_size)
190 return -ENOSPC; 190 return -ENOSPC;
191 191
192 src_buf->v4l2_buf.sequence = ctx->qsequence++; 192 src_buf->sequence = ctx->qsequence++;
193 193
194 return 0; 194 return 0;
195} 195}
196 196
197static bool coda_bitstream_try_queue(struct coda_ctx *ctx, 197static bool coda_bitstream_try_queue(struct coda_ctx *ctx,
198 struct vb2_buffer *src_buf) 198 struct vb2_v4l2_buffer *src_buf)
199{ 199{
200 int ret; 200 int ret;
201 201
202 if (coda_get_bitstream_payload(ctx) + 202 if (coda_get_bitstream_payload(ctx) +
203 vb2_get_plane_payload(src_buf, 0) + 512 >= ctx->bitstream.size) 203 vb2_get_plane_payload(&src_buf->vb2_buf, 0) + 512 >=
204 ctx->bitstream.size)
204 return false; 205 return false;
205 206
206 if (vb2_plane_vaddr(src_buf, 0) == NULL) { 207 if (vb2_plane_vaddr(&src_buf->vb2_buf, 0) == NULL) {
207 v4l2_err(&ctx->dev->v4l2_dev, "trying to queue empty buffer\n"); 208 v4l2_err(&ctx->dev->v4l2_dev, "trying to queue empty buffer\n");
208 return true; 209 return true;
209 } 210 }
@@ -224,7 +225,7 @@ static bool coda_bitstream_try_queue(struct coda_ctx *ctx,
224 225
225void coda_fill_bitstream(struct coda_ctx *ctx, bool streaming) 226void coda_fill_bitstream(struct coda_ctx *ctx, bool streaming)
226{ 227{
227 struct vb2_buffer *src_buf; 228 struct vb2_v4l2_buffer *src_buf;
228 struct coda_buffer_meta *meta; 229 struct coda_buffer_meta *meta;
229 unsigned long flags; 230 unsigned long flags;
230 u32 start; 231 u32 start;
@@ -257,7 +258,7 @@ void coda_fill_bitstream(struct coda_ctx *ctx, bool streaming)
257 } 258 }
258 259
259 /* Dump empty buffers */ 260 /* Dump empty buffers */
260 if (!vb2_get_plane_payload(src_buf, 0)) { 261 if (!vb2_get_plane_payload(&src_buf->vb2_buf, 0)) {
261 src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); 262 src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
262 v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE); 263 v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
263 continue; 264 continue;
@@ -276,9 +277,9 @@ void coda_fill_bitstream(struct coda_ctx *ctx, bool streaming)
276 277
277 meta = kmalloc(sizeof(*meta), GFP_KERNEL); 278 meta = kmalloc(sizeof(*meta), GFP_KERNEL);
278 if (meta) { 279 if (meta) {
279 meta->sequence = src_buf->v4l2_buf.sequence; 280 meta->sequence = src_buf->sequence;
280 meta->timecode = src_buf->v4l2_buf.timecode; 281 meta->timecode = src_buf->timecode;
281 meta->timestamp = src_buf->v4l2_buf.timestamp; 282 meta->timestamp = src_buf->timestamp;
282 meta->start = start; 283 meta->start = start;
283 meta->end = ctx->bitstream_fifo.kfifo.in & 284 meta->end = ctx->bitstream_fifo.kfifo.in &
284 ctx->bitstream_fifo.kfifo.mask; 285 ctx->bitstream_fifo.kfifo.mask;
@@ -483,20 +484,21 @@ err:
483 return ret; 484 return ret;
484} 485}
485 486
486static int coda_encode_header(struct coda_ctx *ctx, struct vb2_buffer *buf, 487static int coda_encode_header(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
487 int header_code, u8 *header, int *size) 488 int header_code, u8 *header, int *size)
488{ 489{
490 struct vb2_buffer *vb = &buf->vb2_buf;
489 struct coda_dev *dev = ctx->dev; 491 struct coda_dev *dev = ctx->dev;
490 size_t bufsize; 492 size_t bufsize;
491 int ret; 493 int ret;
492 int i; 494 int i;
493 495
494 if (dev->devtype->product == CODA_960) 496 if (dev->devtype->product == CODA_960)
495 memset(vb2_plane_vaddr(buf, 0), 0, 64); 497 memset(vb2_plane_vaddr(vb, 0), 0, 64);
496 498
497 coda_write(dev, vb2_dma_contig_plane_dma_addr(buf, 0), 499 coda_write(dev, vb2_dma_contig_plane_dma_addr(vb, 0),
498 CODA_CMD_ENC_HEADER_BB_START); 500 CODA_CMD_ENC_HEADER_BB_START);
499 bufsize = vb2_plane_size(buf, 0); 501 bufsize = vb2_plane_size(vb, 0);
500 if (dev->devtype->product == CODA_960) 502 if (dev->devtype->product == CODA_960)
501 bufsize /= 1024; 503 bufsize /= 1024;
502 coda_write(dev, bufsize, CODA_CMD_ENC_HEADER_BB_SIZE); 504 coda_write(dev, bufsize, CODA_CMD_ENC_HEADER_BB_SIZE);
@@ -509,14 +511,14 @@ static int coda_encode_header(struct coda_ctx *ctx, struct vb2_buffer *buf,
509 511
510 if (dev->devtype->product == CODA_960) { 512 if (dev->devtype->product == CODA_960) {
511 for (i = 63; i > 0; i--) 513 for (i = 63; i > 0; i--)
512 if (((char *)vb2_plane_vaddr(buf, 0))[i] != 0) 514 if (((char *)vb2_plane_vaddr(vb, 0))[i] != 0)
513 break; 515 break;
514 *size = i + 1; 516 *size = i + 1;
515 } else { 517 } else {
516 *size = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx)) - 518 *size = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx)) -
517 coda_read(dev, CODA_CMD_ENC_HEADER_BB_START); 519 coda_read(dev, CODA_CMD_ENC_HEADER_BB_START);
518 } 520 }
519 memcpy(header, vb2_plane_vaddr(buf, 0), *size); 521 memcpy(header, vb2_plane_vaddr(vb, 0), *size);
520 522
521 return 0; 523 return 0;
522} 524}
@@ -799,7 +801,7 @@ static int coda_start_encoding(struct coda_ctx *ctx)
799 struct v4l2_device *v4l2_dev = &dev->v4l2_dev; 801 struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
800 struct coda_q_data *q_data_src, *q_data_dst; 802 struct coda_q_data *q_data_src, *q_data_dst;
801 u32 bitstream_buf, bitstream_size; 803 u32 bitstream_buf, bitstream_size;
802 struct vb2_buffer *buf; 804 struct vb2_v4l2_buffer *buf;
803 int gamma, ret, value; 805 int gamma, ret, value;
804 u32 dst_fourcc; 806 u32 dst_fourcc;
805 int num_fb; 807 int num_fb;
@@ -810,7 +812,7 @@ static int coda_start_encoding(struct coda_ctx *ctx)
810 dst_fourcc = q_data_dst->fourcc; 812 dst_fourcc = q_data_dst->fourcc;
811 813
812 buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); 814 buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
813 bitstream_buf = vb2_dma_contig_plane_dma_addr(buf, 0); 815 bitstream_buf = vb2_dma_contig_plane_dma_addr(&buf->vb2_buf, 0);
814 bitstream_size = q_data_dst->sizeimage; 816 bitstream_size = q_data_dst->sizeimage;
815 817
816 if (!coda_is_initialized(dev)) { 818 if (!coda_is_initialized(dev)) {
@@ -1185,7 +1187,7 @@ out:
1185static int coda_prepare_encode(struct coda_ctx *ctx) 1187static int coda_prepare_encode(struct coda_ctx *ctx)
1186{ 1188{
1187 struct coda_q_data *q_data_src, *q_data_dst; 1189 struct coda_q_data *q_data_src, *q_data_dst;
1188 struct vb2_buffer *src_buf, *dst_buf; 1190 struct vb2_v4l2_buffer *src_buf, *dst_buf;
1189 struct coda_dev *dev = ctx->dev; 1191 struct coda_dev *dev = ctx->dev;
1190 int force_ipicture; 1192 int force_ipicture;
1191 int quant_param = 0; 1193 int quant_param = 0;
@@ -1200,8 +1202,8 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
1200 q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); 1202 q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
1201 dst_fourcc = q_data_dst->fourcc; 1203 dst_fourcc = q_data_dst->fourcc;
1202 1204
1203 src_buf->v4l2_buf.sequence = ctx->osequence; 1205 src_buf->sequence = ctx->osequence;
1204 dst_buf->v4l2_buf.sequence = ctx->osequence; 1206 dst_buf->sequence = ctx->osequence;
1205 ctx->osequence++; 1207 ctx->osequence++;
1206 1208
1207 /* 1209 /*
@@ -1209,12 +1211,12 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
1209 * frame as IDR. This is a problem for some decoders that can't 1211 * frame as IDR. This is a problem for some decoders that can't
1210 * recover when a frame is lost. 1212 * recover when a frame is lost.
1211 */ 1213 */
1212 if (src_buf->v4l2_buf.sequence % ctx->params.gop_size) { 1214 if (src_buf->sequence % ctx->params.gop_size) {
1213 src_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME; 1215 src_buf->flags |= V4L2_BUF_FLAG_PFRAME;
1214 src_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_KEYFRAME; 1216 src_buf->flags &= ~V4L2_BUF_FLAG_KEYFRAME;
1215 } else { 1217 } else {
1216 src_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME; 1218 src_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
1217 src_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_PFRAME; 1219 src_buf->flags &= ~V4L2_BUF_FLAG_PFRAME;
1218 } 1220 }
1219 1221
1220 if (dev->devtype->product == CODA_960) 1222 if (dev->devtype->product == CODA_960)
@@ -1224,9 +1226,9 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
1224 * Copy headers at the beginning of the first frame for H.264 only. 1226 * Copy headers at the beginning of the first frame for H.264 only.
1225 * In MPEG4 they are already copied by the coda. 1227 * In MPEG4 they are already copied by the coda.
1226 */ 1228 */
1227 if (src_buf->v4l2_buf.sequence == 0) { 1229 if (src_buf->sequence == 0) {
1228 pic_stream_buffer_addr = 1230 pic_stream_buffer_addr =
1229 vb2_dma_contig_plane_dma_addr(dst_buf, 0) + 1231 vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0) +
1230 ctx->vpu_header_size[0] + 1232 ctx->vpu_header_size[0] +
1231 ctx->vpu_header_size[1] + 1233 ctx->vpu_header_size[1] +
1232 ctx->vpu_header_size[2]; 1234 ctx->vpu_header_size[2];
@@ -1234,20 +1236,21 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
1234 ctx->vpu_header_size[0] - 1236 ctx->vpu_header_size[0] -
1235 ctx->vpu_header_size[1] - 1237 ctx->vpu_header_size[1] -
1236 ctx->vpu_header_size[2]; 1238 ctx->vpu_header_size[2];
1237 memcpy(vb2_plane_vaddr(dst_buf, 0), 1239 memcpy(vb2_plane_vaddr(&dst_buf->vb2_buf, 0),
1238 &ctx->vpu_header[0][0], ctx->vpu_header_size[0]); 1240 &ctx->vpu_header[0][0], ctx->vpu_header_size[0]);
1239 memcpy(vb2_plane_vaddr(dst_buf, 0) + ctx->vpu_header_size[0], 1241 memcpy(vb2_plane_vaddr(&dst_buf->vb2_buf, 0)
1240 &ctx->vpu_header[1][0], ctx->vpu_header_size[1]); 1242 + ctx->vpu_header_size[0], &ctx->vpu_header[1][0],
1241 memcpy(vb2_plane_vaddr(dst_buf, 0) + ctx->vpu_header_size[0] + 1243 ctx->vpu_header_size[1]);
1242 ctx->vpu_header_size[1], &ctx->vpu_header[2][0], 1244 memcpy(vb2_plane_vaddr(&dst_buf->vb2_buf, 0)
1243 ctx->vpu_header_size[2]); 1245 + ctx->vpu_header_size[0] + ctx->vpu_header_size[1],
1246 &ctx->vpu_header[2][0], ctx->vpu_header_size[2]);
1244 } else { 1247 } else {
1245 pic_stream_buffer_addr = 1248 pic_stream_buffer_addr =
1246 vb2_dma_contig_plane_dma_addr(dst_buf, 0); 1249 vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
1247 pic_stream_buffer_size = q_data_dst->sizeimage; 1250 pic_stream_buffer_size = q_data_dst->sizeimage;
1248 } 1251 }
1249 1252
1250 if (src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) { 1253 if (src_buf->flags & V4L2_BUF_FLAG_KEYFRAME) {
1251 force_ipicture = 1; 1254 force_ipicture = 1;
1252 switch (dst_fourcc) { 1255 switch (dst_fourcc) {
1253 case V4L2_PIX_FMT_H264: 1256 case V4L2_PIX_FMT_H264:
@@ -1324,7 +1327,7 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
1324 1327
1325static void coda_finish_encode(struct coda_ctx *ctx) 1328static void coda_finish_encode(struct coda_ctx *ctx)
1326{ 1329{
1327 struct vb2_buffer *src_buf, *dst_buf; 1330 struct vb2_v4l2_buffer *src_buf, *dst_buf;
1328 struct coda_dev *dev = ctx->dev; 1331 struct coda_dev *dev = ctx->dev;
1329 u32 wr_ptr, start_ptr; 1332 u32 wr_ptr, start_ptr;
1330 1333
@@ -1338,13 +1341,13 @@ static void coda_finish_encode(struct coda_ctx *ctx)
1338 wr_ptr = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx)); 1341 wr_ptr = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
1339 1342
1340 /* Calculate bytesused field */ 1343 /* Calculate bytesused field */
1341 if (dst_buf->v4l2_buf.sequence == 0) { 1344 if (dst_buf->sequence == 0) {
1342 vb2_set_plane_payload(dst_buf, 0, wr_ptr - start_ptr + 1345 vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
1343 ctx->vpu_header_size[0] + 1346 ctx->vpu_header_size[0] +
1344 ctx->vpu_header_size[1] + 1347 ctx->vpu_header_size[1] +
1345 ctx->vpu_header_size[2]); 1348 ctx->vpu_header_size[2]);
1346 } else { 1349 } else {
1347 vb2_set_plane_payload(dst_buf, 0, wr_ptr - start_ptr); 1350 vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr);
1348 } 1351 }
1349 1352
1350 v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "frame size = %u\n", 1353 v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "frame size = %u\n",
@@ -1354,18 +1357,18 @@ static void coda_finish_encode(struct coda_ctx *ctx)
1354 coda_read(dev, CODA_RET_ENC_PIC_FLAG); 1357 coda_read(dev, CODA_RET_ENC_PIC_FLAG);
1355 1358
1356 if (coda_read(dev, CODA_RET_ENC_PIC_TYPE) == 0) { 1359 if (coda_read(dev, CODA_RET_ENC_PIC_TYPE) == 0) {
1357 dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME; 1360 dst_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
1358 dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_PFRAME; 1361 dst_buf->flags &= ~V4L2_BUF_FLAG_PFRAME;
1359 } else { 1362 } else {
1360 dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME; 1363 dst_buf->flags |= V4L2_BUF_FLAG_PFRAME;
1361 dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_KEYFRAME; 1364 dst_buf->flags &= ~V4L2_BUF_FLAG_KEYFRAME;
1362 } 1365 }
1363 1366
1364 dst_buf->v4l2_buf.timestamp = src_buf->v4l2_buf.timestamp; 1367 dst_buf->timestamp = src_buf->timestamp;
1365 dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; 1368 dst_buf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
1366 dst_buf->v4l2_buf.flags |= 1369 dst_buf->flags |=
1367 src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK; 1370 src_buf->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
1368 dst_buf->v4l2_buf.timecode = src_buf->v4l2_buf.timecode; 1371 dst_buf->timecode = src_buf->timecode;
1369 1372
1370 v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE); 1373 v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
1371 1374
@@ -1378,8 +1381,8 @@ static void coda_finish_encode(struct coda_ctx *ctx)
1378 1381
1379 v4l2_dbg(1, coda_debug, &dev->v4l2_dev, 1382 v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
1380 "job finished: encoding frame (%d) (%s)\n", 1383 "job finished: encoding frame (%d) (%s)\n",
1381 dst_buf->v4l2_buf.sequence, 1384 dst_buf->sequence,
1382 (dst_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) ? 1385 (dst_buf->flags & V4L2_BUF_FLAG_KEYFRAME) ?
1383 "KEYFRAME" : "PFRAME"); 1386 "KEYFRAME" : "PFRAME");
1384} 1387}
1385 1388
@@ -1716,7 +1719,7 @@ static int coda_start_decoding(struct coda_ctx *ctx)
1716 1719
1717static int coda_prepare_decode(struct coda_ctx *ctx) 1720static int coda_prepare_decode(struct coda_ctx *ctx)
1718{ 1721{
1719 struct vb2_buffer *dst_buf; 1722 struct vb2_v4l2_buffer *dst_buf;
1720 struct coda_dev *dev = ctx->dev; 1723 struct coda_dev *dev = ctx->dev;
1721 struct coda_q_data *q_data_dst; 1724 struct coda_q_data *q_data_dst;
1722 struct coda_buffer_meta *meta; 1725 struct coda_buffer_meta *meta;
@@ -1763,7 +1766,7 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
1763 * well as the rotator buffer output. 1766 * well as the rotator buffer output.
1764 * ROT_INDEX needs to be < 0x40, but > ctx->num_internal_frames. 1767 * ROT_INDEX needs to be < 0x40, but > ctx->num_internal_frames.
1765 */ 1768 */
1766 coda_write(dev, CODA_MAX_FRAMEBUFFERS + dst_buf->v4l2_buf.index, 1769 coda_write(dev, CODA_MAX_FRAMEBUFFERS + dst_buf->vb2_buf.index,
1767 CODA9_CMD_DEC_PIC_ROT_INDEX); 1770 CODA9_CMD_DEC_PIC_ROT_INDEX);
1768 1771
1769 reg_addr = CODA9_CMD_DEC_PIC_ROT_ADDR_Y; 1772 reg_addr = CODA9_CMD_DEC_PIC_ROT_ADDR_Y;
@@ -1838,7 +1841,7 @@ static void coda_finish_decode(struct coda_ctx *ctx)
1838 struct coda_dev *dev = ctx->dev; 1841 struct coda_dev *dev = ctx->dev;
1839 struct coda_q_data *q_data_src; 1842 struct coda_q_data *q_data_src;
1840 struct coda_q_data *q_data_dst; 1843 struct coda_q_data *q_data_dst;
1841 struct vb2_buffer *dst_buf; 1844 struct vb2_v4l2_buffer *dst_buf;
1842 struct coda_buffer_meta *meta; 1845 struct coda_buffer_meta *meta;
1843 unsigned long payload; 1846 unsigned long payload;
1844 unsigned long flags; 1847 unsigned long flags;
@@ -2029,15 +2032,15 @@ static void coda_finish_decode(struct coda_ctx *ctx)
2029 if (ctx->display_idx >= 0 && 2032 if (ctx->display_idx >= 0 &&
2030 ctx->display_idx < ctx->num_internal_frames) { 2033 ctx->display_idx < ctx->num_internal_frames) {
2031 dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); 2034 dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
2032 dst_buf->v4l2_buf.sequence = ctx->osequence++; 2035 dst_buf->sequence = ctx->osequence++;
2033 2036
2034 dst_buf->v4l2_buf.flags &= ~(V4L2_BUF_FLAG_KEYFRAME | 2037 dst_buf->flags &= ~(V4L2_BUF_FLAG_KEYFRAME |
2035 V4L2_BUF_FLAG_PFRAME | 2038 V4L2_BUF_FLAG_PFRAME |
2036 V4L2_BUF_FLAG_BFRAME); 2039 V4L2_BUF_FLAG_BFRAME);
2037 dst_buf->v4l2_buf.flags |= ctx->frame_types[ctx->display_idx]; 2040 dst_buf->flags |= ctx->frame_types[ctx->display_idx];
2038 meta = &ctx->frame_metas[ctx->display_idx]; 2041 meta = &ctx->frame_metas[ctx->display_idx];
2039 dst_buf->v4l2_buf.timecode = meta->timecode; 2042 dst_buf->timecode = meta->timecode;
2040 dst_buf->v4l2_buf.timestamp = meta->timestamp; 2043 dst_buf->timestamp = meta->timestamp;
2041 2044
2042 trace_coda_dec_rot_done(ctx, dst_buf, meta); 2045 trace_coda_dec_rot_done(ctx, dst_buf, meta);
2043 2046
@@ -2052,15 +2055,15 @@ static void coda_finish_decode(struct coda_ctx *ctx)
2052 payload = width * height * 2; 2055 payload = width * height * 2;
2053 break; 2056 break;
2054 } 2057 }
2055 vb2_set_plane_payload(dst_buf, 0, payload); 2058 vb2_set_plane_payload(&dst_buf->vb2_buf, 0, payload);
2056 2059
2057 coda_m2m_buf_done(ctx, dst_buf, ctx->frame_errors[display_idx] ? 2060 coda_m2m_buf_done(ctx, dst_buf, ctx->frame_errors[display_idx] ?
2058 VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); 2061 VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
2059 2062
2060 v4l2_dbg(1, coda_debug, &dev->v4l2_dev, 2063 v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
2061 "job finished: decoding frame (%d) (%s)\n", 2064 "job finished: decoding frame (%d) (%s)\n",
2062 dst_buf->v4l2_buf.sequence, 2065 dst_buf->sequence,
2063 (dst_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) ? 2066 (dst_buf->flags & V4L2_BUF_FLAG_KEYFRAME) ?
2064 "KEYFRAME" : "PFRAME"); 2067 "KEYFRAME" : "PFRAME");
2065 } else { 2068 } else {
2066 v4l2_dbg(1, coda_debug, &dev->v4l2_dev, 2069 v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 998fe6614b33..60336eec75af 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -84,9 +84,9 @@ unsigned int coda_read(struct coda_dev *dev, u32 reg)
84} 84}
85 85
86void coda_write_base(struct coda_ctx *ctx, struct coda_q_data *q_data, 86void coda_write_base(struct coda_ctx *ctx, struct coda_q_data *q_data,
87 struct vb2_buffer *buf, unsigned int reg_y) 87 struct vb2_v4l2_buffer *buf, unsigned int reg_y)
88{ 88{
89 u32 base_y = vb2_dma_contig_plane_dma_addr(buf, 0); 89 u32 base_y = vb2_dma_contig_plane_dma_addr(&buf->vb2_buf, 0);
90 u32 base_cb, base_cr; 90 u32 base_cb, base_cr;
91 91
92 switch (q_data->fourcc) { 92 switch (q_data->fourcc) {
@@ -684,17 +684,17 @@ static int coda_qbuf(struct file *file, void *priv,
684} 684}
685 685
686static bool coda_buf_is_end_of_stream(struct coda_ctx *ctx, 686static bool coda_buf_is_end_of_stream(struct coda_ctx *ctx,
687 struct vb2_buffer *buf) 687 struct vb2_v4l2_buffer *buf)
688{ 688{
689 struct vb2_queue *src_vq; 689 struct vb2_queue *src_vq;
690 690
691 src_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); 691 src_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
692 692
693 return ((ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG) && 693 return ((ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG) &&
694 (buf->v4l2_buf.sequence == (ctx->qsequence - 1))); 694 (buf->sequence == (ctx->qsequence - 1)));
695} 695}
696 696
697void coda_m2m_buf_done(struct coda_ctx *ctx, struct vb2_buffer *buf, 697void coda_m2m_buf_done(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
698 enum vb2_buffer_state state) 698 enum vb2_buffer_state state)
699{ 699{
700 const struct v4l2_event eos_event = { 700 const struct v4l2_event eos_event = {
@@ -702,7 +702,7 @@ void coda_m2m_buf_done(struct coda_ctx *ctx, struct vb2_buffer *buf,
702 }; 702 };
703 703
704 if (coda_buf_is_end_of_stream(ctx, buf)) { 704 if (coda_buf_is_end_of_stream(ctx, buf)) {
705 buf->v4l2_buf.flags |= V4L2_BUF_FLAG_LAST; 705 buf->flags |= V4L2_BUF_FLAG_LAST;
706 706
707 v4l2_event_queue_fh(&ctx->fh, &eos_event); 707 v4l2_event_queue_fh(&ctx->fh, &eos_event);
708 } 708 }
@@ -1175,6 +1175,7 @@ static int coda_buf_prepare(struct vb2_buffer *vb)
1175 1175
1176static void coda_buf_queue(struct vb2_buffer *vb) 1176static void coda_buf_queue(struct vb2_buffer *vb)
1177{ 1177{
1178 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1178 struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); 1179 struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1179 struct vb2_queue *vq = vb->vb2_queue; 1180 struct vb2_queue *vq = vb->vb2_queue;
1180 struct coda_q_data *q_data; 1181 struct coda_q_data *q_data;
@@ -1193,12 +1194,12 @@ static void coda_buf_queue(struct vb2_buffer *vb)
1193 if (vb2_get_plane_payload(vb, 0) == 0) 1194 if (vb2_get_plane_payload(vb, 0) == 0)
1194 coda_bit_stream_end_flag(ctx); 1195 coda_bit_stream_end_flag(ctx);
1195 mutex_lock(&ctx->bitstream_mutex); 1196 mutex_lock(&ctx->bitstream_mutex);
1196 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb); 1197 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
1197 if (vb2_is_streaming(vb->vb2_queue)) 1198 if (vb2_is_streaming(vb->vb2_queue))
1198 coda_fill_bitstream(ctx, true); 1199 coda_fill_bitstream(ctx, true);
1199 mutex_unlock(&ctx->bitstream_mutex); 1200 mutex_unlock(&ctx->bitstream_mutex);
1200 } else { 1201 } else {
1201 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb); 1202 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
1202 } 1203 }
1203} 1204}
1204 1205
@@ -1247,7 +1248,7 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
1247 struct coda_ctx *ctx = vb2_get_drv_priv(q); 1248 struct coda_ctx *ctx = vb2_get_drv_priv(q);
1248 struct v4l2_device *v4l2_dev = &ctx->dev->v4l2_dev; 1249 struct v4l2_device *v4l2_dev = &ctx->dev->v4l2_dev;
1249 struct coda_q_data *q_data_src, *q_data_dst; 1250 struct coda_q_data *q_data_src, *q_data_dst;
1250 struct vb2_buffer *buf; 1251 struct vb2_v4l2_buffer *buf;
1251 int ret = 0; 1252 int ret = 0;
1252 1253
1253 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); 1254 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
@@ -1338,7 +1339,7 @@ static void coda_stop_streaming(struct vb2_queue *q)
1338{ 1339{
1339 struct coda_ctx *ctx = vb2_get_drv_priv(q); 1340 struct coda_ctx *ctx = vb2_get_drv_priv(q);
1340 struct coda_dev *dev = ctx->dev; 1341 struct coda_dev *dev = ctx->dev;
1341 struct vb2_buffer *buf; 1342 struct vb2_v4l2_buffer *buf;
1342 unsigned long flags; 1343 unsigned long flags;
1343 bool stop; 1344 bool stop;
1344 1345
diff --git a/drivers/media/platform/coda/coda-jpeg.c b/drivers/media/platform/coda/coda-jpeg.c
index 11e734bc2cbd..96cd42a0baaf 100644
--- a/drivers/media/platform/coda/coda-jpeg.c
+++ b/drivers/media/platform/coda/coda-jpeg.c
@@ -178,12 +178,12 @@ int coda_jpeg_write_tables(struct coda_ctx *ctx)
178 return 0; 178 return 0;
179} 179}
180 180
181bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_buffer *vb) 181bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_v4l2_buffer *vb)
182{ 182{
183 void *vaddr = vb2_plane_vaddr(vb, 0); 183 void *vaddr = vb2_plane_vaddr(&vb->vb2_buf, 0);
184 u16 soi = be16_to_cpup((__be16 *)vaddr); 184 u16 soi = be16_to_cpup((__be16 *)vaddr);
185 u16 eoi = be16_to_cpup((__be16 *)(vaddr + 185 u16 eoi = be16_to_cpup((__be16 *)(vaddr +
186 vb2_get_plane_payload(vb, 0) - 2)); 186 vb2_get_plane_payload(&vb->vb2_buf, 0) - 2));
187 187
188 return soi == SOI_MARKER && eoi == EOI_MARKER; 188 return soi == SOI_MARKER && eoi == EOI_MARKER;
189} 189}
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index feb9671a12bd..96532b06bd9e 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -243,7 +243,7 @@ extern int coda_debug;
243void coda_write(struct coda_dev *dev, u32 data, u32 reg); 243void coda_write(struct coda_dev *dev, u32 data, u32 reg);
244unsigned int coda_read(struct coda_dev *dev, u32 reg); 244unsigned int coda_read(struct coda_dev *dev, u32 reg);
245void coda_write_base(struct coda_ctx *ctx, struct coda_q_data *q_data, 245void coda_write_base(struct coda_ctx *ctx, struct coda_q_data *q_data,
246 struct vb2_buffer *buf, unsigned int reg_y); 246 struct vb2_v4l2_buffer *buf, unsigned int reg_y);
247 247
248int coda_alloc_aux_buf(struct coda_dev *dev, struct coda_aux_buf *buf, 248int coda_alloc_aux_buf(struct coda_dev *dev, struct coda_aux_buf *buf,
249 size_t size, const char *name, struct dentry *parent); 249 size_t size, const char *name, struct dentry *parent);
@@ -284,12 +284,12 @@ static inline unsigned int coda_get_bitstream_payload(struct coda_ctx *ctx)
284 284
285void coda_bit_stream_end_flag(struct coda_ctx *ctx); 285void coda_bit_stream_end_flag(struct coda_ctx *ctx);
286 286
287void coda_m2m_buf_done(struct coda_ctx *ctx, struct vb2_buffer *buf, 287void coda_m2m_buf_done(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
288 enum vb2_buffer_state state); 288 enum vb2_buffer_state state);
289 289
290int coda_h264_padding(int size, char *p); 290int coda_h264_padding(int size, char *p);
291 291
292bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_buffer *vb); 292bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_v4l2_buffer *vb);
293int coda_jpeg_write_tables(struct coda_ctx *ctx); 293int coda_jpeg_write_tables(struct coda_ctx *ctx);
294void coda_set_jpeg_compression_quality(struct coda_ctx *ctx, int quality); 294void coda_set_jpeg_compression_quality(struct coda_ctx *ctx, int quality);
295 295
diff --git a/drivers/media/platform/coda/trace.h b/drivers/media/platform/coda/trace.h
index 9db6a6662913..f20666a4aa89 100644
--- a/drivers/media/platform/coda/trace.h
+++ b/drivers/media/platform/coda/trace.h
@@ -49,7 +49,7 @@ TRACE_EVENT(coda_bit_done,
49); 49);
50 50
51DECLARE_EVENT_CLASS(coda_buf_class, 51DECLARE_EVENT_CLASS(coda_buf_class,
52 TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf), 52 TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf),
53 53
54 TP_ARGS(ctx, buf), 54 TP_ARGS(ctx, buf),
55 55
@@ -61,7 +61,7 @@ DECLARE_EVENT_CLASS(coda_buf_class,
61 61
62 TP_fast_assign( 62 TP_fast_assign(
63 __entry->minor = ctx->fh.vdev->minor; 63 __entry->minor = ctx->fh.vdev->minor;
64 __entry->index = buf->v4l2_buf.index; 64 __entry->index = buf->vb2_buf.index;
65 __entry->ctx = ctx->idx; 65 __entry->ctx = ctx->idx;
66 ), 66 ),
67 67
@@ -70,17 +70,17 @@ DECLARE_EVENT_CLASS(coda_buf_class,
70); 70);
71 71
72DEFINE_EVENT(coda_buf_class, coda_enc_pic_run, 72DEFINE_EVENT(coda_buf_class, coda_enc_pic_run,
73 TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf), 73 TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf),
74 TP_ARGS(ctx, buf) 74 TP_ARGS(ctx, buf)
75); 75);
76 76
77DEFINE_EVENT(coda_buf_class, coda_enc_pic_done, 77DEFINE_EVENT(coda_buf_class, coda_enc_pic_done,
78 TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf), 78 TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf),
79 TP_ARGS(ctx, buf) 79 TP_ARGS(ctx, buf)
80); 80);
81 81
82DECLARE_EVENT_CLASS(coda_buf_meta_class, 82DECLARE_EVENT_CLASS(coda_buf_meta_class,
83 TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf, 83 TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
84 struct coda_buffer_meta *meta), 84 struct coda_buffer_meta *meta),
85 85
86 TP_ARGS(ctx, buf, meta), 86 TP_ARGS(ctx, buf, meta),
@@ -95,7 +95,7 @@ DECLARE_EVENT_CLASS(coda_buf_meta_class,
95 95
96 TP_fast_assign( 96 TP_fast_assign(
97 __entry->minor = ctx->fh.vdev->minor; 97 __entry->minor = ctx->fh.vdev->minor;
98 __entry->index = buf->v4l2_buf.index; 98 __entry->index = buf->vb2_buf.index;
99 __entry->start = meta->start; 99 __entry->start = meta->start;
100 __entry->end = meta->end; 100 __entry->end = meta->end;
101 __entry->ctx = ctx->idx; 101 __entry->ctx = ctx->idx;
@@ -107,7 +107,7 @@ DECLARE_EVENT_CLASS(coda_buf_meta_class,
107); 107);
108 108
109DEFINE_EVENT(coda_buf_meta_class, coda_bit_queue, 109DEFINE_EVENT(coda_buf_meta_class, coda_bit_queue,
110 TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf, 110 TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
111 struct coda_buffer_meta *meta), 111 struct coda_buffer_meta *meta),
112 TP_ARGS(ctx, buf, meta) 112 TP_ARGS(ctx, buf, meta)
113); 113);
@@ -146,7 +146,7 @@ DEFINE_EVENT(coda_meta_class, coda_dec_pic_done,
146); 146);
147 147
148DEFINE_EVENT(coda_buf_meta_class, coda_dec_rot_done, 148DEFINE_EVENT(coda_buf_meta_class, coda_dec_rot_done,
149 TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf, 149 TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
150 struct coda_buffer_meta *meta), 150 struct coda_buffer_meta *meta),
151 TP_ARGS(ctx, buf, meta) 151 TP_ARGS(ctx, buf, meta)
152); 152);
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index f69cdd7da10c..39f8ccfae339 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -74,8 +74,8 @@ static void vpbe_isr_even_field(struct vpbe_display *disp_obj,
74 if (layer->cur_frm == layer->next_frm) 74 if (layer->cur_frm == layer->next_frm)
75 return; 75 return;
76 76
77 v4l2_get_timestamp(&layer->cur_frm->vb.v4l2_buf.timestamp); 77 v4l2_get_timestamp(&layer->cur_frm->vb.timestamp);
78 vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_DONE); 78 vb2_buffer_done(&layer->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
79 /* Make cur_frm pointing to next_frm */ 79 /* Make cur_frm pointing to next_frm */
80 layer->cur_frm = layer->next_frm; 80 layer->cur_frm = layer->next_frm;
81} 81}
@@ -104,8 +104,8 @@ static void vpbe_isr_odd_field(struct vpbe_display *disp_obj,
104 list_del(&layer->next_frm->list); 104 list_del(&layer->next_frm->list);
105 spin_unlock(&disp_obj->dma_queue_lock); 105 spin_unlock(&disp_obj->dma_queue_lock);
106 /* Mark state of the frame to active */ 106 /* Mark state of the frame to active */
107 layer->next_frm->vb.state = VB2_BUF_STATE_ACTIVE; 107 layer->next_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
108 addr = vb2_dma_contig_plane_dma_addr(&layer->next_frm->vb, 0); 108 addr = vb2_dma_contig_plane_dma_addr(&layer->next_frm->vb.vb2_buf, 0);
109 osd_device->ops.start_layer(osd_device, 109 osd_device->ops.start_layer(osd_device,
110 layer->layer_info.id, 110 layer->layer_info.id,
111 addr, 111 addr,
@@ -259,8 +259,9 @@ vpbe_buffer_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
259 */ 259 */
260static void vpbe_buffer_queue(struct vb2_buffer *vb) 260static void vpbe_buffer_queue(struct vb2_buffer *vb)
261{ 261{
262 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
262 /* Get the file handle object and layer object */ 263 /* Get the file handle object and layer object */
263 struct vpbe_disp_buffer *buf = container_of(vb, 264 struct vpbe_disp_buffer *buf = container_of(vbuf,
264 struct vpbe_disp_buffer, vb); 265 struct vpbe_disp_buffer, vb);
265 struct vpbe_layer *layer = vb2_get_drv_priv(vb->vb2_queue); 266 struct vpbe_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
266 struct vpbe_display *disp = layer->disp_dev; 267 struct vpbe_display *disp = layer->disp_dev;
@@ -290,7 +291,7 @@ static int vpbe_start_streaming(struct vb2_queue *vq, unsigned int count)
290 /* Remove buffer from the buffer queue */ 291 /* Remove buffer from the buffer queue */
291 list_del(&layer->cur_frm->list); 292 list_del(&layer->cur_frm->list);
292 /* Mark state of the current frame to active */ 293 /* Mark state of the current frame to active */
293 layer->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE; 294 layer->cur_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
294 /* Initialize field_id and started member */ 295 /* Initialize field_id and started member */
295 layer->field_id = 0; 296 layer->field_id = 0;
296 297
@@ -299,10 +300,12 @@ static int vpbe_start_streaming(struct vb2_queue *vq, unsigned int count)
299 if (ret < 0) { 300 if (ret < 0) {
300 struct vpbe_disp_buffer *buf, *tmp; 301 struct vpbe_disp_buffer *buf, *tmp;
301 302
302 vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_QUEUED); 303 vb2_buffer_done(&layer->cur_frm->vb.vb2_buf,
304 VB2_BUF_STATE_QUEUED);
303 list_for_each_entry_safe(buf, tmp, &layer->dma_queue, list) { 305 list_for_each_entry_safe(buf, tmp, &layer->dma_queue, list) {
304 list_del(&buf->list); 306 list_del(&buf->list);
305 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); 307 vb2_buffer_done(&buf->vb.vb2_buf,
308 VB2_BUF_STATE_QUEUED);
306 } 309 }
307 310
308 return ret; 311 return ret;
@@ -332,13 +335,14 @@ static void vpbe_stop_streaming(struct vb2_queue *vq)
332 /* release all active buffers */ 335 /* release all active buffers */
333 spin_lock_irqsave(&disp->dma_queue_lock, flags); 336 spin_lock_irqsave(&disp->dma_queue_lock, flags);
334 if (layer->cur_frm == layer->next_frm) { 337 if (layer->cur_frm == layer->next_frm) {
335 vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_ERROR); 338 vb2_buffer_done(&layer->cur_frm->vb.vb2_buf,
339 VB2_BUF_STATE_ERROR);
336 } else { 340 } else {
337 if (layer->cur_frm != NULL) 341 if (layer->cur_frm != NULL)
338 vb2_buffer_done(&layer->cur_frm->vb, 342 vb2_buffer_done(&layer->cur_frm->vb.vb2_buf,
339 VB2_BUF_STATE_ERROR); 343 VB2_BUF_STATE_ERROR);
340 if (layer->next_frm != NULL) 344 if (layer->next_frm != NULL)
341 vb2_buffer_done(&layer->next_frm->vb, 345 vb2_buffer_done(&layer->next_frm->vb.vb2_buf,
342 VB2_BUF_STATE_ERROR); 346 VB2_BUF_STATE_ERROR);
343 } 347 }
344 348
@@ -346,7 +350,8 @@ static void vpbe_stop_streaming(struct vb2_queue *vq)
346 layer->next_frm = list_entry(layer->dma_queue.next, 350 layer->next_frm = list_entry(layer->dma_queue.next,
347 struct vpbe_disp_buffer, list); 351 struct vpbe_disp_buffer, list);
348 list_del(&layer->next_frm->list); 352 list_del(&layer->next_frm->list);
349 vb2_buffer_done(&layer->next_frm->vb, VB2_BUF_STATE_ERROR); 353 vb2_buffer_done(&layer->next_frm->vb.vb2_buf,
354 VB2_BUF_STATE_ERROR);
350 } 355 }
351 spin_unlock_irqrestore(&disp->dma_queue_lock, flags); 356 spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
352} 357}
@@ -383,7 +388,7 @@ static int vpbe_set_osd_display_params(struct vpbe_display *disp_dev,
383 unsigned long addr; 388 unsigned long addr;
384 int ret; 389 int ret;
385 390
386 addr = vb2_dma_contig_plane_dma_addr(&layer->cur_frm->vb, 0); 391 addr = vb2_dma_contig_plane_dma_addr(&layer->cur_frm->vb.vb2_buf, 0);
387 /* Set address in the display registers */ 392 /* Set address in the display registers */
388 osd_device->ops.start_layer(osd_device, 393 osd_device->ops.start_layer(osd_device,
389 layer->layer_info.id, 394 layer->layer_info.id,
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index a5f548138b91..b29bb64db8bf 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -57,7 +57,8 @@ static u8 channel_first_int[VPIF_NUMBER_OF_OBJECTS][2] = { {1, 1} };
57/* Is set to 1 in case of SDTV formats, 2 in case of HDTV formats. */ 57/* Is set to 1 in case of SDTV formats, 2 in case of HDTV formats. */
58static int ycmux_mode; 58static int ycmux_mode;
59 59
60static inline struct vpif_cap_buffer *to_vpif_buffer(struct vb2_buffer *vb) 60static inline
61struct vpif_cap_buffer *to_vpif_buffer(struct vb2_v4l2_buffer *vb)
61{ 62{
62 return container_of(vb, struct vpif_cap_buffer, vb); 63 return container_of(vb, struct vpif_cap_buffer, vb);
63} 64}
@@ -72,6 +73,7 @@ static inline struct vpif_cap_buffer *to_vpif_buffer(struct vb2_buffer *vb)
72 */ 73 */
73static int vpif_buffer_prepare(struct vb2_buffer *vb) 74static int vpif_buffer_prepare(struct vb2_buffer *vb)
74{ 75{
76 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
75 struct vb2_queue *q = vb->vb2_queue; 77 struct vb2_queue *q = vb->vb2_queue;
76 struct channel_obj *ch = vb2_get_drv_priv(q); 78 struct channel_obj *ch = vb2_get_drv_priv(q);
77 struct common_obj *common; 79 struct common_obj *common;
@@ -85,7 +87,7 @@ static int vpif_buffer_prepare(struct vb2_buffer *vb)
85 if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) 87 if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
86 return -EINVAL; 88 return -EINVAL;
87 89
88 vb->v4l2_buf.field = common->fmt.fmt.pix.field; 90 vbuf->field = common->fmt.fmt.pix.field;
89 91
90 addr = vb2_dma_contig_plane_dma_addr(vb, 0); 92 addr = vb2_dma_contig_plane_dma_addr(vb, 0);
91 if (!IS_ALIGNED((addr + common->ytop_off), 8) || 93 if (!IS_ALIGNED((addr + common->ytop_off), 8) ||
@@ -145,8 +147,9 @@ static int vpif_buffer_queue_setup(struct vb2_queue *vq,
145 */ 147 */
146static void vpif_buffer_queue(struct vb2_buffer *vb) 148static void vpif_buffer_queue(struct vb2_buffer *vb)
147{ 149{
150 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
148 struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue); 151 struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue);
149 struct vpif_cap_buffer *buf = to_vpif_buffer(vb); 152 struct vpif_cap_buffer *buf = to_vpif_buffer(vbuf);
150 struct common_obj *common; 153 struct common_obj *common;
151 unsigned long flags; 154 unsigned long flags;
152 155
@@ -214,7 +217,7 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
214 list_del(&common->cur_frm->list); 217 list_del(&common->cur_frm->list);
215 spin_unlock_irqrestore(&common->irqlock, flags); 218 spin_unlock_irqrestore(&common->irqlock, flags);
216 219
217 addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb, 0); 220 addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb.vb2_buf, 0);
218 221
219 common->set_addr(addr + common->ytop_off, 222 common->set_addr(addr + common->ytop_off,
220 addr + common->ybtm_off, 223 addr + common->ybtm_off,
@@ -243,7 +246,7 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
243err: 246err:
244 list_for_each_entry_safe(buf, tmp, &common->dma_queue, list) { 247 list_for_each_entry_safe(buf, tmp, &common->dma_queue, list) {
245 list_del(&buf->list); 248 list_del(&buf->list);
246 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); 249 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
247 } 250 }
248 spin_unlock_irqrestore(&common->irqlock, flags); 251 spin_unlock_irqrestore(&common->irqlock, flags);
249 252
@@ -286,13 +289,14 @@ static void vpif_stop_streaming(struct vb2_queue *vq)
286 /* release all active buffers */ 289 /* release all active buffers */
287 spin_lock_irqsave(&common->irqlock, flags); 290 spin_lock_irqsave(&common->irqlock, flags);
288 if (common->cur_frm == common->next_frm) { 291 if (common->cur_frm == common->next_frm) {
289 vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR); 292 vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
293 VB2_BUF_STATE_ERROR);
290 } else { 294 } else {
291 if (common->cur_frm != NULL) 295 if (common->cur_frm != NULL)
292 vb2_buffer_done(&common->cur_frm->vb, 296 vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
293 VB2_BUF_STATE_ERROR); 297 VB2_BUF_STATE_ERROR);
294 if (common->next_frm != NULL) 298 if (common->next_frm != NULL)
295 vb2_buffer_done(&common->next_frm->vb, 299 vb2_buffer_done(&common->next_frm->vb.vb2_buf,
296 VB2_BUF_STATE_ERROR); 300 VB2_BUF_STATE_ERROR);
297 } 301 }
298 302
@@ -300,7 +304,8 @@ static void vpif_stop_streaming(struct vb2_queue *vq)
300 common->next_frm = list_entry(common->dma_queue.next, 304 common->next_frm = list_entry(common->dma_queue.next,
301 struct vpif_cap_buffer, list); 305 struct vpif_cap_buffer, list);
302 list_del(&common->next_frm->list); 306 list_del(&common->next_frm->list);
303 vb2_buffer_done(&common->next_frm->vb, VB2_BUF_STATE_ERROR); 307 vb2_buffer_done(&common->next_frm->vb.vb2_buf,
308 VB2_BUF_STATE_ERROR);
304 } 309 }
305 spin_unlock_irqrestore(&common->irqlock, flags); 310 spin_unlock_irqrestore(&common->irqlock, flags);
306} 311}
@@ -325,9 +330,8 @@ static struct vb2_ops video_qops = {
325 */ 330 */
326static void vpif_process_buffer_complete(struct common_obj *common) 331static void vpif_process_buffer_complete(struct common_obj *common)
327{ 332{
328 v4l2_get_timestamp(&common->cur_frm->vb.v4l2_buf.timestamp); 333 v4l2_get_timestamp(&common->cur_frm->vb.timestamp);
329 vb2_buffer_done(&common->cur_frm->vb, 334 vb2_buffer_done(&common->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
330 VB2_BUF_STATE_DONE);
331 /* Make curFrm pointing to nextFrm */ 335 /* Make curFrm pointing to nextFrm */
332 common->cur_frm = common->next_frm; 336 common->cur_frm = common->next_frm;
333} 337}
@@ -350,7 +354,7 @@ static void vpif_schedule_next_buffer(struct common_obj *common)
350 /* Remove that buffer from the buffer queue */ 354 /* Remove that buffer from the buffer queue */
351 list_del(&common->next_frm->list); 355 list_del(&common->next_frm->list);
352 spin_unlock(&common->irqlock); 356 spin_unlock(&common->irqlock);
353 addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb, 0); 357 addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb.vb2_buf, 0);
354 358
355 /* Set top and bottom field addresses in VPIF registers */ 359 /* Set top and bottom field addresses in VPIF registers */
356 common->set_addr(addr + common->ytop_off, 360 common->set_addr(addr + common->ytop_off,
diff --git a/drivers/media/platform/davinci/vpif_capture.h b/drivers/media/platform/davinci/vpif_capture.h
index 8b8a663f6b22..4a7600929b61 100644
--- a/drivers/media/platform/davinci/vpif_capture.h
+++ b/drivers/media/platform/davinci/vpif_capture.h
@@ -52,7 +52,7 @@ struct video_obj {
52}; 52};
53 53
54struct vpif_cap_buffer { 54struct vpif_cap_buffer {
55 struct vb2_buffer vb; 55 struct vb2_v4l2_buffer vb;
56 struct list_head list; 56 struct list_head list;
57}; 57};
58 58
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 682e5d578bf7..f51518c5b787 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -53,7 +53,8 @@ static struct device *vpif_dev;
53static void vpif_calculate_offsets(struct channel_obj *ch); 53static void vpif_calculate_offsets(struct channel_obj *ch);
54static void vpif_config_addr(struct channel_obj *ch, int muxmode); 54static void vpif_config_addr(struct channel_obj *ch, int muxmode);
55 55
56static inline struct vpif_disp_buffer *to_vpif_buffer(struct vb2_buffer *vb) 56static inline
57struct vpif_disp_buffer *to_vpif_buffer(struct vb2_v4l2_buffer *vb)
57{ 58{
58 return container_of(vb, struct vpif_disp_buffer, vb); 59 return container_of(vb, struct vpif_disp_buffer, vb);
59} 60}
@@ -68,6 +69,7 @@ static inline struct vpif_disp_buffer *to_vpif_buffer(struct vb2_buffer *vb)
68 */ 69 */
69static int vpif_buffer_prepare(struct vb2_buffer *vb) 70static int vpif_buffer_prepare(struct vb2_buffer *vb)
70{ 71{
72 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
71 struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue); 73 struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue);
72 struct common_obj *common; 74 struct common_obj *common;
73 75
@@ -77,7 +79,7 @@ static int vpif_buffer_prepare(struct vb2_buffer *vb)
77 if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) 79 if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
78 return -EINVAL; 80 return -EINVAL;
79 81
80 vb->v4l2_buf.field = common->fmt.fmt.pix.field; 82 vbuf->field = common->fmt.fmt.pix.field;
81 83
82 if (vb->vb2_queue->type != V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) { 84 if (vb->vb2_queue->type != V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
83 unsigned long addr = vb2_dma_contig_plane_dma_addr(vb, 0); 85 unsigned long addr = vb2_dma_contig_plane_dma_addr(vb, 0);
@@ -138,7 +140,8 @@ static int vpif_buffer_queue_setup(struct vb2_queue *vq,
138 */ 140 */
139static void vpif_buffer_queue(struct vb2_buffer *vb) 141static void vpif_buffer_queue(struct vb2_buffer *vb)
140{ 142{
141 struct vpif_disp_buffer *buf = to_vpif_buffer(vb); 143 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
144 struct vpif_disp_buffer *buf = to_vpif_buffer(vbuf);
142 struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue); 145 struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue);
143 struct common_obj *common; 146 struct common_obj *common;
144 unsigned long flags; 147 unsigned long flags;
@@ -197,7 +200,7 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
197 list_del(&common->cur_frm->list); 200 list_del(&common->cur_frm->list);
198 spin_unlock_irqrestore(&common->irqlock, flags); 201 spin_unlock_irqrestore(&common->irqlock, flags);
199 202
200 addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb, 0); 203 addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb.vb2_buf, 0);
201 common->set_addr((addr + common->ytop_off), 204 common->set_addr((addr + common->ytop_off),
202 (addr + common->ybtm_off), 205 (addr + common->ybtm_off),
203 (addr + common->ctop_off), 206 (addr + common->ctop_off),
@@ -229,7 +232,7 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
229err: 232err:
230 list_for_each_entry_safe(buf, tmp, &common->dma_queue, list) { 233 list_for_each_entry_safe(buf, tmp, &common->dma_queue, list) {
231 list_del(&buf->list); 234 list_del(&buf->list);
232 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); 235 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
233 } 236 }
234 spin_unlock_irqrestore(&common->irqlock, flags); 237 spin_unlock_irqrestore(&common->irqlock, flags);
235 238
@@ -264,13 +267,14 @@ static void vpif_stop_streaming(struct vb2_queue *vq)
264 /* release all active buffers */ 267 /* release all active buffers */
265 spin_lock_irqsave(&common->irqlock, flags); 268 spin_lock_irqsave(&common->irqlock, flags);
266 if (common->cur_frm == common->next_frm) { 269 if (common->cur_frm == common->next_frm) {
267 vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR); 270 vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
271 VB2_BUF_STATE_ERROR);
268 } else { 272 } else {
269 if (common->cur_frm != NULL) 273 if (common->cur_frm != NULL)
270 vb2_buffer_done(&common->cur_frm->vb, 274 vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
271 VB2_BUF_STATE_ERROR); 275 VB2_BUF_STATE_ERROR);
272 if (common->next_frm != NULL) 276 if (common->next_frm != NULL)
273 vb2_buffer_done(&common->next_frm->vb, 277 vb2_buffer_done(&common->next_frm->vb.vb2_buf,
274 VB2_BUF_STATE_ERROR); 278 VB2_BUF_STATE_ERROR);
275 } 279 }
276 280
@@ -278,7 +282,8 @@ static void vpif_stop_streaming(struct vb2_queue *vq)
278 common->next_frm = list_entry(common->dma_queue.next, 282 common->next_frm = list_entry(common->dma_queue.next,
279 struct vpif_disp_buffer, list); 283 struct vpif_disp_buffer, list);
280 list_del(&common->next_frm->list); 284 list_del(&common->next_frm->list);
281 vb2_buffer_done(&common->next_frm->vb, VB2_BUF_STATE_ERROR); 285 vb2_buffer_done(&common->next_frm->vb.vb2_buf,
286 VB2_BUF_STATE_ERROR);
282 } 287 }
283 spin_unlock_irqrestore(&common->irqlock, flags); 288 spin_unlock_irqrestore(&common->irqlock, flags);
284} 289}
@@ -306,7 +311,7 @@ static void process_progressive_mode(struct common_obj *common)
306 spin_unlock(&common->irqlock); 311 spin_unlock(&common->irqlock);
307 312
308 /* Set top and bottom field addrs in VPIF registers */ 313 /* Set top and bottom field addrs in VPIF registers */
309 addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb, 0); 314 addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb.vb2_buf, 0);
310 common->set_addr(addr + common->ytop_off, 315 common->set_addr(addr + common->ytop_off,
311 addr + common->ybtm_off, 316 addr + common->ybtm_off,
312 addr + common->ctop_off, 317 addr + common->ctop_off,
@@ -324,10 +329,10 @@ static void process_interlaced_mode(int fid, struct common_obj *common)
324 /* one frame is displayed If next frame is 329 /* one frame is displayed If next frame is
325 * available, release cur_frm and move on */ 330 * available, release cur_frm and move on */
326 /* Copy frame display time */ 331 /* Copy frame display time */
327 v4l2_get_timestamp(&common->cur_frm->vb.v4l2_buf.timestamp); 332 v4l2_get_timestamp(&common->cur_frm->vb.timestamp);
328 /* Change status of the cur_frm */ 333 /* Change status of the cur_frm */
329 vb2_buffer_done(&common->cur_frm->vb, 334 vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
330 VB2_BUF_STATE_DONE); 335 VB2_BUF_STATE_DONE);
331 /* Make cur_frm pointing to next_frm */ 336 /* Make cur_frm pointing to next_frm */
332 common->cur_frm = common->next_frm; 337 common->cur_frm = common->next_frm;
333 338
@@ -380,10 +385,10 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
380 if (!channel_first_int[i][channel_id]) { 385 if (!channel_first_int[i][channel_id]) {
381 /* Mark status of the cur_frm to 386 /* Mark status of the cur_frm to
382 * done and unlock semaphore on it */ 387 * done and unlock semaphore on it */
383 v4l2_get_timestamp(&common->cur_frm->vb. 388 v4l2_get_timestamp(
384 v4l2_buf.timestamp); 389 &common->cur_frm->vb.timestamp);
385 vb2_buffer_done(&common->cur_frm->vb, 390 vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
386 VB2_BUF_STATE_DONE); 391 VB2_BUF_STATE_DONE);
387 /* Make cur_frm pointing to next_frm */ 392 /* Make cur_frm pointing to next_frm */
388 common->cur_frm = common->next_frm; 393 common->cur_frm = common->next_frm;
389 } 394 }
diff --git a/drivers/media/platform/davinci/vpif_display.h b/drivers/media/platform/davinci/vpif_display.h
index 849e0e385f18..e7a1723a1b7a 100644
--- a/drivers/media/platform/davinci/vpif_display.h
+++ b/drivers/media/platform/davinci/vpif_display.h
@@ -62,7 +62,7 @@ struct video_obj {
62}; 62};
63 63
64struct vpif_disp_buffer { 64struct vpif_disp_buffer {
65 struct vb2_buffer vb; 65 struct vb2_v4l2_buffer vb;
66 struct list_head list; 66 struct list_head list;
67}; 67};
68 68
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
index 769ff50a95a2..e93a2336cfa2 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/exynos-gsc/gsc-core.h
@@ -136,7 +136,7 @@ struct gsc_fmt {
136 * @idx : index of G-Scaler input buffer 136 * @idx : index of G-Scaler input buffer
137 */ 137 */
138struct gsc_input_buf { 138struct gsc_input_buf {
139 struct vb2_buffer vb; 139 struct vb2_v4l2_buffer vb;
140 struct list_head list; 140 struct list_head list;
141 int idx; 141 int idx;
142}; 142};
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index d5cffef2e227..59d134d1fa93 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -77,7 +77,7 @@ static void gsc_m2m_stop_streaming(struct vb2_queue *q)
77 77
78void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state) 78void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state)
79{ 79{
80 struct vb2_buffer *src_vb, *dst_vb; 80 struct vb2_v4l2_buffer *src_vb, *dst_vb;
81 81
82 if (!ctx || !ctx->m2m_ctx) 82 if (!ctx || !ctx->m2m_ctx)
83 return; 83 return;
@@ -86,11 +86,11 @@ void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state)
86 dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx); 86 dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
87 87
88 if (src_vb && dst_vb) { 88 if (src_vb && dst_vb) {
89 dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp; 89 dst_vb->timestamp = src_vb->timestamp;
90 dst_vb->v4l2_buf.timecode = src_vb->v4l2_buf.timecode; 90 dst_vb->timecode = src_vb->timecode;
91 dst_vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; 91 dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
92 dst_vb->v4l2_buf.flags |= 92 dst_vb->flags |=
93 src_vb->v4l2_buf.flags 93 src_vb->flags
94 & V4L2_BUF_FLAG_TSTAMP_SRC_MASK; 94 & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
95 95
96 v4l2_m2m_buf_done(src_vb, vb_state); 96 v4l2_m2m_buf_done(src_vb, vb_state);
@@ -109,23 +109,23 @@ static void gsc_m2m_job_abort(void *priv)
109static int gsc_get_bufs(struct gsc_ctx *ctx) 109static int gsc_get_bufs(struct gsc_ctx *ctx)
110{ 110{
111 struct gsc_frame *s_frame, *d_frame; 111 struct gsc_frame *s_frame, *d_frame;
112 struct vb2_buffer *src_vb, *dst_vb; 112 struct vb2_v4l2_buffer *src_vb, *dst_vb;
113 int ret; 113 int ret;
114 114
115 s_frame = &ctx->s_frame; 115 s_frame = &ctx->s_frame;
116 d_frame = &ctx->d_frame; 116 d_frame = &ctx->d_frame;
117 117
118 src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx); 118 src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
119 ret = gsc_prepare_addr(ctx, src_vb, s_frame, &s_frame->addr); 119 ret = gsc_prepare_addr(ctx, &src_vb->vb2_buf, s_frame, &s_frame->addr);
120 if (ret) 120 if (ret)
121 return ret; 121 return ret;
122 122
123 dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx); 123 dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
124 ret = gsc_prepare_addr(ctx, dst_vb, d_frame, &d_frame->addr); 124 ret = gsc_prepare_addr(ctx, &dst_vb->vb2_buf, d_frame, &d_frame->addr);
125 if (ret) 125 if (ret)
126 return ret; 126 return ret;
127 127
128 dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp; 128 dst_vb->timestamp = src_vb->timestamp;
129 129
130 return 0; 130 return 0;
131} 131}
@@ -255,12 +255,13 @@ static int gsc_m2m_buf_prepare(struct vb2_buffer *vb)
255 255
256static void gsc_m2m_buf_queue(struct vb2_buffer *vb) 256static void gsc_m2m_buf_queue(struct vb2_buffer *vb)
257{ 257{
258 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
258 struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); 259 struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
259 260
260 pr_debug("ctx: %p, ctx->state: 0x%x", ctx, ctx->state); 261 pr_debug("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);
261 262
262 if (ctx->m2m_ctx) 263 if (ctx->m2m_ctx)
263 v4l2_m2m_buf_queue(ctx->m2m_ctx, vb); 264 v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
264} 265}
265 266
266static struct vb2_ops gsc_m2m_qops = { 267static struct vb2_ops gsc_m2m_qops = {
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index b1cdadccf2d7..fa698e7b916b 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -103,7 +103,7 @@ static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend)
103 /* Release unused buffers */ 103 /* Release unused buffers */
104 while (!suspend && !list_empty(&cap->pending_buf_q)) { 104 while (!suspend && !list_empty(&cap->pending_buf_q)) {
105 buf = fimc_pending_queue_pop(cap); 105 buf = fimc_pending_queue_pop(cap);
106 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 106 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
107 } 107 }
108 /* If suspending put unused buffers onto pending queue */ 108 /* If suspending put unused buffers onto pending queue */
109 while (!list_empty(&cap->active_buf_q)) { 109 while (!list_empty(&cap->active_buf_q)) {
@@ -111,7 +111,7 @@ static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend)
111 if (suspend) 111 if (suspend)
112 fimc_pending_queue_add(cap, buf); 112 fimc_pending_queue_add(cap, buf);
113 else 113 else
114 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 114 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
115 } 115 }
116 116
117 fimc_hw_reset(fimc); 117 fimc_hw_reset(fimc);
@@ -193,10 +193,10 @@ void fimc_capture_irq_handler(struct fimc_dev *fimc, int deq_buf)
193 test_bit(ST_CAPT_RUN, &fimc->state) && deq_buf) { 193 test_bit(ST_CAPT_RUN, &fimc->state) && deq_buf) {
194 v_buf = fimc_active_queue_pop(cap); 194 v_buf = fimc_active_queue_pop(cap);
195 195
196 v4l2_get_timestamp(&v_buf->vb.v4l2_buf.timestamp); 196 v4l2_get_timestamp(&v_buf->vb.timestamp);
197 v_buf->vb.v4l2_buf.sequence = cap->frame_count++; 197 v_buf->vb.sequence = cap->frame_count++;
198 198
199 vb2_buffer_done(&v_buf->vb, VB2_BUF_STATE_DONE); 199 vb2_buffer_done(&v_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
200 } 200 }
201 201
202 if (!list_empty(&cap->pending_buf_q)) { 202 if (!list_empty(&cap->pending_buf_q)) {
@@ -227,7 +227,7 @@ void fimc_capture_irq_handler(struct fimc_dev *fimc, int deq_buf)
227 list_for_each_entry(v_buf, &cap->active_buf_q, list) { 227 list_for_each_entry(v_buf, &cap->active_buf_q, list) {
228 if (v_buf->index != index) 228 if (v_buf->index != index)
229 continue; 229 continue;
230 vaddr = vb2_plane_vaddr(&v_buf->vb, plane); 230 vaddr = vb2_plane_vaddr(&v_buf->vb.vb2_buf, plane);
231 v4l2_subdev_call(csis, video, s_rx_buffer, 231 v4l2_subdev_call(csis, video, s_rx_buffer,
232 vaddr, &size); 232 vaddr, &size);
233 break; 233 break;
@@ -332,7 +332,7 @@ int fimc_capture_resume(struct fimc_dev *fimc)
332 if (list_empty(&vid_cap->pending_buf_q)) 332 if (list_empty(&vid_cap->pending_buf_q))
333 break; 333 break;
334 buf = fimc_pending_queue_pop(vid_cap); 334 buf = fimc_pending_queue_pop(vid_cap);
335 buffer_queue(&buf->vb); 335 buffer_queue(&buf->vb.vb2_buf);
336 } 336 }
337 return 0; 337 return 0;
338 338
@@ -404,8 +404,9 @@ static int buffer_prepare(struct vb2_buffer *vb)
404 404
405static void buffer_queue(struct vb2_buffer *vb) 405static void buffer_queue(struct vb2_buffer *vb)
406{ 406{
407 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
407 struct fimc_vid_buffer *buf 408 struct fimc_vid_buffer *buf
408 = container_of(vb, struct fimc_vid_buffer, vb); 409 = container_of(vbuf, struct fimc_vid_buffer, vb);
409 struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); 410 struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
410 struct fimc_dev *fimc = ctx->fimc_dev; 411 struct fimc_dev *fimc = ctx->fimc_dev;
411 struct fimc_vid_cap *vid_cap = &fimc->vid_cap; 412 struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
@@ -414,7 +415,7 @@ static void buffer_queue(struct vb2_buffer *vb)
414 int min_bufs; 415 int min_bufs;
415 416
416 spin_lock_irqsave(&fimc->slock, flags); 417 spin_lock_irqsave(&fimc->slock, flags);
417 fimc_prepare_addr(ctx, &buf->vb, &ctx->d_frame, &buf->paddr); 418 fimc_prepare_addr(ctx, &buf->vb.vb2_buf, &ctx->d_frame, &buf->paddr);
418 419
419 if (!test_bit(ST_CAPT_SUSPENDED, &fimc->state) && 420 if (!test_bit(ST_CAPT_SUSPENDED, &fimc->state) &&
420 !test_bit(ST_CAPT_STREAM, &fimc->state) && 421 !test_bit(ST_CAPT_STREAM, &fimc->state) &&
@@ -1466,7 +1467,8 @@ void fimc_sensor_notify(struct v4l2_subdev *sd, unsigned int notification,
1466 if (!list_empty(&fimc->vid_cap.active_buf_q)) { 1467 if (!list_empty(&fimc->vid_cap.active_buf_q)) {
1467 buf = list_entry(fimc->vid_cap.active_buf_q.next, 1468 buf = list_entry(fimc->vid_cap.active_buf_q.next,
1468 struct fimc_vid_buffer, list); 1469 struct fimc_vid_buffer, list);
1469 vb2_set_plane_payload(&buf->vb, 0, *((u32 *)arg)); 1470 vb2_set_plane_payload(&buf->vb.vb2_buf, 0,
1471 *((u32 *)arg));
1470 } 1472 }
1471 fimc_capture_irq_handler(fimc, 1); 1473 fimc_capture_irq_handler(fimc, 1);
1472 fimc_deactivate_capture(fimc); 1474 fimc_deactivate_capture(fimc);
diff --git a/drivers/media/platform/exynos4-is/fimc-core.h b/drivers/media/platform/exynos4-is/fimc-core.h
index ccb5d917292b..d336fa2916df 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.h
+++ b/drivers/media/platform/exynos4-is/fimc-core.h
@@ -224,7 +224,7 @@ struct fimc_addr {
224 * @index: buffer index for the output DMA engine 224 * @index: buffer index for the output DMA engine
225 */ 225 */
226struct fimc_vid_buffer { 226struct fimc_vid_buffer {
227 struct vb2_buffer vb; 227 struct vb2_v4l2_buffer vb;
228 struct list_head list; 228 struct list_head list;
229 struct fimc_addr paddr; 229 struct fimc_addr paddr;
230 int index; 230 int index;
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
index 195f9b5e9512..bacc3a389b7c 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -194,10 +194,11 @@ static int isp_video_capture_buffer_prepare(struct vb2_buffer *vb)
194 194
195static void isp_video_capture_buffer_queue(struct vb2_buffer *vb) 195static void isp_video_capture_buffer_queue(struct vb2_buffer *vb)
196{ 196{
197 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
197 struct fimc_isp *isp = vb2_get_drv_priv(vb->vb2_queue); 198 struct fimc_isp *isp = vb2_get_drv_priv(vb->vb2_queue);
198 struct fimc_is_video *video = &isp->video_capture; 199 struct fimc_is_video *video = &isp->video_capture;
199 struct fimc_is *is = fimc_isp_to_is(isp); 200 struct fimc_is *is = fimc_isp_to_is(isp);
200 struct isp_video_buf *ivb = to_isp_video_buf(vb); 201 struct isp_video_buf *ivb = to_isp_video_buf(vbuf);
201 unsigned long flags; 202 unsigned long flags;
202 unsigned int i; 203 unsigned int i;
203 204
@@ -220,7 +221,7 @@ static void isp_video_capture_buffer_queue(struct vb2_buffer *vb)
220 221
221 isp_dbg(2, &video->ve.vdev, 222 isp_dbg(2, &video->ve.vdev,
222 "dma_buf %pad (%d/%d/%d) addr: %pad\n", 223 "dma_buf %pad (%d/%d/%d) addr: %pad\n",
223 &buf_index, ivb->index, i, vb->v4l2_buf.index, 224 &buf_index, ivb->index, i, vb->index,
224 &ivb->dma_addr[i]); 225 &ivb->dma_addr[i]);
225 } 226 }
226 227
@@ -242,7 +243,7 @@ static void isp_video_capture_buffer_queue(struct vb2_buffer *vb)
242void fimc_isp_video_irq_handler(struct fimc_is *is) 243void fimc_isp_video_irq_handler(struct fimc_is *is)
243{ 244{
244 struct fimc_is_video *video = &is->isp.video_capture; 245 struct fimc_is_video *video = &is->isp.video_capture;
245 struct vb2_buffer *vb; 246 struct vb2_v4l2_buffer *vbuf;
246 int buf_index; 247 int buf_index;
247 248
248 /* TODO: Ensure the DMA is really stopped in stop_streaming callback */ 249 /* TODO: Ensure the DMA is really stopped in stop_streaming callback */
@@ -250,10 +251,10 @@ void fimc_isp_video_irq_handler(struct fimc_is *is)
250 return; 251 return;
251 252
252 buf_index = (is->i2h_cmd.args[1] - 1) % video->buf_count; 253 buf_index = (is->i2h_cmd.args[1] - 1) % video->buf_count;
253 vb = &video->buffers[buf_index]->vb; 254 vbuf = &video->buffers[buf_index]->vb;
254 255
255 v4l2_get_timestamp(&vb->v4l2_buf.timestamp); 256 v4l2_get_timestamp(&vbuf->timestamp);
256 vb2_buffer_done(vb, VB2_BUF_STATE_DONE); 257 vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
257 258
258 video->buf_mask &= ~BIT(buf_index); 259 video->buf_mask &= ~BIT(buf_index);
259 fimc_is_hw_set_isp_buf_mask(is, video->buf_mask); 260 fimc_is_hw_set_isp_buf_mask(is, video->buf_mask);
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.h b/drivers/media/platform/exynos4-is/fimc-isp.h
index ad9908bb7966..c2d25df85db9 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp.h
+++ b/drivers/media/platform/exynos4-is/fimc-isp.h
@@ -102,7 +102,7 @@ struct fimc_isp_ctrls {
102}; 102};
103 103
104struct isp_video_buf { 104struct isp_video_buf {
105 struct vb2_buffer vb; 105 struct vb2_v4l2_buffer vb;
106 dma_addr_t dma_addr[FIMC_ISP_MAX_PLANES]; 106 dma_addr_t dma_addr[FIMC_ISP_MAX_PLANES];
107 unsigned int index; 107 unsigned int index;
108}; 108};
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index f56c69a63c9b..1920cfbd0369 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -200,7 +200,7 @@ static int fimc_lite_reinit(struct fimc_lite *fimc, bool suspend)
200 /* Release unused buffers */ 200 /* Release unused buffers */
201 while (!suspend && !list_empty(&fimc->pending_buf_q)) { 201 while (!suspend && !list_empty(&fimc->pending_buf_q)) {
202 buf = fimc_lite_pending_queue_pop(fimc); 202 buf = fimc_lite_pending_queue_pop(fimc);
203 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 203 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
204 } 204 }
205 /* If suspending put unused buffers onto pending queue */ 205 /* If suspending put unused buffers onto pending queue */
206 while (!list_empty(&fimc->active_buf_q)) { 206 while (!list_empty(&fimc->active_buf_q)) {
@@ -208,7 +208,7 @@ static int fimc_lite_reinit(struct fimc_lite *fimc, bool suspend)
208 if (suspend) 208 if (suspend)
209 fimc_lite_pending_queue_add(fimc, buf); 209 fimc_lite_pending_queue_add(fimc, buf);
210 else 210 else
211 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 211 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
212 } 212 }
213 213
214 spin_unlock_irqrestore(&fimc->slock, flags); 214 spin_unlock_irqrestore(&fimc->slock, flags);
@@ -292,10 +292,10 @@ static irqreturn_t flite_irq_handler(int irq, void *priv)
292 test_bit(ST_FLITE_RUN, &fimc->state) && 292 test_bit(ST_FLITE_RUN, &fimc->state) &&
293 !list_empty(&fimc->active_buf_q)) { 293 !list_empty(&fimc->active_buf_q)) {
294 vbuf = fimc_lite_active_queue_pop(fimc); 294 vbuf = fimc_lite_active_queue_pop(fimc);
295 v4l2_get_timestamp(&vbuf->vb.v4l2_buf.timestamp); 295 v4l2_get_timestamp(&vbuf->vb.timestamp);
296 vbuf->vb.v4l2_buf.sequence = fimc->frame_count++; 296 vbuf->vb.sequence = fimc->frame_count++;
297 flite_hw_mask_dma_buffer(fimc, vbuf->index); 297 flite_hw_mask_dma_buffer(fimc, vbuf->index);
298 vb2_buffer_done(&vbuf->vb, VB2_BUF_STATE_DONE); 298 vb2_buffer_done(&vbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
299 } 299 }
300 300
301 if (test_bit(ST_FLITE_CONFIG, &fimc->state)) 301 if (test_bit(ST_FLITE_CONFIG, &fimc->state))
@@ -417,8 +417,9 @@ static int buffer_prepare(struct vb2_buffer *vb)
417 417
418static void buffer_queue(struct vb2_buffer *vb) 418static void buffer_queue(struct vb2_buffer *vb)
419{ 419{
420 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
420 struct flite_buffer *buf 421 struct flite_buffer *buf
421 = container_of(vb, struct flite_buffer, vb); 422 = container_of(vbuf, struct flite_buffer, vb);
422 struct fimc_lite *fimc = vb2_get_drv_priv(vb->vb2_queue); 423 struct fimc_lite *fimc = vb2_get_drv_priv(vb->vb2_queue);
423 unsigned long flags; 424 unsigned long flags;
424 425
@@ -1632,7 +1633,7 @@ static int fimc_lite_resume(struct device *dev)
1632 if (list_empty(&fimc->pending_buf_q)) 1633 if (list_empty(&fimc->pending_buf_q))
1633 break; 1634 break;
1634 buf = fimc_lite_pending_queue_pop(fimc); 1635 buf = fimc_lite_pending_queue_pop(fimc);
1635 buffer_queue(&buf->vb); 1636 buffer_queue(&buf->vb.vb2_buf);
1636 } 1637 }
1637 return 0; 1638 return 0;
1638} 1639}
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.h b/drivers/media/platform/exynos4-is/fimc-lite.h
index 7e4c7080e425..b302305dedbe 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.h
+++ b/drivers/media/platform/exynos4-is/fimc-lite.h
@@ -100,7 +100,7 @@ struct flite_frame {
100 * @index: DMA start address register's index 100 * @index: DMA start address register's index
101 */ 101 */
102struct flite_buffer { 102struct flite_buffer {
103 struct vb2_buffer vb; 103 struct vb2_v4l2_buffer vb;
104 struct list_head list; 104 struct list_head list;
105 dma_addr_t paddr; 105 dma_addr_t paddr;
106 unsigned short index; 106 unsigned short index;
diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c
index 07bfddb322f8..79b8a3bfbd2b 100644
--- a/drivers/media/platform/exynos4-is/fimc-m2m.c
+++ b/drivers/media/platform/exynos4-is/fimc-m2m.c
@@ -42,7 +42,7 @@ static unsigned int get_m2m_fmt_flags(unsigned int stream_type)
42 42
43void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state) 43void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state)
44{ 44{
45 struct vb2_buffer *src_vb, *dst_vb; 45 struct vb2_v4l2_buffer *src_vb, *dst_vb;
46 46
47 if (!ctx || !ctx->fh.m2m_ctx) 47 if (!ctx || !ctx->fh.m2m_ctx)
48 return; 48 return;
@@ -99,7 +99,7 @@ static void stop_streaming(struct vb2_queue *q)
99 99
100static void fimc_device_run(void *priv) 100static void fimc_device_run(void *priv)
101{ 101{
102 struct vb2_buffer *src_vb, *dst_vb; 102 struct vb2_v4l2_buffer *src_vb, *dst_vb;
103 struct fimc_ctx *ctx = priv; 103 struct fimc_ctx *ctx = priv;
104 struct fimc_frame *sf, *df; 104 struct fimc_frame *sf, *df;
105 struct fimc_dev *fimc; 105 struct fimc_dev *fimc;
@@ -123,19 +123,19 @@ static void fimc_device_run(void *priv)
123 } 123 }
124 124
125 src_vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); 125 src_vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
126 ret = fimc_prepare_addr(ctx, src_vb, sf, &sf->paddr); 126 ret = fimc_prepare_addr(ctx, &src_vb->vb2_buf, sf, &sf->paddr);
127 if (ret) 127 if (ret)
128 goto dma_unlock; 128 goto dma_unlock;
129 129
130 dst_vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); 130 dst_vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
131 ret = fimc_prepare_addr(ctx, dst_vb, df, &df->paddr); 131 ret = fimc_prepare_addr(ctx, &dst_vb->vb2_buf, df, &df->paddr);
132 if (ret) 132 if (ret)
133 goto dma_unlock; 133 goto dma_unlock;
134 134
135 dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp; 135 dst_vb->timestamp = src_vb->timestamp;
136 dst_vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; 136 dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
137 dst_vb->v4l2_buf.flags |= 137 dst_vb->flags |=
138 src_vb->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK; 138 src_vb->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
139 139
140 /* Reconfigure hardware if the context has changed. */ 140 /* Reconfigure hardware if the context has changed. */
141 if (fimc->m2m.ctx != ctx) { 141 if (fimc->m2m.ctx != ctx) {
@@ -220,8 +220,9 @@ static int fimc_buf_prepare(struct vb2_buffer *vb)
220 220
221static void fimc_buf_queue(struct vb2_buffer *vb) 221static void fimc_buf_queue(struct vb2_buffer *vb)
222{ 222{
223 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
223 struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); 224 struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
224 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb); 225 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
225} 226}
226 227
227static struct vb2_ops fimc_qops = { 228static struct vb2_ops fimc_qops = {
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index c07f367aa436..bdd8f11d8fb1 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -200,18 +200,18 @@ static void dma_callback(void *data)
200{ 200{
201 struct deinterlace_ctx *curr_ctx = data; 201 struct deinterlace_ctx *curr_ctx = data;
202 struct deinterlace_dev *pcdev = curr_ctx->dev; 202 struct deinterlace_dev *pcdev = curr_ctx->dev;
203 struct vb2_buffer *src_vb, *dst_vb; 203 struct vb2_v4l2_buffer *src_vb, *dst_vb;
204 204
205 atomic_set(&pcdev->busy, 0); 205 atomic_set(&pcdev->busy, 0);
206 206
207 src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx); 207 src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
208 dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx); 208 dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
209 209
210 dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp; 210 dst_vb->timestamp = src_vb->timestamp;
211 dst_vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; 211 dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
212 dst_vb->v4l2_buf.flags |= 212 dst_vb->flags |=
213 src_vb->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK; 213 src_vb->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
214 dst_vb->v4l2_buf.timecode = src_vb->v4l2_buf.timecode; 214 dst_vb->timecode = src_vb->timecode;
215 215
216 v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE); 216 v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
217 v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE); 217 v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
@@ -225,7 +225,7 @@ static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op,
225 int do_callback) 225 int do_callback)
226{ 226{
227 struct deinterlace_q_data *s_q_data; 227 struct deinterlace_q_data *s_q_data;
228 struct vb2_buffer *src_buf, *dst_buf; 228 struct vb2_v4l2_buffer *src_buf, *dst_buf;
229 struct deinterlace_dev *pcdev = ctx->dev; 229 struct deinterlace_dev *pcdev = ctx->dev;
230 struct dma_chan *chan = pcdev->dma_chan; 230 struct dma_chan *chan = pcdev->dma_chan;
231 struct dma_device *dmadev = chan->device; 231 struct dma_device *dmadev = chan->device;
@@ -243,8 +243,9 @@ static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op,
243 s_height = s_q_data->height; 243 s_height = s_q_data->height;
244 s_size = s_width * s_height; 244 s_size = s_width * s_height;
245 245
246 p_in = (dma_addr_t)vb2_dma_contig_plane_dma_addr(src_buf, 0); 246 p_in = (dma_addr_t)vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
247 p_out = (dma_addr_t)vb2_dma_contig_plane_dma_addr(dst_buf, 0); 247 p_out = (dma_addr_t)vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf,
248 0);
248 if (!p_in || !p_out) { 249 if (!p_in || !p_out) {
249 v4l2_err(&pcdev->v4l2_dev, 250 v4l2_err(&pcdev->v4l2_dev,
250 "Acquiring kernel pointers to buffers failed\n"); 251 "Acquiring kernel pointers to buffers failed\n");
@@ -849,8 +850,10 @@ static int deinterlace_buf_prepare(struct vb2_buffer *vb)
849 850
850static void deinterlace_buf_queue(struct vb2_buffer *vb) 851static void deinterlace_buf_queue(struct vb2_buffer *vb)
851{ 852{
853 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
852 struct deinterlace_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); 854 struct deinterlace_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
853 v4l2_m2m_buf_queue(ctx->m2m_ctx, vb); 855
856 v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
854} 857}
855 858
856static struct vb2_ops deinterlace_qops = { 859static struct vb2_ops deinterlace_qops = {
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index 5e2b4df48b3c..1d95842e2071 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -201,18 +201,18 @@ struct mcam_dma_desc {
201 201
202/* 202/*
203 * Our buffer type for working with videobuf2. Note that the vb2 203 * Our buffer type for working with videobuf2. Note that the vb2
204 * developers have decreed that struct vb2_buffer must be at the 204 * developers have decreed that struct vb2_v4l2_buffer must be at the
205 * beginning of this structure. 205 * beginning of this structure.
206 */ 206 */
207struct mcam_vb_buffer { 207struct mcam_vb_buffer {
208 struct vb2_buffer vb_buf; 208 struct vb2_v4l2_buffer vb_buf;
209 struct list_head queue; 209 struct list_head queue;
210 struct mcam_dma_desc *dma_desc; /* Descriptor virtual address */ 210 struct mcam_dma_desc *dma_desc; /* Descriptor virtual address */
211 dma_addr_t dma_desc_pa; /* Descriptor physical address */ 211 dma_addr_t dma_desc_pa; /* Descriptor physical address */
212 int dma_desc_nent; /* Number of mapped descriptors */ 212 int dma_desc_nent; /* Number of mapped descriptors */
213}; 213};
214 214
215static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_buffer *vb) 215static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_v4l2_buffer *vb)
216{ 216{
217 return container_of(vb, struct mcam_vb_buffer, vb_buf); 217 return container_of(vb, struct mcam_vb_buffer, vb_buf);
218} 218}
@@ -221,14 +221,14 @@ static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_buffer *vb)
221 * Hand a completed buffer back to user space. 221 * Hand a completed buffer back to user space.
222 */ 222 */
223static void mcam_buffer_done(struct mcam_camera *cam, int frame, 223static void mcam_buffer_done(struct mcam_camera *cam, int frame,
224 struct vb2_buffer *vbuf) 224 struct vb2_v4l2_buffer *vbuf)
225{ 225{
226 vbuf->v4l2_buf.bytesused = cam->pix_format.sizeimage; 226 vbuf->vb2_buf.planes[0].bytesused = cam->pix_format.sizeimage;
227 vbuf->v4l2_buf.sequence = cam->buf_seq[frame]; 227 vbuf->sequence = cam->buf_seq[frame];
228 vbuf->v4l2_buf.field = V4L2_FIELD_NONE; 228 vbuf->field = V4L2_FIELD_NONE;
229 v4l2_get_timestamp(&vbuf->v4l2_buf.timestamp); 229 v4l2_get_timestamp(&vbuf->timestamp);
230 vb2_set_plane_payload(vbuf, 0, cam->pix_format.sizeimage); 230 vb2_set_plane_payload(&vbuf->vb2_buf, 0, cam->pix_format.sizeimage);
231 vb2_buffer_done(vbuf, VB2_BUF_STATE_DONE); 231 vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
232} 232}
233 233
234 234
@@ -482,7 +482,8 @@ static void mcam_frame_tasklet(unsigned long data)
482 * Drop the lock during the big copy. This *should* be safe... 482 * Drop the lock during the big copy. This *should* be safe...
483 */ 483 */
484 spin_unlock_irqrestore(&cam->dev_lock, flags); 484 spin_unlock_irqrestore(&cam->dev_lock, flags);
485 memcpy(vb2_plane_vaddr(&buf->vb_buf, 0), cam->dma_bufs[bufno], 485 memcpy(vb2_plane_vaddr(&buf->vb_buf.vb2_buf, 0),
486 cam->dma_bufs[bufno],
486 cam->pix_format.sizeimage); 487 cam->pix_format.sizeimage);
487 mcam_buffer_done(cam, bufno, &buf->vb_buf); 488 mcam_buffer_done(cam, bufno, &buf->vb_buf);
488 spin_lock_irqsave(&cam->dev_lock, flags); 489 spin_lock_irqsave(&cam->dev_lock, flags);
@@ -548,7 +549,7 @@ static void mcam_set_contig_buffer(struct mcam_camera *cam, int frame)
548{ 549{
549 struct mcam_vb_buffer *buf; 550 struct mcam_vb_buffer *buf;
550 dma_addr_t dma_handle; 551 dma_addr_t dma_handle;
551 struct vb2_buffer *vb; 552 struct vb2_v4l2_buffer *vb;
552 553
553 /* 554 /*
554 * If there are no available buffers, go into single mode 555 * If there are no available buffers, go into single mode
@@ -570,7 +571,7 @@ static void mcam_set_contig_buffer(struct mcam_camera *cam, int frame)
570 cam->vb_bufs[frame] = buf; 571 cam->vb_bufs[frame] = buf;
571 vb = &buf->vb_buf; 572 vb = &buf->vb_buf;
572 573
573 dma_handle = vb2_dma_contig_plane_dma_addr(vb, 0); 574 dma_handle = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
574 mcam_write_yuv_bases(cam, frame, dma_handle); 575 mcam_write_yuv_bases(cam, frame, dma_handle);
575} 576}
576 577
@@ -1071,7 +1072,8 @@ static int mcam_vb_queue_setup(struct vb2_queue *vq,
1071 1072
1072static void mcam_vb_buf_queue(struct vb2_buffer *vb) 1073static void mcam_vb_buf_queue(struct vb2_buffer *vb)
1073{ 1074{
1074 struct mcam_vb_buffer *mvb = vb_to_mvb(vb); 1075 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1076 struct mcam_vb_buffer *mvb = vb_to_mvb(vbuf);
1075 struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue); 1077 struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
1076 unsigned long flags; 1078 unsigned long flags;
1077 int start; 1079 int start;
@@ -1096,14 +1098,14 @@ static void mcam_vb_requeue_bufs(struct vb2_queue *vq,
1096 1098
1097 spin_lock_irqsave(&cam->dev_lock, flags); 1099 spin_lock_irqsave(&cam->dev_lock, flags);
1098 list_for_each_entry_safe(buf, node, &cam->buffers, queue) { 1100 list_for_each_entry_safe(buf, node, &cam->buffers, queue) {
1099 vb2_buffer_done(&buf->vb_buf, state); 1101 vb2_buffer_done(&buf->vb_buf.vb2_buf, state);
1100 list_del(&buf->queue); 1102 list_del(&buf->queue);
1101 } 1103 }
1102 for (i = 0; i < MAX_DMA_BUFS; i++) { 1104 for (i = 0; i < MAX_DMA_BUFS; i++) {
1103 buf = cam->vb_bufs[i]; 1105 buf = cam->vb_bufs[i];
1104 1106
1105 if (buf) { 1107 if (buf) {
1106 vb2_buffer_done(&buf->vb_buf, state); 1108 vb2_buffer_done(&buf->vb_buf.vb2_buf, state);
1107 cam->vb_bufs[i] = NULL; 1109 cam->vb_bufs[i] = NULL;
1108 } 1110 }
1109 } 1111 }
@@ -1198,7 +1200,8 @@ static const struct vb2_ops mcam_vb2_ops = {
1198 */ 1200 */
1199static int mcam_vb_sg_buf_init(struct vb2_buffer *vb) 1201static int mcam_vb_sg_buf_init(struct vb2_buffer *vb)
1200{ 1202{
1201 struct mcam_vb_buffer *mvb = vb_to_mvb(vb); 1203 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1204 struct mcam_vb_buffer *mvb = vb_to_mvb(vbuf);
1202 struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue); 1205 struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
1203 int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1; 1206 int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1;
1204 1207
@@ -1214,7 +1217,8 @@ static int mcam_vb_sg_buf_init(struct vb2_buffer *vb)
1214 1217
1215static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb) 1218static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
1216{ 1219{
1217 struct mcam_vb_buffer *mvb = vb_to_mvb(vb); 1220 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1221 struct mcam_vb_buffer *mvb = vb_to_mvb(vbuf);
1218 struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0); 1222 struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
1219 struct mcam_dma_desc *desc = mvb->dma_desc; 1223 struct mcam_dma_desc *desc = mvb->dma_desc;
1220 struct scatterlist *sg; 1224 struct scatterlist *sg;
@@ -1230,8 +1234,9 @@ static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
1230 1234
1231static void mcam_vb_sg_buf_cleanup(struct vb2_buffer *vb) 1235static void mcam_vb_sg_buf_cleanup(struct vb2_buffer *vb)
1232{ 1236{
1237 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1233 struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue); 1238 struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
1234 struct mcam_vb_buffer *mvb = vb_to_mvb(vb); 1239 struct mcam_vb_buffer *mvb = vb_to_mvb(vbuf);
1235 int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1; 1240 int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1;
1236 1241
1237 dma_free_coherent(cam->dev, ndesc * sizeof(struct mcam_dma_desc), 1242 dma_free_coherent(cam->dev, ndesc * sizeof(struct mcam_dma_desc),
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index 87314b743f55..b7cea274d7ea 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -351,7 +351,7 @@ static irqreturn_t emmaprp_irq(int irq_emma, void *data)
351{ 351{
352 struct emmaprp_dev *pcdev = data; 352 struct emmaprp_dev *pcdev = data;
353 struct emmaprp_ctx *curr_ctx; 353 struct emmaprp_ctx *curr_ctx;
354 struct vb2_buffer *src_vb, *dst_vb; 354 struct vb2_v4l2_buffer *src_vb, *dst_vb;
355 unsigned long flags; 355 unsigned long flags;
356 u32 irqst; 356 u32 irqst;
357 357
@@ -375,13 +375,13 @@ static irqreturn_t emmaprp_irq(int irq_emma, void *data)
375 src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx); 375 src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
376 dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx); 376 dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
377 377
378 dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp; 378 dst_vb->timestamp = src_vb->timestamp;
379 dst_vb->v4l2_buf.flags &= 379 dst_vb->flags &=
380 ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; 380 ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
381 dst_vb->v4l2_buf.flags |= 381 dst_vb->flags |=
382 src_vb->v4l2_buf.flags 382 src_vb->flags
383 & V4L2_BUF_FLAG_TSTAMP_SRC_MASK; 383 & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
384 dst_vb->v4l2_buf.timecode = src_vb->v4l2_buf.timecode; 384 dst_vb->timecode = src_vb->timecode;
385 385
386 spin_lock_irqsave(&pcdev->irqlock, flags); 386 spin_lock_irqsave(&pcdev->irqlock, flags);
387 v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE); 387 v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
@@ -742,8 +742,9 @@ static int emmaprp_buf_prepare(struct vb2_buffer *vb)
742 742
743static void emmaprp_buf_queue(struct vb2_buffer *vb) 743static void emmaprp_buf_queue(struct vb2_buffer *vb)
744{ 744{
745 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
745 struct emmaprp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); 746 struct emmaprp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
746 v4l2_m2m_buf_queue(ctx->m2m_ctx, vb); 747 v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
747} 748}
748 749
749static struct vb2_ops emmaprp_qops = { 750static struct vb2_ops emmaprp_qops = {
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 41bb8df91f72..786cc8593f94 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -342,8 +342,9 @@ static int isp_video_queue_setup(struct vb2_queue *queue,
342 342
343static int isp_video_buffer_prepare(struct vb2_buffer *buf) 343static int isp_video_buffer_prepare(struct vb2_buffer *buf)
344{ 344{
345 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(buf);
345 struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue); 346 struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
346 struct isp_buffer *buffer = to_isp_buffer(buf); 347 struct isp_buffer *buffer = to_isp_buffer(vbuf);
347 struct isp_video *video = vfh->video; 348 struct isp_video *video = vfh->video;
348 dma_addr_t addr; 349 dma_addr_t addr;
349 350
@@ -363,7 +364,8 @@ static int isp_video_buffer_prepare(struct vb2_buffer *buf)
363 return -EINVAL; 364 return -EINVAL;
364 } 365 }
365 366
366 vb2_set_plane_payload(&buffer->vb, 0, vfh->format.fmt.pix.sizeimage); 367 vb2_set_plane_payload(&buffer->vb.vb2_buf, 0,
368 vfh->format.fmt.pix.sizeimage);
367 buffer->dma = addr; 369 buffer->dma = addr;
368 370
369 return 0; 371 return 0;
@@ -380,8 +382,9 @@ static int isp_video_buffer_prepare(struct vb2_buffer *buf)
380 */ 382 */
381static void isp_video_buffer_queue(struct vb2_buffer *buf) 383static void isp_video_buffer_queue(struct vb2_buffer *buf)
382{ 384{
385 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(buf);
383 struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue); 386 struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
384 struct isp_buffer *buffer = to_isp_buffer(buf); 387 struct isp_buffer *buffer = to_isp_buffer(vbuf);
385 struct isp_video *video = vfh->video; 388 struct isp_video *video = vfh->video;
386 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity); 389 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
387 enum isp_pipeline_state state; 390 enum isp_pipeline_state state;
@@ -392,7 +395,7 @@ static void isp_video_buffer_queue(struct vb2_buffer *buf)
392 spin_lock_irqsave(&video->irqlock, flags); 395 spin_lock_irqsave(&video->irqlock, flags);
393 396
394 if (unlikely(video->error)) { 397 if (unlikely(video->error)) {
395 vb2_buffer_done(&buffer->vb, VB2_BUF_STATE_ERROR); 398 vb2_buffer_done(&buffer->vb.vb2_buf, VB2_BUF_STATE_ERROR);
396 spin_unlock_irqrestore(&video->irqlock, flags); 399 spin_unlock_irqrestore(&video->irqlock, flags);
397 return; 400 return;
398 } 401 }
@@ -464,7 +467,7 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
464 list_del(&buf->irqlist); 467 list_del(&buf->irqlist);
465 spin_unlock_irqrestore(&video->irqlock, flags); 468 spin_unlock_irqrestore(&video->irqlock, flags);
466 469
467 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); 470 v4l2_get_timestamp(&buf->vb.timestamp);
468 471
469 /* Do frame number propagation only if this is the output video node. 472 /* Do frame number propagation only if this is the output video node.
470 * Frame number either comes from the CSI receivers or it gets 473 * Frame number either comes from the CSI receivers or it gets
@@ -473,15 +476,15 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
473 * first, so the input number might lag behind by 1 in some cases. 476 * first, so the input number might lag behind by 1 in some cases.
474 */ 477 */
475 if (video == pipe->output && !pipe->do_propagation) 478 if (video == pipe->output && !pipe->do_propagation)
476 buf->vb.v4l2_buf.sequence = 479 buf->vb.sequence =
477 atomic_inc_return(&pipe->frame_number); 480 atomic_inc_return(&pipe->frame_number);
478 else 481 else
479 buf->vb.v4l2_buf.sequence = atomic_read(&pipe->frame_number); 482 buf->vb.sequence = atomic_read(&pipe->frame_number);
480 483
481 if (pipe->field != V4L2_FIELD_NONE) 484 if (pipe->field != V4L2_FIELD_NONE)
482 buf->vb.v4l2_buf.sequence /= 2; 485 buf->vb.sequence /= 2;
483 486
484 buf->vb.v4l2_buf.field = pipe->field; 487 buf->vb.field = pipe->field;
485 488
486 /* Report pipeline errors to userspace on the capture device side. */ 489 /* Report pipeline errors to userspace on the capture device side. */
487 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) { 490 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
@@ -491,7 +494,7 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
491 state = VB2_BUF_STATE_DONE; 494 state = VB2_BUF_STATE_DONE;
492 } 495 }
493 496
494 vb2_buffer_done(&buf->vb, state); 497 vb2_buffer_done(&buf->vb.vb2_buf, state);
495 498
496 spin_lock_irqsave(&video->irqlock, flags); 499 spin_lock_irqsave(&video->irqlock, flags);
497 500
@@ -546,7 +549,7 @@ void omap3isp_video_cancel_stream(struct isp_video *video)
546 buf = list_first_entry(&video->dmaqueue, 549 buf = list_first_entry(&video->dmaqueue,
547 struct isp_buffer, irqlist); 550 struct isp_buffer, irqlist);
548 list_del(&buf->irqlist); 551 list_del(&buf->irqlist);
549 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 552 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
550 } 553 }
551 554
552 video->error = true; 555 video->error = true;
diff --git a/drivers/media/platform/omap3isp/ispvideo.h b/drivers/media/platform/omap3isp/ispvideo.h
index 31c2445f9eb2..bcf0e0acc8f3 100644
--- a/drivers/media/platform/omap3isp/ispvideo.h
+++ b/drivers/media/platform/omap3isp/ispvideo.h
@@ -122,7 +122,7 @@ static inline int isp_pipeline_ready(struct isp_pipeline *pipe)
122 * @dma: DMA address 122 * @dma: DMA address
123 */ 123 */
124struct isp_buffer { 124struct isp_buffer {
125 struct vb2_buffer vb; 125 struct vb2_v4l2_buffer vb;
126 struct list_head irqlist; 126 struct list_head irqlist;
127 dma_addr_t dma; 127 dma_addr_t dma;
128}; 128};
diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c
index 18e62d06797c..7533b9e16649 100644
--- a/drivers/media/platform/rcar_jpu.c
+++ b/drivers/media/platform/rcar_jpu.c
@@ -471,7 +471,7 @@ static const char *error_to_text[16] = {
471 "Unknown" 471 "Unknown"
472}; 472};
473 473
474static struct jpu_buffer *vb2_to_jpu_buffer(struct vb2_buffer *vb) 474static struct jpu_buffer *vb2_to_jpu_buffer(struct vb2_v4l2_buffer *vb)
475{ 475{
476 struct v4l2_m2m_buffer *b = 476 struct v4l2_m2m_buffer *b =
477 container_of(vb, struct v4l2_m2m_buffer, vb); 477 container_of(vb, struct v4l2_m2m_buffer, vb);
@@ -1044,6 +1044,7 @@ static int jpu_queue_setup(struct vb2_queue *vq,
1044 1044
1045static int jpu_buf_prepare(struct vb2_buffer *vb) 1045static int jpu_buf_prepare(struct vb2_buffer *vb)
1046{ 1046{
1047 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1047 struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); 1048 struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1048 struct jpu_q_data *q_data; 1049 struct jpu_q_data *q_data;
1049 unsigned int i; 1050 unsigned int i;
@@ -1051,9 +1052,9 @@ static int jpu_buf_prepare(struct vb2_buffer *vb)
1051 q_data = jpu_get_q_data(ctx, vb->vb2_queue->type); 1052 q_data = jpu_get_q_data(ctx, vb->vb2_queue->type);
1052 1053
1053 if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) { 1054 if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
1054 if (vb->v4l2_buf.field == V4L2_FIELD_ANY) 1055 if (vbuf->field == V4L2_FIELD_ANY)
1055 vb->v4l2_buf.field = V4L2_FIELD_NONE; 1056 vbuf->field = V4L2_FIELD_NONE;
1056 if (vb->v4l2_buf.field != V4L2_FIELD_NONE) { 1057 if (vbuf->field != V4L2_FIELD_NONE) {
1057 dev_err(ctx->jpu->dev, "%s field isn't supported\n", 1058 dev_err(ctx->jpu->dev, "%s field isn't supported\n",
1058 __func__); 1059 __func__);
1059 return -EINVAL; 1060 return -EINVAL;
@@ -1080,10 +1081,11 @@ static int jpu_buf_prepare(struct vb2_buffer *vb)
1080 1081
1081static void jpu_buf_queue(struct vb2_buffer *vb) 1082static void jpu_buf_queue(struct vb2_buffer *vb)
1082{ 1083{
1084 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1083 struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); 1085 struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1084 1086
1085 if (!ctx->encoder && V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) { 1087 if (!ctx->encoder && V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
1086 struct jpu_buffer *jpu_buf = vb2_to_jpu_buffer(vb); 1088 struct jpu_buffer *jpu_buf = vb2_to_jpu_buffer(vbuf);
1087 struct jpu_q_data *q_data, adjust; 1089 struct jpu_q_data *q_data, adjust;
1088 void *buffer = vb2_plane_vaddr(vb, 0); 1090 void *buffer = vb2_plane_vaddr(vb, 0);
1089 unsigned long buf_size = vb2_get_plane_payload(vb, 0); 1091 unsigned long buf_size = vb2_get_plane_payload(vb, 0);
@@ -1117,7 +1119,7 @@ static void jpu_buf_queue(struct vb2_buffer *vb)
1117 } 1119 }
1118 1120
1119 if (ctx->fh.m2m_ctx) 1121 if (ctx->fh.m2m_ctx)
1120 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb); 1122 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
1121 1123
1122 return; 1124 return;
1123 1125
@@ -1128,14 +1130,15 @@ format_error:
1128 1130
1129static void jpu_buf_finish(struct vb2_buffer *vb) 1131static void jpu_buf_finish(struct vb2_buffer *vb)
1130{ 1132{
1131 struct jpu_buffer *jpu_buf = vb2_to_jpu_buffer(vb); 1133 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1134 struct jpu_buffer *jpu_buf = vb2_to_jpu_buffer(vbuf);
1132 struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); 1135 struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1133 struct jpu_q_data *q_data = &ctx->out_q; 1136 struct jpu_q_data *q_data = &ctx->out_q;
1134 enum v4l2_buf_type type = vb->vb2_queue->type; 1137 enum v4l2_buf_type type = vb->vb2_queue->type;
1135 u8 *buffer; 1138 u8 *buffer;
1136 1139
1137 if (vb->state == VB2_BUF_STATE_DONE) 1140 if (vb->state == VB2_BUF_STATE_DONE)
1138 vb->v4l2_buf.sequence = jpu_get_q_data(ctx, type)->sequence++; 1141 vbuf->sequence = jpu_get_q_data(ctx, type)->sequence++;
1139 1142
1140 if (!ctx->encoder || vb->state != VB2_BUF_STATE_DONE || 1143 if (!ctx->encoder || vb->state != VB2_BUF_STATE_DONE ||
1141 V4L2_TYPE_IS_OUTPUT(type)) 1144 V4L2_TYPE_IS_OUTPUT(type))
@@ -1163,7 +1166,7 @@ static int jpu_start_streaming(struct vb2_queue *vq, unsigned count)
1163static void jpu_stop_streaming(struct vb2_queue *vq) 1166static void jpu_stop_streaming(struct vb2_queue *vq)
1164{ 1167{
1165 struct jpu_ctx *ctx = vb2_get_drv_priv(vq); 1168 struct jpu_ctx *ctx = vb2_get_drv_priv(vq);
1166 struct vb2_buffer *vb; 1169 struct vb2_v4l2_buffer *vb;
1167 unsigned long flags; 1170 unsigned long flags;
1168 1171
1169 for (;;) { 1172 for (;;) {
@@ -1327,7 +1330,7 @@ static const struct v4l2_file_operations jpu_fops = {
1327static void jpu_cleanup(struct jpu_ctx *ctx, bool reset) 1330static void jpu_cleanup(struct jpu_ctx *ctx, bool reset)
1328{ 1331{
1329 /* remove current buffers and finish job */ 1332 /* remove current buffers and finish job */
1330 struct vb2_buffer *src_buf, *dst_buf; 1333 struct vb2_v4l2_buffer *src_buf, *dst_buf;
1331 unsigned long flags; 1334 unsigned long flags;
1332 1335
1333 spin_lock_irqsave(&ctx->jpu->lock, flags); 1336 spin_lock_irqsave(&ctx->jpu->lock, flags);
@@ -1353,7 +1356,7 @@ static void jpu_device_run(void *priv)
1353 struct jpu *jpu = ctx->jpu; 1356 struct jpu *jpu = ctx->jpu;
1354 struct jpu_buffer *jpu_buf; 1357 struct jpu_buffer *jpu_buf;
1355 struct jpu_q_data *q_data; 1358 struct jpu_q_data *q_data;
1356 struct vb2_buffer *src_buf, *dst_buf; 1359 struct vb2_v4l2_buffer *src_buf, *dst_buf;
1357 unsigned int w, h, bpl; 1360 unsigned int w, h, bpl;
1358 unsigned char num_planes, subsampling; 1361 unsigned char num_planes, subsampling;
1359 unsigned long flags; 1362 unsigned long flags;
@@ -1389,10 +1392,12 @@ static void jpu_device_run(void *priv)
1389 unsigned long src_1_addr, src_2_addr, dst_addr; 1392 unsigned long src_1_addr, src_2_addr, dst_addr;
1390 unsigned int redu, inft; 1393 unsigned int redu, inft;
1391 1394
1392 dst_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0); 1395 dst_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
1393 src_1_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0); 1396 src_1_addr =
1397 vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
1394 if (num_planes > 1) 1398 if (num_planes > 1)
1395 src_2_addr = vb2_dma_contig_plane_dma_addr(src_buf, 1); 1399 src_2_addr = vb2_dma_contig_plane_dma_addr(
1400 &src_buf->vb2_buf, 1);
1396 else 1401 else
1397 src_2_addr = src_1_addr + w * h; 1402 src_2_addr = src_1_addr + w * h;
1398 1403
@@ -1453,10 +1458,12 @@ static void jpu_device_run(void *priv)
1453 return; 1458 return;
1454 } 1459 }
1455 1460
1456 src_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0); 1461 src_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
1457 dst_1_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0); 1462 dst_1_addr =
1463 vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
1458 if (q_data->fmtinfo->num_planes > 1) 1464 if (q_data->fmtinfo->num_planes > 1)
1459 dst_2_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 1); 1465 dst_2_addr = vb2_dma_contig_plane_dma_addr(
1466 &dst_buf->vb2_buf, 1);
1460 else 1467 else
1461 dst_2_addr = dst_1_addr + w * h; 1468 dst_2_addr = dst_1_addr + w * h;
1462 1469
@@ -1511,7 +1518,7 @@ static irqreturn_t jpu_irq_handler(int irq, void *dev_id)
1511{ 1518{
1512 struct jpu *jpu = dev_id; 1519 struct jpu *jpu = dev_id;
1513 struct jpu_ctx *curr_ctx; 1520 struct jpu_ctx *curr_ctx;
1514 struct vb2_buffer *src_buf, *dst_buf; 1521 struct vb2_v4l2_buffer *src_buf, *dst_buf;
1515 unsigned int int_status; 1522 unsigned int int_status;
1516 1523
1517 int_status = jpu_read(jpu, JINTS); 1524 int_status = jpu_read(jpu, JINTS);
@@ -1547,18 +1554,18 @@ static irqreturn_t jpu_irq_handler(int irq, void *dev_id)
1547 unsigned long payload_size = jpu_read(jpu, JCDTCU) << 16 1554 unsigned long payload_size = jpu_read(jpu, JCDTCU) << 16
1548 | jpu_read(jpu, JCDTCM) << 8 1555 | jpu_read(jpu, JCDTCM) << 8
1549 | jpu_read(jpu, JCDTCD); 1556 | jpu_read(jpu, JCDTCD);
1550 vb2_set_plane_payload(dst_buf, 0, 1557 vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
1551 payload_size + JPU_JPEG_HDR_SIZE); 1558 payload_size + JPU_JPEG_HDR_SIZE);
1552 } 1559 }
1553 1560
1554 dst_buf->v4l2_buf.field = src_buf->v4l2_buf.field; 1561 dst_buf->field = src_buf->field;
1555 dst_buf->v4l2_buf.timestamp = src_buf->v4l2_buf.timestamp; 1562 dst_buf->timestamp = src_buf->timestamp;
1556 if (src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_TIMECODE) 1563 if (src_buf->flags & V4L2_BUF_FLAG_TIMECODE)
1557 dst_buf->v4l2_buf.timecode = src_buf->v4l2_buf.timecode; 1564 dst_buf->timecode = src_buf->timecode;
1558 dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; 1565 dst_buf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
1559 dst_buf->v4l2_buf.flags |= src_buf->v4l2_buf.flags & 1566 dst_buf->flags |= src_buf->flags &
1560 V4L2_BUF_FLAG_TSTAMP_SRC_MASK; 1567 V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
1561 dst_buf->v4l2_buf.flags = src_buf->v4l2_buf.flags & 1568 dst_buf->flags = src_buf->flags &
1562 (V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_KEYFRAME | 1569 (V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_KEYFRAME |
1563 V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | 1570 V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME |
1564 V4L2_BUF_FLAG_TSTAMP_SRC_MASK); 1571 V4L2_BUF_FLAG_TSTAMP_SRC_MASK);
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index bb86cf9b367f..5666766e2b87 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -164,12 +164,12 @@ static int camif_reinitialize(struct camif_vp *vp)
164 /* Release unused buffers */ 164 /* Release unused buffers */
165 while (!list_empty(&vp->pending_buf_q)) { 165 while (!list_empty(&vp->pending_buf_q)) {
166 buf = camif_pending_queue_pop(vp); 166 buf = camif_pending_queue_pop(vp);
167 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 167 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
168 } 168 }
169 169
170 while (!list_empty(&vp->active_buf_q)) { 170 while (!list_empty(&vp->active_buf_q)) {
171 buf = camif_active_queue_pop(vp); 171 buf = camif_active_queue_pop(vp);
172 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 172 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
173 } 173 }
174 174
175 spin_unlock_irqrestore(&camif->slock, flags); 175 spin_unlock_irqrestore(&camif->slock, flags);
@@ -338,9 +338,9 @@ irqreturn_t s3c_camif_irq_handler(int irq, void *priv)
338 338
339 if (!WARN_ON(vbuf == NULL)) { 339 if (!WARN_ON(vbuf == NULL)) {
340 /* Dequeue a filled buffer */ 340 /* Dequeue a filled buffer */
341 v4l2_get_timestamp(&vbuf->vb.v4l2_buf.timestamp); 341 v4l2_get_timestamp(&vbuf->vb.timestamp);
342 vbuf->vb.v4l2_buf.sequence = vp->frame_sequence++; 342 vbuf->vb.sequence = vp->frame_sequence++;
343 vb2_buffer_done(&vbuf->vb, VB2_BUF_STATE_DONE); 343 vb2_buffer_done(&vbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
344 344
345 /* Set up an empty buffer at the DMA engine */ 345 /* Set up an empty buffer at the DMA engine */
346 vbuf = camif_pending_queue_pop(vp); 346 vbuf = camif_pending_queue_pop(vp);
@@ -490,13 +490,14 @@ static int buffer_prepare(struct vb2_buffer *vb)
490 490
491static void buffer_queue(struct vb2_buffer *vb) 491static void buffer_queue(struct vb2_buffer *vb)
492{ 492{
493 struct camif_buffer *buf = container_of(vb, struct camif_buffer, vb); 493 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
494 struct camif_buffer *buf = container_of(vbuf, struct camif_buffer, vb);
494 struct camif_vp *vp = vb2_get_drv_priv(vb->vb2_queue); 495 struct camif_vp *vp = vb2_get_drv_priv(vb->vb2_queue);
495 struct camif_dev *camif = vp->camif; 496 struct camif_dev *camif = vp->camif;
496 unsigned long flags; 497 unsigned long flags;
497 498
498 spin_lock_irqsave(&camif->slock, flags); 499 spin_lock_irqsave(&camif->slock, flags);
499 WARN_ON(camif_prepare_addr(vp, &buf->vb, &buf->paddr)); 500 WARN_ON(camif_prepare_addr(vp, &buf->vb.vb2_buf, &buf->paddr));
500 501
501 if (!(vp->state & ST_VP_STREAMING) && vp->active_buffers < 2) { 502 if (!(vp->state & ST_VP_STREAMING) && vp->active_buffers < 2) {
502 /* Schedule an empty buffer in H/W */ 503 /* Schedule an empty buffer in H/W */
diff --git a/drivers/media/platform/s3c-camif/camif-core.h b/drivers/media/platform/s3c-camif/camif-core.h
index 8ef6f26187dd..adaf1969ef63 100644
--- a/drivers/media/platform/s3c-camif/camif-core.h
+++ b/drivers/media/platform/s3c-camif/camif-core.h
@@ -322,7 +322,7 @@ struct camif_addr {
322 * @index: an identifier of this buffer at the DMA engine 322 * @index: an identifier of this buffer at the DMA engine
323 */ 323 */
324struct camif_buffer { 324struct camif_buffer {
325 struct vb2_buffer vb; 325 struct vb2_v4l2_buffer vb;
326 struct list_head list; 326 struct list_head list;
327 struct camif_addr paddr; 327 struct camif_addr paddr;
328 unsigned int index; 328 unsigned int index;
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 81483da2464b..4db507ab6777 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -134,8 +134,9 @@ static int g2d_buf_prepare(struct vb2_buffer *vb)
134 134
135static void g2d_buf_queue(struct vb2_buffer *vb) 135static void g2d_buf_queue(struct vb2_buffer *vb)
136{ 136{
137 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
137 struct g2d_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); 138 struct g2d_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
138 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb); 139 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
139} 140}
140 141
141static struct vb2_ops g2d_qops = { 142static struct vb2_ops g2d_qops = {
@@ -537,7 +538,7 @@ static irqreturn_t g2d_isr(int irq, void *prv)
537{ 538{
538 struct g2d_dev *dev = prv; 539 struct g2d_dev *dev = prv;
539 struct g2d_ctx *ctx = dev->curr; 540 struct g2d_ctx *ctx = dev->curr;
540 struct vb2_buffer *src, *dst; 541 struct vb2_v4l2_buffer *src, *dst;
541 542
542 g2d_clear_int(dev); 543 g2d_clear_int(dev);
543 clk_disable(dev->gate); 544 clk_disable(dev->gate);
@@ -550,11 +551,11 @@ static irqreturn_t g2d_isr(int irq, void *prv)
550 BUG_ON(src == NULL); 551 BUG_ON(src == NULL);
551 BUG_ON(dst == NULL); 552 BUG_ON(dst == NULL);
552 553
553 dst->v4l2_buf.timecode = src->v4l2_buf.timecode; 554 dst->timecode = src->timecode;
554 dst->v4l2_buf.timestamp = src->v4l2_buf.timestamp; 555 dst->timestamp = src->timestamp;
555 dst->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; 556 dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
556 dst->v4l2_buf.flags |= 557 dst->flags |=
557 src->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK; 558 src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
558 559
559 v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE); 560 v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
560 v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE); 561 v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 18e754d319cf..f026366dc185 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -2483,6 +2483,7 @@ static int s5p_jpeg_buf_prepare(struct vb2_buffer *vb)
2483 2483
2484static void s5p_jpeg_buf_queue(struct vb2_buffer *vb) 2484static void s5p_jpeg_buf_queue(struct vb2_buffer *vb)
2485{ 2485{
2486 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
2486 struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); 2487 struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
2487 2488
2488 if (ctx->mode == S5P_JPEG_DECODE && 2489 if (ctx->mode == S5P_JPEG_DECODE &&
@@ -2517,7 +2518,7 @@ static void s5p_jpeg_buf_queue(struct vb2_buffer *vb)
2517 q_data->h = tmp.h; 2518 q_data->h = tmp.h;
2518 } 2519 }
2519 2520
2520 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb); 2521 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
2521} 2522}
2522 2523
2523static int s5p_jpeg_start_streaming(struct vb2_queue *q, unsigned int count) 2524static int s5p_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
@@ -2588,7 +2589,7 @@ static irqreturn_t s5p_jpeg_irq(int irq, void *dev_id)
2588{ 2589{
2589 struct s5p_jpeg *jpeg = dev_id; 2590 struct s5p_jpeg *jpeg = dev_id;
2590 struct s5p_jpeg_ctx *curr_ctx; 2591 struct s5p_jpeg_ctx *curr_ctx;
2591 struct vb2_buffer *src_buf, *dst_buf; 2592 struct vb2_v4l2_buffer *src_buf, *dst_buf;
2592 unsigned long payload_size = 0; 2593 unsigned long payload_size = 0;
2593 enum vb2_buffer_state state = VB2_BUF_STATE_DONE; 2594 enum vb2_buffer_state state = VB2_BUF_STATE_DONE;
2594 bool enc_jpeg_too_large = false; 2595 bool enc_jpeg_too_large = false;
@@ -2622,15 +2623,15 @@ static irqreturn_t s5p_jpeg_irq(int irq, void *dev_id)
2622 payload_size = s5p_jpeg_compressed_size(jpeg->regs); 2623 payload_size = s5p_jpeg_compressed_size(jpeg->regs);
2623 } 2624 }
2624 2625
2625 dst_buf->v4l2_buf.timecode = src_buf->v4l2_buf.timecode; 2626 dst_buf->timecode = src_buf->timecode;
2626 dst_buf->v4l2_buf.timestamp = src_buf->v4l2_buf.timestamp; 2627 dst_buf->timestamp = src_buf->timestamp;
2627 dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; 2628 dst_buf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
2628 dst_buf->v4l2_buf.flags |= 2629 dst_buf->flags |=
2629 src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK; 2630 src_buf->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
2630 2631
2631 v4l2_m2m_buf_done(src_buf, state); 2632 v4l2_m2m_buf_done(src_buf, state);
2632 if (curr_ctx->mode == S5P_JPEG_ENCODE) 2633 if (curr_ctx->mode == S5P_JPEG_ENCODE)
2633 vb2_set_plane_payload(dst_buf, 0, payload_size); 2634 vb2_set_plane_payload(&dst_buf->vb2_buf, 0, payload_size);
2634 v4l2_m2m_buf_done(dst_buf, state); 2635 v4l2_m2m_buf_done(dst_buf, state);
2635 v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx); 2636 v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx);
2636 2637
@@ -2645,7 +2646,7 @@ static irqreturn_t s5p_jpeg_irq(int irq, void *dev_id)
2645static irqreturn_t exynos4_jpeg_irq(int irq, void *priv) 2646static irqreturn_t exynos4_jpeg_irq(int irq, void *priv)
2646{ 2647{
2647 unsigned int int_status; 2648 unsigned int int_status;
2648 struct vb2_buffer *src_vb, *dst_vb; 2649 struct vb2_v4l2_buffer *src_vb, *dst_vb;
2649 struct s5p_jpeg *jpeg = priv; 2650 struct s5p_jpeg *jpeg = priv;
2650 struct s5p_jpeg_ctx *curr_ctx; 2651 struct s5p_jpeg_ctx *curr_ctx;
2651 unsigned long payload_size = 0; 2652 unsigned long payload_size = 0;
@@ -2687,7 +2688,8 @@ static irqreturn_t exynos4_jpeg_irq(int irq, void *priv)
2687 if (jpeg->irq_ret == OK_ENC_OR_DEC) { 2688 if (jpeg->irq_ret == OK_ENC_OR_DEC) {
2688 if (curr_ctx->mode == S5P_JPEG_ENCODE) { 2689 if (curr_ctx->mode == S5P_JPEG_ENCODE) {
2689 payload_size = exynos4_jpeg_get_stream_size(jpeg->regs); 2690 payload_size = exynos4_jpeg_get_stream_size(jpeg->regs);
2690 vb2_set_plane_payload(dst_vb, 0, payload_size); 2691 vb2_set_plane_payload(&dst_vb->vb2_buf,
2692 0, payload_size);
2691 } 2693 }
2692 v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE); 2694 v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
2693 v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE); 2695 v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
@@ -2708,7 +2710,7 @@ static irqreturn_t exynos3250_jpeg_irq(int irq, void *dev_id)
2708{ 2710{
2709 struct s5p_jpeg *jpeg = dev_id; 2711 struct s5p_jpeg *jpeg = dev_id;
2710 struct s5p_jpeg_ctx *curr_ctx; 2712 struct s5p_jpeg_ctx *curr_ctx;
2711 struct vb2_buffer *src_buf, *dst_buf; 2713 struct vb2_v4l2_buffer *src_buf, *dst_buf;
2712 unsigned long payload_size = 0; 2714 unsigned long payload_size = 0;
2713 enum vb2_buffer_state state = VB2_BUF_STATE_DONE; 2715 enum vb2_buffer_state state = VB2_BUF_STATE_DONE;
2714 bool interrupt_timeout = false; 2716 bool interrupt_timeout = false;
@@ -2752,12 +2754,12 @@ static irqreturn_t exynos3250_jpeg_irq(int irq, void *dev_id)
2752 src_buf = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx); 2754 src_buf = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
2753 dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx); 2755 dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
2754 2756
2755 dst_buf->v4l2_buf.timecode = src_buf->v4l2_buf.timecode; 2757 dst_buf->timecode = src_buf->timecode;
2756 dst_buf->v4l2_buf.timestamp = src_buf->v4l2_buf.timestamp; 2758 dst_buf->timestamp = src_buf->timestamp;
2757 2759
2758 v4l2_m2m_buf_done(src_buf, state); 2760 v4l2_m2m_buf_done(src_buf, state);
2759 if (curr_ctx->mode == S5P_JPEG_ENCODE) 2761 if (curr_ctx->mode == S5P_JPEG_ENCODE)
2760 vb2_set_plane_payload(dst_buf, 0, payload_size); 2762 vb2_set_plane_payload(&dst_buf->vb2_buf, 0, payload_size);
2761 v4l2_m2m_buf_done(dst_buf, state); 2763 v4l2_m2m_buf_done(dst_buf, state);
2762 v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx); 2764 v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx);
2763 2765
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index b3758b8a3c1d..7b646c2e51cd 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -199,22 +199,22 @@ static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
199 dst_buf = list_entry(ctx->dst_queue.next, 199 dst_buf = list_entry(ctx->dst_queue.next,
200 struct s5p_mfc_buf, list); 200 struct s5p_mfc_buf, list);
201 mfc_debug(2, "Cleaning up buffer: %d\n", 201 mfc_debug(2, "Cleaning up buffer: %d\n",
202 dst_buf->b->v4l2_buf.index); 202 dst_buf->b->vb2_buf.index);
203 vb2_set_plane_payload(dst_buf->b, 0, 0); 203 vb2_set_plane_payload(&dst_buf->b->vb2_buf, 0, 0);
204 vb2_set_plane_payload(dst_buf->b, 1, 0); 204 vb2_set_plane_payload(&dst_buf->b->vb2_buf, 1, 0);
205 list_del(&dst_buf->list); 205 list_del(&dst_buf->list);
206 ctx->dst_queue_cnt--; 206 ctx->dst_queue_cnt--;
207 dst_buf->b->v4l2_buf.sequence = (ctx->sequence++); 207 dst_buf->b->sequence = (ctx->sequence++);
208 208
209 if (s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_top, ctx) == 209 if (s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_top, ctx) ==
210 s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_bot, ctx)) 210 s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_bot, ctx))
211 dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE; 211 dst_buf->b->field = V4L2_FIELD_NONE;
212 else 212 else
213 dst_buf->b->v4l2_buf.field = V4L2_FIELD_INTERLACED; 213 dst_buf->b->field = V4L2_FIELD_INTERLACED;
214 dst_buf->b->v4l2_buf.flags |= V4L2_BUF_FLAG_LAST; 214 dst_buf->b->flags |= V4L2_BUF_FLAG_LAST;
215 215
216 ctx->dec_dst_flag &= ~(1 << dst_buf->b->v4l2_buf.index); 216 ctx->dec_dst_flag &= ~(1 << dst_buf->b->vb2_buf.index);
217 vb2_buffer_done(dst_buf->b, VB2_BUF_STATE_DONE); 217 vb2_buffer_done(&dst_buf->b->vb2_buf, VB2_BUF_STATE_DONE);
218 } 218 }
219} 219}
220 220
@@ -235,27 +235,28 @@ static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
235 appropriate flags. */ 235 appropriate flags. */
236 src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); 236 src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
237 list_for_each_entry(dst_buf, &ctx->dst_queue, list) { 237 list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
238 if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dec_y_addr) { 238 if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
239 dst_buf->b->v4l2_buf.timecode = 239 == dec_y_addr) {
240 src_buf->b->v4l2_buf.timecode; 240 dst_buf->b->timecode =
241 dst_buf->b->v4l2_buf.timestamp = 241 src_buf->b->timecode;
242 src_buf->b->v4l2_buf.timestamp; 242 dst_buf->b->timestamp =
243 dst_buf->b->v4l2_buf.flags &= 243 src_buf->b->timestamp;
244 dst_buf->b->flags &=
244 ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; 245 ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
245 dst_buf->b->v4l2_buf.flags |= 246 dst_buf->b->flags |=
246 src_buf->b->v4l2_buf.flags 247 src_buf->b->flags
247 & V4L2_BUF_FLAG_TSTAMP_SRC_MASK; 248 & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
248 switch (frame_type) { 249 switch (frame_type) {
249 case S5P_FIMV_DECODE_FRAME_I_FRAME: 250 case S5P_FIMV_DECODE_FRAME_I_FRAME:
250 dst_buf->b->v4l2_buf.flags |= 251 dst_buf->b->flags |=
251 V4L2_BUF_FLAG_KEYFRAME; 252 V4L2_BUF_FLAG_KEYFRAME;
252 break; 253 break;
253 case S5P_FIMV_DECODE_FRAME_P_FRAME: 254 case S5P_FIMV_DECODE_FRAME_P_FRAME:
254 dst_buf->b->v4l2_buf.flags |= 255 dst_buf->b->flags |=
255 V4L2_BUF_FLAG_PFRAME; 256 V4L2_BUF_FLAG_PFRAME;
256 break; 257 break;
257 case S5P_FIMV_DECODE_FRAME_B_FRAME: 258 case S5P_FIMV_DECODE_FRAME_B_FRAME:
258 dst_buf->b->v4l2_buf.flags |= 259 dst_buf->b->flags |=
259 V4L2_BUF_FLAG_BFRAME; 260 V4L2_BUF_FLAG_BFRAME;
260 break; 261 break;
261 default: 262 default:
@@ -296,25 +297,28 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
296 * check which videobuf does it correspond to */ 297 * check which videobuf does it correspond to */
297 list_for_each_entry(dst_buf, &ctx->dst_queue, list) { 298 list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
298 /* Check if this is the buffer we're looking for */ 299 /* Check if this is the buffer we're looking for */
299 if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dspl_y_addr) { 300 if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
301 == dspl_y_addr) {
300 list_del(&dst_buf->list); 302 list_del(&dst_buf->list);
301 ctx->dst_queue_cnt--; 303 ctx->dst_queue_cnt--;
302 dst_buf->b->v4l2_buf.sequence = ctx->sequence; 304 dst_buf->b->sequence = ctx->sequence;
303 if (s5p_mfc_hw_call(dev->mfc_ops, 305 if (s5p_mfc_hw_call(dev->mfc_ops,
304 get_pic_type_top, ctx) == 306 get_pic_type_top, ctx) ==
305 s5p_mfc_hw_call(dev->mfc_ops, 307 s5p_mfc_hw_call(dev->mfc_ops,
306 get_pic_type_bot, ctx)) 308 get_pic_type_bot, ctx))
307 dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE; 309 dst_buf->b->field = V4L2_FIELD_NONE;
308 else 310 else
309 dst_buf->b->v4l2_buf.field = 311 dst_buf->b->field =
310 V4L2_FIELD_INTERLACED; 312 V4L2_FIELD_INTERLACED;
311 vb2_set_plane_payload(dst_buf->b, 0, ctx->luma_size); 313 vb2_set_plane_payload(&dst_buf->b->vb2_buf, 0,
312 vb2_set_plane_payload(dst_buf->b, 1, ctx->chroma_size); 314 ctx->luma_size);
313 clear_bit(dst_buf->b->v4l2_buf.index, 315 vb2_set_plane_payload(&dst_buf->b->vb2_buf, 1,
316 ctx->chroma_size);
317 clear_bit(dst_buf->b->vb2_buf.index,
314 &ctx->dec_dst_flag); 318 &ctx->dec_dst_flag);
315 319
316 vb2_buffer_done(dst_buf->b, 320 vb2_buffer_done(&dst_buf->b->vb2_buf, err ?
317 err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); 321 VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
318 322
319 break; 323 break;
320 } 324 }
@@ -395,7 +399,7 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
395 if (ctx->codec_mode != S5P_MFC_CODEC_H264_DEC && 399 if (ctx->codec_mode != S5P_MFC_CODEC_H264_DEC &&
396 ctx->codec_mode != S5P_MFC_CODEC_VP8_DEC && 400 ctx->codec_mode != S5P_MFC_CODEC_VP8_DEC &&
397 ctx->consumed_stream + STUFF_BYTE < 401 ctx->consumed_stream + STUFF_BYTE <
398 src_buf->b->v4l2_planes[0].bytesused) { 402 src_buf->b->vb2_buf.planes[0].bytesused) {
399 /* Run MFC again on the same buffer */ 403 /* Run MFC again on the same buffer */
400 mfc_debug(2, "Running again the same buffer\n"); 404 mfc_debug(2, "Running again the same buffer\n");
401 ctx->after_packed_pb = 1; 405 ctx->after_packed_pb = 1;
@@ -407,9 +411,11 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
407 list_del(&src_buf->list); 411 list_del(&src_buf->list);
408 ctx->src_queue_cnt--; 412 ctx->src_queue_cnt--;
409 if (s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) > 0) 413 if (s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) > 0)
410 vb2_buffer_done(src_buf->b, VB2_BUF_STATE_ERROR); 414 vb2_buffer_done(&src_buf->b->vb2_buf,
415 VB2_BUF_STATE_ERROR);
411 else 416 else
412 vb2_buffer_done(src_buf->b, VB2_BUF_STATE_DONE); 417 vb2_buffer_done(&src_buf->b->vb2_buf,
418 VB2_BUF_STATE_DONE);
413 } 419 }
414 } 420 }
415leave_handle_frame: 421leave_handle_frame:
@@ -510,7 +516,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
510 struct s5p_mfc_buf, list); 516 struct s5p_mfc_buf, list);
511 if (s5p_mfc_hw_call(dev->mfc_ops, get_consumed_stream, 517 if (s5p_mfc_hw_call(dev->mfc_ops, get_consumed_stream,
512 dev) < 518 dev) <
513 src_buf->b->v4l2_planes[0].bytesused) 519 src_buf->b->vb2_buf.planes[0].bytesused)
514 ctx->head_processed = 0; 520 ctx->head_processed = 0;
515 else 521 else
516 ctx->head_processed = 1; 522 ctx->head_processed = 1;
@@ -551,7 +557,7 @@ static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
551 struct s5p_mfc_buf, list); 557 struct s5p_mfc_buf, list);
552 list_del(&src_buf->list); 558 list_del(&src_buf->list);
553 ctx->src_queue_cnt--; 559 ctx->src_queue_cnt--;
554 vb2_buffer_done(src_buf->b, 560 vb2_buffer_done(&src_buf->b->vb2_buf,
555 VB2_BUF_STATE_DONE); 561 VB2_BUF_STATE_DONE);
556 } 562 }
557 spin_unlock_irqrestore(&dev->irqlock, flags); 563 spin_unlock_irqrestore(&dev->irqlock, flags);
@@ -592,8 +598,8 @@ static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx,
592 list); 598 list);
593 list_del(&mb_entry->list); 599 list_del(&mb_entry->list);
594 ctx->dst_queue_cnt--; 600 ctx->dst_queue_cnt--;
595 vb2_set_plane_payload(mb_entry->b, 0, 0); 601 vb2_set_plane_payload(&mb_entry->b->vb2_buf, 0, 0);
596 vb2_buffer_done(mb_entry->b, VB2_BUF_STATE_DONE); 602 vb2_buffer_done(&mb_entry->b->vb2_buf, VB2_BUF_STATE_DONE);
597 } 603 }
598 spin_unlock(&dev->irqlock); 604 spin_unlock(&dev->irqlock);
599 605
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index 10884a77f79f..d1a3f9b1bc44 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -179,8 +179,8 @@ struct s5p_mfc_ctx;
179 * struct s5p_mfc_buf - MFC buffer 179 * struct s5p_mfc_buf - MFC buffer
180 */ 180 */
181struct s5p_mfc_buf { 181struct s5p_mfc_buf {
182 struct vb2_v4l2_buffer *b;
182 struct list_head list; 183 struct list_head list;
183 struct vb2_buffer *b;
184 union { 184 union {
185 struct { 185 struct {
186 size_t luma; 186 size_t luma;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index 2fd59e7c70c2..1734775a63e6 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -945,6 +945,7 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
945 945
946static int s5p_mfc_buf_init(struct vb2_buffer *vb) 946static int s5p_mfc_buf_init(struct vb2_buffer *vb)
947{ 947{
948 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
948 struct vb2_queue *vq = vb->vb2_queue; 949 struct vb2_queue *vq = vb->vb2_queue;
949 struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv); 950 struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
950 unsigned int i; 951 unsigned int i;
@@ -964,8 +965,8 @@ static int s5p_mfc_buf_init(struct vb2_buffer *vb)
964 mfc_err("Plane buffer (CAPTURE) is too small\n"); 965 mfc_err("Plane buffer (CAPTURE) is too small\n");
965 return -EINVAL; 966 return -EINVAL;
966 } 967 }
967 i = vb->v4l2_buf.index; 968 i = vb->index;
968 ctx->dst_bufs[i].b = vb; 969 ctx->dst_bufs[i].b = vbuf;
969 ctx->dst_bufs[i].cookie.raw.luma = 970 ctx->dst_bufs[i].cookie.raw.luma =
970 vb2_dma_contig_plane_dma_addr(vb, 0); 971 vb2_dma_contig_plane_dma_addr(vb, 0);
971 ctx->dst_bufs[i].cookie.raw.chroma = 972 ctx->dst_bufs[i].cookie.raw.chroma =
@@ -982,8 +983,8 @@ static int s5p_mfc_buf_init(struct vb2_buffer *vb)
982 return -EINVAL; 983 return -EINVAL;
983 } 984 }
984 985
985 i = vb->v4l2_buf.index; 986 i = vb->index;
986 ctx->src_bufs[i].b = vb; 987 ctx->src_bufs[i].b = vbuf;
987 ctx->src_bufs[i].cookie.stream = 988 ctx->src_bufs[i].cookie.stream =
988 vb2_dma_contig_plane_dma_addr(vb, 0); 989 vb2_dma_contig_plane_dma_addr(vb, 0);
989 ctx->src_bufs_cnt++; 990 ctx->src_bufs_cnt++;
@@ -1065,18 +1066,18 @@ static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
1065 struct s5p_mfc_buf *mfc_buf; 1066 struct s5p_mfc_buf *mfc_buf;
1066 1067
1067 if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { 1068 if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
1068 mfc_buf = &ctx->src_bufs[vb->v4l2_buf.index]; 1069 mfc_buf = &ctx->src_bufs[vb->index];
1069 mfc_buf->flags &= ~MFC_BUF_FLAG_USED; 1070 mfc_buf->flags &= ~MFC_BUF_FLAG_USED;
1070 spin_lock_irqsave(&dev->irqlock, flags); 1071 spin_lock_irqsave(&dev->irqlock, flags);
1071 list_add_tail(&mfc_buf->list, &ctx->src_queue); 1072 list_add_tail(&mfc_buf->list, &ctx->src_queue);
1072 ctx->src_queue_cnt++; 1073 ctx->src_queue_cnt++;
1073 spin_unlock_irqrestore(&dev->irqlock, flags); 1074 spin_unlock_irqrestore(&dev->irqlock, flags);
1074 } else if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { 1075 } else if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
1075 mfc_buf = &ctx->dst_bufs[vb->v4l2_buf.index]; 1076 mfc_buf = &ctx->dst_bufs[vb->index];
1076 mfc_buf->flags &= ~MFC_BUF_FLAG_USED; 1077 mfc_buf->flags &= ~MFC_BUF_FLAG_USED;
1077 /* Mark destination as available for use by MFC */ 1078 /* Mark destination as available for use by MFC */
1078 spin_lock_irqsave(&dev->irqlock, flags); 1079 spin_lock_irqsave(&dev->irqlock, flags);
1079 set_bit(vb->v4l2_buf.index, &ctx->dec_dst_flag); 1080 set_bit(vb->index, &ctx->dec_dst_flag);
1080 list_add_tail(&mfc_buf->list, &ctx->dst_queue); 1081 list_add_tail(&mfc_buf->list, &ctx->dst_queue);
1081 ctx->dst_queue_cnt++; 1082 ctx->dst_queue_cnt++;
1082 spin_unlock_irqrestore(&dev->irqlock, flags); 1083 spin_unlock_irqrestore(&dev->irqlock, flags);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index e42014c1ceca..94868f7214da 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -773,8 +773,8 @@ static int enc_pre_seq_start(struct s5p_mfc_ctx *ctx)
773 773
774 spin_lock_irqsave(&dev->irqlock, flags); 774 spin_lock_irqsave(&dev->irqlock, flags);
775 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); 775 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
776 dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0); 776 dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0);
777 dst_size = vb2_plane_size(dst_mb->b, 0); 777 dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0);
778 s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr, 778 s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr,
779 dst_size); 779 dst_size);
780 spin_unlock_irqrestore(&dev->irqlock, flags); 780 spin_unlock_irqrestore(&dev->irqlock, flags);
@@ -796,10 +796,11 @@ static int enc_post_seq_start(struct s5p_mfc_ctx *ctx)
796 struct s5p_mfc_buf, list); 796 struct s5p_mfc_buf, list);
797 list_del(&dst_mb->list); 797 list_del(&dst_mb->list);
798 ctx->dst_queue_cnt--; 798 ctx->dst_queue_cnt--;
799 vb2_set_plane_payload(dst_mb->b, 0, 799 vb2_set_plane_payload(&dst_mb->b->vb2_buf, 0,
800 s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size, 800 s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size,
801 dev)); 801 dev));
802 vb2_buffer_done(dst_mb->b, VB2_BUF_STATE_DONE); 802 vb2_buffer_done(&dst_mb->b->vb2_buf,
803 VB2_BUF_STATE_DONE);
803 } 804 }
804 spin_unlock_irqrestore(&dev->irqlock, flags); 805 spin_unlock_irqrestore(&dev->irqlock, flags);
805 } 806 }
@@ -831,16 +832,16 @@ static int enc_pre_frame_start(struct s5p_mfc_ctx *ctx)
831 832
832 spin_lock_irqsave(&dev->irqlock, flags); 833 spin_lock_irqsave(&dev->irqlock, flags);
833 src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); 834 src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
834 src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0); 835 src_y_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 0);
835 src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1); 836 src_c_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 1);
836 s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_frame_buffer, ctx, 837 s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_frame_buffer, ctx,
837 src_y_addr, src_c_addr); 838 src_y_addr, src_c_addr);
838 spin_unlock_irqrestore(&dev->irqlock, flags); 839 spin_unlock_irqrestore(&dev->irqlock, flags);
839 840
840 spin_lock_irqsave(&dev->irqlock, flags); 841 spin_lock_irqsave(&dev->irqlock, flags);
841 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); 842 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
842 dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0); 843 dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0);
843 dst_size = vb2_plane_size(dst_mb->b, 0); 844 dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0);
844 s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr, 845 s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr,
845 dst_size); 846 dst_size);
846 spin_unlock_irqrestore(&dev->irqlock, flags); 847 spin_unlock_irqrestore(&dev->irqlock, flags);
@@ -869,25 +870,29 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
869 s5p_mfc_hw_call_void(dev->mfc_ops, get_enc_frame_buffer, ctx, 870 s5p_mfc_hw_call_void(dev->mfc_ops, get_enc_frame_buffer, ctx,
870 &enc_y_addr, &enc_c_addr); 871 &enc_y_addr, &enc_c_addr);
871 list_for_each_entry(mb_entry, &ctx->src_queue, list) { 872 list_for_each_entry(mb_entry, &ctx->src_queue, list) {
872 mb_y_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 0); 873 mb_y_addr = vb2_dma_contig_plane_dma_addr(
873 mb_c_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 1); 874 &mb_entry->b->vb2_buf, 0);
875 mb_c_addr = vb2_dma_contig_plane_dma_addr(
876 &mb_entry->b->vb2_buf, 1);
874 if ((enc_y_addr == mb_y_addr) && 877 if ((enc_y_addr == mb_y_addr) &&
875 (enc_c_addr == mb_c_addr)) { 878 (enc_c_addr == mb_c_addr)) {
876 list_del(&mb_entry->list); 879 list_del(&mb_entry->list);
877 ctx->src_queue_cnt--; 880 ctx->src_queue_cnt--;
878 vb2_buffer_done(mb_entry->b, 881 vb2_buffer_done(&mb_entry->b->vb2_buf,
879 VB2_BUF_STATE_DONE); 882 VB2_BUF_STATE_DONE);
880 break; 883 break;
881 } 884 }
882 } 885 }
883 list_for_each_entry(mb_entry, &ctx->ref_queue, list) { 886 list_for_each_entry(mb_entry, &ctx->ref_queue, list) {
884 mb_y_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 0); 887 mb_y_addr = vb2_dma_contig_plane_dma_addr(
885 mb_c_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 1); 888 &mb_entry->b->vb2_buf, 0);
889 mb_c_addr = vb2_dma_contig_plane_dma_addr(
890 &mb_entry->b->vb2_buf, 1);
886 if ((enc_y_addr == mb_y_addr) && 891 if ((enc_y_addr == mb_y_addr) &&
887 (enc_c_addr == mb_c_addr)) { 892 (enc_c_addr == mb_c_addr)) {
888 list_del(&mb_entry->list); 893 list_del(&mb_entry->list);
889 ctx->ref_queue_cnt--; 894 ctx->ref_queue_cnt--;
890 vb2_buffer_done(mb_entry->b, 895 vb2_buffer_done(&mb_entry->b->vb2_buf,
891 VB2_BUF_STATE_DONE); 896 VB2_BUF_STATE_DONE);
892 break; 897 break;
893 } 898 }
@@ -912,17 +917,17 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
912 ctx->dst_queue_cnt--; 917 ctx->dst_queue_cnt--;
913 switch (slice_type) { 918 switch (slice_type) {
914 case S5P_FIMV_ENC_SI_SLICE_TYPE_I: 919 case S5P_FIMV_ENC_SI_SLICE_TYPE_I:
915 mb_entry->b->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME; 920 mb_entry->b->flags |= V4L2_BUF_FLAG_KEYFRAME;
916 break; 921 break;
917 case S5P_FIMV_ENC_SI_SLICE_TYPE_P: 922 case S5P_FIMV_ENC_SI_SLICE_TYPE_P:
918 mb_entry->b->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME; 923 mb_entry->b->flags |= V4L2_BUF_FLAG_PFRAME;
919 break; 924 break;
920 case S5P_FIMV_ENC_SI_SLICE_TYPE_B: 925 case S5P_FIMV_ENC_SI_SLICE_TYPE_B:
921 mb_entry->b->v4l2_buf.flags |= V4L2_BUF_FLAG_BFRAME; 926 mb_entry->b->flags |= V4L2_BUF_FLAG_BFRAME;
922 break; 927 break;
923 } 928 }
924 vb2_set_plane_payload(mb_entry->b, 0, strm_size); 929 vb2_set_plane_payload(&mb_entry->b->vb2_buf, 0, strm_size);
925 vb2_buffer_done(mb_entry->b, VB2_BUF_STATE_DONE); 930 vb2_buffer_done(&mb_entry->b->vb2_buf, VB2_BUF_STATE_DONE);
926 } 931 }
927 spin_unlock_irqrestore(&dev->irqlock, flags); 932 spin_unlock_irqrestore(&dev->irqlock, flags);
928 if ((ctx->src_queue_cnt == 0) || (ctx->dst_queue_cnt == 0)) 933 if ((ctx->src_queue_cnt == 0) || (ctx->dst_queue_cnt == 0))
@@ -1806,7 +1811,7 @@ static int check_vb_with_fmt(struct s5p_mfc_fmt *fmt, struct vb2_buffer *vb)
1806 return -EINVAL; 1811 return -EINVAL;
1807 } 1812 }
1808 mfc_debug(2, "index: %d, plane[%d] cookie: %pad\n", 1813 mfc_debug(2, "index: %d, plane[%d] cookie: %pad\n",
1809 vb->v4l2_buf.index, i, &dma); 1814 vb->index, i, &dma);
1810 } 1815 }
1811 return 0; 1816 return 0;
1812} 1817}
@@ -1869,6 +1874,7 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
1869 1874
1870static int s5p_mfc_buf_init(struct vb2_buffer *vb) 1875static int s5p_mfc_buf_init(struct vb2_buffer *vb)
1871{ 1876{
1877 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1872 struct vb2_queue *vq = vb->vb2_queue; 1878 struct vb2_queue *vq = vb->vb2_queue;
1873 struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv); 1879 struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
1874 unsigned int i; 1880 unsigned int i;
@@ -1878,8 +1884,8 @@ static int s5p_mfc_buf_init(struct vb2_buffer *vb)
1878 ret = check_vb_with_fmt(ctx->dst_fmt, vb); 1884 ret = check_vb_with_fmt(ctx->dst_fmt, vb);
1879 if (ret < 0) 1885 if (ret < 0)
1880 return ret; 1886 return ret;
1881 i = vb->v4l2_buf.index; 1887 i = vb->index;
1882 ctx->dst_bufs[i].b = vb; 1888 ctx->dst_bufs[i].b = vbuf;
1883 ctx->dst_bufs[i].cookie.stream = 1889 ctx->dst_bufs[i].cookie.stream =
1884 vb2_dma_contig_plane_dma_addr(vb, 0); 1890 vb2_dma_contig_plane_dma_addr(vb, 0);
1885 ctx->dst_bufs_cnt++; 1891 ctx->dst_bufs_cnt++;
@@ -1887,8 +1893,8 @@ static int s5p_mfc_buf_init(struct vb2_buffer *vb)
1887 ret = check_vb_with_fmt(ctx->src_fmt, vb); 1893 ret = check_vb_with_fmt(ctx->src_fmt, vb);
1888 if (ret < 0) 1894 if (ret < 0)
1889 return ret; 1895 return ret;
1890 i = vb->v4l2_buf.index; 1896 i = vb->index;
1891 ctx->src_bufs[i].b = vb; 1897 ctx->src_bufs[i].b = vbuf;
1892 ctx->src_bufs[i].cookie.raw.luma = 1898 ctx->src_bufs[i].cookie.raw.luma =
1893 vb2_dma_contig_plane_dma_addr(vb, 0); 1899 vb2_dma_contig_plane_dma_addr(vb, 0);
1894 ctx->src_bufs[i].cookie.raw.chroma = 1900 ctx->src_bufs[i].cookie.raw.chroma =
@@ -2012,7 +2018,7 @@ static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
2012 return; 2018 return;
2013 } 2019 }
2014 if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { 2020 if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
2015 mfc_buf = &ctx->dst_bufs[vb->v4l2_buf.index]; 2021 mfc_buf = &ctx->dst_bufs[vb->index];
2016 mfc_buf->flags &= ~MFC_BUF_FLAG_USED; 2022 mfc_buf->flags &= ~MFC_BUF_FLAG_USED;
2017 /* Mark destination as available for use by MFC */ 2023 /* Mark destination as available for use by MFC */
2018 spin_lock_irqsave(&dev->irqlock, flags); 2024 spin_lock_irqsave(&dev->irqlock, flags);
@@ -2020,7 +2026,7 @@ static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
2020 ctx->dst_queue_cnt++; 2026 ctx->dst_queue_cnt++;
2021 spin_unlock_irqrestore(&dev->irqlock, flags); 2027 spin_unlock_irqrestore(&dev->irqlock, flags);
2022 } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { 2028 } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
2023 mfc_buf = &ctx->src_bufs[vb->v4l2_buf.index]; 2029 mfc_buf = &ctx->src_bufs[vb->index];
2024 mfc_buf->flags &= ~MFC_BUF_FLAG_USED; 2030 mfc_buf->flags &= ~MFC_BUF_FLAG_USED;
2025 spin_lock_irqsave(&dev->irqlock, flags); 2031 spin_lock_irqsave(&dev->irqlock, flags);
2026 list_add_tail(&mfc_buf->list, &ctx->src_queue); 2032 list_add_tail(&mfc_buf->list, &ctx->src_queue);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
index 6402f76cc620..873c933bc7d4 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -1208,11 +1208,11 @@ static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame)
1208 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); 1208 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
1209 temp_vb->flags |= MFC_BUF_FLAG_USED; 1209 temp_vb->flags |= MFC_BUF_FLAG_USED;
1210 s5p_mfc_set_dec_stream_buffer_v5(ctx, 1210 s5p_mfc_set_dec_stream_buffer_v5(ctx,
1211 vb2_dma_contig_plane_dma_addr(temp_vb->b, 0), 1211 vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0),
1212 ctx->consumed_stream, temp_vb->b->v4l2_planes[0].bytesused); 1212 ctx->consumed_stream, temp_vb->b->vb2_buf.planes[0].bytesused);
1213 spin_unlock_irqrestore(&dev->irqlock, flags); 1213 spin_unlock_irqrestore(&dev->irqlock, flags);
1214 dev->curr_ctx = ctx->num; 1214 dev->curr_ctx = ctx->num;
1215 if (temp_vb->b->v4l2_planes[0].bytesused == 0) { 1215 if (temp_vb->b->vb2_buf.planes[0].bytesused == 0) {
1216 last_frame = MFC_DEC_LAST_FRAME; 1216 last_frame = MFC_DEC_LAST_FRAME;
1217 mfc_debug(2, "Setting ctx->state to FINISHING\n"); 1217 mfc_debug(2, "Setting ctx->state to FINISHING\n");
1218 ctx->state = MFCINST_FINISHING; 1218 ctx->state = MFCINST_FINISHING;
@@ -1249,16 +1249,16 @@ static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
1249 src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, 1249 src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
1250 list); 1250 list);
1251 src_mb->flags |= MFC_BUF_FLAG_USED; 1251 src_mb->flags |= MFC_BUF_FLAG_USED;
1252 if (src_mb->b->v4l2_planes[0].bytesused == 0) { 1252 if (src_mb->b->vb2_buf.planes[0].bytesused == 0) {
1253 /* send null frame */ 1253 /* send null frame */
1254 s5p_mfc_set_enc_frame_buffer_v5(ctx, dev->bank2, 1254 s5p_mfc_set_enc_frame_buffer_v5(ctx, dev->bank2,
1255 dev->bank2); 1255 dev->bank2);
1256 ctx->state = MFCINST_FINISHING; 1256 ctx->state = MFCINST_FINISHING;
1257 } else { 1257 } else {
1258 src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1258 src_y_addr = vb2_dma_contig_plane_dma_addr(
1259 0); 1259 &src_mb->b->vb2_buf, 0);
1260 src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1260 src_c_addr = vb2_dma_contig_plane_dma_addr(
1261 1); 1261 &src_mb->b->vb2_buf, 1);
1262 s5p_mfc_set_enc_frame_buffer_v5(ctx, src_y_addr, 1262 s5p_mfc_set_enc_frame_buffer_v5(ctx, src_y_addr,
1263 src_c_addr); 1263 src_c_addr);
1264 if (src_mb->flags & MFC_BUF_FLAG_EOS) 1264 if (src_mb->flags & MFC_BUF_FLAG_EOS)
@@ -1267,13 +1267,13 @@ static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
1267 } 1267 }
1268 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); 1268 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
1269 dst_mb->flags |= MFC_BUF_FLAG_USED; 1269 dst_mb->flags |= MFC_BUF_FLAG_USED;
1270 dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0); 1270 dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0);
1271 dst_size = vb2_plane_size(dst_mb->b, 0); 1271 dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0);
1272 s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size); 1272 s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size);
1273 spin_unlock_irqrestore(&dev->irqlock, flags); 1273 spin_unlock_irqrestore(&dev->irqlock, flags);
1274 dev->curr_ctx = ctx->num; 1274 dev->curr_ctx = ctx->num;
1275 mfc_debug(2, "encoding buffer with index=%d state=%d\n", 1275 mfc_debug(2, "encoding buffer with index=%d state=%d\n",
1276 src_mb ? src_mb->b->v4l2_buf.index : -1, ctx->state); 1276 src_mb ? src_mb->b->vb2_buf.index : -1, ctx->state);
1277 s5p_mfc_encode_one_frame_v5(ctx); 1277 s5p_mfc_encode_one_frame_v5(ctx);
1278 return 0; 1278 return 0;
1279} 1279}
@@ -1289,10 +1289,11 @@ static void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx)
1289 mfc_debug(2, "Preparing to init decoding\n"); 1289 mfc_debug(2, "Preparing to init decoding\n");
1290 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); 1290 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
1291 s5p_mfc_set_dec_desc_buffer(ctx); 1291 s5p_mfc_set_dec_desc_buffer(ctx);
1292 mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused); 1292 mfc_debug(2, "Header size: %d\n",
1293 temp_vb->b->vb2_buf.planes[0].bytesused);
1293 s5p_mfc_set_dec_stream_buffer_v5(ctx, 1294 s5p_mfc_set_dec_stream_buffer_v5(ctx,
1294 vb2_dma_contig_plane_dma_addr(temp_vb->b, 0), 1295 vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0),
1295 0, temp_vb->b->v4l2_planes[0].bytesused); 1296 0, temp_vb->b->vb2_buf.planes[0].bytesused);
1296 spin_unlock_irqrestore(&dev->irqlock, flags); 1297 spin_unlock_irqrestore(&dev->irqlock, flags);
1297 dev->curr_ctx = ctx->num; 1298 dev->curr_ctx = ctx->num;
1298 s5p_mfc_init_decode_v5(ctx); 1299 s5p_mfc_init_decode_v5(ctx);
@@ -1309,8 +1310,8 @@ static void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx)
1309 s5p_mfc_set_enc_ref_buffer_v5(ctx); 1310 s5p_mfc_set_enc_ref_buffer_v5(ctx);
1310 spin_lock_irqsave(&dev->irqlock, flags); 1311 spin_lock_irqsave(&dev->irqlock, flags);
1311 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); 1312 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
1312 dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0); 1313 dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0);
1313 dst_size = vb2_plane_size(dst_mb->b, 0); 1314 dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0);
1314 s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size); 1315 s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size);
1315 spin_unlock_irqrestore(&dev->irqlock, flags); 1316 spin_unlock_irqrestore(&dev->irqlock, flags);
1316 dev->curr_ctx = ctx->num; 1317 dev->curr_ctx = ctx->num;
@@ -1342,10 +1343,11 @@ static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
1342 return -EIO; 1343 return -EIO;
1343 } 1344 }
1344 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); 1345 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
1345 mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused); 1346 mfc_debug(2, "Header size: %d\n",
1347 temp_vb->b->vb2_buf.planes[0].bytesused);
1346 s5p_mfc_set_dec_stream_buffer_v5(ctx, 1348 s5p_mfc_set_dec_stream_buffer_v5(ctx,
1347 vb2_dma_contig_plane_dma_addr(temp_vb->b, 0), 1349 vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0),
1348 0, temp_vb->b->v4l2_planes[0].bytesused); 1350 0, temp_vb->b->vb2_buf.planes[0].bytesused);
1349 spin_unlock_irqrestore(&dev->irqlock, flags); 1351 spin_unlock_irqrestore(&dev->irqlock, flags);
1350 dev->curr_ctx = ctx->num; 1352 dev->curr_ctx = ctx->num;
1351 ret = s5p_mfc_set_dec_frame_buffer_v5(ctx); 1353 ret = s5p_mfc_set_dec_frame_buffer_v5(ctx);
@@ -1478,9 +1480,9 @@ static void s5p_mfc_cleanup_queue_v5(struct list_head *lh, struct vb2_queue *vq)
1478 1480
1479 while (!list_empty(lh)) { 1481 while (!list_empty(lh)) {
1480 b = list_entry(lh->next, struct s5p_mfc_buf, list); 1482 b = list_entry(lh->next, struct s5p_mfc_buf, list);
1481 for (i = 0; i < b->b->num_planes; i++) 1483 for (i = 0; i < b->b->vb2_buf.num_planes; i++)
1482 vb2_set_plane_payload(b->b, i, 0); 1484 vb2_set_plane_payload(&b->b->vb2_buf, i, 0);
1483 vb2_buffer_done(b->b, VB2_BUF_STATE_ERROR); 1485 vb2_buffer_done(&b->b->vb2_buf, VB2_BUF_STATE_ERROR);
1484 list_del(&b->list); 1486 list_del(&b->list);
1485 } 1487 }
1486} 1488}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index 04f8c7b5a86f..e0924a5233c8 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -1562,13 +1562,13 @@ static inline int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx)
1562 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); 1562 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
1563 temp_vb->flags |= MFC_BUF_FLAG_USED; 1563 temp_vb->flags |= MFC_BUF_FLAG_USED;
1564 s5p_mfc_set_dec_stream_buffer_v6(ctx, 1564 s5p_mfc_set_dec_stream_buffer_v6(ctx,
1565 vb2_dma_contig_plane_dma_addr(temp_vb->b, 0), 1565 vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0),
1566 ctx->consumed_stream, 1566 ctx->consumed_stream,
1567 temp_vb->b->v4l2_planes[0].bytesused); 1567 temp_vb->b->vb2_buf.planes[0].bytesused);
1568 spin_unlock_irqrestore(&dev->irqlock, flags); 1568 spin_unlock_irqrestore(&dev->irqlock, flags);
1569 1569
1570 dev->curr_ctx = ctx->num; 1570 dev->curr_ctx = ctx->num;
1571 if (temp_vb->b->v4l2_planes[0].bytesused == 0) { 1571 if (temp_vb->b->vb2_buf.planes[0].bytesused == 0) {
1572 last_frame = 1; 1572 last_frame = 1;
1573 mfc_debug(2, "Setting ctx->state to FINISHING\n"); 1573 mfc_debug(2, "Setting ctx->state to FINISHING\n");
1574 ctx->state = MFCINST_FINISHING; 1574 ctx->state = MFCINST_FINISHING;
@@ -1606,8 +1606,8 @@ static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
1606 1606
1607 src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); 1607 src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
1608 src_mb->flags |= MFC_BUF_FLAG_USED; 1608 src_mb->flags |= MFC_BUF_FLAG_USED;
1609 src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0); 1609 src_y_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 0);
1610 src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1); 1610 src_c_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 1);
1611 1611
1612 mfc_debug(2, "enc src y addr: 0x%08lx\n", src_y_addr); 1612 mfc_debug(2, "enc src y addr: 0x%08lx\n", src_y_addr);
1613 mfc_debug(2, "enc src c addr: 0x%08lx\n", src_c_addr); 1613 mfc_debug(2, "enc src c addr: 0x%08lx\n", src_c_addr);
@@ -1616,8 +1616,8 @@ static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
1616 1616
1617 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); 1617 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
1618 dst_mb->flags |= MFC_BUF_FLAG_USED; 1618 dst_mb->flags |= MFC_BUF_FLAG_USED;
1619 dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0); 1619 dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0);
1620 dst_size = vb2_plane_size(dst_mb->b, 0); 1620 dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0);
1621 1621
1622 s5p_mfc_set_enc_stream_buffer_v6(ctx, dst_addr, dst_size); 1622 s5p_mfc_set_enc_stream_buffer_v6(ctx, dst_addr, dst_size);
1623 1623
@@ -1639,10 +1639,11 @@ static inline void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx)
1639 spin_lock_irqsave(&dev->irqlock, flags); 1639 spin_lock_irqsave(&dev->irqlock, flags);
1640 mfc_debug(2, "Preparing to init decoding.\n"); 1640 mfc_debug(2, "Preparing to init decoding.\n");
1641 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); 1641 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
1642 mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused); 1642 mfc_debug(2, "Header size: %d\n",
1643 temp_vb->b->vb2_buf.planes[0].bytesused);
1643 s5p_mfc_set_dec_stream_buffer_v6(ctx, 1644 s5p_mfc_set_dec_stream_buffer_v6(ctx,
1644 vb2_dma_contig_plane_dma_addr(temp_vb->b, 0), 0, 1645 vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0), 0,
1645 temp_vb->b->v4l2_planes[0].bytesused); 1646 temp_vb->b->vb2_buf.planes[0].bytesused);
1646 spin_unlock_irqrestore(&dev->irqlock, flags); 1647 spin_unlock_irqrestore(&dev->irqlock, flags);
1647 dev->curr_ctx = ctx->num; 1648 dev->curr_ctx = ctx->num;
1648 s5p_mfc_init_decode_v6(ctx); 1649 s5p_mfc_init_decode_v6(ctx);
@@ -1659,8 +1660,8 @@ static inline void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx)
1659 spin_lock_irqsave(&dev->irqlock, flags); 1660 spin_lock_irqsave(&dev->irqlock, flags);
1660 1661
1661 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); 1662 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
1662 dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0); 1663 dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0);
1663 dst_size = vb2_plane_size(dst_mb->b, 0); 1664 dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0);
1664 s5p_mfc_set_enc_stream_buffer_v6(ctx, dst_addr, dst_size); 1665 s5p_mfc_set_enc_stream_buffer_v6(ctx, dst_addr, dst_size);
1665 spin_unlock_irqrestore(&dev->irqlock, flags); 1666 spin_unlock_irqrestore(&dev->irqlock, flags);
1666 dev->curr_ctx = ctx->num; 1667 dev->curr_ctx = ctx->num;
@@ -1836,9 +1837,9 @@ static void s5p_mfc_cleanup_queue_v6(struct list_head *lh, struct vb2_queue *vq)
1836 1837
1837 while (!list_empty(lh)) { 1838 while (!list_empty(lh)) {
1838 b = list_entry(lh->next, struct s5p_mfc_buf, list); 1839 b = list_entry(lh->next, struct s5p_mfc_buf, list);
1839 for (i = 0; i < b->b->num_planes; i++) 1840 for (i = 0; i < b->b->vb2_buf.num_planes; i++)
1840 vb2_set_plane_payload(b->b, i, 0); 1841 vb2_set_plane_payload(&b->b->vb2_buf, i, 0);
1841 vb2_buffer_done(b->b, VB2_BUF_STATE_ERROR); 1842 vb2_buffer_done(&b->b->vb2_buf, VB2_BUF_STATE_ERROR);
1842 list_del(&b->list); 1843 list_del(&b->list);
1843 } 1844 }
1844} 1845}
diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
index 855b7238adaa..42cd2709c41c 100644
--- a/drivers/media/platform/s5p-tv/mixer.h
+++ b/drivers/media/platform/s5p-tv/mixer.h
@@ -113,7 +113,7 @@ struct mxr_geometry {
113/** instance of a buffer */ 113/** instance of a buffer */
114struct mxr_buffer { 114struct mxr_buffer {
115 /** common v4l buffer stuff -- must be first */ 115 /** common v4l buffer stuff -- must be first */
116 struct vb2_buffer vb; 116 struct vb2_v4l2_buffer vb;
117 /** node for layer's lists */ 117 /** node for layer's lists */
118 struct list_head list; 118 struct list_head list;
119}; 119};
diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
index 74344c764daa..db3163b23ea0 100644
--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
@@ -86,7 +86,7 @@ static void mxr_graph_buffer_set(struct mxr_layer *layer,
86 dma_addr_t addr = 0; 86 dma_addr_t addr = 0;
87 87
88 if (buf) 88 if (buf)
89 addr = vb2_dma_contig_plane_dma_addr(&buf->vb, 0); 89 addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
90 mxr_reg_graph_buffer(layer->mdev, layer->idx, addr); 90 mxr_reg_graph_buffer(layer->mdev, layer->idx, addr);
91} 91}
92 92
diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
index 5127acb1e571..a0ec14a1da13 100644
--- a/drivers/media/platform/s5p-tv/mixer_reg.c
+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
@@ -279,7 +279,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
279 layer->ops.buffer_set(layer, layer->update_buf); 279 layer->ops.buffer_set(layer, layer->update_buf);
280 280
281 if (done && done != layer->shadow_buf) 281 if (done && done != layer->shadow_buf)
282 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE); 282 vb2_buffer_done(&done->vb.vb2_buf, VB2_BUF_STATE_DONE);
283 283
284done: 284done:
285 spin_unlock(&layer->enq_slock); 285 spin_unlock(&layer->enq_slock);
diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
index 751f3b618337..dba92b54a588 100644
--- a/drivers/media/platform/s5p-tv/mixer_video.c
+++ b/drivers/media/platform/s5p-tv/mixer_video.c
@@ -914,7 +914,8 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
914 914
915static void buf_queue(struct vb2_buffer *vb) 915static void buf_queue(struct vb2_buffer *vb)
916{ 916{
917 struct mxr_buffer *buffer = container_of(vb, struct mxr_buffer, vb); 917 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
918 struct mxr_buffer *buffer = container_of(vbuf, struct mxr_buffer, vb);
918 struct mxr_layer *layer = vb2_get_drv_priv(vb->vb2_queue); 919 struct mxr_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
919 struct mxr_device *mdev = layer->mdev; 920 struct mxr_device *mdev = layer->mdev;
920 unsigned long flags; 921 unsigned long flags;
@@ -963,11 +964,13 @@ static void mxr_watchdog(unsigned long arg)
963 if (layer->update_buf == layer->shadow_buf) 964 if (layer->update_buf == layer->shadow_buf)
964 layer->update_buf = NULL; 965 layer->update_buf = NULL;
965 if (layer->update_buf) { 966 if (layer->update_buf) {
966 vb2_buffer_done(&layer->update_buf->vb, VB2_BUF_STATE_ERROR); 967 vb2_buffer_done(&layer->update_buf->vb.vb2_buf,
968 VB2_BUF_STATE_ERROR);
967 layer->update_buf = NULL; 969 layer->update_buf = NULL;
968 } 970 }
969 if (layer->shadow_buf) { 971 if (layer->shadow_buf) {
970 vb2_buffer_done(&layer->shadow_buf->vb, VB2_BUF_STATE_ERROR); 972 vb2_buffer_done(&layer->shadow_buf->vb.vb2_buf,
973 VB2_BUF_STATE_ERROR);
971 layer->shadow_buf = NULL; 974 layer->shadow_buf = NULL;
972 } 975 }
973 spin_unlock_irqrestore(&layer->enq_slock, flags); 976 spin_unlock_irqrestore(&layer->enq_slock, flags);
@@ -991,7 +994,7 @@ static void stop_streaming(struct vb2_queue *vq)
991 /* set all buffer to be done */ 994 /* set all buffer to be done */
992 list_for_each_entry_safe(buf, buf_tmp, &layer->enq_list, list) { 995 list_for_each_entry_safe(buf, buf_tmp, &layer->enq_list, list) {
993 list_del(&buf->list); 996 list_del(&buf->list);
994 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 997 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
995 } 998 }
996 999
997 spin_unlock_irqrestore(&layer->enq_slock, flags); 1000 spin_unlock_irqrestore(&layer->enq_slock, flags);
diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
index c9388c45ad75..dd002a497dbb 100644
--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
@@ -97,9 +97,10 @@ static void mxr_vp_buffer_set(struct mxr_layer *layer,
97 mxr_reg_vp_buffer(layer->mdev, luma_addr, chroma_addr); 97 mxr_reg_vp_buffer(layer->mdev, luma_addr, chroma_addr);
98 return; 98 return;
99 } 99 }
100 luma_addr[0] = vb2_dma_contig_plane_dma_addr(&buf->vb, 0); 100 luma_addr[0] = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
101 if (layer->fmt->num_subframes == 2) { 101 if (layer->fmt->num_subframes == 2) {
102 chroma_addr[0] = vb2_dma_contig_plane_dma_addr(&buf->vb, 1); 102 chroma_addr[0] =
103 vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 1);
103 } else { 104 } else {
104 /* FIXME: mxr_get_plane_size compute integer division, 105 /* FIXME: mxr_get_plane_size compute integer division,
105 * which is slow and should not be performed in interrupt */ 106 * which is slow and should not be performed in interrupt */
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index f5e3eb3a20ff..6455cb9b3224 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -931,9 +931,10 @@ static int sh_veu_buf_prepare(struct vb2_buffer *vb)
931 931
932static void sh_veu_buf_queue(struct vb2_buffer *vb) 932static void sh_veu_buf_queue(struct vb2_buffer *vb)
933{ 933{
934 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
934 struct sh_veu_dev *veu = vb2_get_drv_priv(vb->vb2_queue); 935 struct sh_veu_dev *veu = vb2_get_drv_priv(vb->vb2_queue);
935 dev_dbg(veu->dev, "%s(%d)\n", __func__, vb->v4l2_buf.type); 936 dev_dbg(veu->dev, "%s(%d)\n", __func__, vb->type);
936 v4l2_m2m_buf_queue(veu->m2m_ctx, vb); 937 v4l2_m2m_buf_queue(veu->m2m_ctx, vbuf);
937} 938}
938 939
939static const struct vb2_ops sh_veu_qops = { 940static const struct vb2_ops sh_veu_qops = {
@@ -1084,8 +1085,8 @@ static irqreturn_t sh_veu_bh(int irq, void *dev_id)
1084static irqreturn_t sh_veu_isr(int irq, void *dev_id) 1085static irqreturn_t sh_veu_isr(int irq, void *dev_id)
1085{ 1086{
1086 struct sh_veu_dev *veu = dev_id; 1087 struct sh_veu_dev *veu = dev_id;
1087 struct vb2_buffer *dst; 1088 struct vb2_v4l2_buffer *dst;
1088 struct vb2_buffer *src; 1089 struct vb2_v4l2_buffer *src;
1089 u32 status = sh_veu_reg_read(veu, VEU_EVTR); 1090 u32 status = sh_veu_reg_read(veu, VEU_EVTR);
1090 1091
1091 /* bundle read mode not used */ 1092 /* bundle read mode not used */
@@ -1105,11 +1106,11 @@ static irqreturn_t sh_veu_isr(int irq, void *dev_id)
1105 if (!src || !dst) 1106 if (!src || !dst)
1106 return IRQ_NONE; 1107 return IRQ_NONE;
1107 1108
1108 dst->v4l2_buf.timestamp = src->v4l2_buf.timestamp; 1109 dst->timestamp = src->timestamp;
1109 dst->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; 1110 dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
1110 dst->v4l2_buf.flags |= 1111 dst->flags |=
1111 src->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK; 1112 src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
1112 dst->v4l2_buf.timecode = src->v4l2_buf.timecode; 1113 dst->timecode = src->timecode;
1113 1114
1114 spin_lock(&veu->lock); 1115 spin_lock(&veu->lock);
1115 v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE); 1116 v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index fe5c8ab06bd5..7967a75fde36 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -27,6 +27,7 @@
27#include <media/v4l2-device.h> 27#include <media/v4l2-device.h>
28#include <media/v4l2-ioctl.h> 28#include <media/v4l2-ioctl.h>
29#include <media/v4l2-mediabus.h> 29#include <media/v4l2-mediabus.h>
30#include <media/videobuf2-v4l2.h>
30#include <media/videobuf2-dma-contig.h> 31#include <media/videobuf2-dma-contig.h>
31 32
32/* Mirror addresses are not available for all registers */ 33/* Mirror addresses are not available for all registers */
@@ -62,11 +63,12 @@ enum sh_vou_status {
62#define VOU_MIN_IMAGE_HEIGHT 16 63#define VOU_MIN_IMAGE_HEIGHT 16
63 64
64struct sh_vou_buffer { 65struct sh_vou_buffer {
65 struct vb2_buffer vb; 66 struct vb2_v4l2_buffer vb;
66 struct list_head list; 67 struct list_head list;
67}; 68};
68 69
69static inline struct sh_vou_buffer *to_sh_vou_buffer(struct vb2_buffer *vb2) 70static inline struct
71sh_vou_buffer *to_sh_vou_buffer(struct vb2_v4l2_buffer *vb2)
70{ 72{
71 return container_of(vb2, struct sh_vou_buffer, vb); 73 return container_of(vb2, struct sh_vou_buffer, vb);
72} 74}
@@ -193,11 +195,11 @@ static struct sh_vou_fmt vou_fmt[] = {
193}; 195};
194 196
195static void sh_vou_schedule_next(struct sh_vou_device *vou_dev, 197static void sh_vou_schedule_next(struct sh_vou_device *vou_dev,
196 struct vb2_buffer *vb) 198 struct vb2_v4l2_buffer *vbuf)
197{ 199{
198 dma_addr_t addr1, addr2; 200 dma_addr_t addr1, addr2;
199 201
200 addr1 = vb2_dma_contig_plane_dma_addr(vb, 0); 202 addr1 = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
201 switch (vou_dev->pix.pixelformat) { 203 switch (vou_dev->pix.pixelformat) {
202 case V4L2_PIX_FMT_NV12: 204 case V4L2_PIX_FMT_NV12:
203 case V4L2_PIX_FMT_NV16: 205 case V4L2_PIX_FMT_NV16:
@@ -282,8 +284,9 @@ static int sh_vou_buf_prepare(struct vb2_buffer *vb)
282/* Locking: caller holds fop_lock mutex and vq->irqlock spinlock */ 284/* Locking: caller holds fop_lock mutex and vq->irqlock spinlock */
283static void sh_vou_buf_queue(struct vb2_buffer *vb) 285static void sh_vou_buf_queue(struct vb2_buffer *vb)
284{ 286{
287 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
285 struct sh_vou_device *vou_dev = vb2_get_drv_priv(vb->vb2_queue); 288 struct sh_vou_device *vou_dev = vb2_get_drv_priv(vb->vb2_queue);
286 struct sh_vou_buffer *shbuf = to_sh_vou_buffer(vb); 289 struct sh_vou_buffer *shbuf = to_sh_vou_buffer(vbuf);
287 unsigned long flags; 290 unsigned long flags;
288 291
289 spin_lock_irqsave(&vou_dev->lock, flags); 292 spin_lock_irqsave(&vou_dev->lock, flags);
@@ -302,7 +305,8 @@ static int sh_vou_start_streaming(struct vb2_queue *vq, unsigned int count)
302 video, s_stream, 1); 305 video, s_stream, 1);
303 if (ret < 0 && ret != -ENOIOCTLCMD) { 306 if (ret < 0 && ret != -ENOIOCTLCMD) {
304 list_for_each_entry_safe(buf, node, &vou_dev->buf_list, list) { 307 list_for_each_entry_safe(buf, node, &vou_dev->buf_list, list) {
305 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); 308 vb2_buffer_done(&buf->vb.vb2_buf,
309 VB2_BUF_STATE_QUEUED);
306 list_del(&buf->list); 310 list_del(&buf->list);
307 } 311 }
308 vou_dev->active = NULL; 312 vou_dev->active = NULL;
@@ -353,7 +357,7 @@ static void sh_vou_stop_streaming(struct vb2_queue *vq)
353 msleep(50); 357 msleep(50);
354 spin_lock_irqsave(&vou_dev->lock, flags); 358 spin_lock_irqsave(&vou_dev->lock, flags);
355 list_for_each_entry_safe(buf, node, &vou_dev->buf_list, list) { 359 list_for_each_entry_safe(buf, node, &vou_dev->buf_list, list) {
356 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 360 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
357 list_del(&buf->list); 361 list_del(&buf->list);
358 } 362 }
359 vou_dev->active = NULL; 363 vou_dev->active = NULL;
@@ -1066,10 +1070,10 @@ static irqreturn_t sh_vou_isr(int irq, void *dev_id)
1066 1070
1067 list_del(&vb->list); 1071 list_del(&vb->list);
1068 1072
1069 v4l2_get_timestamp(&vb->vb.v4l2_buf.timestamp); 1073 v4l2_get_timestamp(&vb->vb.timestamp);
1070 vb->vb.v4l2_buf.sequence = vou_dev->sequence++; 1074 vb->vb.sequence = vou_dev->sequence++;
1071 vb->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED; 1075 vb->vb.field = V4L2_FIELD_INTERLACED;
1072 vb2_buffer_done(&vb->vb, VB2_BUF_STATE_DONE); 1076 vb2_buffer_done(&vb->vb.vb2_buf, VB2_BUF_STATE_DONE);
1073 1077
1074 vou_dev->active = list_entry(vou_dev->buf_list.next, 1078 vou_dev->active = list_entry(vou_dev->buf_list.next,
1075 struct sh_vou_buffer, list); 1079 struct sh_vou_buffer, list);
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index 45e304a3dd85..1036f77ab28a 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -60,7 +60,7 @@ struct isi_dma_desc {
60 60
61/* Frame buffer data */ 61/* Frame buffer data */
62struct frame_buffer { 62struct frame_buffer {
63 struct vb2_buffer vb; 63 struct vb2_v4l2_buffer vb;
64 struct isi_dma_desc *p_dma_desc; 64 struct isi_dma_desc *p_dma_desc;
65 struct list_head list; 65 struct list_head list;
66}; 66};
@@ -161,13 +161,13 @@ static bool is_supported(struct soc_camera_device *icd,
161static irqreturn_t atmel_isi_handle_streaming(struct atmel_isi *isi) 161static irqreturn_t atmel_isi_handle_streaming(struct atmel_isi *isi)
162{ 162{
163 if (isi->active) { 163 if (isi->active) {
164 struct vb2_buffer *vb = &isi->active->vb; 164 struct vb2_v4l2_buffer *vbuf = &isi->active->vb;
165 struct frame_buffer *buf = isi->active; 165 struct frame_buffer *buf = isi->active;
166 166
167 list_del_init(&buf->list); 167 list_del_init(&buf->list);
168 v4l2_get_timestamp(&vb->v4l2_buf.timestamp); 168 v4l2_get_timestamp(&vbuf->timestamp);
169 vb->v4l2_buf.sequence = isi->sequence++; 169 vbuf->sequence = isi->sequence++;
170 vb2_buffer_done(vb, VB2_BUF_STATE_DONE); 170 vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
171 } 171 }
172 172
173 if (list_empty(&isi->video_buffer_list)) { 173 if (list_empty(&isi->video_buffer_list)) {
@@ -277,7 +277,8 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
277 277
278static int buffer_init(struct vb2_buffer *vb) 278static int buffer_init(struct vb2_buffer *vb)
279{ 279{
280 struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb); 280 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
281 struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb);
281 282
282 buf->p_dma_desc = NULL; 283 buf->p_dma_desc = NULL;
283 INIT_LIST_HEAD(&buf->list); 284 INIT_LIST_HEAD(&buf->list);
@@ -287,8 +288,9 @@ static int buffer_init(struct vb2_buffer *vb)
287 288
288static int buffer_prepare(struct vb2_buffer *vb) 289static int buffer_prepare(struct vb2_buffer *vb)
289{ 290{
291 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
290 struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); 292 struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
291 struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb); 293 struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb);
292 struct soc_camera_host *ici = to_soc_camera_host(icd->parent); 294 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
293 struct atmel_isi *isi = ici->priv; 295 struct atmel_isi *isi = ici->priv;
294 unsigned long size; 296 unsigned long size;
@@ -302,7 +304,7 @@ static int buffer_prepare(struct vb2_buffer *vb)
302 return -EINVAL; 304 return -EINVAL;
303 } 305 }
304 306
305 vb2_set_plane_payload(&buf->vb, 0, size); 307 vb2_set_plane_payload(vb, 0, size);
306 308
307 if (!buf->p_dma_desc) { 309 if (!buf->p_dma_desc) {
308 if (list_empty(&isi->dma_desc_head)) { 310 if (list_empty(&isi->dma_desc_head)) {
@@ -329,10 +331,11 @@ static int buffer_prepare(struct vb2_buffer *vb)
329 331
330static void buffer_cleanup(struct vb2_buffer *vb) 332static void buffer_cleanup(struct vb2_buffer *vb)
331{ 333{
334 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
332 struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); 335 struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
333 struct soc_camera_host *ici = to_soc_camera_host(icd->parent); 336 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
334 struct atmel_isi *isi = ici->priv; 337 struct atmel_isi *isi = ici->priv;
335 struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb); 338 struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb);
336 339
337 /* This descriptor is available now and we add to head list */ 340 /* This descriptor is available now and we add to head list */
338 if (buf->p_dma_desc) 341 if (buf->p_dma_desc)
@@ -370,10 +373,11 @@ static void start_dma(struct atmel_isi *isi, struct frame_buffer *buffer)
370 373
371static void buffer_queue(struct vb2_buffer *vb) 374static void buffer_queue(struct vb2_buffer *vb)
372{ 375{
376 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
373 struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); 377 struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
374 struct soc_camera_host *ici = to_soc_camera_host(icd->parent); 378 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
375 struct atmel_isi *isi = ici->priv; 379 struct atmel_isi *isi = ici->priv;
376 struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb); 380 struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb);
377 unsigned long flags = 0; 381 unsigned long flags = 0;
378 382
379 spin_lock_irqsave(&isi->lock, flags); 383 spin_lock_irqsave(&isi->lock, flags);
@@ -435,7 +439,7 @@ static void stop_streaming(struct vb2_queue *vq)
435 /* Release all active buffers */ 439 /* Release all active buffers */
436 list_for_each_entry_safe(buf, node, &isi->video_buffer_list, list) { 440 list_for_each_entry_safe(buf, node, &isi->video_buffer_list, list) {
437 list_del_init(&buf->list); 441 list_del_init(&buf->list);
438 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 442 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
439 } 443 }
440 spin_unlock_irq(&isi->lock); 444 spin_unlock_irq(&isi->lock);
441 445
diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/media/platform/soc_camera/mx2_camera.c
index 6e413359b595..9079196708b1 100644
--- a/drivers/media/platform/soc_camera/mx2_camera.c
+++ b/drivers/media/platform/soc_camera/mx2_camera.c
@@ -225,7 +225,7 @@ struct mx2_buf_internal {
225/* buffer for one video frame */ 225/* buffer for one video frame */
226struct mx2_buffer { 226struct mx2_buffer {
227 /* common v4l buffer stuff -- must be first */ 227 /* common v4l buffer stuff -- must be first */
228 struct vb2_buffer vb; 228 struct vb2_v4l2_buffer vb;
229 struct mx2_buf_internal internal; 229 struct mx2_buf_internal internal;
230}; 230};
231 231
@@ -530,11 +530,12 @@ out:
530 530
531static void mx2_videobuf_queue(struct vb2_buffer *vb) 531static void mx2_videobuf_queue(struct vb2_buffer *vb)
532{ 532{
533 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
533 struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); 534 struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
534 struct soc_camera_host *ici = 535 struct soc_camera_host *ici =
535 to_soc_camera_host(icd->parent); 536 to_soc_camera_host(icd->parent);
536 struct mx2_camera_dev *pcdev = ici->priv; 537 struct mx2_camera_dev *pcdev = ici->priv;
537 struct mx2_buffer *buf = container_of(vb, struct mx2_buffer, vb); 538 struct mx2_buffer *buf = container_of(vbuf, struct mx2_buffer, vb);
538 unsigned long flags; 539 unsigned long flags;
539 540
540 dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__, 541 dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
@@ -664,7 +665,7 @@ static int mx2_start_streaming(struct vb2_queue *q, unsigned int count)
664 buf = list_first_entry(&pcdev->capture, struct mx2_buffer, 665 buf = list_first_entry(&pcdev->capture, struct mx2_buffer,
665 internal.queue); 666 internal.queue);
666 buf->internal.bufnum = 0; 667 buf->internal.bufnum = 0;
667 vb = &buf->vb; 668 vb = &buf->vb.vb2_buf;
668 669
669 phys = vb2_dma_contig_plane_dma_addr(vb, 0); 670 phys = vb2_dma_contig_plane_dma_addr(vb, 0);
670 mx27_update_emma_buf(pcdev, phys, buf->internal.bufnum); 671 mx27_update_emma_buf(pcdev, phys, buf->internal.bufnum);
@@ -673,7 +674,7 @@ static int mx2_start_streaming(struct vb2_queue *q, unsigned int count)
673 buf = list_first_entry(&pcdev->capture, struct mx2_buffer, 674 buf = list_first_entry(&pcdev->capture, struct mx2_buffer,
674 internal.queue); 675 internal.queue);
675 buf->internal.bufnum = 1; 676 buf->internal.bufnum = 1;
676 vb = &buf->vb; 677 vb = &buf->vb.vb2_buf;
677 678
678 phys = vb2_dma_contig_plane_dma_addr(vb, 0); 679 phys = vb2_dma_contig_plane_dma_addr(vb, 0);
679 mx27_update_emma_buf(pcdev, phys, buf->internal.bufnum); 680 mx27_update_emma_buf(pcdev, phys, buf->internal.bufnum);
@@ -1307,6 +1308,7 @@ static void mx27_camera_frame_done_emma(struct mx2_camera_dev *pcdev,
1307 struct mx2_buf_internal *ibuf; 1308 struct mx2_buf_internal *ibuf;
1308 struct mx2_buffer *buf; 1309 struct mx2_buffer *buf;
1309 struct vb2_buffer *vb; 1310 struct vb2_buffer *vb;
1311 struct vb2_v4l2_buffer *vbuf;
1310 unsigned long phys; 1312 unsigned long phys;
1311 1313
1312 ibuf = list_first_entry(&pcdev->active_bufs, struct mx2_buf_internal, 1314 ibuf = list_first_entry(&pcdev->active_bufs, struct mx2_buf_internal,
@@ -1323,7 +1325,8 @@ static void mx27_camera_frame_done_emma(struct mx2_camera_dev *pcdev,
1323 } else { 1325 } else {
1324 buf = mx2_ibuf_to_buf(ibuf); 1326 buf = mx2_ibuf_to_buf(ibuf);
1325 1327
1326 vb = &buf->vb; 1328 vb = &buf->vb.vb2_buf;
1329 vbuf = to_vb2_v4l2_buffer(vb);
1327#ifdef DEBUG 1330#ifdef DEBUG
1328 phys = vb2_dma_contig_plane_dma_addr(vb, 0); 1331 phys = vb2_dma_contig_plane_dma_addr(vb, 0);
1329 if (prp->cfg.channel == 1) { 1332 if (prp->cfg.channel == 1) {
@@ -1347,8 +1350,8 @@ static void mx27_camera_frame_done_emma(struct mx2_camera_dev *pcdev,
1347 vb2_get_plane_payload(vb, 0)); 1350 vb2_get_plane_payload(vb, 0));
1348 1351
1349 list_del_init(&buf->internal.queue); 1352 list_del_init(&buf->internal.queue);
1350 v4l2_get_timestamp(&vb->v4l2_buf.timestamp); 1353 v4l2_get_timestamp(&vbuf->timestamp);
1351 vb->v4l2_buf.sequence = pcdev->frame_count; 1354 vbuf->sequence = pcdev->frame_count;
1352 if (err) 1355 if (err)
1353 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); 1356 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
1354 else 1357 else
@@ -1380,7 +1383,7 @@ static void mx27_camera_frame_done_emma(struct mx2_camera_dev *pcdev,
1380 1383
1381 list_move_tail(pcdev->capture.next, &pcdev->active_bufs); 1384 list_move_tail(pcdev->capture.next, &pcdev->active_bufs);
1382 1385
1383 vb = &buf->vb; 1386 vb = &buf->vb.vb2_buf;
1384 1387
1385 phys = vb2_dma_contig_plane_dma_addr(vb, 0); 1388 phys = vb2_dma_contig_plane_dma_addr(vb, 0);
1386 mx27_update_emma_buf(pcdev, phys, bufnum); 1389 mx27_update_emma_buf(pcdev, phys, bufnum);
diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c
index ace41f53caca..5ea4350ffdd6 100644
--- a/drivers/media/platform/soc_camera/mx3_camera.c
+++ b/drivers/media/platform/soc_camera/mx3_camera.c
@@ -63,7 +63,7 @@
63 63
64struct mx3_camera_buffer { 64struct mx3_camera_buffer {
65 /* common v4l buffer stuff -- must be first */ 65 /* common v4l buffer stuff -- must be first */
66 struct vb2_buffer vb; 66 struct vb2_v4l2_buffer vb;
67 struct list_head queue; 67 struct list_head queue;
68 68
69 /* One descriptot per scatterlist (per frame) */ 69 /* One descriptot per scatterlist (per frame) */
@@ -133,7 +133,7 @@ static void csi_reg_write(struct mx3_camera_dev *mx3, u32 value, off_t reg)
133 __raw_writel(value, mx3->base + reg); 133 __raw_writel(value, mx3->base + reg);
134} 134}
135 135
136static struct mx3_camera_buffer *to_mx3_vb(struct vb2_buffer *vb) 136static struct mx3_camera_buffer *to_mx3_vb(struct vb2_v4l2_buffer *vb)
137{ 137{
138 return container_of(vb, struct mx3_camera_buffer, vb); 138 return container_of(vb, struct mx3_camera_buffer, vb);
139} 139}
@@ -151,14 +151,14 @@ static void mx3_cam_dma_done(void *arg)
151 151
152 spin_lock(&mx3_cam->lock); 152 spin_lock(&mx3_cam->lock);
153 if (mx3_cam->active) { 153 if (mx3_cam->active) {
154 struct vb2_buffer *vb = &mx3_cam->active->vb; 154 struct vb2_v4l2_buffer *vb = &mx3_cam->active->vb;
155 struct mx3_camera_buffer *buf = to_mx3_vb(vb); 155 struct mx3_camera_buffer *buf = to_mx3_vb(vb);
156 156
157 list_del_init(&buf->queue); 157 list_del_init(&buf->queue);
158 v4l2_get_timestamp(&vb->v4l2_buf.timestamp); 158 v4l2_get_timestamp(&vb->timestamp);
159 vb->v4l2_buf.field = mx3_cam->field; 159 vb->field = mx3_cam->field;
160 vb->v4l2_buf.sequence = mx3_cam->sequence++; 160 vb->sequence = mx3_cam->sequence++;
161 vb2_buffer_done(vb, VB2_BUF_STATE_DONE); 161 vb2_buffer_done(&vb->vb2_buf, VB2_BUF_STATE_DONE);
162 } 162 }
163 163
164 if (list_empty(&mx3_cam->capture)) { 164 if (list_empty(&mx3_cam->capture)) {
@@ -257,10 +257,11 @@ static enum pixel_fmt fourcc_to_ipu_pix(__u32 fourcc)
257 257
258static void mx3_videobuf_queue(struct vb2_buffer *vb) 258static void mx3_videobuf_queue(struct vb2_buffer *vb)
259{ 259{
260 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
260 struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); 261 struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
261 struct soc_camera_host *ici = to_soc_camera_host(icd->parent); 262 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
262 struct mx3_camera_dev *mx3_cam = ici->priv; 263 struct mx3_camera_dev *mx3_cam = ici->priv;
263 struct mx3_camera_buffer *buf = to_mx3_vb(vb); 264 struct mx3_camera_buffer *buf = to_mx3_vb(vbuf);
264 struct scatterlist *sg = &buf->sg; 265 struct scatterlist *sg = &buf->sg;
265 struct dma_async_tx_descriptor *txd; 266 struct dma_async_tx_descriptor *txd;
266 struct idmac_channel *ichan = mx3_cam->idmac_channel[0]; 267 struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
@@ -273,7 +274,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
273 274
274 if (vb2_plane_size(vb, 0) < new_size) { 275 if (vb2_plane_size(vb, 0) < new_size) {
275 dev_err(icd->parent, "Buffer #%d too small (%lu < %zu)\n", 276 dev_err(icd->parent, "Buffer #%d too small (%lu < %zu)\n",
276 vb->v4l2_buf.index, vb2_plane_size(vb, 0), new_size); 277 vbuf->vb2_buf.index, vb2_plane_size(vb, 0), new_size);
277 goto error; 278 goto error;
278 } 279 }
279 280
@@ -357,10 +358,11 @@ error:
357 358
358static void mx3_videobuf_release(struct vb2_buffer *vb) 359static void mx3_videobuf_release(struct vb2_buffer *vb)
359{ 360{
361 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
360 struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); 362 struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
361 struct soc_camera_host *ici = to_soc_camera_host(icd->parent); 363 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
362 struct mx3_camera_dev *mx3_cam = ici->priv; 364 struct mx3_camera_dev *mx3_cam = ici->priv;
363 struct mx3_camera_buffer *buf = to_mx3_vb(vb); 365 struct mx3_camera_buffer *buf = to_mx3_vb(vbuf);
364 struct dma_async_tx_descriptor *txd = buf->txd; 366 struct dma_async_tx_descriptor *txd = buf->txd;
365 unsigned long flags; 367 unsigned long flags;
366 368
@@ -390,10 +392,11 @@ static void mx3_videobuf_release(struct vb2_buffer *vb)
390 392
391static int mx3_videobuf_init(struct vb2_buffer *vb) 393static int mx3_videobuf_init(struct vb2_buffer *vb)
392{ 394{
395 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
393 struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); 396 struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
394 struct soc_camera_host *ici = to_soc_camera_host(icd->parent); 397 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
395 struct mx3_camera_dev *mx3_cam = ici->priv; 398 struct mx3_camera_dev *mx3_cam = ici->priv;
396 struct mx3_camera_buffer *buf = to_mx3_vb(vb); 399 struct mx3_camera_buffer *buf = to_mx3_vb(vbuf);
397 400
398 if (!buf->txd) { 401 if (!buf->txd) {
399 /* This is for locking debugging only */ 402 /* This is for locking debugging only */
@@ -424,7 +427,7 @@ static void mx3_stop_streaming(struct vb2_queue *q)
424 427
425 list_for_each_entry_safe(buf, tmp, &mx3_cam->capture, queue) { 428 list_for_each_entry_safe(buf, tmp, &mx3_cam->capture, queue) {
426 list_del_init(&buf->queue); 429 list_del_init(&buf->queue);
427 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 430 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
428 } 431 }
429 432
430 spin_unlock_irqrestore(&mx3_cam->lock, flags); 433 spin_unlock_irqrestore(&mx3_cam->lock, flags);
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index 368ab7cfcbb4..98e2593b94db 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -478,7 +478,7 @@ struct rcar_vin_priv {
478 struct soc_camera_host ici; 478 struct soc_camera_host ici;
479 struct list_head capture; 479 struct list_head capture;
480#define MAX_BUFFER_NUM 3 480#define MAX_BUFFER_NUM 3
481 struct vb2_buffer *queue_buf[MAX_BUFFER_NUM]; 481 struct vb2_v4l2_buffer *queue_buf[MAX_BUFFER_NUM];
482 struct vb2_alloc_ctx *alloc_ctx; 482 struct vb2_alloc_ctx *alloc_ctx;
483 enum v4l2_field field; 483 enum v4l2_field field;
484 unsigned int pdata_flags; 484 unsigned int pdata_flags;
@@ -492,7 +492,7 @@ struct rcar_vin_priv {
492#define is_continuous_transfer(priv) (priv->vb_count > MAX_BUFFER_NUM) 492#define is_continuous_transfer(priv) (priv->vb_count > MAX_BUFFER_NUM)
493 493
494struct rcar_vin_buffer { 494struct rcar_vin_buffer {
495 struct vb2_buffer vb; 495 struct vb2_v4l2_buffer vb;
496 struct list_head list; 496 struct list_head list;
497}; 497};
498 498
@@ -748,7 +748,7 @@ static int rcar_vin_hw_ready(struct rcar_vin_priv *priv)
748/* Moves a buffer from the queue to the HW slots */ 748/* Moves a buffer from the queue to the HW slots */
749static int rcar_vin_fill_hw_slot(struct rcar_vin_priv *priv) 749static int rcar_vin_fill_hw_slot(struct rcar_vin_priv *priv)
750{ 750{
751 struct vb2_buffer *vb; 751 struct vb2_v4l2_buffer *vbuf;
752 dma_addr_t phys_addr_top; 752 dma_addr_t phys_addr_top;
753 int slot; 753 int slot;
754 754
@@ -760,10 +760,11 @@ static int rcar_vin_fill_hw_slot(struct rcar_vin_priv *priv)
760 if (slot < 0) 760 if (slot < 0)
761 return 0; 761 return 0;
762 762
763 vb = &list_entry(priv->capture.next, struct rcar_vin_buffer, list)->vb; 763 vbuf = &list_entry(priv->capture.next,
764 list_del_init(to_buf_list(vb)); 764 struct rcar_vin_buffer, list)->vb;
765 priv->queue_buf[slot] = vb; 765 list_del_init(to_buf_list(vbuf));
766 phys_addr_top = vb2_dma_contig_plane_dma_addr(vb, 0); 766 priv->queue_buf[slot] = vbuf;
767 phys_addr_top = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
767 iowrite32(phys_addr_top, priv->base + VNMB_REG(slot)); 768 iowrite32(phys_addr_top, priv->base + VNMB_REG(slot));
768 769
769 return 1; 770 return 1;
@@ -771,6 +772,7 @@ static int rcar_vin_fill_hw_slot(struct rcar_vin_priv *priv)
771 772
772static void rcar_vin_videobuf_queue(struct vb2_buffer *vb) 773static void rcar_vin_videobuf_queue(struct vb2_buffer *vb)
773{ 774{
775 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
774 struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); 776 struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
775 struct soc_camera_host *ici = to_soc_camera_host(icd->parent); 777 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
776 struct rcar_vin_priv *priv = ici->priv; 778 struct rcar_vin_priv *priv = ici->priv;
@@ -780,7 +782,7 @@ static void rcar_vin_videobuf_queue(struct vb2_buffer *vb)
780 782
781 if (vb2_plane_size(vb, 0) < size) { 783 if (vb2_plane_size(vb, 0) < size) {
782 dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n", 784 dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n",
783 vb->v4l2_buf.index, vb2_plane_size(vb, 0), size); 785 vb->index, vb2_plane_size(vb, 0), size);
784 goto error; 786 goto error;
785 } 787 }
786 788
@@ -791,14 +793,14 @@ static void rcar_vin_videobuf_queue(struct vb2_buffer *vb)
791 793
792 spin_lock_irq(&priv->lock); 794 spin_lock_irq(&priv->lock);
793 795
794 list_add_tail(to_buf_list(vb), &priv->capture); 796 list_add_tail(to_buf_list(vbuf), &priv->capture);
795 rcar_vin_fill_hw_slot(priv); 797 rcar_vin_fill_hw_slot(priv);
796 798
797 /* If we weren't running, and have enough buffers, start capturing! */ 799 /* If we weren't running, and have enough buffers, start capturing! */
798 if (priv->state != RUNNING && rcar_vin_hw_ready(priv)) { 800 if (priv->state != RUNNING && rcar_vin_hw_ready(priv)) {
799 if (rcar_vin_setup(priv)) { 801 if (rcar_vin_setup(priv)) {
800 /* Submit error */ 802 /* Submit error */
801 list_del_init(to_buf_list(vb)); 803 list_del_init(to_buf_list(vbuf));
802 spin_unlock_irq(&priv->lock); 804 spin_unlock_irq(&priv->lock);
803 goto error; 805 goto error;
804 } 806 }
@@ -854,7 +856,7 @@ static void rcar_vin_stop_streaming(struct vb2_queue *vq)
854 856
855 for (i = 0; i < MAX_BUFFER_NUM; i++) { 857 for (i = 0; i < MAX_BUFFER_NUM; i++) {
856 if (priv->queue_buf[i]) { 858 if (priv->queue_buf[i]) {
857 vb2_buffer_done(priv->queue_buf[i], 859 vb2_buffer_done(&priv->queue_buf[i]->vb2_buf,
858 VB2_BUF_STATE_ERROR); 860 VB2_BUF_STATE_ERROR);
859 priv->queue_buf[i] = NULL; 861 priv->queue_buf[i] = NULL;
860 } 862 }
@@ -862,7 +864,7 @@ static void rcar_vin_stop_streaming(struct vb2_queue *vq)
862 864
863 list_for_each_safe(buf_head, tmp, &priv->capture) { 865 list_for_each_safe(buf_head, tmp, &priv->capture) {
864 vb2_buffer_done(&list_entry(buf_head, 866 vb2_buffer_done(&list_entry(buf_head,
865 struct rcar_vin_buffer, list)->vb, 867 struct rcar_vin_buffer, list)->vb.vb2_buf,
866 VB2_BUF_STATE_ERROR); 868 VB2_BUF_STATE_ERROR);
867 list_del_init(buf_head); 869 list_del_init(buf_head);
868 } 870 }
@@ -907,10 +909,11 @@ static irqreturn_t rcar_vin_irq(int irq, void *data)
907 else 909 else
908 slot = 0; 910 slot = 0;
909 911
910 priv->queue_buf[slot]->v4l2_buf.field = priv->field; 912 priv->queue_buf[slot]->field = priv->field;
911 priv->queue_buf[slot]->v4l2_buf.sequence = priv->sequence++; 913 priv->queue_buf[slot]->sequence = priv->sequence++;
912 v4l2_get_timestamp(&priv->queue_buf[slot]->v4l2_buf.timestamp); 914 v4l2_get_timestamp(&priv->queue_buf[slot]->timestamp);
913 vb2_buffer_done(priv->queue_buf[slot], VB2_BUF_STATE_DONE); 915 vb2_buffer_done(&priv->queue_buf[slot]->vb2_buf,
916 VB2_BUF_STATE_DONE);
914 priv->queue_buf[slot] = NULL; 917 priv->queue_buf[slot] = NULL;
915 918
916 if (priv->state != STOPPING) 919 if (priv->state != STOPPING)
@@ -964,7 +967,7 @@ static void rcar_vin_remove_device(struct soc_camera_device *icd)
964{ 967{
965 struct soc_camera_host *ici = to_soc_camera_host(icd->parent); 968 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
966 struct rcar_vin_priv *priv = ici->priv; 969 struct rcar_vin_priv *priv = ici->priv;
967 struct vb2_buffer *vb; 970 struct vb2_v4l2_buffer *vbuf;
968 int i; 971 int i;
969 972
970 /* disable capture, disable interrupts */ 973 /* disable capture, disable interrupts */
@@ -978,10 +981,10 @@ static void rcar_vin_remove_device(struct soc_camera_device *icd)
978 /* make sure active buffer is cancelled */ 981 /* make sure active buffer is cancelled */
979 spin_lock_irq(&priv->lock); 982 spin_lock_irq(&priv->lock);
980 for (i = 0; i < MAX_BUFFER_NUM; i++) { 983 for (i = 0; i < MAX_BUFFER_NUM; i++) {
981 vb = priv->queue_buf[i]; 984 vbuf = priv->queue_buf[i];
982 if (vb) { 985 if (vbuf) {
983 list_del_init(to_buf_list(vb)); 986 list_del_init(to_buf_list(vbuf));
984 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); 987 vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_ERROR);
985 } 988 }
986 } 989 }
987 spin_unlock_irq(&priv->lock); 990 spin_unlock_irq(&priv->lock);
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index efdeea4490e8..171994298fde 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -93,7 +93,7 @@
93 93
94/* per video frame buffer */ 94/* per video frame buffer */
95struct sh_mobile_ceu_buffer { 95struct sh_mobile_ceu_buffer {
96 struct vb2_buffer vb; /* v4l buffer must be first */ 96 struct vb2_v4l2_buffer vb; /* v4l buffer must be first */
97 struct list_head queue; 97 struct list_head queue;
98}; 98};
99 99
@@ -112,7 +112,7 @@ struct sh_mobile_ceu_dev {
112 112
113 spinlock_t lock; /* Protects video buffer lists */ 113 spinlock_t lock; /* Protects video buffer lists */
114 struct list_head capture; 114 struct list_head capture;
115 struct vb2_buffer *active; 115 struct vb2_v4l2_buffer *active;
116 struct vb2_alloc_ctx *alloc_ctx; 116 struct vb2_alloc_ctx *alloc_ctx;
117 117
118 struct sh_mobile_ceu_info *pdata; 118 struct sh_mobile_ceu_info *pdata;
@@ -152,9 +152,9 @@ struct sh_mobile_ceu_cam {
152 u32 code; 152 u32 code;
153}; 153};
154 154
155static struct sh_mobile_ceu_buffer *to_ceu_vb(struct vb2_buffer *vb) 155static struct sh_mobile_ceu_buffer *to_ceu_vb(struct vb2_v4l2_buffer *vbuf)
156{ 156{
157 return container_of(vb, struct sh_mobile_ceu_buffer, vb); 157 return container_of(vbuf, struct sh_mobile_ceu_buffer, vb);
158} 158}
159 159
160static void ceu_write(struct sh_mobile_ceu_dev *priv, 160static void ceu_write(struct sh_mobile_ceu_dev *priv,
@@ -334,7 +334,8 @@ static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
334 bottom2 = CDBCR; 334 bottom2 = CDBCR;
335 } 335 }
336 336
337 phys_addr_top = vb2_dma_contig_plane_dma_addr(pcdev->active, 0); 337 phys_addr_top =
338 vb2_dma_contig_plane_dma_addr(&pcdev->active->vb2_buf, 0);
338 339
339 switch (icd->current_fmt->host_fmt->fourcc) { 340 switch (icd->current_fmt->host_fmt->fourcc) {
340 case V4L2_PIX_FMT_NV12: 341 case V4L2_PIX_FMT_NV12:
@@ -369,7 +370,8 @@ static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
369 370
370static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb) 371static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
371{ 372{
372 struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb); 373 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
374 struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf);
373 375
374 /* Added list head initialization on alloc */ 376 /* Added list head initialization on alloc */
375 WARN(!list_empty(&buf->queue), "Buffer %p on queue!\n", vb); 377 WARN(!list_empty(&buf->queue), "Buffer %p on queue!\n", vb);
@@ -379,17 +381,19 @@ static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
379 381
380static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb) 382static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
381{ 383{
382 struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq); 384 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
385 struct soc_camera_device *icd = container_of(vb->vb2_queue,
386 struct soc_camera_device, vb2_vidq);
383 struct soc_camera_host *ici = to_soc_camera_host(icd->parent); 387 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
384 struct sh_mobile_ceu_dev *pcdev = ici->priv; 388 struct sh_mobile_ceu_dev *pcdev = ici->priv;
385 struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb); 389 struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf);
386 unsigned long size; 390 unsigned long size;
387 391
388 size = icd->sizeimage; 392 size = icd->sizeimage;
389 393
390 if (vb2_plane_size(vb, 0) < size) { 394 if (vb2_plane_size(vb, 0) < size) {
391 dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n", 395 dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n",
392 vb->v4l2_buf.index, vb2_plane_size(vb, 0), size); 396 vb->index, vb2_plane_size(vb, 0), size);
393 goto error; 397 goto error;
394 } 398 }
395 399
@@ -416,7 +420,7 @@ static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
416 * we are not interested in the return value of 420 * we are not interested in the return value of
417 * sh_mobile_ceu_capture here. 421 * sh_mobile_ceu_capture here.
418 */ 422 */
419 pcdev->active = vb; 423 pcdev->active = vbuf;
420 sh_mobile_ceu_capture(pcdev); 424 sh_mobile_ceu_capture(pcdev);
421 } 425 }
422 spin_unlock_irq(&pcdev->lock); 426 spin_unlock_irq(&pcdev->lock);
@@ -429,14 +433,16 @@ error:
429 433
430static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb) 434static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
431{ 435{
432 struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq); 436 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
437 struct soc_camera_device *icd = container_of(vb->vb2_queue,
438 struct soc_camera_device, vb2_vidq);
433 struct soc_camera_host *ici = to_soc_camera_host(icd->parent); 439 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
434 struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb); 440 struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf);
435 struct sh_mobile_ceu_dev *pcdev = ici->priv; 441 struct sh_mobile_ceu_dev *pcdev = ici->priv;
436 442
437 spin_lock_irq(&pcdev->lock); 443 spin_lock_irq(&pcdev->lock);
438 444
439 if (pcdev->active == vb) { 445 if (pcdev->active == vbuf) {
440 /* disable capture (release DMA buffer), reset */ 446 /* disable capture (release DMA buffer), reset */
441 ceu_write(pcdev, CAPSR, 1 << 16); 447 ceu_write(pcdev, CAPSR, 1 << 16);
442 pcdev->active = NULL; 448 pcdev->active = NULL;
@@ -458,7 +464,9 @@ static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
458 464
459static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb) 465static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb)
460{ 466{
461 struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq); 467 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
468 struct soc_camera_device *icd = container_of(vb->vb2_queue,
469 struct soc_camera_device, vb2_vidq);
462 struct soc_camera_host *ici = to_soc_camera_host(icd->parent); 470 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
463 struct sh_mobile_ceu_dev *pcdev = ici->priv; 471 struct sh_mobile_ceu_dev *pcdev = ici->priv;
464 472
@@ -467,7 +475,7 @@ static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb)
467 pcdev->buf_total); 475 pcdev->buf_total);
468 476
469 /* This is for locking debugging only */ 477 /* This is for locking debugging only */
470 INIT_LIST_HEAD(&to_ceu_vb(vb)->queue); 478 INIT_LIST_HEAD(&to_ceu_vb(vbuf)->queue);
471 return 0; 479 return 0;
472} 480}
473 481
@@ -504,17 +512,17 @@ static struct vb2_ops sh_mobile_ceu_videobuf_ops = {
504static irqreturn_t sh_mobile_ceu_irq(int irq, void *data) 512static irqreturn_t sh_mobile_ceu_irq(int irq, void *data)
505{ 513{
506 struct sh_mobile_ceu_dev *pcdev = data; 514 struct sh_mobile_ceu_dev *pcdev = data;
507 struct vb2_buffer *vb; 515 struct vb2_v4l2_buffer *vbuf;
508 int ret; 516 int ret;
509 517
510 spin_lock(&pcdev->lock); 518 spin_lock(&pcdev->lock);
511 519
512 vb = pcdev->active; 520 vbuf = pcdev->active;
513 if (!vb) 521 if (!vbuf)
514 /* Stale interrupt from a released buffer */ 522 /* Stale interrupt from a released buffer */
515 goto out; 523 goto out;
516 524
517 list_del_init(&to_ceu_vb(vb)->queue); 525 list_del_init(&to_ceu_vb(vbuf)->queue);
518 526
519 if (!list_empty(&pcdev->capture)) 527 if (!list_empty(&pcdev->capture))
520 pcdev->active = &list_entry(pcdev->capture.next, 528 pcdev->active = &list_entry(pcdev->capture.next,
@@ -523,12 +531,13 @@ static irqreturn_t sh_mobile_ceu_irq(int irq, void *data)
523 pcdev->active = NULL; 531 pcdev->active = NULL;
524 532
525 ret = sh_mobile_ceu_capture(pcdev); 533 ret = sh_mobile_ceu_capture(pcdev);
526 v4l2_get_timestamp(&vb->v4l2_buf.timestamp); 534 v4l2_get_timestamp(&vbuf->timestamp);
527 if (!ret) { 535 if (!ret) {
528 vb->v4l2_buf.field = pcdev->field; 536 vbuf->field = pcdev->field;
529 vb->v4l2_buf.sequence = pcdev->sequence++; 537 vbuf->sequence = pcdev->sequence++;
530 } 538 }
531 vb2_buffer_done(vb, ret < 0 ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); 539 vb2_buffer_done(&vbuf->vb2_buf,
540 ret < 0 ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
532 541
533out: 542out:
534 spin_unlock(&pcdev->lock); 543 spin_unlock(&pcdev->lock);
@@ -633,7 +642,7 @@ static void sh_mobile_ceu_clock_stop(struct soc_camera_host *ici)
633 spin_lock_irq(&pcdev->lock); 642 spin_lock_irq(&pcdev->lock);
634 if (pcdev->active) { 643 if (pcdev->active) {
635 list_del_init(&to_ceu_vb(pcdev->active)->queue); 644 list_del_init(&to_ceu_vb(pcdev->active)->queue);
636 vb2_buffer_done(pcdev->active, VB2_BUF_STATE_ERROR); 645 vb2_buffer_done(&pcdev->active->vb2_buf, VB2_BUF_STATE_ERROR);
637 pcdev->active = NULL; 646 pcdev->active = NULL;
638 } 647 }
639 spin_unlock_irq(&pcdev->lock); 648 spin_unlock_irq(&pcdev->lock);
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index df61355b46f1..62b9842aa633 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -180,7 +180,7 @@ static struct bdisp_frame *ctx_get_frame(struct bdisp_ctx *ctx,
180 180
181static void bdisp_job_finish(struct bdisp_ctx *ctx, int vb_state) 181static void bdisp_job_finish(struct bdisp_ctx *ctx, int vb_state)
182{ 182{
183 struct vb2_buffer *src_vb, *dst_vb; 183 struct vb2_v4l2_buffer *src_vb, *dst_vb;
184 184
185 if (WARN(!ctx || !ctx->fh.m2m_ctx, "Null hardware context\n")) 185 if (WARN(!ctx || !ctx->fh.m2m_ctx, "Null hardware context\n"))
186 return; 186 return;
@@ -191,10 +191,10 @@ static void bdisp_job_finish(struct bdisp_ctx *ctx, int vb_state)
191 dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); 191 dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
192 192
193 if (src_vb && dst_vb) { 193 if (src_vb && dst_vb) {
194 dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp; 194 dst_vb->timestamp = src_vb->timestamp;
195 dst_vb->v4l2_buf.timecode = src_vb->v4l2_buf.timecode; 195 dst_vb->timecode = src_vb->timecode;
196 dst_vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; 196 dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
197 dst_vb->v4l2_buf.flags |= src_vb->v4l2_buf.flags & 197 dst_vb->flags |= src_vb->flags &
198 V4L2_BUF_FLAG_TSTAMP_SRC_MASK; 198 V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
199 199
200 v4l2_m2m_buf_done(src_vb, vb_state); 200 v4l2_m2m_buf_done(src_vb, vb_state);
@@ -281,23 +281,23 @@ static int bdisp_get_addr(struct bdisp_ctx *ctx, struct vb2_buffer *vb,
281static int bdisp_get_bufs(struct bdisp_ctx *ctx) 281static int bdisp_get_bufs(struct bdisp_ctx *ctx)
282{ 282{
283 struct bdisp_frame *src, *dst; 283 struct bdisp_frame *src, *dst;
284 struct vb2_buffer *src_vb, *dst_vb; 284 struct vb2_v4l2_buffer *src_vb, *dst_vb;
285 int ret; 285 int ret;
286 286
287 src = &ctx->src; 287 src = &ctx->src;
288 dst = &ctx->dst; 288 dst = &ctx->dst;
289 289
290 src_vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); 290 src_vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
291 ret = bdisp_get_addr(ctx, src_vb, src, src->paddr); 291 ret = bdisp_get_addr(ctx, &src_vb->vb2_buf, src, src->paddr);
292 if (ret) 292 if (ret)
293 return ret; 293 return ret;
294 294
295 dst_vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); 295 dst_vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
296 ret = bdisp_get_addr(ctx, dst_vb, dst, dst->paddr); 296 ret = bdisp_get_addr(ctx, &dst_vb->vb2_buf, dst, dst->paddr);
297 if (ret) 297 if (ret)
298 return ret; 298 return ret;
299 299
300 dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp; 300 dst_vb->timestamp = src_vb->timestamp;
301 301
302 return 0; 302 return 0;
303} 303}
@@ -483,6 +483,7 @@ static int bdisp_buf_prepare(struct vb2_buffer *vb)
483 483
484static void bdisp_buf_queue(struct vb2_buffer *vb) 484static void bdisp_buf_queue(struct vb2_buffer *vb)
485{ 485{
486 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
486 struct bdisp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); 487 struct bdisp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
487 488
488 /* return to V4L2 any 0-size buffer so it can be dequeued by user */ 489 /* return to V4L2 any 0-size buffer so it can be dequeued by user */
@@ -493,13 +494,13 @@ static void bdisp_buf_queue(struct vb2_buffer *vb)
493 } 494 }
494 495
495 if (ctx->fh.m2m_ctx) 496 if (ctx->fh.m2m_ctx)
496 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb); 497 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
497} 498}
498 499
499static int bdisp_start_streaming(struct vb2_queue *q, unsigned int count) 500static int bdisp_start_streaming(struct vb2_queue *q, unsigned int count)
500{ 501{
501 struct bdisp_ctx *ctx = q->drv_priv; 502 struct bdisp_ctx *ctx = q->drv_priv;
502 struct vb2_buffer *buf; 503 struct vb2_v4l2_buffer *buf;
503 int ret = pm_runtime_get_sync(ctx->bdisp_dev->dev); 504 int ret = pm_runtime_get_sync(ctx->bdisp_dev->dev);
504 505
505 if (ret < 0) { 506 if (ret < 0) {
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index d82c2f279dfb..4902453aeb61 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -384,8 +384,8 @@ struct vpe_ctx {
384 unsigned int bufs_completed; /* bufs done in this batch */ 384 unsigned int bufs_completed; /* bufs done in this batch */
385 385
386 struct vpe_q_data q_data[2]; /* src & dst queue data */ 386 struct vpe_q_data q_data[2]; /* src & dst queue data */
387 struct vb2_buffer *src_vbs[VPE_MAX_SRC_BUFS]; 387 struct vb2_v4l2_buffer *src_vbs[VPE_MAX_SRC_BUFS];
388 struct vb2_buffer *dst_vb; 388 struct vb2_v4l2_buffer *dst_vb;
389 389
390 dma_addr_t mv_buf_dma[2]; /* dma addrs of motion vector in/out bufs */ 390 dma_addr_t mv_buf_dma[2]; /* dma addrs of motion vector in/out bufs */
391 void *mv_buf[2]; /* virtual addrs of motion vector bufs */ 391 void *mv_buf[2]; /* virtual addrs of motion vector bufs */
@@ -988,7 +988,7 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
988{ 988{
989 struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_DST]; 989 struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_DST];
990 const struct vpe_port_data *p_data = &port_data[port]; 990 const struct vpe_port_data *p_data = &port_data[port];
991 struct vb2_buffer *vb = ctx->dst_vb; 991 struct vb2_buffer *vb = &ctx->dst_vb->vb2_buf;
992 struct vpe_fmt *fmt = q_data->fmt; 992 struct vpe_fmt *fmt = q_data->fmt;
993 const struct vpdma_data_format *vpdma_fmt; 993 const struct vpdma_data_format *vpdma_fmt;
994 int mv_buf_selector = !ctx->src_mv_buf_selector; 994 int mv_buf_selector = !ctx->src_mv_buf_selector;
@@ -1025,11 +1025,12 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
1025{ 1025{
1026 struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_SRC]; 1026 struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_SRC];
1027 const struct vpe_port_data *p_data = &port_data[port]; 1027 const struct vpe_port_data *p_data = &port_data[port];
1028 struct vb2_buffer *vb = ctx->src_vbs[p_data->vb_index]; 1028 struct vb2_buffer *vb = &ctx->src_vbs[p_data->vb_index]->vb2_buf;
1029 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1029 struct vpe_fmt *fmt = q_data->fmt; 1030 struct vpe_fmt *fmt = q_data->fmt;
1030 const struct vpdma_data_format *vpdma_fmt; 1031 const struct vpdma_data_format *vpdma_fmt;
1031 int mv_buf_selector = ctx->src_mv_buf_selector; 1032 int mv_buf_selector = ctx->src_mv_buf_selector;
1032 int field = vb->v4l2_buf.field == V4L2_FIELD_BOTTOM; 1033 int field = vbuf->field == V4L2_FIELD_BOTTOM;
1033 int frame_width, frame_height; 1034 int frame_width, frame_height;
1034 dma_addr_t dma_addr; 1035 dma_addr_t dma_addr;
1035 u32 flags = 0; 1036 u32 flags = 0;
@@ -1222,8 +1223,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
1222 struct vpe_dev *dev = (struct vpe_dev *)data; 1223 struct vpe_dev *dev = (struct vpe_dev *)data;
1223 struct vpe_ctx *ctx; 1224 struct vpe_ctx *ctx;
1224 struct vpe_q_data *d_q_data; 1225 struct vpe_q_data *d_q_data;
1225 struct vb2_buffer *s_vb, *d_vb; 1226 struct vb2_v4l2_buffer *s_vb, *d_vb;
1226 struct v4l2_buffer *s_buf, *d_buf;
1227 unsigned long flags; 1227 unsigned long flags;
1228 u32 irqst0, irqst1; 1228 u32 irqst0, irqst1;
1229 1229
@@ -1286,20 +1286,18 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
1286 1286
1287 s_vb = ctx->src_vbs[0]; 1287 s_vb = ctx->src_vbs[0];
1288 d_vb = ctx->dst_vb; 1288 d_vb = ctx->dst_vb;
1289 s_buf = &s_vb->v4l2_buf;
1290 d_buf = &d_vb->v4l2_buf;
1291 1289
1292 d_buf->flags = s_buf->flags; 1290 d_vb->flags = s_vb->flags;
1291 d_vb->timestamp = s_vb->timestamp;
1293 1292
1294 d_buf->timestamp = s_buf->timestamp; 1293 if (s_vb->flags & V4L2_BUF_FLAG_TIMECODE)
1295 if (s_buf->flags & V4L2_BUF_FLAG_TIMECODE) 1294 d_vb->timecode = s_vb->timecode;
1296 d_buf->timecode = s_buf->timecode;
1297 1295
1298 d_buf->sequence = ctx->sequence; 1296 d_vb->sequence = ctx->sequence;
1299 1297
1300 d_q_data = &ctx->q_data[Q_DATA_DST]; 1298 d_q_data = &ctx->q_data[Q_DATA_DST];
1301 if (d_q_data->flags & Q_DATA_INTERLACED) { 1299 if (d_q_data->flags & Q_DATA_INTERLACED) {
1302 d_buf->field = ctx->field; 1300 d_vb->field = ctx->field;
1303 if (ctx->field == V4L2_FIELD_BOTTOM) { 1301 if (ctx->field == V4L2_FIELD_BOTTOM) {
1304 ctx->sequence++; 1302 ctx->sequence++;
1305 ctx->field = V4L2_FIELD_TOP; 1303 ctx->field = V4L2_FIELD_TOP;
@@ -1308,7 +1306,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
1308 ctx->field = V4L2_FIELD_BOTTOM; 1306 ctx->field = V4L2_FIELD_BOTTOM;
1309 } 1307 }
1310 } else { 1308 } else {
1311 d_buf->field = V4L2_FIELD_NONE; 1309 d_vb->field = V4L2_FIELD_NONE;
1312 ctx->sequence++; 1310 ctx->sequence++;
1313 } 1311 }
1314 1312
@@ -1825,6 +1823,7 @@ static int vpe_queue_setup(struct vb2_queue *vq,
1825 1823
1826static int vpe_buf_prepare(struct vb2_buffer *vb) 1824static int vpe_buf_prepare(struct vb2_buffer *vb)
1827{ 1825{
1826 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1828 struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); 1827 struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1829 struct vpe_q_data *q_data; 1828 struct vpe_q_data *q_data;
1830 int i, num_planes; 1829 int i, num_planes;
@@ -1836,10 +1835,10 @@ static int vpe_buf_prepare(struct vb2_buffer *vb)
1836 1835
1837 if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { 1836 if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
1838 if (!(q_data->flags & Q_DATA_INTERLACED)) { 1837 if (!(q_data->flags & Q_DATA_INTERLACED)) {
1839 vb->v4l2_buf.field = V4L2_FIELD_NONE; 1838 vbuf->field = V4L2_FIELD_NONE;
1840 } else { 1839 } else {
1841 if (vb->v4l2_buf.field != V4L2_FIELD_TOP && 1840 if (vbuf->field != V4L2_FIELD_TOP &&
1842 vb->v4l2_buf.field != V4L2_FIELD_BOTTOM) 1841 vbuf->field != V4L2_FIELD_BOTTOM)
1843 return -EINVAL; 1842 return -EINVAL;
1844 } 1843 }
1845 } 1844 }
@@ -1862,9 +1861,10 @@ static int vpe_buf_prepare(struct vb2_buffer *vb)
1862 1861
1863static void vpe_buf_queue(struct vb2_buffer *vb) 1862static void vpe_buf_queue(struct vb2_buffer *vb)
1864{ 1863{
1864 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1865 struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); 1865 struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1866 1866
1867 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb); 1867 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
1868} 1868}
1869 1869
1870static int vpe_start_streaming(struct vb2_queue *q, unsigned int count) 1870static int vpe_start_streaming(struct vb2_queue *q, unsigned int count)
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index d47cfba32c58..7c3ced045304 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -197,8 +197,8 @@ static struct vim2m_q_data *get_q_data(struct vim2m_ctx *ctx,
197 197
198 198
199static int device_process(struct vim2m_ctx *ctx, 199static int device_process(struct vim2m_ctx *ctx,
200 struct vb2_buffer *in_vb, 200 struct vb2_v4l2_buffer *in_vb,
201 struct vb2_buffer *out_vb) 201 struct vb2_v4l2_buffer *out_vb)
202{ 202{
203 struct vim2m_dev *dev = ctx->dev; 203 struct vim2m_dev *dev = ctx->dev;
204 struct vim2m_q_data *q_data; 204 struct vim2m_q_data *q_data;
@@ -213,15 +213,16 @@ static int device_process(struct vim2m_ctx *ctx,
213 height = q_data->height; 213 height = q_data->height;
214 bytesperline = (q_data->width * q_data->fmt->depth) >> 3; 214 bytesperline = (q_data->width * q_data->fmt->depth) >> 3;
215 215
216 p_in = vb2_plane_vaddr(in_vb, 0); 216 p_in = vb2_plane_vaddr(&in_vb->vb2_buf, 0);
217 p_out = vb2_plane_vaddr(out_vb, 0); 217 p_out = vb2_plane_vaddr(&out_vb->vb2_buf, 0);
218 if (!p_in || !p_out) { 218 if (!p_in || !p_out) {
219 v4l2_err(&dev->v4l2_dev, 219 v4l2_err(&dev->v4l2_dev,
220 "Acquiring kernel pointers to buffers failed\n"); 220 "Acquiring kernel pointers to buffers failed\n");
221 return -EFAULT; 221 return -EFAULT;
222 } 222 }
223 223
224 if (vb2_plane_size(in_vb, 0) > vb2_plane_size(out_vb, 0)) { 224 if (vb2_plane_size(&in_vb->vb2_buf, 0) >
225 vb2_plane_size(&out_vb->vb2_buf, 0)) {
225 v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n"); 226 v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n");
226 return -EINVAL; 227 return -EINVAL;
227 } 228 }
@@ -231,13 +232,15 @@ static int device_process(struct vim2m_ctx *ctx,
231 bytes_left = bytesperline - tile_w * MEM2MEM_NUM_TILES; 232 bytes_left = bytesperline - tile_w * MEM2MEM_NUM_TILES;
232 w = 0; 233 w = 0;
233 234
234 out_vb->v4l2_buf.sequence = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE)->sequence++; 235 out_vb->sequence =
235 in_vb->v4l2_buf.sequence = q_data->sequence++; 236 get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE)->sequence++;
236 out_vb->v4l2_buf.timestamp = in_vb->v4l2_buf.timestamp; 237 in_vb->sequence = q_data->sequence++;
237 if (in_vb->v4l2_buf.flags & V4L2_BUF_FLAG_TIMECODE) 238 out_vb->timestamp = in_vb->timestamp;
238 out_vb->v4l2_buf.timecode = in_vb->v4l2_buf.timecode; 239
239 out_vb->v4l2_buf.field = in_vb->v4l2_buf.field; 240 if (in_vb->flags & V4L2_BUF_FLAG_TIMECODE)
240 out_vb->v4l2_buf.flags = in_vb->v4l2_buf.flags & 241 out_vb->timecode = in_vb->timecode;
242 out_vb->field = in_vb->field;
243 out_vb->flags = in_vb->flags &
241 (V4L2_BUF_FLAG_TIMECODE | 244 (V4L2_BUF_FLAG_TIMECODE |
242 V4L2_BUF_FLAG_KEYFRAME | 245 V4L2_BUF_FLAG_KEYFRAME |
243 V4L2_BUF_FLAG_PFRAME | 246 V4L2_BUF_FLAG_PFRAME |
@@ -371,7 +374,7 @@ static void device_run(void *priv)
371{ 374{
372 struct vim2m_ctx *ctx = priv; 375 struct vim2m_ctx *ctx = priv;
373 struct vim2m_dev *dev = ctx->dev; 376 struct vim2m_dev *dev = ctx->dev;
374 struct vb2_buffer *src_buf, *dst_buf; 377 struct vb2_v4l2_buffer *src_buf, *dst_buf;
375 378
376 src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); 379 src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
377 dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); 380 dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
@@ -386,7 +389,7 @@ static void device_isr(unsigned long priv)
386{ 389{
387 struct vim2m_dev *vim2m_dev = (struct vim2m_dev *)priv; 390 struct vim2m_dev *vim2m_dev = (struct vim2m_dev *)priv;
388 struct vim2m_ctx *curr_ctx; 391 struct vim2m_ctx *curr_ctx;
389 struct vb2_buffer *src_vb, *dst_vb; 392 struct vb2_v4l2_buffer *src_vb, *dst_vb;
390 unsigned long flags; 393 unsigned long flags;
391 394
392 curr_ctx = v4l2_m2m_get_curr_priv(vim2m_dev->m2m_dev); 395 curr_ctx = v4l2_m2m_get_curr_priv(vim2m_dev->m2m_dev);
@@ -744,6 +747,7 @@ static int vim2m_queue_setup(struct vb2_queue *vq,
744 747
745static int vim2m_buf_prepare(struct vb2_buffer *vb) 748static int vim2m_buf_prepare(struct vb2_buffer *vb)
746{ 749{
750 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
747 struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); 751 struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
748 struct vim2m_q_data *q_data; 752 struct vim2m_q_data *q_data;
749 753
@@ -751,9 +755,9 @@ static int vim2m_buf_prepare(struct vb2_buffer *vb)
751 755
752 q_data = get_q_data(ctx, vb->vb2_queue->type); 756 q_data = get_q_data(ctx, vb->vb2_queue->type);
753 if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) { 757 if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
754 if (vb->v4l2_buf.field == V4L2_FIELD_ANY) 758 if (vbuf->field == V4L2_FIELD_ANY)
755 vb->v4l2_buf.field = V4L2_FIELD_NONE; 759 vbuf->field = V4L2_FIELD_NONE;
756 if (vb->v4l2_buf.field != V4L2_FIELD_NONE) { 760 if (vbuf->field != V4L2_FIELD_NONE) {
757 dprintk(ctx->dev, "%s field isn't supported\n", 761 dprintk(ctx->dev, "%s field isn't supported\n",
758 __func__); 762 __func__);
759 return -EINVAL; 763 return -EINVAL;
@@ -773,9 +777,10 @@ static int vim2m_buf_prepare(struct vb2_buffer *vb)
773 777
774static void vim2m_buf_queue(struct vb2_buffer *vb) 778static void vim2m_buf_queue(struct vb2_buffer *vb)
775{ 779{
780 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
776 struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); 781 struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
777 782
778 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb); 783 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
779} 784}
780 785
781static int vim2m_start_streaming(struct vb2_queue *q, unsigned count) 786static int vim2m_start_streaming(struct vb2_queue *q, unsigned count)
@@ -790,18 +795,18 @@ static int vim2m_start_streaming(struct vb2_queue *q, unsigned count)
790static void vim2m_stop_streaming(struct vb2_queue *q) 795static void vim2m_stop_streaming(struct vb2_queue *q)
791{ 796{
792 struct vim2m_ctx *ctx = vb2_get_drv_priv(q); 797 struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
793 struct vb2_buffer *vb; 798 struct vb2_v4l2_buffer *vbuf;
794 unsigned long flags; 799 unsigned long flags;
795 800
796 for (;;) { 801 for (;;) {
797 if (V4L2_TYPE_IS_OUTPUT(q->type)) 802 if (V4L2_TYPE_IS_OUTPUT(q->type))
798 vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); 803 vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
799 else 804 else
800 vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); 805 vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
801 if (vb == NULL) 806 if (vbuf == NULL)
802 return; 807 return;
803 spin_lock_irqsave(&ctx->dev->irqlock, flags); 808 spin_lock_irqsave(&ctx->dev->irqlock, flags);
804 v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR); 809 v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
805 spin_unlock_irqrestore(&ctx->dev->irqlock, flags); 810 spin_unlock_irqrestore(&ctx->dev->irqlock, flags);
806 } 811 }
807} 812}
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
index 3816d3c69f5c..55b304a705d5 100644
--- a/drivers/media/platform/vivid/vivid-core.h
+++ b/drivers/media/platform/vivid/vivid-core.h
@@ -93,7 +93,7 @@ extern struct vivid_fmt vivid_formats[];
93/* buffer for one video frame */ 93/* buffer for one video frame */
94struct vivid_buffer { 94struct vivid_buffer {
95 /* common v4l buffer stuff -- must be first */ 95 /* common v4l buffer stuff -- must be first */
96 struct vb2_buffer vb; 96 struct vb2_v4l2_buffer vb;
97 struct list_head list; 97 struct list_head list;
98}; 98};
99 99
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
index 1727f5453f0b..83cc6d3b4784 100644
--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
@@ -236,8 +236,8 @@ static void *plane_vaddr(struct tpg_data *tpg, struct vivid_buffer *buf,
236 void *vbuf; 236 void *vbuf;
237 237
238 if (p == 0 || tpg_g_buffers(tpg) > 1) 238 if (p == 0 || tpg_g_buffers(tpg) > 1)
239 return vb2_plane_vaddr(&buf->vb, p); 239 return vb2_plane_vaddr(&buf->vb.vb2_buf, p);
240 vbuf = vb2_plane_vaddr(&buf->vb, 0); 240 vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
241 for (i = 0; i < p; i++) 241 for (i = 0; i < p; i++)
242 vbuf += bpl[i] * h / tpg->vdownsampling[i]; 242 vbuf += bpl[i] * h / tpg->vdownsampling[i];
243 return vbuf; 243 return vbuf;
@@ -246,7 +246,7 @@ static void *plane_vaddr(struct tpg_data *tpg, struct vivid_buffer *buf,
246static int vivid_copy_buffer(struct vivid_dev *dev, unsigned p, u8 *vcapbuf, 246static int vivid_copy_buffer(struct vivid_dev *dev, unsigned p, u8 *vcapbuf,
247 struct vivid_buffer *vid_cap_buf) 247 struct vivid_buffer *vid_cap_buf)
248{ 248{
249 bool blank = dev->must_blank[vid_cap_buf->vb.v4l2_buf.index]; 249 bool blank = dev->must_blank[vid_cap_buf->vb.vb2_buf.index];
250 struct tpg_data *tpg = &dev->tpg; 250 struct tpg_data *tpg = &dev->tpg;
251 struct vivid_buffer *vid_out_buf = NULL; 251 struct vivid_buffer *vid_out_buf = NULL;
252 unsigned vdiv = dev->fmt_out->vdownsampling[p]; 252 unsigned vdiv = dev->fmt_out->vdownsampling[p];
@@ -283,12 +283,12 @@ static int vivid_copy_buffer(struct vivid_dev *dev, unsigned p, u8 *vcapbuf,
283 if (vid_out_buf == NULL) 283 if (vid_out_buf == NULL)
284 return -ENODATA; 284 return -ENODATA;
285 285
286 vid_cap_buf->vb.v4l2_buf.field = vid_out_buf->vb.v4l2_buf.field; 286 vid_cap_buf->vb.field = vid_out_buf->vb.field;
287 287
288 voutbuf = plane_vaddr(tpg, vid_out_buf, p, 288 voutbuf = plane_vaddr(tpg, vid_out_buf, p,
289 dev->bytesperline_out, dev->fmt_out_rect.height); 289 dev->bytesperline_out, dev->fmt_out_rect.height);
290 if (p < dev->fmt_out->buffers) 290 if (p < dev->fmt_out->buffers)
291 voutbuf += vid_out_buf->vb.v4l2_planes[p].data_offset; 291 voutbuf += vid_out_buf->vb.vb2_buf.planes[p].data_offset;
292 voutbuf += tpg_hdiv(tpg, p, dev->loop_vid_out.left) + 292 voutbuf += tpg_hdiv(tpg, p, dev->loop_vid_out.left) +
293 (dev->loop_vid_out.top / vdiv) * stride_out; 293 (dev->loop_vid_out.top / vdiv) * stride_out;
294 vcapbuf += tpg_hdiv(tpg, p, dev->compose_cap.left) + 294 vcapbuf += tpg_hdiv(tpg, p, dev->compose_cap.left) +
@@ -429,17 +429,19 @@ static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
429 bool is_loop = false; 429 bool is_loop = false;
430 430
431 if (dev->loop_video && dev->can_loop_video && 431 if (dev->loop_video && dev->can_loop_video &&
432 ((vivid_is_svid_cap(dev) && !VIVID_INVALID_SIGNAL(dev->std_signal_mode)) || 432 ((vivid_is_svid_cap(dev) &&
433 (vivid_is_hdmi_cap(dev) && !VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode)))) 433 !VIVID_INVALID_SIGNAL(dev->std_signal_mode)) ||
434 (vivid_is_hdmi_cap(dev) &&
435 !VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode))))
434 is_loop = true; 436 is_loop = true;
435 437
436 buf->vb.v4l2_buf.sequence = dev->vid_cap_seq_count; 438 buf->vb.sequence = dev->vid_cap_seq_count;
437 /* 439 /*
438 * Take the timestamp now if the timestamp source is set to 440 * Take the timestamp now if the timestamp source is set to
439 * "Start of Exposure". 441 * "Start of Exposure".
440 */ 442 */
441 if (dev->tstamp_src_is_soe) 443 if (dev->tstamp_src_is_soe)
442 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); 444 v4l2_get_timestamp(&buf->vb.timestamp);
443 if (dev->field_cap == V4L2_FIELD_ALTERNATE) { 445 if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
444 /* 446 /*
445 * 60 Hz standards start with the bottom field, 50 Hz standards 447 * 60 Hz standards start with the bottom field, 50 Hz standards
@@ -447,19 +449,19 @@ static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
447 * then the field is TOP for 50 Hz and BOTTOM for 60 Hz 449 * then the field is TOP for 50 Hz and BOTTOM for 60 Hz
448 * standards. 450 * standards.
449 */ 451 */
450 buf->vb.v4l2_buf.field = ((dev->vid_cap_seq_count & 1) ^ is_60hz) ? 452 buf->vb.field = ((dev->vid_cap_seq_count & 1) ^ is_60hz) ?
451 V4L2_FIELD_BOTTOM : V4L2_FIELD_TOP; 453 V4L2_FIELD_BOTTOM : V4L2_FIELD_TOP;
452 /* 454 /*
453 * The sequence counter counts frames, not fields. So divide 455 * The sequence counter counts frames, not fields. So divide
454 * by two. 456 * by two.
455 */ 457 */
456 buf->vb.v4l2_buf.sequence /= 2; 458 buf->vb.sequence /= 2;
457 } else { 459 } else {
458 buf->vb.v4l2_buf.field = dev->field_cap; 460 buf->vb.field = dev->field_cap;
459 } 461 }
460 tpg_s_field(tpg, buf->vb.v4l2_buf.field, 462 tpg_s_field(tpg, buf->vb.field,
461 dev->field_cap == V4L2_FIELD_ALTERNATE); 463 dev->field_cap == V4L2_FIELD_ALTERNATE);
462 tpg_s_perc_fill_blank(tpg, dev->must_blank[buf->vb.v4l2_buf.index]); 464 tpg_s_perc_fill_blank(tpg, dev->must_blank[buf->vb.vb2_buf.index]);
463 465
464 vivid_precalc_copy_rects(dev); 466 vivid_precalc_copy_rects(dev);
465 467
@@ -479,13 +481,16 @@ static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
479 } 481 }
480 tpg_calc_text_basep(tpg, basep, p, vbuf); 482 tpg_calc_text_basep(tpg, basep, p, vbuf);
481 if (!is_loop || vivid_copy_buffer(dev, p, vbuf, buf)) 483 if (!is_loop || vivid_copy_buffer(dev, p, vbuf, buf))
482 tpg_fill_plane_buffer(tpg, vivid_get_std_cap(dev), p, vbuf); 484 tpg_fill_plane_buffer(tpg, vivid_get_std_cap(dev),
485 p, vbuf);
483 } 486 }
484 dev->must_blank[buf->vb.v4l2_buf.index] = false; 487 dev->must_blank[buf->vb.vb2_buf.index] = false;
485 488
486 /* Updates stream time, only update at the start of a new frame. */ 489 /* Updates stream time, only update at the start of a new frame. */
487 if (dev->field_cap != V4L2_FIELD_ALTERNATE || (buf->vb.v4l2_buf.sequence & 1) == 0) 490 if (dev->field_cap != V4L2_FIELD_ALTERNATE ||
488 dev->ms_vid_cap = jiffies_to_msecs(jiffies - dev->jiffies_vid_cap); 491 (buf->vb.sequence & 1) == 0)
492 dev->ms_vid_cap =
493 jiffies_to_msecs(jiffies - dev->jiffies_vid_cap);
489 494
490 ms = dev->ms_vid_cap; 495 ms = dev->ms_vid_cap;
491 if (dev->osd_mode <= 1) { 496 if (dev->osd_mode <= 1) {
@@ -494,9 +499,9 @@ static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
494 (ms / (60 * 1000)) % 60, 499 (ms / (60 * 1000)) % 60,
495 (ms / 1000) % 60, 500 (ms / 1000) % 60,
496 ms % 1000, 501 ms % 1000,
497 buf->vb.v4l2_buf.sequence, 502 buf->vb.sequence,
498 (dev->field_cap == V4L2_FIELD_ALTERNATE) ? 503 (dev->field_cap == V4L2_FIELD_ALTERNATE) ?
499 (buf->vb.v4l2_buf.field == V4L2_FIELD_TOP ? 504 (buf->vb.field == V4L2_FIELD_TOP ?
500 " top" : " bottom") : ""); 505 " top" : " bottom") : "");
501 tpg_gen_text(tpg, basep, line++ * line_height, 16, str); 506 tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
502 } 507 }
@@ -553,8 +558,8 @@ static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
553 * the timestamp now. 558 * the timestamp now.
554 */ 559 */
555 if (!dev->tstamp_src_is_soe) 560 if (!dev->tstamp_src_is_soe)
556 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); 561 v4l2_get_timestamp(&buf->vb.timestamp);
557 buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset; 562 buf->vb.timestamp.tv_sec += dev->time_wrap_offset;
558} 563}
559 564
560/* 565/*
@@ -600,7 +605,7 @@ static void vivid_overlay(struct vivid_dev *dev, struct vivid_buffer *buf)
600 struct tpg_data *tpg = &dev->tpg; 605 struct tpg_data *tpg = &dev->tpg;
601 unsigned pixsize = tpg_g_twopixelsize(tpg, 0) / 2; 606 unsigned pixsize = tpg_g_twopixelsize(tpg, 0) / 2;
602 void *vbase = dev->fb_vbase_cap; 607 void *vbase = dev->fb_vbase_cap;
603 void *vbuf = vb2_plane_vaddr(&buf->vb, 0); 608 void *vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
604 unsigned img_width = dev->compose_cap.width; 609 unsigned img_width = dev->compose_cap.width;
605 unsigned img_height = dev->compose_cap.height; 610 unsigned img_height = dev->compose_cap.height;
606 unsigned stride = tpg->bytesperline[0]; 611 unsigned stride = tpg->bytesperline[0];
@@ -616,7 +621,7 @@ static void vivid_overlay(struct vivid_dev *dev, struct vivid_buffer *buf)
616 return; 621 return;
617 if ((dev->overlay_cap_field == V4L2_FIELD_TOP || 622 if ((dev->overlay_cap_field == V4L2_FIELD_TOP ||
618 dev->overlay_cap_field == V4L2_FIELD_BOTTOM) && 623 dev->overlay_cap_field == V4L2_FIELD_BOTTOM) &&
619 dev->overlay_cap_field != buf->vb.v4l2_buf.field) 624 dev->overlay_cap_field != buf->vb.field)
620 return; 625 return;
621 626
622 vbuf += dev->compose_cap.left * pixsize + dev->compose_cap.top * stride; 627 vbuf += dev->compose_cap.left * pixsize + dev->compose_cap.top * stride;
@@ -699,17 +704,17 @@ static void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs)
699 /* Fill buffer */ 704 /* Fill buffer */
700 vivid_fillbuff(dev, vid_cap_buf); 705 vivid_fillbuff(dev, vid_cap_buf);
701 dprintk(dev, 1, "filled buffer %d\n", 706 dprintk(dev, 1, "filled buffer %d\n",
702 vid_cap_buf->vb.v4l2_buf.index); 707 vid_cap_buf->vb.vb2_buf.index);
703 708
704 /* Handle overlay */ 709 /* Handle overlay */
705 if (dev->overlay_cap_owner && dev->fb_cap.base && 710 if (dev->overlay_cap_owner && dev->fb_cap.base &&
706 dev->fb_cap.fmt.pixelformat == dev->fmt_cap->fourcc) 711 dev->fb_cap.fmt.pixelformat == dev->fmt_cap->fourcc)
707 vivid_overlay(dev, vid_cap_buf); 712 vivid_overlay(dev, vid_cap_buf);
708 713
709 vb2_buffer_done(&vid_cap_buf->vb, dev->dqbuf_error ? 714 vb2_buffer_done(&vid_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
710 VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); 715 VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
711 dprintk(dev, 2, "vid_cap buffer %d done\n", 716 dprintk(dev, 2, "vid_cap buffer %d done\n",
712 vid_cap_buf->vb.v4l2_buf.index); 717 vid_cap_buf->vb.vb2_buf.index);
713 } 718 }
714 719
715 if (vbi_cap_buf) { 720 if (vbi_cap_buf) {
@@ -717,10 +722,10 @@ static void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs)
717 vivid_sliced_vbi_cap_process(dev, vbi_cap_buf); 722 vivid_sliced_vbi_cap_process(dev, vbi_cap_buf);
718 else 723 else
719 vivid_raw_vbi_cap_process(dev, vbi_cap_buf); 724 vivid_raw_vbi_cap_process(dev, vbi_cap_buf);
720 vb2_buffer_done(&vbi_cap_buf->vb, dev->dqbuf_error ? 725 vb2_buffer_done(&vbi_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
721 VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); 726 VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
722 dprintk(dev, 2, "vbi_cap %d done\n", 727 dprintk(dev, 2, "vbi_cap %d done\n",
723 vbi_cap_buf->vb.v4l2_buf.index); 728 vbi_cap_buf->vb.vb2_buf.index);
724 } 729 }
725 dev->dqbuf_error = false; 730 dev->dqbuf_error = false;
726 731
@@ -884,9 +889,9 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
884 buf = list_entry(dev->vid_cap_active.next, 889 buf = list_entry(dev->vid_cap_active.next,
885 struct vivid_buffer, list); 890 struct vivid_buffer, list);
886 list_del(&buf->list); 891 list_del(&buf->list);
887 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 892 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
888 dprintk(dev, 2, "vid_cap buffer %d done\n", 893 dprintk(dev, 2, "vid_cap buffer %d done\n",
889 buf->vb.v4l2_buf.index); 894 buf->vb.vb2_buf.index);
890 } 895 }
891 } 896 }
892 897
@@ -897,9 +902,9 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
897 buf = list_entry(dev->vbi_cap_active.next, 902 buf = list_entry(dev->vbi_cap_active.next,
898 struct vivid_buffer, list); 903 struct vivid_buffer, list);
899 list_del(&buf->list); 904 list_del(&buf->list);
900 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 905 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
901 dprintk(dev, 2, "vbi_cap buffer %d done\n", 906 dprintk(dev, 2, "vbi_cap buffer %d done\n",
902 buf->vb.v4l2_buf.index); 907 buf->vb.vb2_buf.index);
903 } 908 }
904 } 909 }
905 910
diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
index d9f36ccd7efb..c2c46dcdbe95 100644
--- a/drivers/media/platform/vivid/vivid-kthread-out.c
+++ b/drivers/media/platform/vivid/vivid-kthread-out.c
@@ -87,33 +87,33 @@ static void vivid_thread_vid_out_tick(struct vivid_dev *dev)
87 return; 87 return;
88 88
89 if (vid_out_buf) { 89 if (vid_out_buf) {
90 vid_out_buf->vb.v4l2_buf.sequence = dev->vid_out_seq_count; 90 vid_out_buf->vb.sequence = dev->vid_out_seq_count;
91 if (dev->field_out == V4L2_FIELD_ALTERNATE) { 91 if (dev->field_out == V4L2_FIELD_ALTERNATE) {
92 /* 92 /*
93 * The sequence counter counts frames, not fields. So divide 93 * The sequence counter counts frames, not fields.
94 * by two. 94 * So divide by two.
95 */ 95 */
96 vid_out_buf->vb.v4l2_buf.sequence /= 2; 96 vid_out_buf->vb.sequence /= 2;
97 } 97 }
98 v4l2_get_timestamp(&vid_out_buf->vb.v4l2_buf.timestamp); 98 v4l2_get_timestamp(&vid_out_buf->vb.timestamp);
99 vid_out_buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset; 99 vid_out_buf->vb.timestamp.tv_sec += dev->time_wrap_offset;
100 vb2_buffer_done(&vid_out_buf->vb, dev->dqbuf_error ? 100 vb2_buffer_done(&vid_out_buf->vb.vb2_buf, dev->dqbuf_error ?
101 VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); 101 VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
102 dprintk(dev, 2, "vid_out buffer %d done\n", 102 dprintk(dev, 2, "vid_out buffer %d done\n",
103 vid_out_buf->vb.v4l2_buf.index); 103 vid_out_buf->vb.vb2_buf.index);
104 } 104 }
105 105
106 if (vbi_out_buf) { 106 if (vbi_out_buf) {
107 if (dev->stream_sliced_vbi_out) 107 if (dev->stream_sliced_vbi_out)
108 vivid_sliced_vbi_out_process(dev, vbi_out_buf); 108 vivid_sliced_vbi_out_process(dev, vbi_out_buf);
109 109
110 vbi_out_buf->vb.v4l2_buf.sequence = dev->vbi_out_seq_count; 110 vbi_out_buf->vb.sequence = dev->vbi_out_seq_count;
111 v4l2_get_timestamp(&vbi_out_buf->vb.v4l2_buf.timestamp); 111 v4l2_get_timestamp(&vbi_out_buf->vb.timestamp);
112 vbi_out_buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset; 112 vbi_out_buf->vb.timestamp.tv_sec += dev->time_wrap_offset;
113 vb2_buffer_done(&vbi_out_buf->vb, dev->dqbuf_error ? 113 vb2_buffer_done(&vbi_out_buf->vb.vb2_buf, dev->dqbuf_error ?
114 VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); 114 VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
115 dprintk(dev, 2, "vbi_out buffer %d done\n", 115 dprintk(dev, 2, "vbi_out buffer %d done\n",
116 vbi_out_buf->vb.v4l2_buf.index); 116 vbi_out_buf->vb.vb2_buf.index);
117 } 117 }
118 dev->dqbuf_error = false; 118 dev->dqbuf_error = false;
119} 119}
@@ -274,9 +274,9 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
274 buf = list_entry(dev->vid_out_active.next, 274 buf = list_entry(dev->vid_out_active.next,
275 struct vivid_buffer, list); 275 struct vivid_buffer, list);
276 list_del(&buf->list); 276 list_del(&buf->list);
277 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 277 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
278 dprintk(dev, 2, "vid_out buffer %d done\n", 278 dprintk(dev, 2, "vid_out buffer %d done\n",
279 buf->vb.v4l2_buf.index); 279 buf->vb.vb2_buf.index);
280 } 280 }
281 } 281 }
282 282
@@ -287,9 +287,9 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
287 buf = list_entry(dev->vbi_out_active.next, 287 buf = list_entry(dev->vbi_out_active.next,
288 struct vivid_buffer, list); 288 struct vivid_buffer, list);
289 list_del(&buf->list); 289 list_del(&buf->list);
290 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 290 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
291 dprintk(dev, 2, "vbi_out buffer %d done\n", 291 dprintk(dev, 2, "vbi_out buffer %d done\n",
292 buf->vb.v4l2_buf.index); 292 buf->vb.vb2_buf.index);
293 } 293 }
294 } 294 }
295 295
diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c
index d0c26577c06e..536a6259b847 100644
--- a/drivers/media/platform/vivid/vivid-sdr-cap.c
+++ b/drivers/media/platform/vivid/vivid-sdr-cap.c
@@ -115,11 +115,11 @@ static void vivid_thread_sdr_cap_tick(struct vivid_dev *dev)
115 spin_unlock(&dev->slock); 115 spin_unlock(&dev->slock);
116 116
117 if (sdr_cap_buf) { 117 if (sdr_cap_buf) {
118 sdr_cap_buf->vb.v4l2_buf.sequence = dev->sdr_cap_seq_count; 118 sdr_cap_buf->vb.sequence = dev->sdr_cap_seq_count;
119 vivid_sdr_cap_process(dev, sdr_cap_buf); 119 vivid_sdr_cap_process(dev, sdr_cap_buf);
120 v4l2_get_timestamp(&sdr_cap_buf->vb.v4l2_buf.timestamp); 120 v4l2_get_timestamp(&sdr_cap_buf->vb.timestamp);
121 sdr_cap_buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset; 121 sdr_cap_buf->vb.timestamp.tv_sec += dev->time_wrap_offset;
122 vb2_buffer_done(&sdr_cap_buf->vb, dev->dqbuf_error ? 122 vb2_buffer_done(&sdr_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
123 VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); 123 VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
124 dev->dqbuf_error = false; 124 dev->dqbuf_error = false;
125 } 125 }
@@ -162,7 +162,8 @@ static int vivid_thread_sdr_cap(void *data)
162 /* Calculate the number of jiffies since we started streaming */ 162 /* Calculate the number of jiffies since we started streaming */
163 jiffies_since_start = cur_jiffies - dev->jiffies_sdr_cap; 163 jiffies_since_start = cur_jiffies - dev->jiffies_sdr_cap;
164 /* Get the number of buffers streamed since the start */ 164 /* Get the number of buffers streamed since the start */
165 buffers_since_start = (u64)jiffies_since_start * dev->sdr_adc_freq + 165 buffers_since_start =
166 (u64)jiffies_since_start * dev->sdr_adc_freq +
166 (HZ * SDR_CAP_SAMPLES_PER_BUF) / 2; 167 (HZ * SDR_CAP_SAMPLES_PER_BUF) / 2;
167 do_div(buffers_since_start, HZ * SDR_CAP_SAMPLES_PER_BUF); 168 do_div(buffers_since_start, HZ * SDR_CAP_SAMPLES_PER_BUF);
168 169
@@ -177,7 +178,8 @@ static int vivid_thread_sdr_cap(void *data)
177 dev->sdr_cap_seq_offset = buffers_since_start; 178 dev->sdr_cap_seq_offset = buffers_since_start;
178 buffers_since_start = 0; 179 buffers_since_start = 0;
179 } 180 }
180 dev->sdr_cap_seq_count = buffers_since_start + dev->sdr_cap_seq_offset; 181 dev->sdr_cap_seq_count =
182 buffers_since_start + dev->sdr_cap_seq_offset;
181 183
182 vivid_thread_sdr_cap_tick(dev); 184 vivid_thread_sdr_cap_tick(dev);
183 mutex_unlock(&dev->mutex); 185 mutex_unlock(&dev->mutex);
@@ -248,8 +250,9 @@ static int sdr_cap_buf_prepare(struct vb2_buffer *vb)
248 250
249static void sdr_cap_buf_queue(struct vb2_buffer *vb) 251static void sdr_cap_buf_queue(struct vb2_buffer *vb)
250{ 252{
253 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
251 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 254 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
252 struct vivid_buffer *buf = container_of(vb, struct vivid_buffer, vb); 255 struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
253 256
254 dprintk(dev, 1, "%s\n", __func__); 257 dprintk(dev, 1, "%s\n", __func__);
255 258
@@ -283,7 +286,8 @@ static int sdr_cap_start_streaming(struct vb2_queue *vq, unsigned count)
283 286
284 list_for_each_entry_safe(buf, tmp, &dev->sdr_cap_active, list) { 287 list_for_each_entry_safe(buf, tmp, &dev->sdr_cap_active, list) {
285 list_del(&buf->list); 288 list_del(&buf->list);
286 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); 289 vb2_buffer_done(&buf->vb.vb2_buf,
290 VB2_BUF_STATE_QUEUED);
287 } 291 }
288 } 292 }
289 return err; 293 return err;
@@ -300,9 +304,10 @@ static void sdr_cap_stop_streaming(struct vb2_queue *vq)
300 while (!list_empty(&dev->sdr_cap_active)) { 304 while (!list_empty(&dev->sdr_cap_active)) {
301 struct vivid_buffer *buf; 305 struct vivid_buffer *buf;
302 306
303 buf = list_entry(dev->sdr_cap_active.next, struct vivid_buffer, list); 307 buf = list_entry(dev->sdr_cap_active.next,
308 struct vivid_buffer, list);
304 list_del(&buf->list); 309 list_del(&buf->list);
305 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 310 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
306 } 311 }
307 312
308 /* shutdown control thread */ 313 /* shutdown control thread */
@@ -322,7 +327,8 @@ const struct vb2_ops vivid_sdr_cap_qops = {
322 .wait_finish = vb2_ops_wait_finish, 327 .wait_finish = vb2_ops_wait_finish,
323}; 328};
324 329
325int vivid_sdr_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band) 330int vivid_sdr_enum_freq_bands(struct file *file, void *fh,
331 struct v4l2_frequency_band *band)
326{ 332{
327 switch (band->tuner) { 333 switch (band->tuner) {
328 case 0: 334 case 0:
@@ -340,7 +346,8 @@ int vivid_sdr_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency
340 } 346 }
341} 347}
342 348
343int vivid_sdr_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf) 349int vivid_sdr_g_frequency(struct file *file, void *fh,
350 struct v4l2_frequency *vf)
344{ 351{
345 struct vivid_dev *dev = video_drvdata(file); 352 struct vivid_dev *dev = video_drvdata(file);
346 353
@@ -358,7 +365,8 @@ int vivid_sdr_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf
358 } 365 }
359} 366}
360 367
361int vivid_sdr_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf) 368int vivid_sdr_s_frequency(struct file *file, void *fh,
369 const struct v4l2_frequency *vf)
362{ 370{
363 struct vivid_dev *dev = video_drvdata(file); 371 struct vivid_dev *dev = video_drvdata(file);
364 unsigned freq = vf->frequency; 372 unsigned freq = vf->frequency;
@@ -404,14 +412,16 @@ int vivid_sdr_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
404 case 0: 412 case 0:
405 strlcpy(vt->name, "ADC", sizeof(vt->name)); 413 strlcpy(vt->name, "ADC", sizeof(vt->name));
406 vt->type = V4L2_TUNER_ADC; 414 vt->type = V4L2_TUNER_ADC;
407 vt->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS; 415 vt->capability =
416 V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
408 vt->rangelow = bands_adc[0].rangelow; 417 vt->rangelow = bands_adc[0].rangelow;
409 vt->rangehigh = bands_adc[2].rangehigh; 418 vt->rangehigh = bands_adc[2].rangehigh;
410 return 0; 419 return 0;
411 case 1: 420 case 1:
412 strlcpy(vt->name, "RF", sizeof(vt->name)); 421 strlcpy(vt->name, "RF", sizeof(vt->name));
413 vt->type = V4L2_TUNER_RF; 422 vt->type = V4L2_TUNER_RF;
414 vt->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS; 423 vt->capability =
424 V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
415 vt->rangelow = bands_fm[0].rangelow; 425 vt->rangelow = bands_fm[0].rangelow;
416 vt->rangehigh = bands_fm[0].rangehigh; 426 vt->rangehigh = bands_fm[0].rangehigh;
417 return 0; 427 return 0;
@@ -493,9 +503,9 @@ int vidioc_try_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f)
493 503
494void vivid_sdr_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf) 504void vivid_sdr_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf)
495{ 505{
496 u8 *vbuf = vb2_plane_vaddr(&buf->vb, 0); 506 u8 *vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
497 unsigned long i; 507 unsigned long i;
498 unsigned long plane_size = vb2_plane_size(&buf->vb, 0); 508 unsigned long plane_size = vb2_plane_size(&buf->vb.vb2_buf, 0);
499 s64 s64tmp; 509 s64 s64tmp;
500 s32 src_phase_step; 510 s32 src_phase_step;
501 s32 mod_phase_step; 511 s32 mod_phase_step;
diff --git a/drivers/media/platform/vivid/vivid-vbi-cap.c b/drivers/media/platform/vivid/vivid-vbi-cap.c
index ef81b01b53d2..29931497fa0f 100644
--- a/drivers/media/platform/vivid/vivid-vbi-cap.c
+++ b/drivers/media/platform/vivid/vivid-vbi-cap.c
@@ -94,36 +94,38 @@ static void vivid_g_fmt_vbi_cap(struct vivid_dev *dev, struct v4l2_vbi_format *v
94void vivid_raw_vbi_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf) 94void vivid_raw_vbi_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf)
95{ 95{
96 struct v4l2_vbi_format vbi; 96 struct v4l2_vbi_format vbi;
97 u8 *vbuf = vb2_plane_vaddr(&buf->vb, 0); 97 u8 *vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
98 98
99 vivid_g_fmt_vbi_cap(dev, &vbi); 99 vivid_g_fmt_vbi_cap(dev, &vbi);
100 buf->vb.v4l2_buf.sequence = dev->vbi_cap_seq_count; 100 buf->vb.sequence = dev->vbi_cap_seq_count;
101 if (dev->field_cap == V4L2_FIELD_ALTERNATE) 101 if (dev->field_cap == V4L2_FIELD_ALTERNATE)
102 buf->vb.v4l2_buf.sequence /= 2; 102 buf->vb.sequence /= 2;
103 103
104 vivid_sliced_vbi_cap_fill(dev, buf->vb.v4l2_buf.sequence); 104 vivid_sliced_vbi_cap_fill(dev, buf->vb.sequence);
105 105
106 memset(vbuf, 0x10, vb2_plane_size(&buf->vb, 0)); 106 memset(vbuf, 0x10, vb2_plane_size(&buf->vb.vb2_buf, 0));
107 107
108 if (!VIVID_INVALID_SIGNAL(dev->std_signal_mode)) 108 if (!VIVID_INVALID_SIGNAL(dev->std_signal_mode))
109 vivid_vbi_gen_raw(&dev->vbi_gen, &vbi, vbuf); 109 vivid_vbi_gen_raw(&dev->vbi_gen, &vbi, vbuf);
110 110
111 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); 111 v4l2_get_timestamp(&buf->vb.timestamp);
112 buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset; 112 buf->vb.timestamp.tv_sec += dev->time_wrap_offset;
113} 113}
114 114
115 115
116void vivid_sliced_vbi_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf) 116void vivid_sliced_vbi_cap_process(struct vivid_dev *dev,
117 struct vivid_buffer *buf)
117{ 118{
118 struct v4l2_sliced_vbi_data *vbuf = vb2_plane_vaddr(&buf->vb, 0); 119 struct v4l2_sliced_vbi_data *vbuf =
120 vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
119 121
120 buf->vb.v4l2_buf.sequence = dev->vbi_cap_seq_count; 122 buf->vb.sequence = dev->vbi_cap_seq_count;
121 if (dev->field_cap == V4L2_FIELD_ALTERNATE) 123 if (dev->field_cap == V4L2_FIELD_ALTERNATE)
122 buf->vb.v4l2_buf.sequence /= 2; 124 buf->vb.sequence /= 2;
123 125
124 vivid_sliced_vbi_cap_fill(dev, buf->vb.v4l2_buf.sequence); 126 vivid_sliced_vbi_cap_fill(dev, buf->vb.sequence);
125 127
126 memset(vbuf, 0, vb2_plane_size(&buf->vb, 0)); 128 memset(vbuf, 0, vb2_plane_size(&buf->vb.vb2_buf, 0));
127 if (!VIVID_INVALID_SIGNAL(dev->std_signal_mode)) { 129 if (!VIVID_INVALID_SIGNAL(dev->std_signal_mode)) {
128 unsigned i; 130 unsigned i;
129 131
@@ -131,13 +133,14 @@ void vivid_sliced_vbi_cap_process(struct vivid_dev *dev, struct vivid_buffer *bu
131 vbuf[i] = dev->vbi_gen.data[i]; 133 vbuf[i] = dev->vbi_gen.data[i];
132 } 134 }
133 135
134 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); 136 v4l2_get_timestamp(&buf->vb.timestamp);
135 buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset; 137 buf->vb.timestamp.tv_sec += dev->time_wrap_offset;
136} 138}
137 139
138static int vbi_cap_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt, 140static int vbi_cap_queue_setup(struct vb2_queue *vq,
139 unsigned *nbuffers, unsigned *nplanes, 141 const struct v4l2_format *fmt,
140 unsigned sizes[], void *alloc_ctxs[]) 142 unsigned *nbuffers, unsigned *nplanes,
143 unsigned sizes[], void *alloc_ctxs[])
141{ 144{
142 struct vivid_dev *dev = vb2_get_drv_priv(vq); 145 struct vivid_dev *dev = vb2_get_drv_priv(vq);
143 bool is_60hz = dev->std_cap & V4L2_STD_525_60; 146 bool is_60hz = dev->std_cap & V4L2_STD_525_60;
@@ -187,8 +190,9 @@ static int vbi_cap_buf_prepare(struct vb2_buffer *vb)
187 190
188static void vbi_cap_buf_queue(struct vb2_buffer *vb) 191static void vbi_cap_buf_queue(struct vb2_buffer *vb)
189{ 192{
193 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
190 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 194 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
191 struct vivid_buffer *buf = container_of(vb, struct vivid_buffer, vb); 195 struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
192 196
193 dprintk(dev, 1, "%s\n", __func__); 197 dprintk(dev, 1, "%s\n", __func__);
194 198
@@ -215,7 +219,8 @@ static int vbi_cap_start_streaming(struct vb2_queue *vq, unsigned count)
215 219
216 list_for_each_entry_safe(buf, tmp, &dev->vbi_cap_active, list) { 220 list_for_each_entry_safe(buf, tmp, &dev->vbi_cap_active, list) {
217 list_del(&buf->list); 221 list_del(&buf->list);
218 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); 222 vb2_buffer_done(&buf->vb.vb2_buf,
223 VB2_BUF_STATE_QUEUED);
219 } 224 }
220 } 225 }
221 return err; 226 return err;
diff --git a/drivers/media/platform/vivid/vivid-vbi-out.c b/drivers/media/platform/vivid/vivid-vbi-out.c
index 4e4c70e1e04a..91c168841477 100644
--- a/drivers/media/platform/vivid/vivid-vbi-out.c
+++ b/drivers/media/platform/vivid/vivid-vbi-out.c
@@ -79,8 +79,9 @@ static int vbi_out_buf_prepare(struct vb2_buffer *vb)
79 79
80static void vbi_out_buf_queue(struct vb2_buffer *vb) 80static void vbi_out_buf_queue(struct vb2_buffer *vb)
81{ 81{
82 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
82 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 83 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
83 struct vivid_buffer *buf = container_of(vb, struct vivid_buffer, vb); 84 struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
84 85
85 dprintk(dev, 1, "%s\n", __func__); 86 dprintk(dev, 1, "%s\n", __func__);
86 87
@@ -107,7 +108,8 @@ static int vbi_out_start_streaming(struct vb2_queue *vq, unsigned count)
107 108
108 list_for_each_entry_safe(buf, tmp, &dev->vbi_out_active, list) { 109 list_for_each_entry_safe(buf, tmp, &dev->vbi_out_active, list) {
109 list_del(&buf->list); 110 list_del(&buf->list);
110 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); 111 vb2_buffer_done(&buf->vb.vb2_buf,
112 VB2_BUF_STATE_QUEUED);
111 } 113 }
112 } 114 }
113 return err; 115 return err;
@@ -201,7 +203,8 @@ int vidioc_try_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_forma
201 return 0; 203 return 0;
202} 204}
203 205
204int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt) 206int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *fh,
207 struct v4l2_format *fmt)
205{ 208{
206 struct vivid_dev *dev = video_drvdata(file); 209 struct vivid_dev *dev = video_drvdata(file);
207 struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced; 210 struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
@@ -217,10 +220,13 @@ int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format
217 return 0; 220 return 0;
218} 221}
219 222
220void vivid_sliced_vbi_out_process(struct vivid_dev *dev, struct vivid_buffer *buf) 223void vivid_sliced_vbi_out_process(struct vivid_dev *dev,
224 struct vivid_buffer *buf)
221{ 225{
222 struct v4l2_sliced_vbi_data *vbi = vb2_plane_vaddr(&buf->vb, 0); 226 struct v4l2_sliced_vbi_data *vbi =
223 unsigned elems = vb2_get_plane_payload(&buf->vb, 0) / sizeof(*vbi); 227 vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
228 unsigned elems =
229 vb2_get_plane_payload(&buf->vb.vb2_buf, 0) / sizeof(*vbi);
224 230
225 dev->vbi_out_have_cc[0] = false; 231 dev->vbi_out_have_cc[0] = false;
226 dev->vbi_out_have_cc[1] = false; 232 dev->vbi_out_have_cc[1] = false;
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index ed0b8788a66f..2497107e3b98 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -198,7 +198,7 @@ static int vid_cap_buf_prepare(struct vb2_buffer *vb)
198 } 198 }
199 199
200 vb2_set_plane_payload(vb, p, size); 200 vb2_set_plane_payload(vb, p, size);
201 vb->v4l2_planes[p].data_offset = dev->fmt_cap->data_offset[p]; 201 vb->planes[p].data_offset = dev->fmt_cap->data_offset[p];
202 } 202 }
203 203
204 return 0; 204 return 0;
@@ -206,10 +206,11 @@ static int vid_cap_buf_prepare(struct vb2_buffer *vb)
206 206
207static void vid_cap_buf_finish(struct vb2_buffer *vb) 207static void vid_cap_buf_finish(struct vb2_buffer *vb)
208{ 208{
209 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
209 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 210 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
210 struct v4l2_timecode *tc = &vb->v4l2_buf.timecode; 211 struct v4l2_timecode *tc = &vbuf->timecode;
211 unsigned fps = 25; 212 unsigned fps = 25;
212 unsigned seq = vb->v4l2_buf.sequence; 213 unsigned seq = vbuf->sequence;
213 214
214 if (!vivid_is_sdtv_cap(dev)) 215 if (!vivid_is_sdtv_cap(dev))
215 return; 216 return;
@@ -218,7 +219,7 @@ static void vid_cap_buf_finish(struct vb2_buffer *vb)
218 * Set the timecode. Rarely used, so it is interesting to 219 * Set the timecode. Rarely used, so it is interesting to
219 * test this. 220 * test this.
220 */ 221 */
221 vb->v4l2_buf.flags |= V4L2_BUF_FLAG_TIMECODE; 222 vbuf->flags |= V4L2_BUF_FLAG_TIMECODE;
222 if (dev->std_cap & V4L2_STD_525_60) 223 if (dev->std_cap & V4L2_STD_525_60)
223 fps = 30; 224 fps = 30;
224 tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS; 225 tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS;
@@ -231,8 +232,9 @@ static void vid_cap_buf_finish(struct vb2_buffer *vb)
231 232
232static void vid_cap_buf_queue(struct vb2_buffer *vb) 233static void vid_cap_buf_queue(struct vb2_buffer *vb)
233{ 234{
235 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
234 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 236 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
235 struct vivid_buffer *buf = container_of(vb, struct vivid_buffer, vb); 237 struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
236 238
237 dprintk(dev, 1, "%s\n", __func__); 239 dprintk(dev, 1, "%s\n", __func__);
238 240
@@ -268,7 +270,8 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
268 270
269 list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) { 271 list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
270 list_del(&buf->list); 272 list_del(&buf->list);
271 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); 273 vb2_buffer_done(&buf->vb.vb2_buf,
274 VB2_BUF_STATE_QUEUED);
272 } 275 }
273 } 276 }
274 return err; 277 return err;
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index c404e275eae0..376f865f90b9 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -109,6 +109,7 @@ static int vid_out_queue_setup(struct vb2_queue *vq, const struct v4l2_format *f
109 109
110static int vid_out_buf_prepare(struct vb2_buffer *vb) 110static int vid_out_buf_prepare(struct vb2_buffer *vb)
111{ 111{
112 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
112 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 113 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
113 unsigned long size; 114 unsigned long size;
114 unsigned planes; 115 unsigned planes;
@@ -131,14 +132,14 @@ static int vid_out_buf_prepare(struct vb2_buffer *vb)
131 } 132 }
132 133
133 if (dev->field_out != V4L2_FIELD_ALTERNATE) 134 if (dev->field_out != V4L2_FIELD_ALTERNATE)
134 vb->v4l2_buf.field = dev->field_out; 135 vbuf->field = dev->field_out;
135 else if (vb->v4l2_buf.field != V4L2_FIELD_TOP && 136 else if (vbuf->field != V4L2_FIELD_TOP &&
136 vb->v4l2_buf.field != V4L2_FIELD_BOTTOM) 137 vbuf->field != V4L2_FIELD_BOTTOM)
137 return -EINVAL; 138 return -EINVAL;
138 139
139 for (p = 0; p < planes; p++) { 140 for (p = 0; p < planes; p++) {
140 size = dev->bytesperline_out[p] * dev->fmt_out_rect.height + 141 size = dev->bytesperline_out[p] * dev->fmt_out_rect.height +
141 vb->v4l2_planes[p].data_offset; 142 vb->planes[p].data_offset;
142 143
143 if (vb2_get_plane_payload(vb, p) < size) { 144 if (vb2_get_plane_payload(vb, p) < size) {
144 dprintk(dev, 1, "%s the payload is too small for plane %u (%lu < %lu)\n", 145 dprintk(dev, 1, "%s the payload is too small for plane %u (%lu < %lu)\n",
@@ -152,8 +153,9 @@ static int vid_out_buf_prepare(struct vb2_buffer *vb)
152 153
153static void vid_out_buf_queue(struct vb2_buffer *vb) 154static void vid_out_buf_queue(struct vb2_buffer *vb)
154{ 155{
156 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
155 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 157 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
156 struct vivid_buffer *buf = container_of(vb, struct vivid_buffer, vb); 158 struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
157 159
158 dprintk(dev, 1, "%s\n", __func__); 160 dprintk(dev, 1, "%s\n", __func__);
159 161
@@ -186,7 +188,8 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count)
186 188
187 list_for_each_entry_safe(buf, tmp, &dev->vid_out_active, list) { 189 list_for_each_entry_safe(buf, tmp, &dev->vid_out_active, list) {
188 list_del(&buf->list); 190 list_del(&buf->list);
189 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); 191 vb2_buffer_done(&buf->vb.vb2_buf,
192 VB2_BUF_STATE_QUEUED);
190 } 193 }
191 } 194 }
192 return err; 195 return err;
diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c
index 3294529a3108..cd5248a9a271 100644
--- a/drivers/media/platform/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rpf.c
@@ -200,10 +200,10 @@ static void rpf_vdev_queue(struct vsp1_video *video,
200 200
201 vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_Y, 201 vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_Y,
202 buf->addr[0] + rpf->offsets[0]); 202 buf->addr[0] + rpf->offsets[0]);
203 if (buf->buf.num_planes > 1) 203 if (buf->buf.vb2_buf.num_planes > 1)
204 vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C0, 204 vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C0,
205 buf->addr[1] + rpf->offsets[1]); 205 buf->addr[1] + rpf->offsets[1]);
206 if (buf->buf.num_planes > 2) 206 if (buf->buf.vb2_buf.num_planes > 2)
207 vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C1, 207 vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C1,
208 buf->addr[2] + rpf->offsets[1]); 208 buf->addr[2] + rpf->offsets[1]);
209} 209}
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index dfd45c74c2ae..13e4fdcd4db0 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -610,11 +610,11 @@ vsp1_video_complete_buffer(struct vsp1_video *video)
610 610
611 spin_unlock_irqrestore(&video->irqlock, flags); 611 spin_unlock_irqrestore(&video->irqlock, flags);
612 612
613 done->buf.v4l2_buf.sequence = video->sequence++; 613 done->buf.sequence = video->sequence++;
614 v4l2_get_timestamp(&done->buf.v4l2_buf.timestamp); 614 v4l2_get_timestamp(&done->buf.timestamp);
615 for (i = 0; i < done->buf.num_planes; ++i) 615 for (i = 0; i < done->buf.vb2_buf.num_planes; ++i)
616 vb2_set_plane_payload(&done->buf, i, done->length[i]); 616 vb2_set_plane_payload(&done->buf.vb2_buf, i, done->length[i]);
617 vb2_buffer_done(&done->buf, VB2_BUF_STATE_DONE); 617 vb2_buffer_done(&done->buf.vb2_buf, VB2_BUF_STATE_DONE);
618 618
619 return next; 619 return next;
620} 620}
@@ -820,8 +820,9 @@ vsp1_video_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
820 820
821static int vsp1_video_buffer_prepare(struct vb2_buffer *vb) 821static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
822{ 822{
823 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
823 struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue); 824 struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
824 struct vsp1_video_buffer *buf = to_vsp1_video_buffer(vb); 825 struct vsp1_video_buffer *buf = to_vsp1_video_buffer(vbuf);
825 const struct v4l2_pix_format_mplane *format = &video->format; 826 const struct v4l2_pix_format_mplane *format = &video->format;
826 unsigned int i; 827 unsigned int i;
827 828
@@ -841,9 +842,10 @@ static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
841 842
842static void vsp1_video_buffer_queue(struct vb2_buffer *vb) 843static void vsp1_video_buffer_queue(struct vb2_buffer *vb)
843{ 844{
845 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
844 struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue); 846 struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
845 struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity); 847 struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
846 struct vsp1_video_buffer *buf = to_vsp1_video_buffer(vb); 848 struct vsp1_video_buffer *buf = to_vsp1_video_buffer(vbuf);
847 unsigned long flags; 849 unsigned long flags;
848 bool empty; 850 bool empty;
849 851
@@ -954,7 +956,7 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq)
954 /* Remove all buffers from the IRQ queue. */ 956 /* Remove all buffers from the IRQ queue. */
955 spin_lock_irqsave(&video->irqlock, flags); 957 spin_lock_irqsave(&video->irqlock, flags);
956 list_for_each_entry(buffer, &video->irqqueue, queue) 958 list_for_each_entry(buffer, &video->irqqueue, queue)
957 vb2_buffer_done(&buffer->buf, VB2_BUF_STATE_ERROR); 959 vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR);
958 INIT_LIST_HEAD(&video->irqqueue); 960 INIT_LIST_HEAD(&video->irqqueue);
959 spin_unlock_irqrestore(&video->irqlock, flags); 961 spin_unlock_irqrestore(&video->irqlock, flags);
960} 962}
diff --git a/drivers/media/platform/vsp1/vsp1_video.h b/drivers/media/platform/vsp1/vsp1_video.h
index d808301a071f..a929aa81cdbf 100644
--- a/drivers/media/platform/vsp1/vsp1_video.h
+++ b/drivers/media/platform/vsp1/vsp1_video.h
@@ -94,7 +94,7 @@ static inline struct vsp1_pipeline *to_vsp1_pipeline(struct media_entity *e)
94} 94}
95 95
96struct vsp1_video_buffer { 96struct vsp1_video_buffer {
97 struct vb2_buffer buf; 97 struct vb2_v4l2_buffer buf;
98 struct list_head queue; 98 struct list_head queue;
99 99
100 dma_addr_t addr[3]; 100 dma_addr_t addr[3];
@@ -102,9 +102,9 @@ struct vsp1_video_buffer {
102}; 102};
103 103
104static inline struct vsp1_video_buffer * 104static inline struct vsp1_video_buffer *
105to_vsp1_video_buffer(struct vb2_buffer *vb) 105to_vsp1_video_buffer(struct vb2_v4l2_buffer *vbuf)
106{ 106{
107 return container_of(vb, struct vsp1_video_buffer, buf); 107 return container_of(vbuf, struct vsp1_video_buffer, buf);
108} 108}
109 109
110struct vsp1_video_operations { 110struct vsp1_video_operations {
diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
index 1d2b3a2f1573..95b62f4f77e7 100644
--- a/drivers/media/platform/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/vsp1/vsp1_wpf.c
@@ -201,9 +201,9 @@ static void wpf_vdev_queue(struct vsp1_video *video,
201 struct vsp1_rwpf *wpf = container_of(video, struct vsp1_rwpf, video); 201 struct vsp1_rwpf *wpf = container_of(video, struct vsp1_rwpf, video);
202 202
203 vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_Y, buf->addr[0]); 203 vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_Y, buf->addr[0]);
204 if (buf->buf.num_planes > 1) 204 if (buf->buf.vb2_buf.num_planes > 1)
205 vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C0, buf->addr[1]); 205 vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C0, buf->addr[1]);
206 if (buf->buf.num_planes > 2) 206 if (buf->buf.vb2_buf.num_planes > 2)
207 vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C1, buf->addr[2]); 207 vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C1, buf->addr[2]);
208} 208}
209 209
diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
index d9dcd4be2792..5af66c20475b 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.c
+++ b/drivers/media/platform/xilinx/xilinx-dma.c
@@ -285,7 +285,7 @@ done:
285 * @dma: DMA channel that uses the buffer 285 * @dma: DMA channel that uses the buffer
286 */ 286 */
287struct xvip_dma_buffer { 287struct xvip_dma_buffer {
288 struct vb2_buffer buf; 288 struct vb2_v4l2_buffer buf;
289 struct list_head queue; 289 struct list_head queue;
290 struct xvip_dma *dma; 290 struct xvip_dma *dma;
291}; 291};
@@ -301,11 +301,11 @@ static void xvip_dma_complete(void *param)
301 list_del(&buf->queue); 301 list_del(&buf->queue);
302 spin_unlock(&dma->queued_lock); 302 spin_unlock(&dma->queued_lock);
303 303
304 buf->buf.v4l2_buf.field = V4L2_FIELD_NONE; 304 buf->buf.field = V4L2_FIELD_NONE;
305 buf->buf.v4l2_buf.sequence = dma->sequence++; 305 buf->buf.sequence = dma->sequence++;
306 v4l2_get_timestamp(&buf->buf.v4l2_buf.timestamp); 306 v4l2_get_timestamp(&buf->buf.timestamp);
307 vb2_set_plane_payload(&buf->buf, 0, dma->format.sizeimage); 307 vb2_set_plane_payload(&buf->buf.vb2_buf, 0, dma->format.sizeimage);
308 vb2_buffer_done(&buf->buf, VB2_BUF_STATE_DONE); 308 vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
309} 309}
310 310
311static int 311static int
@@ -329,8 +329,9 @@ xvip_dma_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
329 329
330static int xvip_dma_buffer_prepare(struct vb2_buffer *vb) 330static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
331{ 331{
332 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
332 struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue); 333 struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
333 struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vb); 334 struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
334 335
335 buf->dma = dma; 336 buf->dma = dma;
336 337
@@ -339,8 +340,9 @@ static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
339 340
340static void xvip_dma_buffer_queue(struct vb2_buffer *vb) 341static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
341{ 342{
343 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
342 struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue); 344 struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
343 struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vb); 345 struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
344 struct dma_async_tx_descriptor *desc; 346 struct dma_async_tx_descriptor *desc;
345 dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0); 347 dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
346 u32 flags; 348 u32 flags;
@@ -367,7 +369,7 @@ static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
367 desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags); 369 desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
368 if (!desc) { 370 if (!desc) {
369 dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n"); 371 dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
370 vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR); 372 vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
371 return; 373 return;
372 } 374 }
373 desc->callback = xvip_dma_complete; 375 desc->callback = xvip_dma_complete;
@@ -434,7 +436,7 @@ error:
434 /* Give back all queued buffers to videobuf2. */ 436 /* Give back all queued buffers to videobuf2. */
435 spin_lock_irq(&dma->queued_lock); 437 spin_lock_irq(&dma->queued_lock);
436 list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) { 438 list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
437 vb2_buffer_done(&buf->buf, VB2_BUF_STATE_QUEUED); 439 vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_QUEUED);
438 list_del(&buf->queue); 440 list_del(&buf->queue);
439 } 441 }
440 spin_unlock_irq(&dma->queued_lock); 442 spin_unlock_irq(&dma->queued_lock);
@@ -461,7 +463,7 @@ static void xvip_dma_stop_streaming(struct vb2_queue *vq)
461 /* Give back all queued buffers to videobuf2. */ 463 /* Give back all queued buffers to videobuf2. */
462 spin_lock_irq(&dma->queued_lock); 464 spin_lock_irq(&dma->queued_lock);
463 list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) { 465 list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
464 vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR); 466 vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
465 list_del(&buf->queue); 467 list_del(&buf->queue);
466 } 468 }
467 spin_unlock_irq(&dma->queued_lock); 469 spin_unlock_irq(&dma->queued_lock);
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index 8f2e1c277c5f..2542af3b94be 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -21,6 +21,7 @@
21#include <media/v4l2-ioctl.h> 21#include <media/v4l2-ioctl.h>
22#include <media/v4l2-ctrls.h> 22#include <media/v4l2-ctrls.h>
23#include <media/v4l2-event.h> 23#include <media/v4l2-event.h>
24#include <media/videobuf2-v4l2.h>
24#include <media/videobuf2-vmalloc.h> 25#include <media/videobuf2-vmalloc.h>
25 26
26/* AirSpy USB API commands (from AirSpy Library) */ 27/* AirSpy USB API commands (from AirSpy Library) */
@@ -97,7 +98,8 @@ static const unsigned int NUM_FORMATS = ARRAY_SIZE(formats);
97 98
98/* intermediate buffers with raw data from the USB device */ 99/* intermediate buffers with raw data from the USB device */
99struct airspy_frame_buf { 100struct airspy_frame_buf {
100 struct vb2_buffer vb; /* common v4l buffer stuff -- must be first */ 101 /* common v4l buffer stuff -- must be first */
102 struct vb2_v4l2_buffer vb;
101 struct list_head list; 103 struct list_head list;
102}; 104};
103 105
@@ -310,13 +312,13 @@ static void airspy_urb_complete(struct urb *urb)
310 } 312 }
311 313
312 /* fill framebuffer */ 314 /* fill framebuffer */
313 ptr = vb2_plane_vaddr(&fbuf->vb, 0); 315 ptr = vb2_plane_vaddr(&fbuf->vb.vb2_buf, 0);
314 len = airspy_convert_stream(s, ptr, urb->transfer_buffer, 316 len = airspy_convert_stream(s, ptr, urb->transfer_buffer,
315 urb->actual_length); 317 urb->actual_length);
316 vb2_set_plane_payload(&fbuf->vb, 0, len); 318 vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0, len);
317 v4l2_get_timestamp(&fbuf->vb.v4l2_buf.timestamp); 319 v4l2_get_timestamp(&fbuf->vb.timestamp);
318 fbuf->vb.v4l2_buf.sequence = s->sequence++; 320 fbuf->vb.sequence = s->sequence++;
319 vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE); 321 vb2_buffer_done(&fbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
320 } 322 }
321skip: 323skip:
322 usb_submit_urb(urb, GFP_ATOMIC); 324 usb_submit_urb(urb, GFP_ATOMIC);
@@ -459,7 +461,7 @@ static void airspy_cleanup_queued_bufs(struct airspy *s)
459 buf = list_entry(s->queued_bufs.next, 461 buf = list_entry(s->queued_bufs.next,
460 struct airspy_frame_buf, list); 462 struct airspy_frame_buf, list);
461 list_del(&buf->list); 463 list_del(&buf->list);
462 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 464 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
463 } 465 }
464 spin_unlock_irqrestore(&s->queued_bufs_lock, flags); 466 spin_unlock_irqrestore(&s->queued_bufs_lock, flags);
465} 467}
@@ -505,14 +507,15 @@ static int airspy_queue_setup(struct vb2_queue *vq,
505 507
506static void airspy_buf_queue(struct vb2_buffer *vb) 508static void airspy_buf_queue(struct vb2_buffer *vb)
507{ 509{
510 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
508 struct airspy *s = vb2_get_drv_priv(vb->vb2_queue); 511 struct airspy *s = vb2_get_drv_priv(vb->vb2_queue);
509 struct airspy_frame_buf *buf = 512 struct airspy_frame_buf *buf =
510 container_of(vb, struct airspy_frame_buf, vb); 513 container_of(vbuf, struct airspy_frame_buf, vb);
511 unsigned long flags; 514 unsigned long flags;
512 515
513 /* Check the device has not disconnected between prep and queuing */ 516 /* Check the device has not disconnected between prep and queuing */
514 if (unlikely(!s->udev)) { 517 if (unlikely(!s->udev)) {
515 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 518 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
516 return; 519 return;
517 } 520 }
518 521
@@ -571,7 +574,8 @@ err_clear_bit:
571 574
572 list_for_each_entry_safe(buf, tmp, &s->queued_bufs, list) { 575 list_for_each_entry_safe(buf, tmp, &s->queued_bufs, list) {
573 list_del(&buf->list); 576 list_del(&buf->list);
574 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); 577 vb2_buffer_done(&buf->vb.vb2_buf,
578 VB2_BUF_STATE_QUEUED);
575 } 579 }
576 } 580 }
577 581
diff --git a/drivers/media/usb/au0828/au0828-vbi.c b/drivers/media/usb/au0828/au0828-vbi.c
index f67247cf1a5a..5ec507e9e66a 100644
--- a/drivers/media/usb/au0828/au0828-vbi.c
+++ b/drivers/media/usb/au0828/au0828-vbi.c
@@ -52,7 +52,6 @@ static int vbi_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
52static int vbi_buffer_prepare(struct vb2_buffer *vb) 52static int vbi_buffer_prepare(struct vb2_buffer *vb)
53{ 53{
54 struct au0828_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 54 struct au0828_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
55 struct au0828_buffer *buf = container_of(vb, struct au0828_buffer, vb);
56 unsigned long size; 55 unsigned long size;
57 56
58 size = dev->vbi_width * dev->vbi_height * 2; 57 size = dev->vbi_width * dev->vbi_height * 2;
@@ -62,7 +61,7 @@ static int vbi_buffer_prepare(struct vb2_buffer *vb)
62 __func__, vb2_plane_size(vb, 0), size); 61 __func__, vb2_plane_size(vb, 0), size);
63 return -EINVAL; 62 return -EINVAL;
64 } 63 }
65 vb2_set_plane_payload(&buf->vb, 0, size); 64 vb2_set_plane_payload(vb, 0, size);
66 65
67 return 0; 66 return 0;
68} 67}
@@ -71,7 +70,9 @@ static void
71vbi_buffer_queue(struct vb2_buffer *vb) 70vbi_buffer_queue(struct vb2_buffer *vb)
72{ 71{
73 struct au0828_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 72 struct au0828_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
74 struct au0828_buffer *buf = container_of(vb, struct au0828_buffer, vb); 73 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
74 struct au0828_buffer *buf =
75 container_of(vbuf, struct au0828_buffer, vb);
75 struct au0828_dmaqueue *vbiq = &dev->vbiq; 76 struct au0828_dmaqueue *vbiq = &dev->vbiq;
76 unsigned long flags = 0; 77 unsigned long flags = 0;
77 78
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 1a362a041ab3..065b9c8d2a8e 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -302,20 +302,20 @@ static inline void buffer_filled(struct au0828_dev *dev,
302 struct au0828_dmaqueue *dma_q, 302 struct au0828_dmaqueue *dma_q,
303 struct au0828_buffer *buf) 303 struct au0828_buffer *buf)
304{ 304{
305 struct vb2_buffer *vb = &buf->vb; 305 struct vb2_v4l2_buffer *vb = &buf->vb;
306 struct vb2_queue *q = vb->vb2_queue; 306 struct vb2_queue *q = vb->vb2_buf.vb2_queue;
307 307
308 /* Advice that buffer was filled */ 308 /* Advice that buffer was filled */
309 au0828_isocdbg("[%p/%d] wakeup\n", buf, buf->top_field); 309 au0828_isocdbg("[%p/%d] wakeup\n", buf, buf->top_field);
310 310
311 if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 311 if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
312 vb->v4l2_buf.sequence = dev->frame_count++; 312 vb->sequence = dev->frame_count++;
313 else 313 else
314 vb->v4l2_buf.sequence = dev->vbi_frame_count++; 314 vb->sequence = dev->vbi_frame_count++;
315 315
316 vb->v4l2_buf.field = V4L2_FIELD_INTERLACED; 316 vb->field = V4L2_FIELD_INTERLACED;
317 v4l2_get_timestamp(&vb->v4l2_buf.timestamp); 317 v4l2_get_timestamp(&vb->timestamp);
318 vb2_buffer_done(vb, VB2_BUF_STATE_DONE); 318 vb2_buffer_done(&vb->vb2_buf, VB2_BUF_STATE_DONE);
319} 319}
320 320
321/* 321/*
@@ -531,11 +531,11 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
531 531
532 buf = dev->isoc_ctl.buf; 532 buf = dev->isoc_ctl.buf;
533 if (buf != NULL) 533 if (buf != NULL)
534 outp = vb2_plane_vaddr(&buf->vb, 0); 534 outp = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
535 535
536 vbi_buf = dev->isoc_ctl.vbi_buf; 536 vbi_buf = dev->isoc_ctl.vbi_buf;
537 if (vbi_buf != NULL) 537 if (vbi_buf != NULL)
538 vbioutp = vb2_plane_vaddr(&vbi_buf->vb, 0); 538 vbioutp = vb2_plane_vaddr(&vbi_buf->vb.vb2_buf, 0);
539 539
540 for (i = 0; i < urb->number_of_packets; i++) { 540 for (i = 0; i < urb->number_of_packets; i++) {
541 int status = urb->iso_frame_desc[i].status; 541 int status = urb->iso_frame_desc[i].status;
@@ -574,7 +574,7 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
574 vbioutp = NULL; 574 vbioutp = NULL;
575 else 575 else
576 vbioutp = vb2_plane_vaddr( 576 vbioutp = vb2_plane_vaddr(
577 &vbi_buf->vb, 0); 577 &vbi_buf->vb.vb2_buf, 0);
578 578
579 /* Video */ 579 /* Video */
580 if (buf != NULL) 580 if (buf != NULL)
@@ -583,7 +583,8 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
583 if (buf == NULL) 583 if (buf == NULL)
584 outp = NULL; 584 outp = NULL;
585 else 585 else
586 outp = vb2_plane_vaddr(&buf->vb, 0); 586 outp = vb2_plane_vaddr(
587 &buf->vb.vb2_buf, 0);
587 588
588 /* As long as isoc traffic is arriving, keep 589 /* As long as isoc traffic is arriving, keep
589 resetting the timer */ 590 resetting the timer */
@@ -658,7 +659,9 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
658static int 659static int
659buffer_prepare(struct vb2_buffer *vb) 660buffer_prepare(struct vb2_buffer *vb)
660{ 661{
661 struct au0828_buffer *buf = container_of(vb, struct au0828_buffer, vb); 662 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
663 struct au0828_buffer *buf = container_of(vbuf,
664 struct au0828_buffer, vb);
662 struct au0828_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 665 struct au0828_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
663 666
664 buf->length = dev->height * dev->bytesperline; 667 buf->length = dev->height * dev->bytesperline;
@@ -668,14 +671,15 @@ buffer_prepare(struct vb2_buffer *vb)
668 __func__, vb2_plane_size(vb, 0), buf->length); 671 __func__, vb2_plane_size(vb, 0), buf->length);
669 return -EINVAL; 672 return -EINVAL;
670 } 673 }
671 vb2_set_plane_payload(&buf->vb, 0, buf->length); 674 vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->length);
672 return 0; 675 return 0;
673} 676}
674 677
675static void 678static void
676buffer_queue(struct vb2_buffer *vb) 679buffer_queue(struct vb2_buffer *vb)
677{ 680{
678 struct au0828_buffer *buf = container_of(vb, 681 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
682 struct au0828_buffer *buf = container_of(vbuf,
679 struct au0828_buffer, 683 struct au0828_buffer,
680 vb); 684 vb);
681 struct au0828_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 685 struct au0828_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
@@ -826,14 +830,15 @@ static void au0828_stop_streaming(struct vb2_queue *vq)
826 830
827 spin_lock_irqsave(&dev->slock, flags); 831 spin_lock_irqsave(&dev->slock, flags);
828 if (dev->isoc_ctl.buf != NULL) { 832 if (dev->isoc_ctl.buf != NULL) {
829 vb2_buffer_done(&dev->isoc_ctl.buf->vb, VB2_BUF_STATE_ERROR); 833 vb2_buffer_done(&dev->isoc_ctl.buf->vb.vb2_buf,
834 VB2_BUF_STATE_ERROR);
830 dev->isoc_ctl.buf = NULL; 835 dev->isoc_ctl.buf = NULL;
831 } 836 }
832 while (!list_empty(&vidq->active)) { 837 while (!list_empty(&vidq->active)) {
833 struct au0828_buffer *buf; 838 struct au0828_buffer *buf;
834 839
835 buf = list_entry(vidq->active.next, struct au0828_buffer, list); 840 buf = list_entry(vidq->active.next, struct au0828_buffer, list);
836 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 841 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
837 list_del(&buf->list); 842 list_del(&buf->list);
838 } 843 }
839 spin_unlock_irqrestore(&dev->slock, flags); 844 spin_unlock_irqrestore(&dev->slock, flags);
@@ -853,7 +858,7 @@ void au0828_stop_vbi_streaming(struct vb2_queue *vq)
853 858
854 spin_lock_irqsave(&dev->slock, flags); 859 spin_lock_irqsave(&dev->slock, flags);
855 if (dev->isoc_ctl.vbi_buf != NULL) { 860 if (dev->isoc_ctl.vbi_buf != NULL) {
856 vb2_buffer_done(&dev->isoc_ctl.vbi_buf->vb, 861 vb2_buffer_done(&dev->isoc_ctl.vbi_buf->vb.vb2_buf,
857 VB2_BUF_STATE_ERROR); 862 VB2_BUF_STATE_ERROR);
858 dev->isoc_ctl.vbi_buf = NULL; 863 dev->isoc_ctl.vbi_buf = NULL;
859 } 864 }
@@ -862,7 +867,7 @@ void au0828_stop_vbi_streaming(struct vb2_queue *vq)
862 867
863 buf = list_entry(vbiq->active.next, struct au0828_buffer, list); 868 buf = list_entry(vbiq->active.next, struct au0828_buffer, list);
864 list_del(&buf->list); 869 list_del(&buf->list);
865 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 870 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
866 } 871 }
867 spin_unlock_irqrestore(&dev->slock, flags); 872 spin_unlock_irqrestore(&dev->slock, flags);
868 873
@@ -911,7 +916,7 @@ static void au0828_vid_buffer_timeout(unsigned long data)
911 916
912 buf = dev->isoc_ctl.buf; 917 buf = dev->isoc_ctl.buf;
913 if (buf != NULL) { 918 if (buf != NULL) {
914 vid_data = vb2_plane_vaddr(&buf->vb, 0); 919 vid_data = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
915 memset(vid_data, 0x00, buf->length); /* Blank green frame */ 920 memset(vid_data, 0x00, buf->length); /* Blank green frame */
916 buffer_filled(dev, dma_q, buf); 921 buffer_filled(dev, dma_q, buf);
917 } 922 }
@@ -935,7 +940,7 @@ static void au0828_vbi_buffer_timeout(unsigned long data)
935 940
936 buf = dev->isoc_ctl.vbi_buf; 941 buf = dev->isoc_ctl.vbi_buf;
937 if (buf != NULL) { 942 if (buf != NULL) {
938 vbi_data = vb2_plane_vaddr(&buf->vb, 0); 943 vbi_data = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
939 memset(vbi_data, 0x00, buf->length); 944 memset(vbi_data, 0x00, buf->length);
940 buffer_filled(dev, dma_q, buf); 945 buffer_filled(dev, dma_q, buf);
941 } 946 }
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index 3b480005ce3b..60b59391ea2a 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -28,6 +28,7 @@
28 28
29/* Analog */ 29/* Analog */
30#include <linux/videodev2.h> 30#include <linux/videodev2.h>
31#include <media/videobuf2-v4l2.h>
31#include <media/videobuf2-vmalloc.h> 32#include <media/videobuf2-vmalloc.h>
32#include <media/v4l2-device.h> 33#include <media/v4l2-device.h>
33#include <media/v4l2-ctrls.h> 34#include <media/v4l2-ctrls.h>
@@ -167,7 +168,7 @@ struct au0828_usb_isoc_ctl {
167/* buffer for one video frame */ 168/* buffer for one video frame */
168struct au0828_buffer { 169struct au0828_buffer {
169 /* common v4l buffer stuff -- must be first */ 170 /* common v4l buffer stuff -- must be first */
170 struct vb2_buffer vb; 171 struct vb2_v4l2_buffer vb;
171 struct list_head list; 172 struct list_head list;
172 173
173 void *mem; 174 void *mem;
diff --git a/drivers/media/usb/em28xx/em28xx-vbi.c b/drivers/media/usb/em28xx/em28xx-vbi.c
index 744e7ed743e1..23a614810166 100644
--- a/drivers/media/usb/em28xx/em28xx-vbi.c
+++ b/drivers/media/usb/em28xx/em28xx-vbi.c
@@ -61,7 +61,6 @@ static int vbi_buffer_prepare(struct vb2_buffer *vb)
61{ 61{
62 struct em28xx *dev = vb2_get_drv_priv(vb->vb2_queue); 62 struct em28xx *dev = vb2_get_drv_priv(vb->vb2_queue);
63 struct em28xx_v4l2 *v4l2 = dev->v4l2; 63 struct em28xx_v4l2 *v4l2 = dev->v4l2;
64 struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb);
65 unsigned long size; 64 unsigned long size;
66 65
67 size = v4l2->vbi_width * v4l2->vbi_height * 2; 66 size = v4l2->vbi_width * v4l2->vbi_height * 2;
@@ -71,7 +70,7 @@ static int vbi_buffer_prepare(struct vb2_buffer *vb)
71 __func__, vb2_plane_size(vb, 0), size); 70 __func__, vb2_plane_size(vb, 0), size);
72 return -EINVAL; 71 return -EINVAL;
73 } 72 }
74 vb2_set_plane_payload(&buf->vb, 0, size); 73 vb2_set_plane_payload(vb, 0, size);
75 74
76 return 0; 75 return 0;
77} 76}
@@ -79,8 +78,10 @@ static int vbi_buffer_prepare(struct vb2_buffer *vb)
79static void 78static void
80vbi_buffer_queue(struct vb2_buffer *vb) 79vbi_buffer_queue(struct vb2_buffer *vb)
81{ 80{
81 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
82 struct em28xx *dev = vb2_get_drv_priv(vb->vb2_queue); 82 struct em28xx *dev = vb2_get_drv_priv(vb->vb2_queue);
83 struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb); 83 struct em28xx_buffer *buf =
84 container_of(vbuf, struct em28xx_buffer, vb);
84 struct em28xx_dmaqueue *vbiq = &dev->vbiq; 85 struct em28xx_dmaqueue *vbiq = &dev->vbiq;
85 unsigned long flags = 0; 86 unsigned long flags = 0;
86 87
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 4397ce5e78df..262e0325297e 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -433,14 +433,14 @@ static inline void finish_buffer(struct em28xx *dev,
433{ 433{
434 em28xx_isocdbg("[%p/%d] wakeup\n", buf, buf->top_field); 434 em28xx_isocdbg("[%p/%d] wakeup\n", buf, buf->top_field);
435 435
436 buf->vb.v4l2_buf.sequence = dev->v4l2->field_count++; 436 buf->vb.sequence = dev->v4l2->field_count++;
437 if (dev->v4l2->progressive) 437 if (dev->v4l2->progressive)
438 buf->vb.v4l2_buf.field = V4L2_FIELD_NONE; 438 buf->vb.field = V4L2_FIELD_NONE;
439 else 439 else
440 buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED; 440 buf->vb.field = V4L2_FIELD_INTERLACED;
441 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); 441 v4l2_get_timestamp(&buf->vb.timestamp);
442 442
443 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE); 443 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
444} 444}
445 445
446/* 446/*
@@ -900,12 +900,12 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
900static int 900static int
901buffer_prepare(struct vb2_buffer *vb) 901buffer_prepare(struct vb2_buffer *vb)
902{ 902{
903 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
903 struct em28xx *dev = vb2_get_drv_priv(vb->vb2_queue); 904 struct em28xx *dev = vb2_get_drv_priv(vb->vb2_queue);
904 struct em28xx_v4l2 *v4l2 = dev->v4l2; 905 struct em28xx_v4l2 *v4l2 = dev->v4l2;
905 struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb);
906 unsigned long size; 906 unsigned long size;
907 907
908 em28xx_videodbg("%s, field=%d\n", __func__, vb->v4l2_buf.field); 908 em28xx_videodbg("%s, field=%d\n", __func__, vbuf->field);
909 909
910 size = (v4l2->width * v4l2->height * v4l2->format->depth + 7) >> 3; 910 size = (v4l2->width * v4l2->height * v4l2->format->depth + 7) >> 3;
911 911
@@ -914,7 +914,7 @@ buffer_prepare(struct vb2_buffer *vb)
914 __func__, vb2_plane_size(vb, 0), size); 914 __func__, vb2_plane_size(vb, 0), size);
915 return -EINVAL; 915 return -EINVAL;
916 } 916 }
917 vb2_set_plane_payload(&buf->vb, 0, size); 917 vb2_set_plane_payload(vb, 0, size);
918 918
919 return 0; 919 return 0;
920} 920}
@@ -924,6 +924,7 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
924 struct em28xx *dev = vb2_get_drv_priv(vq); 924 struct em28xx *dev = vb2_get_drv_priv(vq);
925 struct em28xx_v4l2 *v4l2 = dev->v4l2; 925 struct em28xx_v4l2 *v4l2 = dev->v4l2;
926 struct v4l2_frequency f; 926 struct v4l2_frequency f;
927 struct v4l2_fh *owner;
927 int rc = 0; 928 int rc = 0;
928 929
929 em28xx_videodbg("%s\n", __func__); 930 em28xx_videodbg("%s\n", __func__);
@@ -964,7 +965,8 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
964 /* Ask tuner to go to analog or radio mode */ 965 /* Ask tuner to go to analog or radio mode */
965 memset(&f, 0, sizeof(f)); 966 memset(&f, 0, sizeof(f));
966 f.frequency = v4l2->frequency; 967 f.frequency = v4l2->frequency;
967 if (vq->owner && vq->owner->vdev->vfl_type == VFL_TYPE_RADIO) 968 owner = (struct v4l2_fh *)vq->owner;
969 if (owner && owner->vdev->vfl_type == VFL_TYPE_RADIO)
968 f.type = V4L2_TUNER_RADIO; 970 f.type = V4L2_TUNER_RADIO;
969 else 971 else
970 f.type = V4L2_TUNER_ANALOG_TV; 972 f.type = V4L2_TUNER_ANALOG_TV;
@@ -995,7 +997,8 @@ static void em28xx_stop_streaming(struct vb2_queue *vq)
995 997
996 spin_lock_irqsave(&dev->slock, flags); 998 spin_lock_irqsave(&dev->slock, flags);
997 if (dev->usb_ctl.vid_buf != NULL) { 999 if (dev->usb_ctl.vid_buf != NULL) {
998 vb2_buffer_done(&dev->usb_ctl.vid_buf->vb, VB2_BUF_STATE_ERROR); 1000 vb2_buffer_done(&dev->usb_ctl.vid_buf->vb.vb2_buf,
1001 VB2_BUF_STATE_ERROR);
999 dev->usb_ctl.vid_buf = NULL; 1002 dev->usb_ctl.vid_buf = NULL;
1000 } 1003 }
1001 while (!list_empty(&vidq->active)) { 1004 while (!list_empty(&vidq->active)) {
@@ -1003,7 +1006,7 @@ static void em28xx_stop_streaming(struct vb2_queue *vq)
1003 1006
1004 buf = list_entry(vidq->active.next, struct em28xx_buffer, list); 1007 buf = list_entry(vidq->active.next, struct em28xx_buffer, list);
1005 list_del(&buf->list); 1008 list_del(&buf->list);
1006 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 1009 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1007 } 1010 }
1008 spin_unlock_irqrestore(&dev->slock, flags); 1011 spin_unlock_irqrestore(&dev->slock, flags);
1009} 1012}
@@ -1026,7 +1029,8 @@ void em28xx_stop_vbi_streaming(struct vb2_queue *vq)
1026 1029
1027 spin_lock_irqsave(&dev->slock, flags); 1030 spin_lock_irqsave(&dev->slock, flags);
1028 if (dev->usb_ctl.vbi_buf != NULL) { 1031 if (dev->usb_ctl.vbi_buf != NULL) {
1029 vb2_buffer_done(&dev->usb_ctl.vbi_buf->vb, VB2_BUF_STATE_ERROR); 1032 vb2_buffer_done(&dev->usb_ctl.vbi_buf->vb.vb2_buf,
1033 VB2_BUF_STATE_ERROR);
1030 dev->usb_ctl.vbi_buf = NULL; 1034 dev->usb_ctl.vbi_buf = NULL;
1031 } 1035 }
1032 while (!list_empty(&vbiq->active)) { 1036 while (!list_empty(&vbiq->active)) {
@@ -1034,7 +1038,7 @@ void em28xx_stop_vbi_streaming(struct vb2_queue *vq)
1034 1038
1035 buf = list_entry(vbiq->active.next, struct em28xx_buffer, list); 1039 buf = list_entry(vbiq->active.next, struct em28xx_buffer, list);
1036 list_del(&buf->list); 1040 list_del(&buf->list);
1037 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 1041 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1038 } 1042 }
1039 spin_unlock_irqrestore(&dev->slock, flags); 1043 spin_unlock_irqrestore(&dev->slock, flags);
1040} 1044}
@@ -1042,8 +1046,10 @@ void em28xx_stop_vbi_streaming(struct vb2_queue *vq)
1042static void 1046static void
1043buffer_queue(struct vb2_buffer *vb) 1047buffer_queue(struct vb2_buffer *vb)
1044{ 1048{
1049 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1045 struct em28xx *dev = vb2_get_drv_priv(vb->vb2_queue); 1050 struct em28xx *dev = vb2_get_drv_priv(vb->vb2_queue);
1046 struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb); 1051 struct em28xx_buffer *buf =
1052 container_of(vbuf, struct em28xx_buffer, vb);
1047 struct em28xx_dmaqueue *vidq = &dev->vidq; 1053 struct em28xx_dmaqueue *vidq = &dev->vidq;
1048 unsigned long flags = 0; 1054 unsigned long flags = 0;
1049 1055
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index e6559c6f143c..76bf8ba372b3 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -35,6 +35,7 @@
35#include <linux/kref.h> 35#include <linux/kref.h>
36#include <linux/videodev2.h> 36#include <linux/videodev2.h>
37 37
38#include <media/videobuf2-v4l2.h>
38#include <media/videobuf2-vmalloc.h> 39#include <media/videobuf2-vmalloc.h>
39#include <media/v4l2-device.h> 40#include <media/v4l2-device.h>
40#include <media/v4l2-ctrls.h> 41#include <media/v4l2-ctrls.h>
@@ -264,7 +265,7 @@ struct em28xx_fmt {
264/* buffer for one video frame */ 265/* buffer for one video frame */
265struct em28xx_buffer { 266struct em28xx_buffer {
266 /* common v4l buffer stuff -- must be first */ 267 /* common v4l buffer stuff -- must be first */
267 struct vb2_buffer vb; 268 struct vb2_v4l2_buffer vb;
268 struct list_head list; 269 struct list_head list;
269 270
270 void *mem; 271 void *mem;
diff --git a/drivers/media/usb/go7007/go7007-driver.c b/drivers/media/usb/go7007/go7007-driver.c
index 0ab81ec8897a..ae1cfa792c58 100644
--- a/drivers/media/usb/go7007/go7007-driver.c
+++ b/drivers/media/usb/go7007/go7007-driver.c
@@ -386,10 +386,10 @@ start_error:
386 */ 386 */
387static inline void store_byte(struct go7007_buffer *vb, u8 byte) 387static inline void store_byte(struct go7007_buffer *vb, u8 byte)
388{ 388{
389 if (vb && vb->vb.v4l2_planes[0].bytesused < GO7007_BUF_SIZE) { 389 if (vb && vb->vb.vb2_buf.planes[0].bytesused < GO7007_BUF_SIZE) {
390 u8 *ptr = vb2_plane_vaddr(&vb->vb, 0); 390 u8 *ptr = vb2_plane_vaddr(&vb->vb.vb2_buf, 0);
391 391
392 ptr[vb->vb.v4l2_planes[0].bytesused++] = byte; 392 ptr[vb->vb.vb2_buf.planes[0].bytesused++] = byte;
393 } 393 }
394} 394}
395 395
@@ -401,7 +401,7 @@ static void go7007_set_motion_regions(struct go7007 *go, struct go7007_buffer *v
401 .type = V4L2_EVENT_MOTION_DET, 401 .type = V4L2_EVENT_MOTION_DET,
402 .u.motion_det = { 402 .u.motion_det = {
403 .flags = V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ, 403 .flags = V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ,
404 .frame_sequence = vb->vb.v4l2_buf.sequence, 404 .frame_sequence = vb->vb.sequence,
405 .region_mask = motion_regions, 405 .region_mask = motion_regions,
406 }, 406 },
407 }; 407 };
@@ -417,7 +417,7 @@ static void go7007_set_motion_regions(struct go7007 *go, struct go7007_buffer *v
417 */ 417 */
418static void go7007_motion_regions(struct go7007 *go, struct go7007_buffer *vb) 418static void go7007_motion_regions(struct go7007 *go, struct go7007_buffer *vb)
419{ 419{
420 u32 *bytesused = &vb->vb.v4l2_planes[0].bytesused; 420 u32 *bytesused = &vb->vb.vb2_buf.planes[0].bytesused;
421 unsigned motion[4] = { 0, 0, 0, 0 }; 421 unsigned motion[4] = { 0, 0, 0, 0 };
422 u32 motion_regions = 0; 422 u32 motion_regions = 0;
423 unsigned stride = (go->width + 7) >> 3; 423 unsigned stride = (go->width + 7) >> 3;
@@ -458,25 +458,26 @@ static struct go7007_buffer *frame_boundary(struct go7007 *go, struct go7007_buf
458 go->next_seq++; 458 go->next_seq++;
459 return vb; 459 return vb;
460 } 460 }
461 bytesused = &vb->vb.v4l2_planes[0].bytesused; 461 bytesused = &vb->vb.vb2_buf.planes[0].bytesused;
462 462
463 vb->vb.v4l2_buf.sequence = go->next_seq++; 463 vb->vb.sequence = go->next_seq++;
464 if (vb->modet_active && *bytesused + 216 < GO7007_BUF_SIZE) 464 if (vb->modet_active && *bytesused + 216 < GO7007_BUF_SIZE)
465 go7007_motion_regions(go, vb); 465 go7007_motion_regions(go, vb);
466 else 466 else
467 go7007_set_motion_regions(go, vb, 0); 467 go7007_set_motion_regions(go, vb, 0);
468 468
469 v4l2_get_timestamp(&vb->vb.v4l2_buf.timestamp); 469 v4l2_get_timestamp(&vb->vb.timestamp);
470 vb_tmp = vb; 470 vb_tmp = vb;
471 spin_lock(&go->spinlock); 471 spin_lock(&go->spinlock);
472 list_del(&vb->list); 472 list_del(&vb->list);
473 if (list_empty(&go->vidq_active)) 473 if (list_empty(&go->vidq_active))
474 vb = NULL; 474 vb = NULL;
475 else 475 else
476 vb = list_first_entry(&go->vidq_active, struct go7007_buffer, list); 476 vb = list_first_entry(&go->vidq_active,
477 struct go7007_buffer, list);
477 go->active_buf = vb; 478 go->active_buf = vb;
478 spin_unlock(&go->spinlock); 479 spin_unlock(&go->spinlock);
479 vb2_buffer_done(&vb_tmp->vb, VB2_BUF_STATE_DONE); 480 vb2_buffer_done(&vb_tmp->vb.vb2_buf, VB2_BUF_STATE_DONE);
480 return vb; 481 return vb;
481} 482}
482 483
@@ -519,9 +520,10 @@ void go7007_parse_video_stream(struct go7007 *go, u8 *buf, int length)
519 } 520 }
520 521
521 for (i = 0; i < length; ++i) { 522 for (i = 0; i < length; ++i) {
522 if (vb && vb->vb.v4l2_planes[0].bytesused >= GO7007_BUF_SIZE - 3) { 523 if (vb && vb->vb.vb2_buf.planes[0].bytesused >=
524 GO7007_BUF_SIZE - 3) {
523 v4l2_info(&go->v4l2_dev, "dropping oversized frame\n"); 525 v4l2_info(&go->v4l2_dev, "dropping oversized frame\n");
524 vb->vb.v4l2_planes[0].bytesused = 0; 526 vb->vb.vb2_buf.planes[0].bytesused = 0;
525 vb->frame_offset = 0; 527 vb->frame_offset = 0;
526 vb->modet_active = 0; 528 vb->modet_active = 0;
527 vb = go->active_buf = NULL; 529 vb = go->active_buf = NULL;
@@ -601,7 +603,8 @@ void go7007_parse_video_stream(struct go7007 *go, u8 *buf, int length)
601 vb = frame_boundary(go, vb); 603 vb = frame_boundary(go, vb);
602 go->seen_frame = buf[i] == frame_start_code; 604 go->seen_frame = buf[i] == frame_start_code;
603 if (vb && go->seen_frame) 605 if (vb && go->seen_frame)
604 vb->frame_offset = vb->vb.v4l2_planes[0].bytesused; 606 vb->frame_offset =
607 vb->vb.vb2_buf.planes[0].bytesused;
605 } 608 }
606 /* Handle any special chunk types, or just write the 609 /* Handle any special chunk types, or just write the
607 * start code to the (potentially new) buffer */ 610 * start code to the (potentially new) buffer */
diff --git a/drivers/media/usb/go7007/go7007-priv.h b/drivers/media/usb/go7007/go7007-priv.h
index 9e83bbf289d0..745185eb060b 100644
--- a/drivers/media/usb/go7007/go7007-priv.h
+++ b/drivers/media/usb/go7007/go7007-priv.h
@@ -136,7 +136,7 @@ struct go7007_hpi_ops {
136#define GO7007_BUF_SIZE (GO7007_BUF_PAGES << PAGE_SHIFT) 136#define GO7007_BUF_SIZE (GO7007_BUF_PAGES << PAGE_SHIFT)
137 137
138struct go7007_buffer { 138struct go7007_buffer {
139 struct vb2_buffer vb; 139 struct vb2_v4l2_buffer vb;
140 struct list_head list; 140 struct list_head list;
141 unsigned int frame_offset; 141 unsigned int frame_offset;
142 u32 modet_active; 142 u32 modet_active;
diff --git a/drivers/media/usb/go7007/go7007-v4l2.c b/drivers/media/usb/go7007/go7007-v4l2.c
index c57207e268c3..63d87a2755ae 100644
--- a/drivers/media/usb/go7007/go7007-v4l2.c
+++ b/drivers/media/usb/go7007/go7007-v4l2.c
@@ -52,7 +52,7 @@ static bool valid_pixelformat(u32 pixelformat)
52 52
53static u32 get_frame_type_flag(struct go7007_buffer *vb, int format) 53static u32 get_frame_type_flag(struct go7007_buffer *vb, int format)
54{ 54{
55 u8 *ptr = vb2_plane_vaddr(&vb->vb, 0); 55 u8 *ptr = vb2_plane_vaddr(&vb->vb.vb2_buf, 0);
56 56
57 switch (format) { 57 switch (format) {
58 case V4L2_PIX_FMT_MJPEG: 58 case V4L2_PIX_FMT_MJPEG:
@@ -386,8 +386,9 @@ static void go7007_buf_queue(struct vb2_buffer *vb)
386{ 386{
387 struct vb2_queue *vq = vb->vb2_queue; 387 struct vb2_queue *vq = vb->vb2_queue;
388 struct go7007 *go = vb2_get_drv_priv(vq); 388 struct go7007 *go = vb2_get_drv_priv(vq);
389 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
389 struct go7007_buffer *go7007_vb = 390 struct go7007_buffer *go7007_vb =
390 container_of(vb, struct go7007_buffer, vb); 391 container_of(vbuf, struct go7007_buffer, vb);
391 unsigned long flags; 392 unsigned long flags;
392 393
393 spin_lock_irqsave(&go->spinlock, flags); 394 spin_lock_irqsave(&go->spinlock, flags);
@@ -397,12 +398,13 @@ static void go7007_buf_queue(struct vb2_buffer *vb)
397 398
398static int go7007_buf_prepare(struct vb2_buffer *vb) 399static int go7007_buf_prepare(struct vb2_buffer *vb)
399{ 400{
401 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
400 struct go7007_buffer *go7007_vb = 402 struct go7007_buffer *go7007_vb =
401 container_of(vb, struct go7007_buffer, vb); 403 container_of(vbuf, struct go7007_buffer, vb);
402 404
403 go7007_vb->modet_active = 0; 405 go7007_vb->modet_active = 0;
404 go7007_vb->frame_offset = 0; 406 go7007_vb->frame_offset = 0;
405 vb->v4l2_planes[0].bytesused = 0; 407 vb->planes[0].bytesused = 0;
406 return 0; 408 return 0;
407} 409}
408 410
@@ -410,15 +412,15 @@ static void go7007_buf_finish(struct vb2_buffer *vb)
410{ 412{
411 struct vb2_queue *vq = vb->vb2_queue; 413 struct vb2_queue *vq = vb->vb2_queue;
412 struct go7007 *go = vb2_get_drv_priv(vq); 414 struct go7007 *go = vb2_get_drv_priv(vq);
415 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
413 struct go7007_buffer *go7007_vb = 416 struct go7007_buffer *go7007_vb =
414 container_of(vb, struct go7007_buffer, vb); 417 container_of(vbuf, struct go7007_buffer, vb);
415 u32 frame_type_flag = get_frame_type_flag(go7007_vb, go->format); 418 u32 frame_type_flag = get_frame_type_flag(go7007_vb, go->format);
416 struct v4l2_buffer *buf = &vb->v4l2_buf;
417 419
418 buf->flags &= ~(V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_BFRAME | 420 vbuf->flags &= ~(V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_BFRAME |
419 V4L2_BUF_FLAG_PFRAME); 421 V4L2_BUF_FLAG_PFRAME);
420 buf->flags |= frame_type_flag; 422 vbuf->flags |= frame_type_flag;
421 buf->field = V4L2_FIELD_NONE; 423 vbuf->field = V4L2_FIELD_NONE;
422} 424}
423 425
424static int go7007_start_streaming(struct vb2_queue *q, unsigned int count) 426static int go7007_start_streaming(struct vb2_queue *q, unsigned int count)
diff --git a/drivers/media/usb/hackrf/hackrf.c b/drivers/media/usb/hackrf/hackrf.c
index fd1fa412e094..e1d4d16ade05 100644
--- a/drivers/media/usb/hackrf/hackrf.c
+++ b/drivers/media/usb/hackrf/hackrf.c
@@ -21,6 +21,7 @@
21#include <media/v4l2-ioctl.h> 21#include <media/v4l2-ioctl.h>
22#include <media/v4l2-ctrls.h> 22#include <media/v4l2-ctrls.h>
23#include <media/v4l2-event.h> 23#include <media/v4l2-event.h>
24#include <media/videobuf2-v4l2.h>
24#include <media/videobuf2-vmalloc.h> 25#include <media/videobuf2-vmalloc.h>
25 26
26/* HackRF USB API commands (from HackRF Library) */ 27/* HackRF USB API commands (from HackRF Library) */
@@ -85,7 +86,8 @@ static const unsigned int NUM_FORMATS = ARRAY_SIZE(formats);
85 86
86/* intermediate buffers with raw data from the USB device */ 87/* intermediate buffers with raw data from the USB device */
87struct hackrf_frame_buf { 88struct hackrf_frame_buf {
88 struct vb2_buffer vb; /* common v4l buffer stuff -- must be first */ 89 /* common v4l buffer stuff -- must be first */
90 struct vb2_v4l2_buffer vb;
89 struct list_head list; 91 struct list_head list;
90}; 92};
91 93
@@ -287,13 +289,13 @@ static void hackrf_urb_complete(struct urb *urb)
287 } 289 }
288 290
289 /* fill framebuffer */ 291 /* fill framebuffer */
290 ptr = vb2_plane_vaddr(&fbuf->vb, 0); 292 ptr = vb2_plane_vaddr(&fbuf->vb.vb2_buf, 0);
291 len = hackrf_convert_stream(dev, ptr, urb->transfer_buffer, 293 len = hackrf_convert_stream(dev, ptr, urb->transfer_buffer,
292 urb->actual_length); 294 urb->actual_length);
293 vb2_set_plane_payload(&fbuf->vb, 0, len); 295 vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0, len);
294 v4l2_get_timestamp(&fbuf->vb.v4l2_buf.timestamp); 296 v4l2_get_timestamp(&fbuf->vb.timestamp);
295 fbuf->vb.v4l2_buf.sequence = dev->sequence++; 297 fbuf->vb.sequence = dev->sequence++;
296 vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE); 298 vb2_buffer_done(&fbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
297 } 299 }
298skip: 300skip:
299 usb_submit_urb(urb, GFP_ATOMIC); 301 usb_submit_urb(urb, GFP_ATOMIC);
@@ -437,7 +439,7 @@ static void hackrf_cleanup_queued_bufs(struct hackrf_dev *dev)
437 buf = list_entry(dev->queued_bufs.next, 439 buf = list_entry(dev->queued_bufs.next,
438 struct hackrf_frame_buf, list); 440 struct hackrf_frame_buf, list);
439 list_del(&buf->list); 441 list_del(&buf->list);
440 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 442 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
441 } 443 }
442 spin_unlock_irqrestore(&dev->queued_bufs_lock, flags); 444 spin_unlock_irqrestore(&dev->queued_bufs_lock, flags);
443} 445}
@@ -483,9 +485,10 @@ static int hackrf_queue_setup(struct vb2_queue *vq,
483 485
484static void hackrf_buf_queue(struct vb2_buffer *vb) 486static void hackrf_buf_queue(struct vb2_buffer *vb)
485{ 487{
488 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
486 struct hackrf_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 489 struct hackrf_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
487 struct hackrf_frame_buf *buf = 490 struct hackrf_frame_buf *buf =
488 container_of(vb, struct hackrf_frame_buf, vb); 491 container_of(vbuf, struct hackrf_frame_buf, vb);
489 unsigned long flags; 492 unsigned long flags;
490 493
491 spin_lock_irqsave(&dev->queued_bufs_lock, flags); 494 spin_lock_irqsave(&dev->queued_bufs_lock, flags);
@@ -539,7 +542,8 @@ err:
539 542
540 list_for_each_entry_safe(buf, tmp, &dev->queued_bufs, list) { 543 list_for_each_entry_safe(buf, tmp, &dev->queued_bufs, list) {
541 list_del(&buf->list); 544 list_del(&buf->list);
542 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); 545 vb2_buffer_done(&buf->vb.vb2_buf,
546 VB2_BUF_STATE_QUEUED);
543 } 547 }
544 } 548 }
545 549
diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c
index 3f276d921cca..26a76e0fe3d8 100644
--- a/drivers/media/usb/msi2500/msi2500.c
+++ b/drivers/media/usb/msi2500/msi2500.c
@@ -28,6 +28,7 @@
28#include <media/v4l2-ctrls.h> 28#include <media/v4l2-ctrls.h>
29#include <media/v4l2-event.h> 29#include <media/v4l2-event.h>
30#include <linux/usb.h> 30#include <linux/usb.h>
31#include <media/videobuf2-v4l2.h>
31#include <media/videobuf2-vmalloc.h> 32#include <media/videobuf2-vmalloc.h>
32#include <linux/spi/spi.h> 33#include <linux/spi/spi.h>
33 34
@@ -112,7 +113,8 @@ static const unsigned int NUM_FORMATS = ARRAY_SIZE(formats);
112 113
113/* intermediate buffers with raw data from the USB device */ 114/* intermediate buffers with raw data from the USB device */
114struct msi2500_frame_buf { 115struct msi2500_frame_buf {
115 struct vb2_buffer vb; /* common v4l buffer stuff -- must be first */ 116 /* common v4l buffer stuff -- must be first */
117 struct vb2_v4l2_buffer vb;
116 struct list_head list; 118 struct list_head list;
117}; 119};
118 120
@@ -431,10 +433,10 @@ static void msi2500_isoc_handler(struct urb *urb)
431 } 433 }
432 434
433 /* fill framebuffer */ 435 /* fill framebuffer */
434 ptr = vb2_plane_vaddr(&fbuf->vb, 0); 436 ptr = vb2_plane_vaddr(&fbuf->vb.vb2_buf, 0);
435 flen = msi2500_convert_stream(dev, ptr, iso_buf, flen); 437 flen = msi2500_convert_stream(dev, ptr, iso_buf, flen);
436 vb2_set_plane_payload(&fbuf->vb, 0, flen); 438 vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0, flen);
437 vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE); 439 vb2_buffer_done(&fbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
438 } 440 }
439 441
440handler_end: 442handler_end:
@@ -569,7 +571,7 @@ static void msi2500_cleanup_queued_bufs(struct msi2500_dev *dev)
569 buf = list_entry(dev->queued_bufs.next, 571 buf = list_entry(dev->queued_bufs.next,
570 struct msi2500_frame_buf, list); 572 struct msi2500_frame_buf, list);
571 list_del(&buf->list); 573 list_del(&buf->list);
572 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 574 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
573 } 575 }
574 spin_unlock_irqrestore(&dev->queued_bufs_lock, flags); 576 spin_unlock_irqrestore(&dev->queued_bufs_lock, flags);
575} 577}
@@ -633,15 +635,16 @@ static int msi2500_queue_setup(struct vb2_queue *vq,
633 635
634static void msi2500_buf_queue(struct vb2_buffer *vb) 636static void msi2500_buf_queue(struct vb2_buffer *vb)
635{ 637{
638 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
636 struct msi2500_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 639 struct msi2500_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
637 struct msi2500_frame_buf *buf = container_of(vb, 640 struct msi2500_frame_buf *buf = container_of(vbuf,
638 struct msi2500_frame_buf, 641 struct msi2500_frame_buf,
639 vb); 642 vb);
640 unsigned long flags; 643 unsigned long flags;
641 644
642 /* Check the device has not disconnected between prep and queuing */ 645 /* Check the device has not disconnected between prep and queuing */
643 if (unlikely(!dev->udev)) { 646 if (unlikely(!dev->udev)) {
644 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 647 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
645 return; 648 return;
646 } 649 }
647 650
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 702267e208ba..3f5395a7fafe 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -240,9 +240,9 @@ static void pwc_frame_complete(struct pwc_device *pdev)
240 PWC_DEBUG_FLOW("Frame buffer underflow (%d bytes);" 240 PWC_DEBUG_FLOW("Frame buffer underflow (%d bytes);"
241 " discarded.\n", fbuf->filled); 241 " discarded.\n", fbuf->filled);
242 } else { 242 } else {
243 fbuf->vb.v4l2_buf.field = V4L2_FIELD_NONE; 243 fbuf->vb.field = V4L2_FIELD_NONE;
244 fbuf->vb.v4l2_buf.sequence = pdev->vframe_count; 244 fbuf->vb.sequence = pdev->vframe_count;
245 vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE); 245 vb2_buffer_done(&fbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
246 pdev->fill_buf = NULL; 246 pdev->fill_buf = NULL;
247 pdev->vsync = 0; 247 pdev->vsync = 0;
248 } 248 }
@@ -287,7 +287,7 @@ static void pwc_isoc_handler(struct urb *urb)
287 { 287 {
288 PWC_ERROR("Too many ISOC errors, bailing out.\n"); 288 PWC_ERROR("Too many ISOC errors, bailing out.\n");
289 if (pdev->fill_buf) { 289 if (pdev->fill_buf) {
290 vb2_buffer_done(&pdev->fill_buf->vb, 290 vb2_buffer_done(&pdev->fill_buf->vb.vb2_buf,
291 VB2_BUF_STATE_ERROR); 291 VB2_BUF_STATE_ERROR);
292 pdev->fill_buf = NULL; 292 pdev->fill_buf = NULL;
293 } 293 }
@@ -317,7 +317,7 @@ static void pwc_isoc_handler(struct urb *urb)
317 317
318 if (pdev->vsync == 1) { 318 if (pdev->vsync == 1) {
319 v4l2_get_timestamp( 319 v4l2_get_timestamp(
320 &fbuf->vb.v4l2_buf.timestamp); 320 &fbuf->vb.timestamp);
321 pdev->vsync = 2; 321 pdev->vsync = 2;
322 } 322 }
323 323
@@ -520,7 +520,7 @@ static void pwc_cleanup_queued_bufs(struct pwc_device *pdev,
520 buf = list_entry(pdev->queued_bufs.next, struct pwc_frame_buf, 520 buf = list_entry(pdev->queued_bufs.next, struct pwc_frame_buf,
521 list); 521 list);
522 list_del(&buf->list); 522 list_del(&buf->list);
523 vb2_buffer_done(&buf->vb, state); 523 vb2_buffer_done(&buf->vb.vb2_buf, state);
524 } 524 }
525 spin_unlock_irqrestore(&pdev->queued_bufs_lock, flags); 525 spin_unlock_irqrestore(&pdev->queued_bufs_lock, flags);
526} 526}
@@ -594,7 +594,9 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
594 594
595static int buffer_init(struct vb2_buffer *vb) 595static int buffer_init(struct vb2_buffer *vb)
596{ 596{
597 struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb); 597 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
598 struct pwc_frame_buf *buf =
599 container_of(vbuf, struct pwc_frame_buf, vb);
598 600
599 /* need vmalloc since frame buffer > 128K */ 601 /* need vmalloc since frame buffer > 128K */
600 buf->data = vzalloc(PWC_FRAME_SIZE); 602 buf->data = vzalloc(PWC_FRAME_SIZE);
@@ -618,7 +620,9 @@ static int buffer_prepare(struct vb2_buffer *vb)
618static void buffer_finish(struct vb2_buffer *vb) 620static void buffer_finish(struct vb2_buffer *vb)
619{ 621{
620 struct pwc_device *pdev = vb2_get_drv_priv(vb->vb2_queue); 622 struct pwc_device *pdev = vb2_get_drv_priv(vb->vb2_queue);
621 struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb); 623 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
624 struct pwc_frame_buf *buf =
625 container_of(vbuf, struct pwc_frame_buf, vb);
622 626
623 if (vb->state == VB2_BUF_STATE_DONE) { 627 if (vb->state == VB2_BUF_STATE_DONE) {
624 /* 628 /*
@@ -633,7 +637,9 @@ static void buffer_finish(struct vb2_buffer *vb)
633 637
634static void buffer_cleanup(struct vb2_buffer *vb) 638static void buffer_cleanup(struct vb2_buffer *vb)
635{ 639{
636 struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb); 640 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
641 struct pwc_frame_buf *buf =
642 container_of(vbuf, struct pwc_frame_buf, vb);
637 643
638 vfree(buf->data); 644 vfree(buf->data);
639} 645}
@@ -641,12 +647,14 @@ static void buffer_cleanup(struct vb2_buffer *vb)
641static void buffer_queue(struct vb2_buffer *vb) 647static void buffer_queue(struct vb2_buffer *vb)
642{ 648{
643 struct pwc_device *pdev = vb2_get_drv_priv(vb->vb2_queue); 649 struct pwc_device *pdev = vb2_get_drv_priv(vb->vb2_queue);
644 struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb); 650 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
651 struct pwc_frame_buf *buf =
652 container_of(vbuf, struct pwc_frame_buf, vb);
645 unsigned long flags = 0; 653 unsigned long flags = 0;
646 654
647 /* Check the device has not disconnected between prep and queuing */ 655 /* Check the device has not disconnected between prep and queuing */
648 if (!pdev->udev) { 656 if (!pdev->udev) {
649 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 657 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
650 return; 658 return;
651 } 659 }
652 660
@@ -695,7 +703,8 @@ static void stop_streaming(struct vb2_queue *vq)
695 703
696 pwc_cleanup_queued_bufs(pdev, VB2_BUF_STATE_ERROR); 704 pwc_cleanup_queued_bufs(pdev, VB2_BUF_STATE_ERROR);
697 if (pdev->fill_buf) 705 if (pdev->fill_buf)
698 vb2_buffer_done(&pdev->fill_buf->vb, VB2_BUF_STATE_ERROR); 706 vb2_buffer_done(&pdev->fill_buf->vb.vb2_buf,
707 VB2_BUF_STATE_ERROR);
699 mutex_unlock(&pdev->v4l2_lock); 708 mutex_unlock(&pdev->v4l2_lock);
700} 709}
701 710
diff --git a/drivers/media/usb/pwc/pwc-uncompress.c b/drivers/media/usb/pwc/pwc-uncompress.c
index b65903fbcf0d..98c46f93f119 100644
--- a/drivers/media/usb/pwc/pwc-uncompress.c
+++ b/drivers/media/usb/pwc/pwc-uncompress.c
@@ -40,7 +40,7 @@ int pwc_decompress(struct pwc_device *pdev, struct pwc_frame_buf *fbuf)
40 u16 *src; 40 u16 *src;
41 u16 *dsty, *dstu, *dstv; 41 u16 *dsty, *dstu, *dstv;
42 42
43 image = vb2_plane_vaddr(&fbuf->vb, 0); 43 image = vb2_plane_vaddr(&fbuf->vb.vb2_buf, 0);
44 44
45 yuv = fbuf->data + pdev->frame_header_size; /* Skip header */ 45 yuv = fbuf->data + pdev->frame_header_size; /* Skip header */
46 46
@@ -55,12 +55,12 @@ int pwc_decompress(struct pwc_device *pdev, struct pwc_frame_buf *fbuf)
55 * determine this using the type of the webcam */ 55 * determine this using the type of the webcam */
56 memcpy(raw_frame->cmd, pdev->cmd_buf, 4); 56 memcpy(raw_frame->cmd, pdev->cmd_buf, 4);
57 memcpy(raw_frame+1, yuv, pdev->frame_size); 57 memcpy(raw_frame+1, yuv, pdev->frame_size);
58 vb2_set_plane_payload(&fbuf->vb, 0, 58 vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0,
59 pdev->frame_size + sizeof(struct pwc_raw_frame)); 59 pdev->frame_size + sizeof(struct pwc_raw_frame));
60 return 0; 60 return 0;
61 } 61 }
62 62
63 vb2_set_plane_payload(&fbuf->vb, 0, 63 vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0,
64 pdev->width * pdev->height * 3 / 2); 64 pdev->width * pdev->height * 3 / 2);
65 65
66 if (pdev->vbandlength == 0) { 66 if (pdev->vbandlength == 0) {
diff --git a/drivers/media/usb/pwc/pwc.h b/drivers/media/usb/pwc/pwc.h
index 81b017a554bc..3c73bdaae450 100644
--- a/drivers/media/usb/pwc/pwc.h
+++ b/drivers/media/usb/pwc/pwc.h
@@ -40,6 +40,7 @@
40#include <media/v4l2-ctrls.h> 40#include <media/v4l2-ctrls.h>
41#include <media/v4l2-fh.h> 41#include <media/v4l2-fh.h>
42#include <media/v4l2-event.h> 42#include <media/v4l2-event.h>
43#include <media/videobuf2-v4l2.h>
43#include <media/videobuf2-vmalloc.h> 44#include <media/videobuf2-vmalloc.h>
44#ifdef CONFIG_USB_PWC_INPUT_EVDEV 45#ifdef CONFIG_USB_PWC_INPUT_EVDEV
45#include <linux/input.h> 46#include <linux/input.h>
@@ -210,7 +211,8 @@ struct pwc_raw_frame {
210/* intermediate buffers with raw data from the USB cam */ 211/* intermediate buffers with raw data from the USB cam */
211struct pwc_frame_buf 212struct pwc_frame_buf
212{ 213{
213 struct vb2_buffer vb; /* common v4l buffer stuff -- must be first */ 214 /* common v4l buffer stuff -- must be first */
215 struct vb2_v4l2_buffer vb;
214 struct list_head list; 216 struct list_head list;
215 void *data; 217 void *data;
216 int filled; /* number of bytes filled */ 218 int filled; /* number of bytes filled */
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index 0f3c34d47ec3..32b511510f0d 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -45,6 +45,7 @@
45#include <linux/mm.h> 45#include <linux/mm.h>
46#include <linux/vmalloc.h> 46#include <linux/vmalloc.h>
47#include <linux/usb.h> 47#include <linux/usb.h>
48#include <media/videobuf2-v4l2.h>
48#include <media/videobuf2-vmalloc.h> 49#include <media/videobuf2-vmalloc.h>
49#include <media/v4l2-common.h> 50#include <media/v4l2-common.h>
50#include <media/v4l2-device.h> 51#include <media/v4l2-device.h>
@@ -293,7 +294,7 @@ struct s2255_fmt {
293/* buffer for one video frame */ 294/* buffer for one video frame */
294struct s2255_buffer { 295struct s2255_buffer {
295 /* common v4l buffer stuff -- must be first */ 296 /* common v4l buffer stuff -- must be first */
296 struct vb2_buffer vb; 297 struct vb2_v4l2_buffer vb;
297 struct list_head list; 298 struct list_head list;
298}; 299};
299 300
@@ -573,14 +574,14 @@ static void s2255_got_frame(struct s2255_vc *vc, int jpgsize)
573 buf = list_entry(vc->buf_list.next, 574 buf = list_entry(vc->buf_list.next,
574 struct s2255_buffer, list); 575 struct s2255_buffer, list);
575 list_del(&buf->list); 576 list_del(&buf->list);
576 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); 577 v4l2_get_timestamp(&buf->vb.timestamp);
577 buf->vb.v4l2_buf.field = vc->field; 578 buf->vb.field = vc->field;
578 buf->vb.v4l2_buf.sequence = vc->frame_count; 579 buf->vb.sequence = vc->frame_count;
579 spin_unlock_irqrestore(&vc->qlock, flags); 580 spin_unlock_irqrestore(&vc->qlock, flags);
580 581
581 s2255_fillbuff(vc, buf, jpgsize); 582 s2255_fillbuff(vc, buf, jpgsize);
582 /* tell v4l buffer was filled */ 583 /* tell v4l buffer was filled */
583 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE); 584 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
584 dprintk(dev, 2, "%s: [buf] [%p]\n", __func__, buf); 585 dprintk(dev, 2, "%s: [buf] [%p]\n", __func__, buf);
585} 586}
586 587
@@ -612,7 +613,7 @@ static void s2255_fillbuff(struct s2255_vc *vc,
612{ 613{
613 int pos = 0; 614 int pos = 0;
614 const char *tmpbuf; 615 const char *tmpbuf;
615 char *vbuf = vb2_plane_vaddr(&buf->vb, 0); 616 char *vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
616 unsigned long last_frame; 617 unsigned long last_frame;
617 struct s2255_dev *dev = vc->dev; 618 struct s2255_dev *dev = vc->dev;
618 619
@@ -635,7 +636,7 @@ static void s2255_fillbuff(struct s2255_vc *vc,
635 break; 636 break;
636 case V4L2_PIX_FMT_JPEG: 637 case V4L2_PIX_FMT_JPEG:
637 case V4L2_PIX_FMT_MJPEG: 638 case V4L2_PIX_FMT_MJPEG:
638 vb2_set_plane_payload(&buf->vb, 0, jpgsize); 639 vb2_set_plane_payload(&buf->vb.vb2_buf, 0, jpgsize);
639 memcpy(vbuf, tmpbuf, jpgsize); 640 memcpy(vbuf, tmpbuf, jpgsize);
640 break; 641 break;
641 case V4L2_PIX_FMT_YUV422P: 642 case V4L2_PIX_FMT_YUV422P:
@@ -674,7 +675,8 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
674static int buffer_prepare(struct vb2_buffer *vb) 675static int buffer_prepare(struct vb2_buffer *vb)
675{ 676{
676 struct s2255_vc *vc = vb2_get_drv_priv(vb->vb2_queue); 677 struct s2255_vc *vc = vb2_get_drv_priv(vb->vb2_queue);
677 struct s2255_buffer *buf = container_of(vb, struct s2255_buffer, vb); 678 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
679 struct s2255_buffer *buf = container_of(vbuf, struct s2255_buffer, vb);
678 int w = vc->width; 680 int w = vc->width;
679 int h = vc->height; 681 int h = vc->height;
680 unsigned long size; 682 unsigned long size;
@@ -696,13 +698,14 @@ static int buffer_prepare(struct vb2_buffer *vb)
696 return -EINVAL; 698 return -EINVAL;
697 } 699 }
698 700
699 vb2_set_plane_payload(&buf->vb, 0, size); 701 vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
700 return 0; 702 return 0;
701} 703}
702 704
703static void buffer_queue(struct vb2_buffer *vb) 705static void buffer_queue(struct vb2_buffer *vb)
704{ 706{
705 struct s2255_buffer *buf = container_of(vb, struct s2255_buffer, vb); 707 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
708 struct s2255_buffer *buf = container_of(vbuf, struct s2255_buffer, vb);
706 struct s2255_vc *vc = vb2_get_drv_priv(vb->vb2_queue); 709 struct s2255_vc *vc = vb2_get_drv_priv(vb->vb2_queue);
707 unsigned long flags = 0; 710 unsigned long flags = 0;
708 dprintk(vc->dev, 1, "%s\n", __func__); 711 dprintk(vc->dev, 1, "%s\n", __func__);
@@ -1116,9 +1119,9 @@ static void stop_streaming(struct vb2_queue *vq)
1116 spin_lock_irqsave(&vc->qlock, flags); 1119 spin_lock_irqsave(&vc->qlock, flags);
1117 list_for_each_entry_safe(buf, node, &vc->buf_list, list) { 1120 list_for_each_entry_safe(buf, node, &vc->buf_list, list) {
1118 list_del(&buf->list); 1121 list_del(&buf->list);
1119 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 1122 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1120 dprintk(vc->dev, 2, "[%p/%d] done\n", 1123 dprintk(vc->dev, 2, "[%p/%d] done\n",
1121 buf, buf->vb.v4l2_buf.index); 1124 buf, buf->vb.vb2_buf.index);
1122 } 1125 }
1123 spin_unlock_irqrestore(&vc->qlock, flags); 1126 spin_unlock_irqrestore(&vc->qlock, flags);
1124} 1127}
diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
index e12b10352871..10e35e6479ad 100644
--- a/drivers/media/usb/stk1160/stk1160-v4l.c
+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
@@ -695,8 +695,9 @@ static void buffer_queue(struct vb2_buffer *vb)
695{ 695{
696 unsigned long flags; 696 unsigned long flags;
697 struct stk1160 *dev = vb2_get_drv_priv(vb->vb2_queue); 697 struct stk1160 *dev = vb2_get_drv_priv(vb->vb2_queue);
698 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
698 struct stk1160_buffer *buf = 699 struct stk1160_buffer *buf =
699 container_of(vb, struct stk1160_buffer, vb); 700 container_of(vbuf, struct stk1160_buffer, vb);
700 701
701 spin_lock_irqsave(&dev->buf_lock, flags); 702 spin_lock_irqsave(&dev->buf_lock, flags);
702 if (!dev->udev) { 703 if (!dev->udev) {
@@ -704,7 +705,7 @@ static void buffer_queue(struct vb2_buffer *vb)
704 * If the device is disconnected return the buffer to userspace 705 * If the device is disconnected return the buffer to userspace
705 * directly. The next QBUF call will fail with -ENODEV. 706 * directly. The next QBUF call will fail with -ENODEV.
706 */ 707 */
707 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 708 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
708 } else { 709 } else {
709 710
710 buf->mem = vb2_plane_vaddr(vb, 0); 711 buf->mem = vb2_plane_vaddr(vb, 0);
@@ -717,7 +718,7 @@ static void buffer_queue(struct vb2_buffer *vb)
717 * the buffer to userspace directly. 718 * the buffer to userspace directly.
718 */ 719 */
719 if (buf->length < dev->width * dev->height * 2) 720 if (buf->length < dev->width * dev->height * 2)
720 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 721 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
721 else 722 else
722 list_add_tail(&buf->list, &dev->avail_bufs); 723 list_add_tail(&buf->list, &dev->avail_bufs);
723 724
@@ -769,9 +770,9 @@ void stk1160_clear_queue(struct stk1160 *dev)
769 buf = list_first_entry(&dev->avail_bufs, 770 buf = list_first_entry(&dev->avail_bufs,
770 struct stk1160_buffer, list); 771 struct stk1160_buffer, list);
771 list_del(&buf->list); 772 list_del(&buf->list);
772 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 773 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
773 stk1160_dbg("buffer [%p/%d] aborted\n", 774 stk1160_dbg("buffer [%p/%d] aborted\n",
774 buf, buf->vb.v4l2_buf.index); 775 buf, buf->vb.vb2_buf.index);
775 } 776 }
776 777
777 /* It's important to release the current buffer */ 778 /* It's important to release the current buffer */
@@ -779,9 +780,9 @@ void stk1160_clear_queue(struct stk1160 *dev)
779 buf = dev->isoc_ctl.buf; 780 buf = dev->isoc_ctl.buf;
780 dev->isoc_ctl.buf = NULL; 781 dev->isoc_ctl.buf = NULL;
781 782
782 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 783 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
783 stk1160_dbg("buffer [%p/%d] aborted\n", 784 stk1160_dbg("buffer [%p/%d] aborted\n",
784 buf, buf->vb.v4l2_buf.index); 785 buf, buf->vb.vb2_buf.index);
785 } 786 }
786 spin_unlock_irqrestore(&dev->buf_lock, flags); 787 spin_unlock_irqrestore(&dev->buf_lock, flags);
787} 788}
diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c
index 940c3eaea507..75654e676e80 100644
--- a/drivers/media/usb/stk1160/stk1160-video.c
+++ b/drivers/media/usb/stk1160/stk1160-video.c
@@ -96,13 +96,13 @@ void stk1160_buffer_done(struct stk1160 *dev)
96{ 96{
97 struct stk1160_buffer *buf = dev->isoc_ctl.buf; 97 struct stk1160_buffer *buf = dev->isoc_ctl.buf;
98 98
99 buf->vb.v4l2_buf.sequence = dev->sequence++; 99 buf->vb.sequence = dev->sequence++;
100 buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED; 100 buf->vb.field = V4L2_FIELD_INTERLACED;
101 buf->vb.v4l2_buf.bytesused = buf->bytesused; 101 buf->vb.vb2_buf.planes[0].bytesused = buf->bytesused;
102 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); 102 v4l2_get_timestamp(&buf->vb.timestamp);
103 103
104 vb2_set_plane_payload(&buf->vb, 0, buf->bytesused); 104 vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->bytesused);
105 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE); 105 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
106 106
107 dev->isoc_ctl.buf = NULL; 107 dev->isoc_ctl.buf = NULL;
108} 108}
diff --git a/drivers/media/usb/stk1160/stk1160.h b/drivers/media/usb/stk1160/stk1160.h
index 047131b873f1..1ed1cc43cdb2 100644
--- a/drivers/media/usb/stk1160/stk1160.h
+++ b/drivers/media/usb/stk1160/stk1160.h
@@ -77,7 +77,7 @@
77/* Buffer for one video frame */ 77/* Buffer for one video frame */
78struct stk1160_buffer { 78struct stk1160_buffer {
79 /* common v4l buffer stuff -- must be first */ 79 /* common v4l buffer stuff -- must be first */
80 struct vb2_buffer vb; 80 struct vb2_v4l2_buffer vb;
81 struct list_head list; 81 struct list_head list;
82 82
83 void *mem; 83 void *mem;
diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
index a46766c9173d..ce5d5028e4c2 100644
--- a/drivers/media/usb/usbtv/usbtv-video.c
+++ b/drivers/media/usb/usbtv/usbtv-video.c
@@ -306,7 +306,7 @@ static void usbtv_image_chunk(struct usbtv *usbtv, __be32 *chunk)
306 306
307 /* First available buffer. */ 307 /* First available buffer. */
308 buf = list_first_entry(&usbtv->bufs, struct usbtv_buf, list); 308 buf = list_first_entry(&usbtv->bufs, struct usbtv_buf, list);
309 frame = vb2_plane_vaddr(&buf->vb, 0); 309 frame = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
310 310
311 /* Copy the chunk data. */ 311 /* Copy the chunk data. */
312 usbtv_chunk_to_vbuf(frame, &chunk[1], chunk_no, odd); 312 usbtv_chunk_to_vbuf(frame, &chunk[1], chunk_no, odd);
@@ -314,17 +314,17 @@ static void usbtv_image_chunk(struct usbtv *usbtv, __be32 *chunk)
314 314
315 /* Last chunk in a frame, signalling an end */ 315 /* Last chunk in a frame, signalling an end */
316 if (odd && chunk_no == usbtv->n_chunks-1) { 316 if (odd && chunk_no == usbtv->n_chunks-1) {
317 int size = vb2_plane_size(&buf->vb, 0); 317 int size = vb2_plane_size(&buf->vb.vb2_buf, 0);
318 enum vb2_buffer_state state = usbtv->chunks_done == 318 enum vb2_buffer_state state = usbtv->chunks_done ==
319 usbtv->n_chunks ? 319 usbtv->n_chunks ?
320 VB2_BUF_STATE_DONE : 320 VB2_BUF_STATE_DONE :
321 VB2_BUF_STATE_ERROR; 321 VB2_BUF_STATE_ERROR;
322 322
323 buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED; 323 buf->vb.field = V4L2_FIELD_INTERLACED;
324 buf->vb.v4l2_buf.sequence = usbtv->sequence++; 324 buf->vb.sequence = usbtv->sequence++;
325 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); 325 v4l2_get_timestamp(&buf->vb.timestamp);
326 vb2_set_plane_payload(&buf->vb, 0, size); 326 vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
327 vb2_buffer_done(&buf->vb, state); 327 vb2_buffer_done(&buf->vb.vb2_buf, state);
328 list_del(&buf->list); 328 list_del(&buf->list);
329 } 329 }
330 330
@@ -422,7 +422,7 @@ static void usbtv_stop(struct usbtv *usbtv)
422 while (!list_empty(&usbtv->bufs)) { 422 while (!list_empty(&usbtv->bufs)) {
423 struct usbtv_buf *buf = list_first_entry(&usbtv->bufs, 423 struct usbtv_buf *buf = list_first_entry(&usbtv->bufs,
424 struct usbtv_buf, list); 424 struct usbtv_buf, list);
425 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 425 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
426 list_del(&buf->list); 426 list_del(&buf->list);
427 } 427 }
428 spin_unlock_irqrestore(&usbtv->buflock, flags); 428 spin_unlock_irqrestore(&usbtv->buflock, flags);
@@ -617,8 +617,9 @@ static int usbtv_queue_setup(struct vb2_queue *vq,
617 617
618static void usbtv_buf_queue(struct vb2_buffer *vb) 618static void usbtv_buf_queue(struct vb2_buffer *vb)
619{ 619{
620 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
620 struct usbtv *usbtv = vb2_get_drv_priv(vb->vb2_queue); 621 struct usbtv *usbtv = vb2_get_drv_priv(vb->vb2_queue);
621 struct usbtv_buf *buf = container_of(vb, struct usbtv_buf, vb); 622 struct usbtv_buf *buf = container_of(vbuf, struct usbtv_buf, vb);
622 unsigned long flags; 623 unsigned long flags;
623 624
624 if (usbtv->udev == NULL) { 625 if (usbtv->udev == NULL) {
diff --git a/drivers/media/usb/usbtv/usbtv.h b/drivers/media/usb/usbtv/usbtv.h
index 968119581fab..19cb8bf7c4e9 100644
--- a/drivers/media/usb/usbtv/usbtv.h
+++ b/drivers/media/usb/usbtv/usbtv.h
@@ -24,6 +24,7 @@
24#include <linux/usb.h> 24#include <linux/usb.h>
25 25
26#include <media/v4l2-device.h> 26#include <media/v4l2-device.h>
27#include <media/videobuf2-v4l2.h>
27#include <media/videobuf2-vmalloc.h> 28#include <media/videobuf2-vmalloc.h>
28 29
29/* Hardware. */ 30/* Hardware. */
@@ -61,7 +62,7 @@ struct usbtv_norm_params {
61 62
62/* A single videobuf2 frame buffer. */ 63/* A single videobuf2 frame buffer. */
63struct usbtv_buf { 64struct usbtv_buf {
64 struct vb2_buffer vb; 65 struct vb2_v4l2_buffer vb;
65 struct list_head list; 66 struct list_head list;
66}; 67};
67 68
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index f16b9b42689d..b49bcab0c38c 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -20,6 +20,7 @@
20#include <linux/videodev2.h> 20#include <linux/videodev2.h>
21#include <linux/vmalloc.h> 21#include <linux/vmalloc.h>
22#include <linux/wait.h> 22#include <linux/wait.h>
23#include <media/videobuf2-v4l2.h>
23#include <media/videobuf2-vmalloc.h> 24#include <media/videobuf2-vmalloc.h>
24 25
25#include "uvcvideo.h" 26#include "uvcvideo.h"
@@ -60,7 +61,7 @@ static void uvc_queue_return_buffers(struct uvc_video_queue *queue,
60 queue); 61 queue);
61 list_del(&buf->queue); 62 list_del(&buf->queue);
62 buf->state = state; 63 buf->state = state;
63 vb2_buffer_done(&buf->buf, vb2_state); 64 vb2_buffer_done(&buf->buf.vb2_buf, vb2_state);
64 } 65 }
65} 66}
66 67
@@ -89,10 +90,11 @@ static int uvc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
89 90
90static int uvc_buffer_prepare(struct vb2_buffer *vb) 91static int uvc_buffer_prepare(struct vb2_buffer *vb)
91{ 92{
93 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
92 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); 94 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
93 struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf); 95 struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
94 96
95 if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT && 97 if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
96 vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) { 98 vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
97 uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n"); 99 uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
98 return -EINVAL; 100 return -EINVAL;
@@ -105,7 +107,7 @@ static int uvc_buffer_prepare(struct vb2_buffer *vb)
105 buf->error = 0; 107 buf->error = 0;
106 buf->mem = vb2_plane_vaddr(vb, 0); 108 buf->mem = vb2_plane_vaddr(vb, 0);
107 buf->length = vb2_plane_size(vb, 0); 109 buf->length = vb2_plane_size(vb, 0);
108 if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 110 if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
109 buf->bytesused = 0; 111 buf->bytesused = 0;
110 else 112 else
111 buf->bytesused = vb2_get_plane_payload(vb, 0); 113 buf->bytesused = vb2_get_plane_payload(vb, 0);
@@ -115,8 +117,9 @@ static int uvc_buffer_prepare(struct vb2_buffer *vb)
115 117
116static void uvc_buffer_queue(struct vb2_buffer *vb) 118static void uvc_buffer_queue(struct vb2_buffer *vb)
117{ 119{
120 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
118 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); 121 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
119 struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf); 122 struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
120 unsigned long flags; 123 unsigned long flags;
121 124
122 spin_lock_irqsave(&queue->irqlock, flags); 125 spin_lock_irqsave(&queue->irqlock, flags);
@@ -127,7 +130,7 @@ static void uvc_buffer_queue(struct vb2_buffer *vb)
127 * directly. The next QBUF call will fail with -ENODEV. 130 * directly. The next QBUF call will fail with -ENODEV.
128 */ 131 */
129 buf->state = UVC_BUF_STATE_ERROR; 132 buf->state = UVC_BUF_STATE_ERROR;
130 vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR); 133 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
131 } 134 }
132 135
133 spin_unlock_irqrestore(&queue->irqlock, flags); 136 spin_unlock_irqrestore(&queue->irqlock, flags);
@@ -135,12 +138,13 @@ static void uvc_buffer_queue(struct vb2_buffer *vb)
135 138
136static void uvc_buffer_finish(struct vb2_buffer *vb) 139static void uvc_buffer_finish(struct vb2_buffer *vb)
137{ 140{
141 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
138 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); 142 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
139 struct uvc_streaming *stream = uvc_queue_to_stream(queue); 143 struct uvc_streaming *stream = uvc_queue_to_stream(queue);
140 struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf); 144 struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
141 145
142 if (vb->state == VB2_BUF_STATE_DONE) 146 if (vb->state == VB2_BUF_STATE_DONE)
143 uvc_video_clock_update(stream, &vb->v4l2_buf, buf); 147 uvc_video_clock_update(stream, vbuf, buf);
144} 148}
145 149
146static int uvc_start_streaming(struct vb2_queue *vq, unsigned int count) 150static int uvc_start_streaming(struct vb2_queue *vq, unsigned int count)
@@ -398,7 +402,7 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
398 buf->error = 0; 402 buf->error = 0;
399 buf->state = UVC_BUF_STATE_QUEUED; 403 buf->state = UVC_BUF_STATE_QUEUED;
400 buf->bytesused = 0; 404 buf->bytesused = 0;
401 vb2_set_plane_payload(&buf->buf, 0, 0); 405 vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);
402 return buf; 406 return buf;
403 } 407 }
404 408
@@ -412,8 +416,8 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
412 spin_unlock_irqrestore(&queue->irqlock, flags); 416 spin_unlock_irqrestore(&queue->irqlock, flags);
413 417
414 buf->state = buf->error ? VB2_BUF_STATE_ERROR : UVC_BUF_STATE_DONE; 418 buf->state = buf->error ? VB2_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
415 vb2_set_plane_payload(&buf->buf, 0, buf->bytesused); 419 vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
416 vb2_buffer_done(&buf->buf, VB2_BUF_STATE_DONE); 420 vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
417 421
418 return nextbuf; 422 return nextbuf;
419} 423}
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 1c4a11766fd1..2b276ab7764f 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -606,7 +606,7 @@ static u16 uvc_video_clock_host_sof(const struct uvc_clock_sample *sample)
606 * timestamp of the sliding window to 1s. 606 * timestamp of the sliding window to 1s.
607 */ 607 */
608void uvc_video_clock_update(struct uvc_streaming *stream, 608void uvc_video_clock_update(struct uvc_streaming *stream,
609 struct v4l2_buffer *v4l2_buf, 609 struct vb2_v4l2_buffer *vbuf,
610 struct uvc_buffer *buf) 610 struct uvc_buffer *buf)
611{ 611{
612 struct uvc_clock *clock = &stream->clock; 612 struct uvc_clock *clock = &stream->clock;
@@ -699,14 +699,14 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
699 stream->dev->name, 699 stream->dev->name,
700 sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536), 700 sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536),
701 y, ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC, 701 y, ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC,
702 v4l2_buf->timestamp.tv_sec, 702 vbuf->timestamp.tv_sec,
703 (unsigned long)v4l2_buf->timestamp.tv_usec, 703 (unsigned long)vbuf->timestamp.tv_usec,
704 x1, first->host_sof, first->dev_sof, 704 x1, first->host_sof, first->dev_sof,
705 x2, last->host_sof, last->dev_sof, y1, y2); 705 x2, last->host_sof, last->dev_sof, y1, y2);
706 706
707 /* Update the V4L2 buffer. */ 707 /* Update the V4L2 buffer. */
708 v4l2_buf->timestamp.tv_sec = ts.tv_sec; 708 vbuf->timestamp.tv_sec = ts.tv_sec;
709 v4l2_buf->timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC; 709 vbuf->timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
710 710
711done: 711done:
712 spin_unlock_irqrestore(&stream->clock.lock, flags); 712 spin_unlock_irqrestore(&stream->clock.lock, flags);
@@ -1032,10 +1032,10 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
1032 1032
1033 uvc_video_get_ts(&ts); 1033 uvc_video_get_ts(&ts);
1034 1034
1035 buf->buf.v4l2_buf.field = V4L2_FIELD_NONE; 1035 buf->buf.field = V4L2_FIELD_NONE;
1036 buf->buf.v4l2_buf.sequence = stream->sequence; 1036 buf->buf.sequence = stream->sequence;
1037 buf->buf.v4l2_buf.timestamp.tv_sec = ts.tv_sec; 1037 buf->buf.timestamp.tv_sec = ts.tv_sec;
1038 buf->buf.v4l2_buf.timestamp.tv_usec = 1038 buf->buf.timestamp.tv_usec =
1039 ts.tv_nsec / NSEC_PER_USEC; 1039 ts.tv_nsec / NSEC_PER_USEC;
1040 1040
1041 /* TODO: Handle PTS and SCR. */ 1041 /* TODO: Handle PTS and SCR. */
@@ -1308,7 +1308,7 @@ static void uvc_video_encode_bulk(struct urb *urb, struct uvc_streaming *stream,
1308 if (buf->bytesused == stream->queue.buf_used) { 1308 if (buf->bytesused == stream->queue.buf_used) {
1309 stream->queue.buf_used = 0; 1309 stream->queue.buf_used = 0;
1310 buf->state = UVC_BUF_STATE_READY; 1310 buf->state = UVC_BUF_STATE_READY;
1311 buf->buf.v4l2_buf.sequence = ++stream->sequence; 1311 buf->buf.sequence = ++stream->sequence;
1312 uvc_queue_next_buffer(&stream->queue, buf); 1312 uvc_queue_next_buffer(&stream->queue, buf);
1313 stream->last_fid ^= UVC_STREAM_FID; 1313 stream->last_fid ^= UVC_STREAM_FID;
1314 } 1314 }
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 79fa829f42ec..f0f2391e1b43 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -354,7 +354,7 @@ enum uvc_buffer_state {
354}; 354};
355 355
356struct uvc_buffer { 356struct uvc_buffer {
357 struct vb2_buffer buf; 357 struct vb2_v4l2_buffer buf;
358 struct list_head queue; 358 struct list_head queue;
359 359
360 enum uvc_buffer_state state; 360 enum uvc_buffer_state state;
@@ -674,7 +674,7 @@ extern int uvc_probe_video(struct uvc_streaming *stream,
674extern int uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit, 674extern int uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit,
675 __u8 intfnum, __u8 cs, void *data, __u16 size); 675 __u8 intfnum, __u8 cs, void *data, __u16 size);
676void uvc_video_clock_update(struct uvc_streaming *stream, 676void uvc_video_clock_update(struct uvc_streaming *stream,
677 struct v4l2_buffer *v4l2_buf, 677 struct vb2_v4l2_buffer *vbuf,
678 struct uvc_buffer *buf); 678 struct uvc_buffer *buf);
679 679
680/* Status */ 680/* Status */
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index fdcf72c6e486..61d56c940f80 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -766,13 +766,15 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
766 * 766 *
767 * Call from buf_queue(), videobuf_queue_ops callback. 767 * Call from buf_queue(), videobuf_queue_ops callback.
768 */ 768 */
769void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb) 769void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
770 struct vb2_v4l2_buffer *vbuf)
770{ 771{
771 struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb); 772 struct v4l2_m2m_buffer *b = container_of(vbuf,
773 struct v4l2_m2m_buffer, vb);
772 struct v4l2_m2m_queue_ctx *q_ctx; 774 struct v4l2_m2m_queue_ctx *q_ctx;
773 unsigned long flags; 775 unsigned long flags;
774 776
775 q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type); 777 q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
776 if (!q_ctx) 778 if (!q_ctx)
777 return; 779 return;
778 780
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 9518ebd2d73b..8c456f7b1995 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -53,7 +53,7 @@ module_param(debug, int, 0644);
53 53
54#define log_memop(vb, op) \ 54#define log_memop(vb, op) \
55 dprintk(2, "call_memop(%p, %d, %s)%s\n", \ 55 dprintk(2, "call_memop(%p, %d, %s)%s\n", \
56 (vb)->vb2_queue, (vb)->v4l2_buf.index, #op, \ 56 (vb)->vb2_queue, (vb)->index, #op, \
57 (vb)->vb2_queue->mem_ops->op ? "" : " (nop)") 57 (vb)->vb2_queue->mem_ops->op ? "" : " (nop)")
58 58
59#define call_memop(vb, op, args...) \ 59#define call_memop(vb, op, args...) \
@@ -115,7 +115,7 @@ module_param(debug, int, 0644);
115 115
116#define log_vb_qop(vb, op, args...) \ 116#define log_vb_qop(vb, op, args...) \
117 dprintk(2, "call_vb_qop(%p, %d, %s)%s\n", \ 117 dprintk(2, "call_vb_qop(%p, %d, %s)%s\n", \
118 (vb)->vb2_queue, (vb)->v4l2_buf.index, #op, \ 118 (vb)->vb2_queue, (vb)->index, #op, \
119 (vb)->vb2_queue->ops->op ? "" : " (nop)") 119 (vb)->vb2_queue->ops->op ? "" : " (nop)")
120 120
121#define call_vb_qop(vb, op, args...) \ 121#define call_vb_qop(vb, op, args...) \
@@ -211,7 +211,7 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
211 211
212 /* Associate allocator private data with this plane */ 212 /* Associate allocator private data with this plane */
213 vb->planes[plane].mem_priv = mem_priv; 213 vb->planes[plane].mem_priv = mem_priv;
214 vb->v4l2_planes[plane].length = q->plane_sizes[plane]; 214 vb->planes[plane].length = q->plane_sizes[plane];
215 } 215 }
216 216
217 return 0; 217 return 0;
@@ -235,8 +235,7 @@ static void __vb2_buf_mem_free(struct vb2_buffer *vb)
235 for (plane = 0; plane < vb->num_planes; ++plane) { 235 for (plane = 0; plane < vb->num_planes; ++plane) {
236 call_void_memop(vb, put, vb->planes[plane].mem_priv); 236 call_void_memop(vb, put, vb->planes[plane].mem_priv);
237 vb->planes[plane].mem_priv = NULL; 237 vb->planes[plane].mem_priv = NULL;
238 dprintk(3, "freed plane %d of buffer %d\n", plane, 238 dprintk(3, "freed plane %d of buffer %d\n", plane, vb->index);
239 vb->v4l2_buf.index);
240 } 239 }
241} 240}
242 241
@@ -269,7 +268,9 @@ static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
269 268
270 call_void_memop(vb, detach_dmabuf, p->mem_priv); 269 call_void_memop(vb, detach_dmabuf, p->mem_priv);
271 dma_buf_put(p->dbuf); 270 dma_buf_put(p->dbuf);
272 memset(p, 0, sizeof(*p)); 271 p->mem_priv = NULL;
272 p->dbuf = NULL;
273 p->dbuf_mapped = 0;
273} 274}
274 275
275/** 276/**
@@ -299,7 +300,7 @@ static void __setup_lengths(struct vb2_queue *q, unsigned int n)
299 continue; 300 continue;
300 301
301 for (plane = 0; plane < vb->num_planes; ++plane) 302 for (plane = 0; plane < vb->num_planes; ++plane)
302 vb->v4l2_planes[plane].length = q->plane_sizes[plane]; 303 vb->planes[plane].length = q->plane_sizes[plane];
303 } 304 }
304} 305}
305 306
@@ -314,10 +315,10 @@ static void __setup_offsets(struct vb2_queue *q, unsigned int n)
314 unsigned long off; 315 unsigned long off;
315 316
316 if (q->num_buffers) { 317 if (q->num_buffers) {
317 struct v4l2_plane *p; 318 struct vb2_plane *p;
318 vb = q->bufs[q->num_buffers - 1]; 319 vb = q->bufs[q->num_buffers - 1];
319 p = &vb->v4l2_planes[vb->num_planes - 1]; 320 p = &vb->planes[vb->num_planes - 1];
320 off = PAGE_ALIGN(p->m.mem_offset + p->length); 321 off = PAGE_ALIGN(p->m.offset + p->length);
321 } else { 322 } else {
322 off = 0; 323 off = 0;
323 } 324 }
@@ -328,12 +329,12 @@ static void __setup_offsets(struct vb2_queue *q, unsigned int n)
328 continue; 329 continue;
329 330
330 for (plane = 0; plane < vb->num_planes; ++plane) { 331 for (plane = 0; plane < vb->num_planes; ++plane) {
331 vb->v4l2_planes[plane].m.mem_offset = off; 332 vb->planes[plane].m.offset = off;
332 333
333 dprintk(3, "buffer %d, plane %d offset 0x%08lx\n", 334 dprintk(3, "buffer %d, plane %d offset 0x%08lx\n",
334 buffer, plane, off); 335 buffer, plane, off);
335 336
336 off += vb->v4l2_planes[plane].length; 337 off += vb->planes[plane].length;
337 off = PAGE_ALIGN(off); 338 off = PAGE_ALIGN(off);
338 } 339 }
339 } 340 }
@@ -361,16 +362,12 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
361 break; 362 break;
362 } 363 }
363 364
364 /* Length stores number of planes for multiplanar buffers */
365 if (V4L2_TYPE_IS_MULTIPLANAR(q->type))
366 vb->v4l2_buf.length = num_planes;
367
368 vb->state = VB2_BUF_STATE_DEQUEUED; 365 vb->state = VB2_BUF_STATE_DEQUEUED;
369 vb->vb2_queue = q; 366 vb->vb2_queue = q;
370 vb->num_planes = num_planes; 367 vb->num_planes = num_planes;
371 vb->v4l2_buf.index = q->num_buffers + buffer; 368 vb->index = q->num_buffers + buffer;
372 vb->v4l2_buf.type = q->type; 369 vb->type = q->type;
373 vb->v4l2_buf.memory = memory; 370 vb->memory = memory;
374 371
375 /* Allocate video buffer memory for the MMAP type */ 372 /* Allocate video buffer memory for the MMAP type */
376 if (memory == V4L2_MEMORY_MMAP) { 373 if (memory == V4L2_MEMORY_MMAP) {
@@ -592,7 +589,7 @@ static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
592 length = (b->memory == V4L2_MEMORY_USERPTR || 589 length = (b->memory == V4L2_MEMORY_USERPTR ||
593 b->memory == V4L2_MEMORY_DMABUF) 590 b->memory == V4L2_MEMORY_DMABUF)
594 ? b->m.planes[plane].length 591 ? b->m.planes[plane].length
595 : vb->v4l2_planes[plane].length; 592 : vb->planes[plane].length;
596 bytesused = b->m.planes[plane].bytesused 593 bytesused = b->m.planes[plane].bytesused
597 ? b->m.planes[plane].bytesused : length; 594 ? b->m.planes[plane].bytesused : length;
598 595
@@ -605,8 +602,7 @@ static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
605 } 602 }
606 } else { 603 } else {
607 length = (b->memory == V4L2_MEMORY_USERPTR) 604 length = (b->memory == V4L2_MEMORY_USERPTR)
608 ? b->length : vb->v4l2_planes[0].length; 605 ? b->length : vb->planes[0].length;
609 bytesused = b->bytesused ? b->bytesused : length;
610 606
611 if (b->bytesused > length) 607 if (b->bytesused > length)
612 return -EINVAL; 608 return -EINVAL;
@@ -656,12 +652,23 @@ static bool __buffers_in_use(struct vb2_queue *q)
656 */ 652 */
657static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b) 653static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
658{ 654{
655 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
659 struct vb2_queue *q = vb->vb2_queue; 656 struct vb2_queue *q = vb->vb2_queue;
657 unsigned int plane;
660 658
661 /* Copy back data such as timestamp, flags, etc. */ 659 /* Copy back data such as timestamp, flags, etc. */
662 memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m)); 660 b->index = vb->index;
663 b->reserved2 = vb->v4l2_buf.reserved2; 661 b->type = vb->type;
664 b->reserved = vb->v4l2_buf.reserved; 662 b->memory = vb->memory;
663 b->bytesused = 0;
664
665 b->flags = vbuf->flags;
666 b->field = vbuf->field;
667 b->timestamp = vbuf->timestamp;
668 b->timecode = vbuf->timecode;
669 b->sequence = vbuf->sequence;
670 b->reserved2 = 0;
671 b->reserved = 0;
665 672
666 if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) { 673 if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) {
667 /* 674 /*
@@ -669,21 +676,34 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
669 * for it. The caller has already verified memory and size. 676 * for it. The caller has already verified memory and size.
670 */ 677 */
671 b->length = vb->num_planes; 678 b->length = vb->num_planes;
672 memcpy(b->m.planes, vb->v4l2_planes, 679 for (plane = 0; plane < vb->num_planes; ++plane) {
673 b->length * sizeof(struct v4l2_plane)); 680 struct v4l2_plane *pdst = &b->m.planes[plane];
681 struct vb2_plane *psrc = &vb->planes[plane];
682
683 pdst->bytesused = psrc->bytesused;
684 pdst->length = psrc->length;
685 if (q->memory == V4L2_MEMORY_MMAP)
686 pdst->m.mem_offset = psrc->m.offset;
687 else if (q->memory == V4L2_MEMORY_USERPTR)
688 pdst->m.userptr = psrc->m.userptr;
689 else if (q->memory == V4L2_MEMORY_DMABUF)
690 pdst->m.fd = psrc->m.fd;
691 pdst->data_offset = psrc->data_offset;
692 memset(pdst->reserved, 0, sizeof(pdst->reserved));
693 }
674 } else { 694 } else {
675 /* 695 /*
676 * We use length and offset in v4l2_planes array even for 696 * We use length and offset in v4l2_planes array even for
677 * single-planar buffers, but userspace does not. 697 * single-planar buffers, but userspace does not.
678 */ 698 */
679 b->length = vb->v4l2_planes[0].length; 699 b->length = vb->planes[0].length;
680 b->bytesused = vb->v4l2_planes[0].bytesused; 700 b->bytesused = vb->planes[0].bytesused;
681 if (q->memory == V4L2_MEMORY_MMAP) 701 if (q->memory == V4L2_MEMORY_MMAP)
682 b->m.offset = vb->v4l2_planes[0].m.mem_offset; 702 b->m.offset = vb->planes[0].m.offset;
683 else if (q->memory == V4L2_MEMORY_USERPTR) 703 else if (q->memory == V4L2_MEMORY_USERPTR)
684 b->m.userptr = vb->v4l2_planes[0].m.userptr; 704 b->m.userptr = vb->planes[0].m.userptr;
685 else if (q->memory == V4L2_MEMORY_DMABUF) 705 else if (q->memory == V4L2_MEMORY_DMABUF)
686 b->m.fd = vb->v4l2_planes[0].m.fd; 706 b->m.fd = vb->planes[0].m.fd;
687 } 707 }
688 708
689 /* 709 /*
@@ -1197,7 +1217,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
1197 vb->cnt_buf_done++; 1217 vb->cnt_buf_done++;
1198#endif 1218#endif
1199 dprintk(4, "done processing on buffer %d, state: %d\n", 1219 dprintk(4, "done processing on buffer %d, state: %d\n",
1200 vb->v4l2_buf.index, state); 1220 vb->index, state);
1201 1221
1202 /* sync buffers */ 1222 /* sync buffers */
1203 for (plane = 0; plane < vb->num_planes; ++plane) 1223 for (plane = 0; plane < vb->num_planes; ++plane)
@@ -1278,25 +1298,26 @@ static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
1278 * v4l2_buffer by the userspace. The caller has already verified that struct 1298 * v4l2_buffer by the userspace. The caller has already verified that struct
1279 * v4l2_buffer has a valid number of planes. 1299 * v4l2_buffer has a valid number of planes.
1280 */ 1300 */
1281static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b, 1301static void __fill_vb2_buffer(struct vb2_buffer *vb,
1282 struct v4l2_plane *v4l2_planes) 1302 const struct v4l2_buffer *b, struct vb2_plane *planes)
1283{ 1303{
1304 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1284 unsigned int plane; 1305 unsigned int plane;
1285 1306
1286 if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) { 1307 if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
1287 if (b->memory == V4L2_MEMORY_USERPTR) { 1308 if (b->memory == V4L2_MEMORY_USERPTR) {
1288 for (plane = 0; plane < vb->num_planes; ++plane) { 1309 for (plane = 0; plane < vb->num_planes; ++plane) {
1289 v4l2_planes[plane].m.userptr = 1310 planes[plane].m.userptr =
1290 b->m.planes[plane].m.userptr; 1311 b->m.planes[plane].m.userptr;
1291 v4l2_planes[plane].length = 1312 planes[plane].length =
1292 b->m.planes[plane].length; 1313 b->m.planes[plane].length;
1293 } 1314 }
1294 } 1315 }
1295 if (b->memory == V4L2_MEMORY_DMABUF) { 1316 if (b->memory == V4L2_MEMORY_DMABUF) {
1296 for (plane = 0; plane < vb->num_planes; ++plane) { 1317 for (plane = 0; plane < vb->num_planes; ++plane) {
1297 v4l2_planes[plane].m.fd = 1318 planes[plane].m.fd =
1298 b->m.planes[plane].m.fd; 1319 b->m.planes[plane].m.fd;
1299 v4l2_planes[plane].length = 1320 planes[plane].length =
1300 b->m.planes[plane].length; 1321 b->m.planes[plane].length;
1301 } 1322 }
1302 } 1323 }
@@ -1320,7 +1341,7 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
1320 * applications working. 1341 * applications working.
1321 */ 1342 */
1322 for (plane = 0; plane < vb->num_planes; ++plane) { 1343 for (plane = 0; plane < vb->num_planes; ++plane) {
1323 struct v4l2_plane *pdst = &v4l2_planes[plane]; 1344 struct vb2_plane *pdst = &planes[plane];
1324 struct v4l2_plane *psrc = &b->m.planes[plane]; 1345 struct v4l2_plane *psrc = &b->m.planes[plane];
1325 1346
1326 if (psrc->bytesused == 0) 1347 if (psrc->bytesused == 0)
@@ -1350,13 +1371,13 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
1350 * old userspace applications working. 1371 * old userspace applications working.
1351 */ 1372 */
1352 if (b->memory == V4L2_MEMORY_USERPTR) { 1373 if (b->memory == V4L2_MEMORY_USERPTR) {
1353 v4l2_planes[0].m.userptr = b->m.userptr; 1374 planes[0].m.userptr = b->m.userptr;
1354 v4l2_planes[0].length = b->length; 1375 planes[0].length = b->length;
1355 } 1376 }
1356 1377
1357 if (b->memory == V4L2_MEMORY_DMABUF) { 1378 if (b->memory == V4L2_MEMORY_DMABUF) {
1358 v4l2_planes[0].m.fd = b->m.fd; 1379 planes[0].m.fd = b->m.fd;
1359 v4l2_planes[0].length = b->length; 1380 planes[0].length = b->length;
1360 } 1381 }
1361 1382
1362 if (V4L2_TYPE_IS_OUTPUT(b->type)) { 1383 if (V4L2_TYPE_IS_OUTPUT(b->type)) {
@@ -1364,17 +1385,17 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
1364 vb2_warn_zero_bytesused(vb); 1385 vb2_warn_zero_bytesused(vb);
1365 1386
1366 if (vb->vb2_queue->allow_zero_bytesused) 1387 if (vb->vb2_queue->allow_zero_bytesused)
1367 v4l2_planes[0].bytesused = b->bytesused; 1388 planes[0].bytesused = b->bytesused;
1368 else 1389 else
1369 v4l2_planes[0].bytesused = b->bytesused ? 1390 planes[0].bytesused = b->bytesused ?
1370 b->bytesused : v4l2_planes[0].length; 1391 b->bytesused : planes[0].length;
1371 } else 1392 } else
1372 v4l2_planes[0].bytesused = 0; 1393 planes[0].bytesused = 0;
1373 1394
1374 } 1395 }
1375 1396
1376 /* Zero flags that the vb2 core handles */ 1397 /* Zero flags that the vb2 core handles */
1377 vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS; 1398 vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
1378 if ((vb->vb2_queue->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) != 1399 if ((vb->vb2_queue->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
1379 V4L2_BUF_FLAG_TIMESTAMP_COPY || !V4L2_TYPE_IS_OUTPUT(b->type)) { 1400 V4L2_BUF_FLAG_TIMESTAMP_COPY || !V4L2_TYPE_IS_OUTPUT(b->type)) {
1380 /* 1401 /*
@@ -1382,7 +1403,7 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
1382 * their timestamp and timestamp source flags from the 1403 * their timestamp and timestamp source flags from the
1383 * queue. 1404 * queue.
1384 */ 1405 */
1385 vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; 1406 vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
1386 } 1407 }
1387 1408
1388 if (V4L2_TYPE_IS_OUTPUT(b->type)) { 1409 if (V4L2_TYPE_IS_OUTPUT(b->type)) {
@@ -1392,11 +1413,11 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
1392 * The 'field' is valid metadata for this output buffer 1413 * The 'field' is valid metadata for this output buffer
1393 * and so that needs to be copied here. 1414 * and so that needs to be copied here.
1394 */ 1415 */
1395 vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TIMECODE; 1416 vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE;
1396 vb->v4l2_buf.field = b->field; 1417 vbuf->field = b->field;
1397 } else { 1418 } else {
1398 /* Zero any output buffer flags as this is a capture buffer */ 1419 /* Zero any output buffer flags as this is a capture buffer */
1399 vb->v4l2_buf.flags &= ~V4L2_BUFFER_OUT_FLAGS; 1420 vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS;
1400 } 1421 }
1401} 1422}
1402 1423
@@ -1405,7 +1426,7 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
1405 */ 1426 */
1406static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b) 1427static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1407{ 1428{
1408 __fill_vb2_buffer(vb, b, vb->v4l2_planes); 1429 __fill_vb2_buffer(vb, b, vb->planes);
1409 return call_vb_qop(vb, buf_prepare, vb); 1430 return call_vb_qop(vb, buf_prepare, vb);
1410} 1431}
1411 1432
@@ -1414,7 +1435,7 @@ static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1414 */ 1435 */
1415static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b) 1436static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1416{ 1437{
1417 struct v4l2_plane planes[VIDEO_MAX_PLANES]; 1438 struct vb2_plane planes[VIDEO_MAX_PLANES];
1418 struct vb2_queue *q = vb->vb2_queue; 1439 struct vb2_queue *q = vb->vb2_queue;
1419 void *mem_priv; 1440 void *mem_priv;
1420 unsigned int plane; 1441 unsigned int plane;
@@ -1429,9 +1450,9 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1429 1450
1430 for (plane = 0; plane < vb->num_planes; ++plane) { 1451 for (plane = 0; plane < vb->num_planes; ++plane) {
1431 /* Skip the plane if already verified */ 1452 /* Skip the plane if already verified */
1432 if (vb->v4l2_planes[plane].m.userptr && 1453 if (vb->planes[plane].m.userptr &&
1433 vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr 1454 vb->planes[plane].m.userptr == planes[plane].m.userptr
1434 && vb->v4l2_planes[plane].length == planes[plane].length) 1455 && vb->planes[plane].length == planes[plane].length)
1435 continue; 1456 continue;
1436 1457
1437 dprintk(3, "userspace address for plane %d changed, " 1458 dprintk(3, "userspace address for plane %d changed, "
@@ -1457,7 +1478,10 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1457 } 1478 }
1458 1479
1459 vb->planes[plane].mem_priv = NULL; 1480 vb->planes[plane].mem_priv = NULL;
1460 memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane)); 1481 vb->planes[plane].bytesused = 0;
1482 vb->planes[plane].length = 0;
1483 vb->planes[plane].m.userptr = 0;
1484 vb->planes[plane].data_offset = 0;
1461 1485
1462 /* Acquire each plane's memory */ 1486 /* Acquire each plane's memory */
1463 mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane], 1487 mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane],
@@ -1476,8 +1500,12 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1476 * Now that everything is in order, copy relevant information 1500 * Now that everything is in order, copy relevant information
1477 * provided by userspace. 1501 * provided by userspace.
1478 */ 1502 */
1479 for (plane = 0; plane < vb->num_planes; ++plane) 1503 for (plane = 0; plane < vb->num_planes; ++plane) {
1480 vb->v4l2_planes[plane] = planes[plane]; 1504 vb->planes[plane].bytesused = planes[plane].bytesused;
1505 vb->planes[plane].length = planes[plane].length;
1506 vb->planes[plane].m.userptr = planes[plane].m.userptr;
1507 vb->planes[plane].data_offset = planes[plane].data_offset;
1508 }
1481 1509
1482 if (reacquired) { 1510 if (reacquired) {
1483 /* 1511 /*
@@ -1504,10 +1532,11 @@ err:
1504 /* In case of errors, release planes that were already acquired */ 1532 /* In case of errors, release planes that were already acquired */
1505 for (plane = 0; plane < vb->num_planes; ++plane) { 1533 for (plane = 0; plane < vb->num_planes; ++plane) {
1506 if (vb->planes[plane].mem_priv) 1534 if (vb->planes[plane].mem_priv)
1507 call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv); 1535 call_void_memop(vb, put_userptr,
1536 vb->planes[plane].mem_priv);
1508 vb->planes[plane].mem_priv = NULL; 1537 vb->planes[plane].mem_priv = NULL;
1509 vb->v4l2_planes[plane].m.userptr = 0; 1538 vb->planes[plane].m.userptr = 0;
1510 vb->v4l2_planes[plane].length = 0; 1539 vb->planes[plane].length = 0;
1511 } 1540 }
1512 1541
1513 return ret; 1542 return ret;
@@ -1518,7 +1547,7 @@ err:
1518 */ 1547 */
1519static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b) 1548static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1520{ 1549{
1521 struct v4l2_plane planes[VIDEO_MAX_PLANES]; 1550 struct vb2_plane planes[VIDEO_MAX_PLANES];
1522 struct vb2_queue *q = vb->vb2_queue; 1551 struct vb2_queue *q = vb->vb2_queue;
1523 void *mem_priv; 1552 void *mem_priv;
1524 unsigned int plane; 1553 unsigned int plane;
@@ -1554,7 +1583,7 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1554 1583
1555 /* Skip the plane if already verified */ 1584 /* Skip the plane if already verified */
1556 if (dbuf == vb->planes[plane].dbuf && 1585 if (dbuf == vb->planes[plane].dbuf &&
1557 vb->v4l2_planes[plane].length == planes[plane].length) { 1586 vb->planes[plane].length == planes[plane].length) {
1558 dma_buf_put(dbuf); 1587 dma_buf_put(dbuf);
1559 continue; 1588 continue;
1560 } 1589 }
@@ -1568,11 +1597,15 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1568 1597
1569 /* Release previously acquired memory if present */ 1598 /* Release previously acquired memory if present */
1570 __vb2_plane_dmabuf_put(vb, &vb->planes[plane]); 1599 __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
1571 memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane)); 1600 vb->planes[plane].bytesused = 0;
1601 vb->planes[plane].length = 0;
1602 vb->planes[plane].m.fd = 0;
1603 vb->planes[plane].data_offset = 0;
1572 1604
1573 /* Acquire each plane's memory */ 1605 /* Acquire each plane's memory */
1574 mem_priv = call_ptr_memop(vb, attach_dmabuf, q->alloc_ctx[plane], 1606 mem_priv = call_ptr_memop(vb, attach_dmabuf,
1575 dbuf, planes[plane].length, dma_dir); 1607 q->alloc_ctx[plane], dbuf, planes[plane].length,
1608 dma_dir);
1576 if (IS_ERR(mem_priv)) { 1609 if (IS_ERR(mem_priv)) {
1577 dprintk(1, "failed to attach dmabuf\n"); 1610 dprintk(1, "failed to attach dmabuf\n");
1578 ret = PTR_ERR(mem_priv); 1611 ret = PTR_ERR(mem_priv);
@@ -1602,8 +1635,12 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1602 * Now that everything is in order, copy relevant information 1635 * Now that everything is in order, copy relevant information
1603 * provided by userspace. 1636 * provided by userspace.
1604 */ 1637 */
1605 for (plane = 0; plane < vb->num_planes; ++plane) 1638 for (plane = 0; plane < vb->num_planes; ++plane) {
1606 vb->v4l2_planes[plane] = planes[plane]; 1639 vb->planes[plane].bytesused = planes[plane].bytesused;
1640 vb->planes[plane].length = planes[plane].length;
1641 vb->planes[plane].m.fd = planes[plane].m.fd;
1642 vb->planes[plane].data_offset = planes[plane].data_offset;
1643 }
1607 1644
1608 if (reacquired) { 1645 if (reacquired) {
1609 /* 1646 /*
@@ -1654,6 +1691,7 @@ static void __enqueue_in_driver(struct vb2_buffer *vb)
1654 1691
1655static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b) 1692static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1656{ 1693{
1694 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1657 struct vb2_queue *q = vb->vb2_queue; 1695 struct vb2_queue *q = vb->vb2_queue;
1658 int ret; 1696 int ret;
1659 1697
@@ -1682,9 +1720,9 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1682 } 1720 }
1683 1721
1684 vb->state = VB2_BUF_STATE_PREPARING; 1722 vb->state = VB2_BUF_STATE_PREPARING;
1685 vb->v4l2_buf.timestamp.tv_sec = 0; 1723 vbuf->timestamp.tv_sec = 0;
1686 vb->v4l2_buf.timestamp.tv_usec = 0; 1724 vbuf->timestamp.tv_usec = 0;
1687 vb->v4l2_buf.sequence = 0; 1725 vbuf->sequence = 0;
1688 1726
1689 switch (q->memory) { 1727 switch (q->memory) {
1690 case V4L2_MEMORY_MMAP: 1728 case V4L2_MEMORY_MMAP:
@@ -1776,7 +1814,7 @@ int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
1776 /* Fill buffer information for the userspace */ 1814 /* Fill buffer information for the userspace */
1777 __fill_v4l2_buffer(vb, b); 1815 __fill_v4l2_buffer(vb, b);
1778 1816
1779 dprintk(1, "prepare of buffer %d succeeded\n", vb->v4l2_buf.index); 1817 dprintk(1, "prepare of buffer %d succeeded\n", vb->index);
1780 } 1818 }
1781 return ret; 1819 return ret;
1782} 1820}
@@ -1818,7 +1856,7 @@ static int vb2_start_streaming(struct vb2_queue *q)
1818 /* 1856 /*
1819 * If you see this warning, then the driver isn't cleaning up properly 1857 * If you see this warning, then the driver isn't cleaning up properly
1820 * after a failed start_streaming(). See the start_streaming() 1858 * after a failed start_streaming(). See the start_streaming()
1821 * documentation in videobuf2-v4l2.h for more information how buffers 1859 * documentation in videobuf2-core.h for more information how buffers
1822 * should be returned to vb2 in start_streaming(). 1860 * should be returned to vb2 in start_streaming().
1823 */ 1861 */
1824 if (WARN_ON(atomic_read(&q->owned_by_drv_count))) { 1862 if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
@@ -1849,11 +1887,13 @@ static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
1849{ 1887{
1850 int ret = vb2_queue_or_prepare_buf(q, b, "qbuf"); 1888 int ret = vb2_queue_or_prepare_buf(q, b, "qbuf");
1851 struct vb2_buffer *vb; 1889 struct vb2_buffer *vb;
1890 struct vb2_v4l2_buffer *vbuf;
1852 1891
1853 if (ret) 1892 if (ret)
1854 return ret; 1893 return ret;
1855 1894
1856 vb = q->bufs[b->index]; 1895 vb = q->bufs[b->index];
1896 vbuf = to_vb2_v4l2_buffer(vb);
1857 1897
1858 switch (vb->state) { 1898 switch (vb->state) {
1859 case VB2_BUF_STATE_DEQUEUED: 1899 case VB2_BUF_STATE_DEQUEUED:
@@ -1886,10 +1926,10 @@ static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
1886 */ 1926 */
1887 if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) == 1927 if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
1888 V4L2_BUF_FLAG_TIMESTAMP_COPY) 1928 V4L2_BUF_FLAG_TIMESTAMP_COPY)
1889 vb->v4l2_buf.timestamp = b->timestamp; 1929 vbuf->timestamp = b->timestamp;
1890 vb->v4l2_buf.flags |= b->flags & V4L2_BUF_FLAG_TIMECODE; 1930 vbuf->flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
1891 if (b->flags & V4L2_BUF_FLAG_TIMECODE) 1931 if (b->flags & V4L2_BUF_FLAG_TIMECODE)
1892 vb->v4l2_buf.timecode = b->timecode; 1932 vbuf->timecode = b->timecode;
1893 } 1933 }
1894 1934
1895 trace_vb2_qbuf(q, vb); 1935 trace_vb2_qbuf(q, vb);
@@ -1917,7 +1957,7 @@ static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
1917 return ret; 1957 return ret;
1918 } 1958 }
1919 1959
1920 dprintk(1, "qbuf of buffer %d succeeded\n", vb->v4l2_buf.index); 1960 dprintk(1, "qbuf of buffer %d succeeded\n", vb->index);
1921 return 0; 1961 return 0;
1922} 1962}
1923 1963
@@ -2107,9 +2147,11 @@ static void __vb2_dqbuf(struct vb2_buffer *vb)
2107 } 2147 }
2108} 2148}
2109 2149
2110static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking) 2150static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b,
2151 bool nonblocking)
2111{ 2152{
2112 struct vb2_buffer *vb = NULL; 2153 struct vb2_buffer *vb = NULL;
2154 struct vb2_v4l2_buffer *vbuf = NULL;
2113 int ret; 2155 int ret;
2114 2156
2115 if (b->type != q->type) { 2157 if (b->type != q->type) {
@@ -2142,14 +2184,15 @@ static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool n
2142 2184
2143 trace_vb2_dqbuf(q, vb); 2185 trace_vb2_dqbuf(q, vb);
2144 2186
2187 vbuf = to_vb2_v4l2_buffer(vb);
2145 if (!V4L2_TYPE_IS_OUTPUT(q->type) && 2188 if (!V4L2_TYPE_IS_OUTPUT(q->type) &&
2146 vb->v4l2_buf.flags & V4L2_BUF_FLAG_LAST) 2189 vbuf->flags & V4L2_BUF_FLAG_LAST)
2147 q->last_buffer_dequeued = true; 2190 q->last_buffer_dequeued = true;
2148 /* go back to dequeued state */ 2191 /* go back to dequeued state */
2149 __vb2_dqbuf(vb); 2192 __vb2_dqbuf(vb);
2150 2193
2151 dprintk(1, "dqbuf of buffer %d, with state %d\n", 2194 dprintk(1, "dqbuf of buffer %d, with state %d\n",
2152 vb->v4l2_buf.index, vb->state); 2195 vb->index, vb->state);
2153 2196
2154 return 0; 2197 return 0;
2155} 2198}
@@ -2205,7 +2248,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
2205 /* 2248 /*
2206 * If you see this warning, then the driver isn't cleaning up properly 2249 * If you see this warning, then the driver isn't cleaning up properly
2207 * in stop_streaming(). See the stop_streaming() documentation in 2250 * in stop_streaming(). See the stop_streaming() documentation in
2208 * videobuf2-v4l2.h for more information how buffers should be returned 2251 * videobuf2-core.h for more information how buffers should be returned
2209 * to vb2 in stop_streaming(). 2252 * to vb2 in stop_streaming().
2210 */ 2253 */
2211 if (WARN_ON(atomic_read(&q->owned_by_drv_count))) { 2254 if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
@@ -2407,7 +2450,7 @@ static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
2407 vb = q->bufs[buffer]; 2450 vb = q->bufs[buffer];
2408 2451
2409 for (plane = 0; plane < vb->num_planes; ++plane) { 2452 for (plane = 0; plane < vb->num_planes; ++plane) {
2410 if (vb->v4l2_planes[plane].m.mem_offset == off) { 2453 if (vb->planes[plane].m.offset == off) {
2411 *_buffer = buffer; 2454 *_buffer = buffer;
2412 *_plane = plane; 2455 *_plane = plane;
2413 return 0; 2456 return 0;
@@ -2565,7 +2608,7 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
2565 * The buffer length was page_aligned at __vb2_buf_mem_alloc(), 2608 * The buffer length was page_aligned at __vb2_buf_mem_alloc(),
2566 * so, we need to do the same here. 2609 * so, we need to do the same here.
2567 */ 2610 */
2568 length = PAGE_ALIGN(vb->v4l2_planes[plane].length); 2611 length = PAGE_ALIGN(vb->planes[plane].length);
2569 if (length < (vma->vm_end - vma->vm_start)) { 2612 if (length < (vma->vm_end - vma->vm_start)) {
2570 dprintk(1, 2613 dprintk(1,
2571 "MMAP invalid, as it would overflow buffer length\n"); 2614 "MMAP invalid, as it would overflow buffer length\n");
@@ -2739,7 +2782,7 @@ EXPORT_SYMBOL_GPL(vb2_poll);
2739 * responsible of clearing it's content and setting initial values for some 2782 * responsible of clearing it's content and setting initial values for some
2740 * required entries before calling this function. 2783 * required entries before calling this function.
2741 * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer 2784 * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
2742 * to the struct vb2_queue description in include/media/videobuf2-v4l2.h 2785 * to the struct vb2_queue description in include/media/videobuf2-core.h
2743 * for more information. 2786 * for more information.
2744 */ 2787 */
2745int vb2_queue_init(struct vb2_queue *q) 2788int vb2_queue_init(struct vb2_queue *q)
@@ -2770,7 +2813,7 @@ int vb2_queue_init(struct vb2_queue *q)
2770 init_waitqueue_head(&q->done_wq); 2813 init_waitqueue_head(&q->done_wq);
2771 2814
2772 if (q->buf_struct_size == 0) 2815 if (q->buf_struct_size == 0)
2773 q->buf_struct_size = sizeof(struct vb2_buffer); 2816 q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);
2774 2817
2775 return 0; 2818 return 0;
2776} 2819}
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 87048a14c34d..fbcc1c3e91d5 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -428,8 +428,8 @@ vpfe_video_get_next_buffer(struct vpfe_video_device *video)
428 struct vpfe_cap_buffer, list); 428 struct vpfe_cap_buffer, list);
429 429
430 list_del(&video->next_frm->list); 430 list_del(&video->next_frm->list);
431 video->next_frm->vb.state = VB2_BUF_STATE_ACTIVE; 431 video->next_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
432 return vb2_dma_contig_plane_dma_addr(&video->next_frm->vb, 0); 432 return vb2_dma_contig_plane_dma_addr(&video->next_frm->vb.vb2_buf, 0);
433} 433}
434 434
435/* schedule the next buffer which is available on dma queue */ 435/* schedule the next buffer which is available on dma queue */
@@ -448,8 +448,8 @@ void vpfe_video_schedule_next_buffer(struct vpfe_video_device *video)
448 video->cur_frm = video->next_frm; 448 video->cur_frm = video->next_frm;
449 449
450 list_del(&video->next_frm->list); 450 list_del(&video->next_frm->list);
451 video->next_frm->vb.state = VB2_BUF_STATE_ACTIVE; 451 video->next_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
452 addr = vb2_dma_contig_plane_dma_addr(&video->next_frm->vb, 0); 452 addr = vb2_dma_contig_plane_dma_addr(&video->next_frm->vb.vb2_buf, 0);
453 video->ops->queue(vpfe_dev, addr); 453 video->ops->queue(vpfe_dev, addr);
454 video->state = VPFE_VIDEO_BUFFER_QUEUED; 454 video->state = VPFE_VIDEO_BUFFER_QUEUED;
455} 455}
@@ -460,7 +460,7 @@ void vpfe_video_schedule_bottom_field(struct vpfe_video_device *video)
460 struct vpfe_device *vpfe_dev = video->vpfe_dev; 460 struct vpfe_device *vpfe_dev = video->vpfe_dev;
461 unsigned long addr; 461 unsigned long addr;
462 462
463 addr = vb2_dma_contig_plane_dma_addr(&video->cur_frm->vb, 0); 463 addr = vb2_dma_contig_plane_dma_addr(&video->cur_frm->vb.vb2_buf, 0);
464 addr += video->field_off; 464 addr += video->field_off;
465 video->ops->queue(vpfe_dev, addr); 465 video->ops->queue(vpfe_dev, addr);
466} 466}
@@ -470,8 +470,8 @@ void vpfe_video_process_buffer_complete(struct vpfe_video_device *video)
470{ 470{
471 struct vpfe_pipeline *pipe = &video->pipe; 471 struct vpfe_pipeline *pipe = &video->pipe;
472 472
473 v4l2_get_timestamp(&video->cur_frm->vb.v4l2_buf.timestamp); 473 v4l2_get_timestamp(&video->cur_frm->vb.timestamp);
474 vb2_buffer_done(&video->cur_frm->vb, VB2_BUF_STATE_DONE); 474 vb2_buffer_done(&video->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
475 if (pipe->state == VPFE_PIPELINE_STREAM_CONTINUOUS) 475 if (pipe->state == VPFE_PIPELINE_STREAM_CONTINUOUS)
476 video->cur_frm = video->next_frm; 476 video->cur_frm = video->next_frm;
477} 477}
@@ -1138,12 +1138,13 @@ static int vpfe_buffer_prepare(struct vb2_buffer *vb)
1138 1138
1139static void vpfe_buffer_queue(struct vb2_buffer *vb) 1139static void vpfe_buffer_queue(struct vb2_buffer *vb)
1140{ 1140{
1141 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1141 /* Get the file handle object and device object */ 1142 /* Get the file handle object and device object */
1142 struct vpfe_fh *fh = vb2_get_drv_priv(vb->vb2_queue); 1143 struct vpfe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
1143 struct vpfe_video_device *video = fh->video; 1144 struct vpfe_video_device *video = fh->video;
1144 struct vpfe_device *vpfe_dev = video->vpfe_dev; 1145 struct vpfe_device *vpfe_dev = video->vpfe_dev;
1145 struct vpfe_pipeline *pipe = &video->pipe; 1146 struct vpfe_pipeline *pipe = &video->pipe;
1146 struct vpfe_cap_buffer *buf = container_of(vb, 1147 struct vpfe_cap_buffer *buf = container_of(vbuf,
1147 struct vpfe_cap_buffer, vb); 1148 struct vpfe_cap_buffer, vb);
1148 unsigned long flags; 1149 unsigned long flags;
1149 unsigned long empty; 1150 unsigned long empty;
@@ -1203,10 +1204,10 @@ static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
1203 /* Remove buffer from the buffer queue */ 1204 /* Remove buffer from the buffer queue */
1204 list_del(&video->cur_frm->list); 1205 list_del(&video->cur_frm->list);
1205 /* Mark state of the current frame to active */ 1206 /* Mark state of the current frame to active */
1206 video->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE; 1207 video->cur_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
1207 /* Initialize field_id and started member */ 1208 /* Initialize field_id and started member */
1208 video->field_id = 0; 1209 video->field_id = 0;
1209 addr = vb2_dma_contig_plane_dma_addr(&video->cur_frm->vb, 0); 1210 addr = vb2_dma_contig_plane_dma_addr(&video->cur_frm->vb.vb2_buf, 0);
1210 video->ops->queue(vpfe_dev, addr); 1211 video->ops->queue(vpfe_dev, addr);
1211 video->state = VPFE_VIDEO_BUFFER_QUEUED; 1212 video->state = VPFE_VIDEO_BUFFER_QUEUED;
1212 1213
@@ -1214,10 +1215,12 @@ static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
1214 if (ret) { 1215 if (ret) {
1215 struct vpfe_cap_buffer *buf, *tmp; 1216 struct vpfe_cap_buffer *buf, *tmp;
1216 1217
1217 vb2_buffer_done(&video->cur_frm->vb, VB2_BUF_STATE_QUEUED); 1218 vb2_buffer_done(&video->cur_frm->vb.vb2_buf,
1219 VB2_BUF_STATE_QUEUED);
1218 list_for_each_entry_safe(buf, tmp, &video->dma_queue, list) { 1220 list_for_each_entry_safe(buf, tmp, &video->dma_queue, list) {
1219 list_del(&buf->list); 1221 list_del(&buf->list);
1220 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); 1222 vb2_buffer_done(&buf->vb.vb2_buf,
1223 VB2_BUF_STATE_QUEUED);
1221 } 1224 }
1222 goto unlock_out; 1225 goto unlock_out;
1223 } 1226 }
@@ -1234,7 +1237,8 @@ streamoff:
1234 1237
1235static int vpfe_buffer_init(struct vb2_buffer *vb) 1238static int vpfe_buffer_init(struct vb2_buffer *vb)
1236{ 1239{
1237 struct vpfe_cap_buffer *buf = container_of(vb, 1240 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1241 struct vpfe_cap_buffer *buf = container_of(vbuf,
1238 struct vpfe_cap_buffer, vb); 1242 struct vpfe_cap_buffer, vb);
1239 1243
1240 INIT_LIST_HEAD(&buf->list); 1244 INIT_LIST_HEAD(&buf->list);
@@ -1249,13 +1253,14 @@ static void vpfe_stop_streaming(struct vb2_queue *vq)
1249 1253
1250 /* release all active buffers */ 1254 /* release all active buffers */
1251 if (video->cur_frm == video->next_frm) { 1255 if (video->cur_frm == video->next_frm) {
1252 vb2_buffer_done(&video->cur_frm->vb, VB2_BUF_STATE_ERROR); 1256 vb2_buffer_done(&video->cur_frm->vb.vb2_buf,
1257 VB2_BUF_STATE_ERROR);
1253 } else { 1258 } else {
1254 if (video->cur_frm != NULL) 1259 if (video->cur_frm != NULL)
1255 vb2_buffer_done(&video->cur_frm->vb, 1260 vb2_buffer_done(&video->cur_frm->vb.vb2_buf,
1256 VB2_BUF_STATE_ERROR); 1261 VB2_BUF_STATE_ERROR);
1257 if (video->next_frm != NULL) 1262 if (video->next_frm != NULL)
1258 vb2_buffer_done(&video->next_frm->vb, 1263 vb2_buffer_done(&video->next_frm->vb.vb2_buf,
1259 VB2_BUF_STATE_ERROR); 1264 VB2_BUF_STATE_ERROR);
1260 } 1265 }
1261 1266
@@ -1263,16 +1268,18 @@ static void vpfe_stop_streaming(struct vb2_queue *vq)
1263 video->next_frm = list_entry(video->dma_queue.next, 1268 video->next_frm = list_entry(video->dma_queue.next,
1264 struct vpfe_cap_buffer, list); 1269 struct vpfe_cap_buffer, list);
1265 list_del(&video->next_frm->list); 1270 list_del(&video->next_frm->list);
1266 vb2_buffer_done(&video->next_frm->vb, VB2_BUF_STATE_ERROR); 1271 vb2_buffer_done(&video->next_frm->vb.vb2_buf,
1272 VB2_BUF_STATE_ERROR);
1267 } 1273 }
1268} 1274}
1269 1275
1270static void vpfe_buf_cleanup(struct vb2_buffer *vb) 1276static void vpfe_buf_cleanup(struct vb2_buffer *vb)
1271{ 1277{
1278 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1272 struct vpfe_fh *fh = vb2_get_drv_priv(vb->vb2_queue); 1279 struct vpfe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
1273 struct vpfe_video_device *video = fh->video; 1280 struct vpfe_video_device *video = fh->video;
1274 struct vpfe_device *vpfe_dev = video->vpfe_dev; 1281 struct vpfe_device *vpfe_dev = video->vpfe_dev;
1275 struct vpfe_cap_buffer *buf = container_of(vb, 1282 struct vpfe_cap_buffer *buf = container_of(vbuf,
1276 struct vpfe_cap_buffer, vb); 1283 struct vpfe_cap_buffer, vb);
1277 1284
1278 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buf_cleanup\n"); 1285 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buf_cleanup\n");
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.h b/drivers/staging/media/davinci_vpfe/vpfe_video.h
index 1b1b6c4a56b7..673cefe3ef61 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.h
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.h
@@ -22,6 +22,7 @@
22#ifndef _DAVINCI_VPFE_VIDEO_H 22#ifndef _DAVINCI_VPFE_VIDEO_H
23#define _DAVINCI_VPFE_VIDEO_H 23#define _DAVINCI_VPFE_VIDEO_H
24 24
25#include <media/videobuf2-v4l2.h>
25#include <media/videobuf2-dma-contig.h> 26#include <media/videobuf2-dma-contig.h>
26 27
27struct vpfe_device; 28struct vpfe_device;
@@ -72,7 +73,7 @@ struct vpfe_pipeline {
72 container_of(vdev, struct vpfe_video_device, video_dev) 73 container_of(vdev, struct vpfe_video_device, video_dev)
73 74
74struct vpfe_cap_buffer { 75struct vpfe_cap_buffer {
75 struct vb2_buffer vb; 76 struct vb2_v4l2_buffer vb;
76 struct list_head list; 77 struct list_head list;
77}; 78};
78 79
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index 485a90ce12df..72b4ca8934f8 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -311,7 +311,8 @@ static int iss_video_queue_setup(struct vb2_queue *vq,
311 311
312static void iss_video_buf_cleanup(struct vb2_buffer *vb) 312static void iss_video_buf_cleanup(struct vb2_buffer *vb)
313{ 313{
314 struct iss_buffer *buffer = container_of(vb, struct iss_buffer, vb); 314 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
315 struct iss_buffer *buffer = container_of(vbuf, struct iss_buffer, vb);
315 316
316 if (buffer->iss_addr) 317 if (buffer->iss_addr)
317 buffer->iss_addr = 0; 318 buffer->iss_addr = 0;
@@ -319,8 +320,9 @@ static void iss_video_buf_cleanup(struct vb2_buffer *vb)
319 320
320static int iss_video_buf_prepare(struct vb2_buffer *vb) 321static int iss_video_buf_prepare(struct vb2_buffer *vb)
321{ 322{
323 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
322 struct iss_video_fh *vfh = vb2_get_drv_priv(vb->vb2_queue); 324 struct iss_video_fh *vfh = vb2_get_drv_priv(vb->vb2_queue);
323 struct iss_buffer *buffer = container_of(vb, struct iss_buffer, vb); 325 struct iss_buffer *buffer = container_of(vbuf, struct iss_buffer, vb);
324 struct iss_video *video = vfh->video; 326 struct iss_video *video = vfh->video;
325 unsigned long size = vfh->format.fmt.pix.sizeimage; 327 unsigned long size = vfh->format.fmt.pix.sizeimage;
326 dma_addr_t addr; 328 dma_addr_t addr;
@@ -342,9 +344,10 @@ static int iss_video_buf_prepare(struct vb2_buffer *vb)
342 344
343static void iss_video_buf_queue(struct vb2_buffer *vb) 345static void iss_video_buf_queue(struct vb2_buffer *vb)
344{ 346{
347 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
345 struct iss_video_fh *vfh = vb2_get_drv_priv(vb->vb2_queue); 348 struct iss_video_fh *vfh = vb2_get_drv_priv(vb->vb2_queue);
346 struct iss_video *video = vfh->video; 349 struct iss_video *video = vfh->video;
347 struct iss_buffer *buffer = container_of(vb, struct iss_buffer, vb); 350 struct iss_buffer *buffer = container_of(vbuf, struct iss_buffer, vb);
348 struct iss_pipeline *pipe = to_iss_pipeline(&video->video.entity); 351 struct iss_pipeline *pipe = to_iss_pipeline(&video->video.entity);
349 unsigned long flags; 352 unsigned long flags;
350 bool empty; 353 bool empty;
@@ -432,7 +435,7 @@ struct iss_buffer *omap4iss_video_buffer_next(struct iss_video *video)
432 list_del(&buf->list); 435 list_del(&buf->list);
433 spin_unlock_irqrestore(&video->qlock, flags); 436 spin_unlock_irqrestore(&video->qlock, flags);
434 437
435 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); 438 v4l2_get_timestamp(&buf->vb.timestamp);
436 439
437 /* Do frame number propagation only if this is the output video node. 440 /* Do frame number propagation only if this is the output video node.
438 * Frame number either comes from the CSI receivers or it gets 441 * Frame number either comes from the CSI receivers or it gets
@@ -441,12 +444,12 @@ struct iss_buffer *omap4iss_video_buffer_next(struct iss_video *video)
441 * first, so the input number might lag behind by 1 in some cases. 444 * first, so the input number might lag behind by 1 in some cases.
442 */ 445 */
443 if (video == pipe->output && !pipe->do_propagation) 446 if (video == pipe->output && !pipe->do_propagation)
444 buf->vb.v4l2_buf.sequence = 447 buf->vb.sequence =
445 atomic_inc_return(&pipe->frame_number); 448 atomic_inc_return(&pipe->frame_number);
446 else 449 else
447 buf->vb.v4l2_buf.sequence = atomic_read(&pipe->frame_number); 450 buf->vb.sequence = atomic_read(&pipe->frame_number);
448 451
449 vb2_buffer_done(&buf->vb, pipe->error ? 452 vb2_buffer_done(&buf->vb.vb2_buf, pipe->error ?
450 VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); 453 VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
451 pipe->error = false; 454 pipe->error = false;
452 455
@@ -477,7 +480,7 @@ struct iss_buffer *omap4iss_video_buffer_next(struct iss_video *video)
477 buf = list_first_entry(&video->dmaqueue, struct iss_buffer, 480 buf = list_first_entry(&video->dmaqueue, struct iss_buffer,
478 list); 481 list);
479 spin_unlock_irqrestore(&video->qlock, flags); 482 spin_unlock_irqrestore(&video->qlock, flags);
480 buf->vb.state = VB2_BUF_STATE_ACTIVE; 483 buf->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
481 return buf; 484 return buf;
482} 485}
483 486
@@ -500,7 +503,7 @@ void omap4iss_video_cancel_stream(struct iss_video *video)
500 buf = list_first_entry(&video->dmaqueue, struct iss_buffer, 503 buf = list_first_entry(&video->dmaqueue, struct iss_buffer,
501 list); 504 list);
502 list_del(&buf->list); 505 list_del(&buf->list);
503 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 506 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
504 } 507 }
505 508
506 vb2_queue_error(video->queue); 509 vb2_queue_error(video->queue);
diff --git a/drivers/staging/media/omap4iss/iss_video.h b/drivers/staging/media/omap4iss/iss_video.h
index 6a57b5648e4d..41532eda1277 100644
--- a/drivers/staging/media/omap4iss/iss_video.h
+++ b/drivers/staging/media/omap4iss/iss_video.h
@@ -117,12 +117,12 @@ static inline int iss_pipeline_ready(struct iss_pipeline *pipe)
117 */ 117 */
118struct iss_buffer { 118struct iss_buffer {
119 /* common v4l buffer stuff -- must be first */ 119 /* common v4l buffer stuff -- must be first */
120 struct vb2_buffer vb; 120 struct vb2_v4l2_buffer vb;
121 struct list_head list; 121 struct list_head list;
122 dma_addr_t iss_addr; 122 dma_addr_t iss_addr;
123}; 123};
124 124
125#define to_iss_buffer(buf) container_of(buf, struct iss_buffer, buffer) 125#define to_iss_buffer(buf) container_of(buf, struct iss_buffer, vb)
126 126
127enum iss_video_dmaqueue_flags { 127enum iss_video_dmaqueue_flags {
128 /* Set if DMA queue becomes empty when ISS_PIPELINE_STREAM_CONTINUOUS */ 128 /* Set if DMA queue becomes empty when ISS_PIPELINE_STREAM_CONTINUOUS */
diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c
index d617c39a0052..3628938785ac 100644
--- a/drivers/usb/gadget/function/uvc_queue.c
+++ b/drivers/usb/gadget/function/uvc_queue.c
@@ -61,9 +61,10 @@ static int uvc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
61static int uvc_buffer_prepare(struct vb2_buffer *vb) 61static int uvc_buffer_prepare(struct vb2_buffer *vb)
62{ 62{
63 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); 63 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
64 struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf); 64 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
65 struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
65 66
66 if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT && 67 if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
67 vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) { 68 vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
68 uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n"); 69 uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
69 return -EINVAL; 70 return -EINVAL;
@@ -75,7 +76,7 @@ static int uvc_buffer_prepare(struct vb2_buffer *vb)
75 buf->state = UVC_BUF_STATE_QUEUED; 76 buf->state = UVC_BUF_STATE_QUEUED;
76 buf->mem = vb2_plane_vaddr(vb, 0); 77 buf->mem = vb2_plane_vaddr(vb, 0);
77 buf->length = vb2_plane_size(vb, 0); 78 buf->length = vb2_plane_size(vb, 0);
78 if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 79 if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
79 buf->bytesused = 0; 80 buf->bytesused = 0;
80 else 81 else
81 buf->bytesused = vb2_get_plane_payload(vb, 0); 82 buf->bytesused = vb2_get_plane_payload(vb, 0);
@@ -86,7 +87,8 @@ static int uvc_buffer_prepare(struct vb2_buffer *vb)
86static void uvc_buffer_queue(struct vb2_buffer *vb) 87static void uvc_buffer_queue(struct vb2_buffer *vb)
87{ 88{
88 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); 89 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
89 struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf); 90 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
91 struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
90 unsigned long flags; 92 unsigned long flags;
91 93
92 spin_lock_irqsave(&queue->irqlock, flags); 94 spin_lock_irqsave(&queue->irqlock, flags);
@@ -98,7 +100,7 @@ static void uvc_buffer_queue(struct vb2_buffer *vb)
98 * directly. The next QBUF call will fail with -ENODEV. 100 * directly. The next QBUF call will fail with -ENODEV.
99 */ 101 */
100 buf->state = UVC_BUF_STATE_ERROR; 102 buf->state = UVC_BUF_STATE_ERROR;
101 vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR); 103 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
102 } 104 }
103 105
104 spin_unlock_irqrestore(&queue->irqlock, flags); 106 spin_unlock_irqrestore(&queue->irqlock, flags);
@@ -242,7 +244,7 @@ void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect)
242 queue); 244 queue);
243 list_del(&buf->queue); 245 list_del(&buf->queue);
244 buf->state = UVC_BUF_STATE_ERROR; 246 buf->state = UVC_BUF_STATE_ERROR;
245 vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR); 247 vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
246 } 248 }
247 /* This must be protected by the irqlock spinlock to avoid race 249 /* This must be protected by the irqlock spinlock to avoid race
248 * conditions between uvc_queue_buffer and the disconnection event that 250 * conditions between uvc_queue_buffer and the disconnection event that
@@ -314,7 +316,7 @@ struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue,
314 if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) && 316 if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) &&
315 buf->length != buf->bytesused) { 317 buf->length != buf->bytesused) {
316 buf->state = UVC_BUF_STATE_QUEUED; 318 buf->state = UVC_BUF_STATE_QUEUED;
317 vb2_set_plane_payload(&buf->buf, 0, 0); 319 vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);
318 return buf; 320 return buf;
319 } 321 }
320 322
@@ -325,12 +327,12 @@ struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue,
325 else 327 else
326 nextbuf = NULL; 328 nextbuf = NULL;
327 329
328 buf->buf.v4l2_buf.field = V4L2_FIELD_NONE; 330 buf->buf.field = V4L2_FIELD_NONE;
329 buf->buf.v4l2_buf.sequence = queue->sequence++; 331 buf->buf.sequence = queue->sequence++;
330 v4l2_get_timestamp(&buf->buf.v4l2_buf.timestamp); 332 v4l2_get_timestamp(&buf->buf.timestamp);
331 333
332 vb2_set_plane_payload(&buf->buf, 0, buf->bytesused); 334 vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
333 vb2_buffer_done(&buf->buf, VB2_BUF_STATE_DONE); 335 vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
334 336
335 return nextbuf; 337 return nextbuf;
336} 338}
diff --git a/drivers/usb/gadget/function/uvc_queue.h b/drivers/usb/gadget/function/uvc_queue.h
index 0ffe498eaacd..ac461a9a1a70 100644
--- a/drivers/usb/gadget/function/uvc_queue.h
+++ b/drivers/usb/gadget/function/uvc_queue.h
@@ -26,7 +26,7 @@ enum uvc_buffer_state {
26}; 26};
27 27
28struct uvc_buffer { 28struct uvc_buffer {
29 struct vb2_buffer buf; 29 struct vb2_v4l2_buffer buf;
30 struct list_head queue; 30 struct list_head queue;
31 31
32 enum uvc_buffer_state state; 32 enum uvc_buffer_state state;
diff --git a/include/media/davinci/vpbe_display.h b/include/media/davinci/vpbe_display.h
index fa0247ad815f..e14a9370b67e 100644
--- a/include/media/davinci/vpbe_display.h
+++ b/include/media/davinci/vpbe_display.h
@@ -17,6 +17,7 @@
17#include <linux/videodev2.h> 17#include <linux/videodev2.h>
18#include <media/v4l2-common.h> 18#include <media/v4l2-common.h>
19#include <media/v4l2-fh.h> 19#include <media/v4l2-fh.h>
20#include <media/videobuf2-v4l2.h>
20#include <media/videobuf2-dma-contig.h> 21#include <media/videobuf2-dma-contig.h>
21#include <media/davinci/vpbe_types.h> 22#include <media/davinci/vpbe_types.h>
22#include <media/davinci/vpbe_osd.h> 23#include <media/davinci/vpbe_osd.h>
@@ -64,7 +65,7 @@ struct display_layer_info {
64}; 65};
65 66
66struct vpbe_disp_buffer { 67struct vpbe_disp_buffer {
67 struct vb2_buffer vb; 68 struct vb2_v4l2_buffer vb;
68 struct list_head list; 69 struct list_head list;
69}; 70};
70 71
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 5c60da9986c1..5a9597dd1ee0 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -90,7 +90,7 @@ struct v4l2_m2m_ctx {
90}; 90};
91 91
92struct v4l2_m2m_buffer { 92struct v4l2_m2m_buffer {
93 struct vb2_buffer vb; 93 struct vb2_v4l2_buffer vb;
94 struct list_head list; 94 struct list_head list;
95}; 95};
96 96
@@ -105,9 +105,9 @@ void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
105 struct v4l2_m2m_ctx *m2m_ctx); 105 struct v4l2_m2m_ctx *m2m_ctx);
106 106
107static inline void 107static inline void
108v4l2_m2m_buf_done(struct vb2_buffer *buf, enum vb2_buffer_state state) 108v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
109{ 109{
110 vb2_buffer_done(buf, state); 110 vb2_buffer_done(&buf->vb2_buf, state);
111} 111}
112 112
113int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 113int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
@@ -160,7 +160,8 @@ static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
160 160
161void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx); 161void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);
162 162
163void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb); 163void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
164 struct vb2_v4l2_buffer *vbuf);
164 165
165/** 166/**
166 * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for 167 * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 19990d7bf270..108fa160168a 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -111,10 +111,38 @@ struct vb2_mem_ops {
111 int (*mmap)(void *buf_priv, struct vm_area_struct *vma); 111 int (*mmap)(void *buf_priv, struct vm_area_struct *vma);
112}; 112};
113 113
114/**
115 * struct vb2_plane - plane information
116 * @mem_priv: private data with this plane
117 * @dbuf: dma_buf - shared buffer object
118 * @dbuf_mapped: flag to show whether dbuf is mapped or not
119 * @bytesused: number of bytes occupied by data in the plane (payload)
120 * @length: size of this plane (NOT the payload) in bytes
121 * @mem_offset: when memory in the associated struct vb2_buffer is
122 * VB2_MEMORY_MMAP, equals the offset from the start of
123 * the device memory for this plane (or is a "cookie" that
124 * should be passed to mmap() called on the video node)
125 * @userptr: when memory is VB2_MEMORY_USERPTR, a userspace pointer
126 * pointing to this plane
127 * @fd: when memory is VB2_MEMORY_DMABUF, a userspace file
128 * descriptor associated with this plane
129 * @data_offset: offset in the plane to the start of data; usually 0,
130 * unless there is a header in front of the data
131 * Should contain enough information to be able to cover all the fields
132 * of struct v4l2_plane at videodev2.h
133 */
114struct vb2_plane { 134struct vb2_plane {
115 void *mem_priv; 135 void *mem_priv;
116 struct dma_buf *dbuf; 136 struct dma_buf *dbuf;
117 unsigned int dbuf_mapped; 137 unsigned int dbuf_mapped;
138 unsigned int bytesused;
139 unsigned int length;
140 union {
141 unsigned int offset;
142 unsigned long userptr;
143 int fd;
144 } m;
145 unsigned int data_offset;
118}; 146};
119 147
120/** 148/**
@@ -163,43 +191,32 @@ struct vb2_queue;
163 191
164/** 192/**
165 * struct vb2_buffer - represents a video buffer 193 * struct vb2_buffer - represents a video buffer
166 * @v4l2_buf: struct v4l2_buffer associated with this buffer; can
167 * be read by the driver and relevant entries can be
168 * changed by the driver in case of CAPTURE types
169 * (such as timestamp)
170 * @v4l2_planes: struct v4l2_planes associated with this buffer; can
171 * be read by the driver and relevant entries can be
172 * changed by the driver in case of CAPTURE types
173 * (such as bytesused); NOTE that even for single-planar
174 * types, the v4l2_planes[0] struct should be used
175 * instead of v4l2_buf for filling bytesused - drivers
176 * should use the vb2_set_plane_payload() function for that
177 * @vb2_queue: the queue to which this driver belongs 194 * @vb2_queue: the queue to which this driver belongs
195 * @index: id number of the buffer
196 * @type: buffer type
197 * @memory: the method, in which the actual data is passed
178 * @num_planes: number of planes in the buffer 198 * @num_planes: number of planes in the buffer
179 * on an internal driver queue 199 * on an internal driver queue
200 * @planes: private per-plane information; do not change
180 * @state: current buffer state; do not change 201 * @state: current buffer state; do not change
181 * @queued_entry: entry on the queued buffers list, which holds all 202 * @queued_entry: entry on the queued buffers list, which holds all
182 * buffers queued from userspace 203 * buffers queued from userspace
183 * @done_entry: entry on the list that stores all buffers ready to 204 * @done_entry: entry on the list that stores all buffers ready to
184 * be dequeued to userspace 205 * be dequeued to userspace
185 * @planes: private per-plane information; do not change
186 */ 206 */
187struct vb2_buffer { 207struct vb2_buffer {
188 struct v4l2_buffer v4l2_buf;
189 struct v4l2_plane v4l2_planes[VIDEO_MAX_PLANES];
190
191 struct vb2_queue *vb2_queue; 208 struct vb2_queue *vb2_queue;
192 209 unsigned int index;
210 unsigned int type;
211 unsigned int memory;
193 unsigned int num_planes; 212 unsigned int num_planes;
213 struct vb2_plane planes[VIDEO_MAX_PLANES];
194 214
195/* Private: internal use only */ 215 /* Private: internal use only */
196 enum vb2_buffer_state state; 216 enum vb2_buffer_state state;
197 217
198 struct list_head queued_entry; 218 struct list_head queued_entry;
199 struct list_head done_entry; 219 struct list_head done_entry;
200
201 struct vb2_plane planes[VIDEO_MAX_PLANES];
202
203#ifdef CONFIG_VIDEO_ADV_DEBUG 220#ifdef CONFIG_VIDEO_ADV_DEBUG
204 /* 221 /*
205 * Counters for how often these buffer-related ops are 222 * Counters for how often these buffer-related ops are
@@ -354,7 +371,8 @@ struct v4l2_fh;
354 * @drv_priv: driver private data 371 * @drv_priv: driver private data
355 * @buf_struct_size: size of the driver-specific buffer structure; 372 * @buf_struct_size: size of the driver-specific buffer structure;
356 * "0" indicates the driver doesn't want to use a custom buffer 373 * "0" indicates the driver doesn't want to use a custom buffer
357 * structure type, so sizeof(struct vb2_buffer) will is used 374 * structure type. for example, sizeof(struct vb2_v4l2_buffer)
375 * will be used for v4l2.
358 * @timestamp_flags: Timestamp flags; V4L2_BUF_FLAG_TIMESTAMP_* and 376 * @timestamp_flags: Timestamp flags; V4L2_BUF_FLAG_TIMESTAMP_* and
359 * V4L2_BUF_FLAG_TSTAMP_SRC_* 377 * V4L2_BUF_FLAG_TSTAMP_SRC_*
360 * @gfp_flags: additional gfp flags used when allocating the buffers. 378 * @gfp_flags: additional gfp flags used when allocating the buffers.
@@ -573,7 +591,7 @@ static inline void vb2_set_plane_payload(struct vb2_buffer *vb,
573 unsigned int plane_no, unsigned long size) 591 unsigned int plane_no, unsigned long size)
574{ 592{
575 if (plane_no < vb->num_planes) 593 if (plane_no < vb->num_planes)
576 vb->v4l2_planes[plane_no].bytesused = size; 594 vb->planes[plane_no].bytesused = size;
577} 595}
578 596
579/** 597/**
@@ -585,7 +603,7 @@ static inline unsigned long vb2_get_plane_payload(struct vb2_buffer *vb,
585 unsigned int plane_no) 603 unsigned int plane_no)
586{ 604{
587 if (plane_no < vb->num_planes) 605 if (plane_no < vb->num_planes)
588 return vb->v4l2_planes[plane_no].bytesused; 606 return vb->planes[plane_no].bytesused;
589 return 0; 607 return 0;
590} 608}
591 609
@@ -598,7 +616,7 @@ static inline unsigned long
598vb2_plane_size(struct vb2_buffer *vb, unsigned int plane_no) 616vb2_plane_size(struct vb2_buffer *vb, unsigned int plane_no)
599{ 617{
600 if (plane_no < vb->num_planes) 618 if (plane_no < vb->num_planes)
601 return vb->v4l2_planes[plane_no].length; 619 return vb->planes[plane_no].length;
602 return 0; 620 return 0;
603} 621}
604 622
diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h
index d4a4d9acd39b..20d8ad20066c 100644
--- a/include/media/videobuf2-v4l2.h
+++ b/include/media/videobuf2-v4l2.h
@@ -12,6 +12,34 @@
12#ifndef _MEDIA_VIDEOBUF2_V4L2_H 12#ifndef _MEDIA_VIDEOBUF2_V4L2_H
13#define _MEDIA_VIDEOBUF2_V4L2_H 13#define _MEDIA_VIDEOBUF2_V4L2_H
14 14
15#include <linux/videodev2.h>
15#include <media/videobuf2-core.h> 16#include <media/videobuf2-core.h>
16 17
18/**
19 * struct vb2_v4l2_buffer - video buffer information for v4l2
20 * @vb2_buf: video buffer 2
21 * @flags: buffer informational flags
22 * @field: enum v4l2_field; field order of the image in the buffer
23 * @timestamp: frame timestamp
24 * @timecode: frame timecode
25 * @sequence: sequence count of this frame
26 * Should contain enough information to be able to cover all the fields
27 * of struct v4l2_buffer at videodev2.h
28 */
29struct vb2_v4l2_buffer {
30 struct vb2_buffer vb2_buf;
31
32 __u32 flags;
33 __u32 field;
34 struct timeval timestamp;
35 struct v4l2_timecode timecode;
36 __u32 sequence;
37};
38
39/**
40 * to_vb2_v4l2_buffer() - cast struct vb2_buffer * to struct vb2_v4l2_buffer *
41 */
42#define to_vb2_v4l2_buffer(vb) \
43 (container_of(vb, struct vb2_v4l2_buffer, vb2_buf))
44
17#endif /* _MEDIA_VIDEOBUF2_V4L2_H */ 45#endif /* _MEDIA_VIDEOBUF2_V4L2_H */
diff --git a/include/trace/events/v4l2.h b/include/trace/events/v4l2.h
index dbf017bfddd9..b015b38a4dda 100644
--- a/include/trace/events/v4l2.h
+++ b/include/trace/events/v4l2.h
@@ -202,27 +202,28 @@ DECLARE_EVENT_CLASS(vb2_event_class,
202 ), 202 ),
203 203
204 TP_fast_assign( 204 TP_fast_assign(
205 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
205 __entry->minor = q->owner ? q->owner->vdev->minor : -1; 206 __entry->minor = q->owner ? q->owner->vdev->minor : -1;
206 __entry->queued_count = q->queued_count; 207 __entry->queued_count = q->queued_count;
207 __entry->owned_by_drv_count = 208 __entry->owned_by_drv_count =
208 atomic_read(&q->owned_by_drv_count); 209 atomic_read(&q->owned_by_drv_count);
209 __entry->index = vb->v4l2_buf.index; 210 __entry->index = vb->index;
210 __entry->type = vb->v4l2_buf.type; 211 __entry->type = vb->type;
211 __entry->bytesused = vb->v4l2_planes[0].bytesused; 212 __entry->bytesused = vb->planes[0].bytesused;
212 __entry->flags = vb->v4l2_buf.flags; 213 __entry->flags = vbuf->flags;
213 __entry->field = vb->v4l2_buf.field; 214 __entry->field = vbuf->field;
214 __entry->timestamp = timeval_to_ns(&vb->v4l2_buf.timestamp); 215 __entry->timestamp = timeval_to_ns(&vbuf->timestamp);
215 __entry->timecode_type = vb->v4l2_buf.timecode.type; 216 __entry->timecode_type = vbuf->timecode.type;
216 __entry->timecode_flags = vb->v4l2_buf.timecode.flags; 217 __entry->timecode_flags = vbuf->timecode.flags;
217 __entry->timecode_frames = vb->v4l2_buf.timecode.frames; 218 __entry->timecode_frames = vbuf->timecode.frames;
218 __entry->timecode_seconds = vb->v4l2_buf.timecode.seconds; 219 __entry->timecode_seconds = vbuf->timecode.seconds;
219 __entry->timecode_minutes = vb->v4l2_buf.timecode.minutes; 220 __entry->timecode_minutes = vbuf->timecode.minutes;
220 __entry->timecode_hours = vb->v4l2_buf.timecode.hours; 221 __entry->timecode_hours = vbuf->timecode.hours;
221 __entry->timecode_userbits0 = vb->v4l2_buf.timecode.userbits[0]; 222 __entry->timecode_userbits0 = vbuf->timecode.userbits[0];
222 __entry->timecode_userbits1 = vb->v4l2_buf.timecode.userbits[1]; 223 __entry->timecode_userbits1 = vbuf->timecode.userbits[1];
223 __entry->timecode_userbits2 = vb->v4l2_buf.timecode.userbits[2]; 224 __entry->timecode_userbits2 = vbuf->timecode.userbits[2];
224 __entry->timecode_userbits3 = vb->v4l2_buf.timecode.userbits[3]; 225 __entry->timecode_userbits3 = vbuf->timecode.userbits[3];
225 __entry->sequence = vb->v4l2_buf.sequence; 226 __entry->sequence = vbuf->sequence;
226 ), 227 ),
227 228
228 TP_printk("minor = %d, queued = %u, owned_by_drv = %d, index = %u, " 229 TP_printk("minor = %d, queued = %u, owned_by_drv = %d, index = %u, "