about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorHans Verkuil <hans.verkuil@cisco.com>2014-11-24 06:50:31 -0500
committerMauro Carvalho Chehab <mchehab@osg.samsung.com>2014-11-25 06:01:16 -0500
commitd790b7eda953df474f470169ebdf111c02fa7a2d (patch)
tree31ad9dce666141bf0d83989717188fe8700ac310
parent0c3a14c177aa85afb991e7c2be3921aa9a52a893 (diff)
[media] vb2-dma-sg: move dma_(un)map_sg here
This moves dma_(un)map_sg to the get_userptr/put_userptr and alloc/put memops of videobuf2-dma-sg.c and adds dma_sync_sg_for_device/cpu to the prepare/finish memops. Now that vb2-dma-sg will sync the buffers for you in the prepare/finish memops we can drop that from the drivers that use dma-sg. For the solo6x10 driver that was a bit more involved because it needs to copy JPEG or MPEG headers to the buffer before returning it to userspace, and that cannot be done in the old place since the buffer there is still setup for DMA access, not for CPU access. However, the buf_finish op is the ideal place to do this. By the time buf_finish is called the buffer is available for CPU access, so copying to the buffer is fine. [mchehab@osg.samsung.com: Fix a compilation breakage: drivers/media/v4l2-core/videobuf2-dma-sg.c:150:19: error: 'struct vb2_dma_sg_buf' has no member named 'dma_sgt'] Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com> Acked-by: Pawel Osciak <pawel@osciak.com> Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
-rw-r--r--drivers/media/pci/cx23885/cx23885-417.c3
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c5
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c3
-rw-r--r--drivers/media/pci/cx23885/cx23885-vbi.c9
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c9
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c1
-rw-r--r--drivers/media/pci/saa7134/saa7134-ts.c16
-rw-r--r--drivers/media/pci/saa7134/saa7134-vbi.c15
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c15
-rw-r--r--drivers/media/pci/saa7134/saa7134.h1
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c50
-rw-r--r--drivers/media/pci/tw68/tw68-video.c8
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c18
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-sg.c41
14 files changed, 64 insertions, 130 deletions
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index d72a3ec348ef..e4901a503c73 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -1167,11 +1167,8 @@ static void buffer_finish(struct vb2_buffer *vb)
1167 struct cx23885_dev *dev = vb->vb2_queue->drv_priv; 1167 struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
1168 struct cx23885_buffer *buf = container_of(vb, 1168 struct cx23885_buffer *buf = container_of(vb,
1169 struct cx23885_buffer, vb); 1169 struct cx23885_buffer, vb);
1170 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
1171 1170
1172 cx23885_free_buffer(dev, buf); 1171 cx23885_free_buffer(dev, buf);
1173
1174 dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
1175} 1172}
1176 1173
1177static void buffer_queue(struct vb2_buffer *vb) 1174static void buffer_queue(struct vb2_buffer *vb)
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index d452b5c076e6..d07b04a5ce36 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -1453,17 +1453,12 @@ int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
1453 struct cx23885_dev *dev = port->dev; 1453 struct cx23885_dev *dev = port->dev;
1454 int size = port->ts_packet_size * port->ts_packet_count; 1454 int size = port->ts_packet_size * port->ts_packet_count;
1455 struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb, 0); 1455 struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb, 0);
1456 int rc;
1457 1456
1458 dprintk(1, "%s: %p\n", __func__, buf); 1457 dprintk(1, "%s: %p\n", __func__, buf);
1459 if (vb2_plane_size(&buf->vb, 0) < size) 1458 if (vb2_plane_size(&buf->vb, 0) < size)
1460 return -EINVAL; 1459 return -EINVAL;
1461 vb2_set_plane_payload(&buf->vb, 0, size); 1460 vb2_set_plane_payload(&buf->vb, 0, size);
1462 1461
1463 rc = dma_map_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
1464 if (!rc)
1465 return -EIO;
1466
1467 cx23885_risc_databuffer(dev->pci, &buf->risc, 1462 cx23885_risc_databuffer(dev->pci, &buf->risc,
1468 sgt->sgl, 1463 sgt->sgl,
1469 port->ts_packet_size, port->ts_packet_count, 0); 1464 port->ts_packet_size, port->ts_packet_count, 0);
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 44fafba65c6f..c47d18270cfc 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -123,11 +123,8 @@ static void buffer_finish(struct vb2_buffer *vb)
123 struct cx23885_dev *dev = port->dev; 123 struct cx23885_dev *dev = port->dev;
124 struct cx23885_buffer *buf = container_of(vb, 124 struct cx23885_buffer *buf = container_of(vb,
125 struct cx23885_buffer, vb); 125 struct cx23885_buffer, vb);
126 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
127 126
128 cx23885_free_buffer(dev, buf); 127 cx23885_free_buffer(dev, buf);
129
130 dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
131} 128}
132 129
133static void buffer_queue(struct vb2_buffer *vb) 130static void buffer_queue(struct vb2_buffer *vb)
diff --git a/drivers/media/pci/cx23885/cx23885-vbi.c b/drivers/media/pci/cx23885/cx23885-vbi.c
index 1d339a69f0c8..d362d3838c84 100644
--- a/drivers/media/pci/cx23885/cx23885-vbi.c
+++ b/drivers/media/pci/cx23885/cx23885-vbi.c
@@ -143,7 +143,6 @@ static int buffer_prepare(struct vb2_buffer *vb)
143 struct cx23885_buffer, vb); 143 struct cx23885_buffer, vb);
144 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0); 144 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
145 unsigned lines = VBI_PAL_LINE_COUNT; 145 unsigned lines = VBI_PAL_LINE_COUNT;
146 int ret;
147 146
148 if (dev->tvnorm & V4L2_STD_525_60) 147 if (dev->tvnorm & V4L2_STD_525_60)
149 lines = VBI_NTSC_LINE_COUNT; 148 lines = VBI_NTSC_LINE_COUNT;
@@ -152,10 +151,6 @@ static int buffer_prepare(struct vb2_buffer *vb)
152 return -EINVAL; 151 return -EINVAL;
153 vb2_set_plane_payload(vb, 0, lines * VBI_LINE_LENGTH * 2); 152 vb2_set_plane_payload(vb, 0, lines * VBI_LINE_LENGTH * 2);
154 153
155 ret = dma_map_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
156 if (!ret)
157 return -EIO;
158
159 cx23885_risc_vbibuffer(dev->pci, &buf->risc, 154 cx23885_risc_vbibuffer(dev->pci, &buf->risc,
160 sgt->sgl, 155 sgt->sgl,
161 0, VBI_LINE_LENGTH * lines, 156 0, VBI_LINE_LENGTH * lines,
@@ -166,14 +161,10 @@ static int buffer_prepare(struct vb2_buffer *vb)
166 161
167static void buffer_finish(struct vb2_buffer *vb) 162static void buffer_finish(struct vb2_buffer *vb)
168{ 163{
169 struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
170 struct cx23885_buffer *buf = container_of(vb, 164 struct cx23885_buffer *buf = container_of(vb,
171 struct cx23885_buffer, vb); 165 struct cx23885_buffer, vb);
172 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
173 166
174 cx23885_free_buffer(vb->vb2_queue->drv_priv, buf); 167 cx23885_free_buffer(vb->vb2_queue->drv_priv, buf);
175
176 dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
177} 168}
178 169
179/* 170/*
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 371eecfe7b32..5e93c682a3f5 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -335,7 +335,6 @@ static int buffer_prepare(struct vb2_buffer *vb)
335 u32 line0_offset, line1_offset; 335 u32 line0_offset, line1_offset;
336 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0); 336 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
337 int field_tff; 337 int field_tff;
338 int ret;
339 338
340 buf->bpl = (dev->width * dev->fmt->depth) >> 3; 339 buf->bpl = (dev->width * dev->fmt->depth) >> 3;
341 340
@@ -343,10 +342,6 @@ static int buffer_prepare(struct vb2_buffer *vb)
343 return -EINVAL; 342 return -EINVAL;
344 vb2_set_plane_payload(vb, 0, dev->height * buf->bpl); 343 vb2_set_plane_payload(vb, 0, dev->height * buf->bpl);
345 344
346 ret = dma_map_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
347 if (!ret)
348 return -EIO;
349
350 switch (dev->field) { 345 switch (dev->field) {
351 case V4L2_FIELD_TOP: 346 case V4L2_FIELD_TOP:
352 cx23885_risc_buffer(dev->pci, &buf->risc, 347 cx23885_risc_buffer(dev->pci, &buf->risc,
@@ -414,14 +409,10 @@ static int buffer_prepare(struct vb2_buffer *vb)
414 409
415static void buffer_finish(struct vb2_buffer *vb) 410static void buffer_finish(struct vb2_buffer *vb)
416{ 411{
417 struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
418 struct cx23885_buffer *buf = container_of(vb, 412 struct cx23885_buffer *buf = container_of(vb,
419 struct cx23885_buffer, vb); 413 struct cx23885_buffer, vb);
420 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
421 414
422 cx23885_free_buffer(vb->vb2_queue->drv_priv, buf); 415 cx23885_free_buffer(vb->vb2_queue->drv_priv, buf);
423
424 dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
425} 416}
426 417
427/* 418/*
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index 8b3bb78b503b..594dc3ad4750 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -96,7 +96,6 @@ static struct vb2_ops saa7134_empress_qops = {
96 .queue_setup = saa7134_ts_queue_setup, 96 .queue_setup = saa7134_ts_queue_setup,
97 .buf_init = saa7134_ts_buffer_init, 97 .buf_init = saa7134_ts_buffer_init,
98 .buf_prepare = saa7134_ts_buffer_prepare, 98 .buf_prepare = saa7134_ts_buffer_prepare,
99 .buf_finish = saa7134_ts_buffer_finish,
100 .buf_queue = saa7134_vb2_buffer_queue, 99 .buf_queue = saa7134_vb2_buffer_queue,
101 .wait_prepare = vb2_ops_wait_prepare, 100 .wait_prepare = vb2_ops_wait_prepare,
102 .wait_finish = vb2_ops_wait_finish, 101 .wait_finish = vb2_ops_wait_finish,
diff --git a/drivers/media/pci/saa7134/saa7134-ts.c b/drivers/media/pci/saa7134/saa7134-ts.c
index 8eff4a7d8ba3..2709b83d57b1 100644
--- a/drivers/media/pci/saa7134/saa7134-ts.c
+++ b/drivers/media/pci/saa7134/saa7134-ts.c
@@ -94,7 +94,6 @@ int saa7134_ts_buffer_prepare(struct vb2_buffer *vb2)
94 struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2); 94 struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
95 struct sg_table *dma = vb2_dma_sg_plane_desc(vb2, 0); 95 struct sg_table *dma = vb2_dma_sg_plane_desc(vb2, 0);
96 unsigned int lines, llength, size; 96 unsigned int lines, llength, size;
97 int ret;
98 97
99 dprintk("buffer_prepare [%p]\n", buf); 98 dprintk("buffer_prepare [%p]\n", buf);
100 99
@@ -108,25 +107,11 @@ int saa7134_ts_buffer_prepare(struct vb2_buffer *vb2)
108 vb2_set_plane_payload(vb2, 0, size); 107 vb2_set_plane_payload(vb2, 0, size);
109 vb2->v4l2_buf.field = dev->field; 108 vb2->v4l2_buf.field = dev->field;
110 109
111 ret = dma_map_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
112 if (!ret)
113 return -EIO;
114 return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents, 110 return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents,
115 saa7134_buffer_startpage(buf)); 111 saa7134_buffer_startpage(buf));
116} 112}
117EXPORT_SYMBOL_GPL(saa7134_ts_buffer_prepare); 113EXPORT_SYMBOL_GPL(saa7134_ts_buffer_prepare);
118 114
119void saa7134_ts_buffer_finish(struct vb2_buffer *vb2)
120{
121 struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
122 struct saa7134_dev *dev = dmaq->dev;
123 struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
124 struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
125
126 dma_unmap_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
127}
128EXPORT_SYMBOL_GPL(saa7134_ts_buffer_finish);
129
130int saa7134_ts_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt, 115int saa7134_ts_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
131 unsigned int *nbuffers, unsigned int *nplanes, 116 unsigned int *nbuffers, unsigned int *nplanes,
132 unsigned int sizes[], void *alloc_ctxs[]) 117 unsigned int sizes[], void *alloc_ctxs[])
@@ -188,7 +173,6 @@ struct vb2_ops saa7134_ts_qops = {
188 .queue_setup = saa7134_ts_queue_setup, 173 .queue_setup = saa7134_ts_queue_setup,
189 .buf_init = saa7134_ts_buffer_init, 174 .buf_init = saa7134_ts_buffer_init,
190 .buf_prepare = saa7134_ts_buffer_prepare, 175 .buf_prepare = saa7134_ts_buffer_prepare,
191 .buf_finish = saa7134_ts_buffer_finish,
192 .buf_queue = saa7134_vb2_buffer_queue, 176 .buf_queue = saa7134_vb2_buffer_queue,
193 .wait_prepare = vb2_ops_wait_prepare, 177 .wait_prepare = vb2_ops_wait_prepare,
194 .wait_finish = vb2_ops_wait_finish, 178 .wait_finish = vb2_ops_wait_finish,
diff --git a/drivers/media/pci/saa7134/saa7134-vbi.c b/drivers/media/pci/saa7134/saa7134-vbi.c
index e2cc684a7c12..5306e549e526 100644
--- a/drivers/media/pci/saa7134/saa7134-vbi.c
+++ b/drivers/media/pci/saa7134/saa7134-vbi.c
@@ -120,7 +120,6 @@ static int buffer_prepare(struct vb2_buffer *vb2)
120 struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2); 120 struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
121 struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0); 121 struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
122 unsigned int size; 122 unsigned int size;
123 int ret;
124 123
125 if (dma->sgl->offset) { 124 if (dma->sgl->offset) {
126 pr_err("The buffer is not page-aligned\n"); 125 pr_err("The buffer is not page-aligned\n");
@@ -132,9 +131,6 @@ static int buffer_prepare(struct vb2_buffer *vb2)
132 131
133 vb2_set_plane_payload(vb2, 0, size); 132 vb2_set_plane_payload(vb2, 0, size);
134 133
135 ret = dma_map_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
136 if (!ret)
137 return -EIO;
138 return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents, 134 return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents,
139 saa7134_buffer_startpage(buf)); 135 saa7134_buffer_startpage(buf));
140} 136}
@@ -170,21 +166,10 @@ static int buffer_init(struct vb2_buffer *vb2)
170 return 0; 166 return 0;
171} 167}
172 168
173static void buffer_finish(struct vb2_buffer *vb2)
174{
175 struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
176 struct saa7134_dev *dev = dmaq->dev;
177 struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
178 struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
179
180 dma_unmap_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
181}
182
183struct vb2_ops saa7134_vbi_qops = { 169struct vb2_ops saa7134_vbi_qops = {
184 .queue_setup = queue_setup, 170 .queue_setup = queue_setup,
185 .buf_init = buffer_init, 171 .buf_init = buffer_init,
186 .buf_prepare = buffer_prepare, 172 .buf_prepare = buffer_prepare,
187 .buf_finish = buffer_finish,
188 .buf_queue = saa7134_vb2_buffer_queue, 173 .buf_queue = saa7134_vb2_buffer_queue,
189 .wait_prepare = vb2_ops_wait_prepare, 174 .wait_prepare = vb2_ops_wait_prepare,
190 .wait_finish = vb2_ops_wait_finish, 175 .wait_finish = vb2_ops_wait_finish,
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index ba029953db9d..701b52f34689 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -883,7 +883,6 @@ static int buffer_prepare(struct vb2_buffer *vb2)
883 struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2); 883 struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
884 struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0); 884 struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
885 unsigned int size; 885 unsigned int size;
886 int ret;
887 886
888 if (dma->sgl->offset) { 887 if (dma->sgl->offset) {
889 pr_err("The buffer is not page-aligned\n"); 888 pr_err("The buffer is not page-aligned\n");
@@ -896,23 +895,10 @@ static int buffer_prepare(struct vb2_buffer *vb2)
896 vb2_set_plane_payload(vb2, 0, size); 895 vb2_set_plane_payload(vb2, 0, size);
897 vb2->v4l2_buf.field = dev->field; 896 vb2->v4l2_buf.field = dev->field;
898 897
899 ret = dma_map_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
900 if (!ret)
901 return -EIO;
902 return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents, 898 return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents,
903 saa7134_buffer_startpage(buf)); 899 saa7134_buffer_startpage(buf));
904} 900}
905 901
906static void buffer_finish(struct vb2_buffer *vb2)
907{
908 struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
909 struct saa7134_dev *dev = dmaq->dev;
910 struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
911 struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
912
913 dma_unmap_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
914}
915
916static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt, 902static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
917 unsigned int *nbuffers, unsigned int *nplanes, 903 unsigned int *nbuffers, unsigned int *nplanes,
918 unsigned int sizes[], void *alloc_ctxs[]) 904 unsigned int sizes[], void *alloc_ctxs[])
@@ -1005,7 +991,6 @@ static struct vb2_ops vb2_qops = {
1005 .queue_setup = queue_setup, 991 .queue_setup = queue_setup,
1006 .buf_init = buffer_init, 992 .buf_init = buffer_init,
1007 .buf_prepare = buffer_prepare, 993 .buf_prepare = buffer_prepare,
1008 .buf_finish = buffer_finish,
1009 .buf_queue = saa7134_vb2_buffer_queue, 994 .buf_queue = saa7134_vb2_buffer_queue,
1010 .wait_prepare = vb2_ops_wait_prepare, 995 .wait_prepare = vb2_ops_wait_prepare,
1011 .wait_finish = vb2_ops_wait_finish, 996 .wait_finish = vb2_ops_wait_finish,
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index c644c7da6859..8bf0553b8d2f 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -815,7 +815,6 @@ void saa7134_video_fini(struct saa7134_dev *dev);
815 815
816int saa7134_ts_buffer_init(struct vb2_buffer *vb2); 816int saa7134_ts_buffer_init(struct vb2_buffer *vb2);
817int saa7134_ts_buffer_prepare(struct vb2_buffer *vb2); 817int saa7134_ts_buffer_prepare(struct vb2_buffer *vb2);
818void saa7134_ts_buffer_finish(struct vb2_buffer *vb2);
819int saa7134_ts_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt, 818int saa7134_ts_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
820 unsigned int *nbuffers, unsigned int *nplanes, 819 unsigned int *nbuffers, unsigned int *nplanes,
821 unsigned int sizes[], void *alloc_ctxs[]); 820 unsigned int sizes[], void *alloc_ctxs[]);
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
index 4f6bfba58065..6e933d383fa2 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
@@ -463,7 +463,6 @@ static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
463 struct solo_dev *solo_dev = solo_enc->solo_dev; 463 struct solo_dev *solo_dev = solo_enc->solo_dev;
464 struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0); 464 struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
465 int frame_size; 465 int frame_size;
466 int ret;
467 466
468 vb->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME; 467 vb->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
469 468
@@ -473,22 +472,10 @@ static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
473 frame_size = ALIGN(vop_jpeg_size(vh) + solo_enc->jpeg_len, DMA_ALIGN); 472 frame_size = ALIGN(vop_jpeg_size(vh) + solo_enc->jpeg_len, DMA_ALIGN);
474 vb2_set_plane_payload(vb, 0, vop_jpeg_size(vh) + solo_enc->jpeg_len); 473 vb2_set_plane_payload(vb, 0, vop_jpeg_size(vh) + solo_enc->jpeg_len);
475 474
476 /* may discard all previous data in vbuf->sgl */ 475 return solo_send_desc(solo_enc, solo_enc->jpeg_len, vbuf,
477 if (!dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
478 DMA_FROM_DEVICE))
479 return -ENOMEM;
480 ret = solo_send_desc(solo_enc, solo_enc->jpeg_len, vbuf,
481 vop_jpeg_offset(vh) - SOLO_JPEG_EXT_ADDR(solo_dev), 476 vop_jpeg_offset(vh) - SOLO_JPEG_EXT_ADDR(solo_dev),
482 frame_size, SOLO_JPEG_EXT_ADDR(solo_dev), 477 frame_size, SOLO_JPEG_EXT_ADDR(solo_dev),
483 SOLO_JPEG_EXT_SIZE(solo_dev)); 478 SOLO_JPEG_EXT_SIZE(solo_dev));
484 dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
485 DMA_FROM_DEVICE);
486
487 /* add the header only after dma_unmap_sg() */
488 sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
489 solo_enc->jpeg_header, solo_enc->jpeg_len);
490
491 return ret;
492} 479}
493 480
494static int solo_fill_mpeg(struct solo_enc_dev *solo_enc, 481static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
@@ -498,7 +485,6 @@ static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
498 struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0); 485 struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
499 int frame_off, frame_size; 486 int frame_off, frame_size;
500 int skip = 0; 487 int skip = 0;
501 int ret;
502 488
503 if (vb2_plane_size(vb, 0) < vop_mpeg_size(vh)) 489 if (vb2_plane_size(vb, 0) < vop_mpeg_size(vh))
504 return -EIO; 490 return -EIO;
@@ -521,21 +507,9 @@ static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
521 sizeof(*vh)) % SOLO_MP4E_EXT_SIZE(solo_dev); 507 sizeof(*vh)) % SOLO_MP4E_EXT_SIZE(solo_dev);
522 frame_size = ALIGN(vop_mpeg_size(vh) + skip, DMA_ALIGN); 508 frame_size = ALIGN(vop_mpeg_size(vh) + skip, DMA_ALIGN);
523 509
524 /* may discard all previous data in vbuf->sgl */ 510 return solo_send_desc(solo_enc, skip, vbuf, frame_off, frame_size,
525 if (!dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
526 DMA_FROM_DEVICE))
527 return -ENOMEM;
528 ret = solo_send_desc(solo_enc, skip, vbuf, frame_off, frame_size,
529 SOLO_MP4E_EXT_ADDR(solo_dev), 511 SOLO_MP4E_EXT_ADDR(solo_dev),
530 SOLO_MP4E_EXT_SIZE(solo_dev)); 512 SOLO_MP4E_EXT_SIZE(solo_dev));
531 dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
532 DMA_FROM_DEVICE);
533
534 /* add the header only after dma_unmap_sg() */
535 if (!vop_type(vh))
536 sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
537 solo_enc->vop, solo_enc->vop_len);
538 return ret;
539} 513}
540 514
541static int solo_enc_fillbuf(struct solo_enc_dev *solo_enc, 515static int solo_enc_fillbuf(struct solo_enc_dev *solo_enc,
@@ -793,9 +767,29 @@ static void solo_enc_stop_streaming(struct vb2_queue *q)
793 spin_unlock_irqrestore(&solo_enc->av_lock, flags); 767 spin_unlock_irqrestore(&solo_enc->av_lock, flags);
794} 768}
795 769
770static void solo_enc_buf_finish(struct vb2_buffer *vb)
771{
772 struct solo_enc_dev *solo_enc = vb2_get_drv_priv(vb->vb2_queue);
773 struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
774
775 switch (solo_enc->fmt) {
776 case V4L2_PIX_FMT_MPEG4:
777 case V4L2_PIX_FMT_H264:
778 if (vb->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME)
779 sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
780 solo_enc->vop, solo_enc->vop_len);
781 break;
782 default: /* V4L2_PIX_FMT_MJPEG */
783 sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
784 solo_enc->jpeg_header, solo_enc->jpeg_len);
785 break;
786 }
787}
788
796static struct vb2_ops solo_enc_video_qops = { 789static struct vb2_ops solo_enc_video_qops = {
797 .queue_setup = solo_enc_queue_setup, 790 .queue_setup = solo_enc_queue_setup,
798 .buf_queue = solo_enc_buf_queue, 791 .buf_queue = solo_enc_buf_queue,
792 .buf_finish = solo_enc_buf_finish,
799 .start_streaming = solo_enc_start_streaming, 793 .start_streaming = solo_enc_start_streaming,
800 .stop_streaming = solo_enc_stop_streaming, 794 .stop_streaming = solo_enc_stop_streaming,
801 .wait_prepare = vb2_ops_wait_prepare, 795 .wait_prepare = vb2_ops_wait_prepare,
diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c
index 50dcce6251f6..8355e55b4e8e 100644
--- a/drivers/media/pci/tw68/tw68-video.c
+++ b/drivers/media/pci/tw68/tw68-video.c
@@ -462,17 +462,12 @@ static int tw68_buf_prepare(struct vb2_buffer *vb)
462 struct tw68_buf *buf = container_of(vb, struct tw68_buf, vb); 462 struct tw68_buf *buf = container_of(vb, struct tw68_buf, vb);
463 struct sg_table *dma = vb2_dma_sg_plane_desc(vb, 0); 463 struct sg_table *dma = vb2_dma_sg_plane_desc(vb, 0);
464 unsigned size, bpl; 464 unsigned size, bpl;
465 int rc;
466 465
467 size = (dev->width * dev->height * dev->fmt->depth) >> 3; 466 size = (dev->width * dev->height * dev->fmt->depth) >> 3;
468 if (vb2_plane_size(vb, 0) < size) 467 if (vb2_plane_size(vb, 0) < size)
469 return -EINVAL; 468 return -EINVAL;
470 vb2_set_plane_payload(vb, 0, size); 469 vb2_set_plane_payload(vb, 0, size);
471 470
472 rc = dma_map_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
473 if (!rc)
474 return -EIO;
475
476 bpl = (dev->width * dev->fmt->depth) >> 3; 471 bpl = (dev->width * dev->fmt->depth) >> 3;
477 switch (dev->field) { 472 switch (dev->field) {
478 case V4L2_FIELD_TOP: 473 case V4L2_FIELD_TOP:
@@ -506,11 +501,8 @@ static void tw68_buf_finish(struct vb2_buffer *vb)
506{ 501{
507 struct vb2_queue *vq = vb->vb2_queue; 502 struct vb2_queue *vq = vb->vb2_queue;
508 struct tw68_dev *dev = vb2_get_drv_priv(vq); 503 struct tw68_dev *dev = vb2_get_drv_priv(vq);
509 struct sg_table *dma = vb2_dma_sg_plane_desc(vb, 0);
510 struct tw68_buf *buf = container_of(vb, struct tw68_buf, vb); 504 struct tw68_buf *buf = container_of(vb, struct tw68_buf, vb);
511 505
512 dma_unmap_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
513
514 pci_free_consistent(dev->pci, buf->size, buf->cpu, buf->dma); 506 pci_free_consistent(dev->pci, buf->size, buf->cpu, buf->dma);
515} 507}
516 508
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index c3ff5388aeb3..ce00cbaf850e 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -1220,17 +1220,12 @@ static int mcam_vb_sg_buf_init(struct vb2_buffer *vb)
1220static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb) 1220static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
1221{ 1221{
1222 struct mcam_vb_buffer *mvb = vb_to_mvb(vb); 1222 struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
1223 struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
1224 struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0); 1223 struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
1225 struct mcam_dma_desc *desc = mvb->dma_desc; 1224 struct mcam_dma_desc *desc = mvb->dma_desc;
1226 struct scatterlist *sg; 1225 struct scatterlist *sg;
1227 int i; 1226 int i;
1228 1227
1229 mvb->dma_desc_nent = dma_map_sg(cam->dev, sg_table->sgl, 1228 for_each_sg(sg_table->sgl, sg, sg_table->nents, i) {
1230 sg_table->nents, DMA_FROM_DEVICE);
1231 if (mvb->dma_desc_nent <= 0)
1232 return -EIO; /* Not sure what's right here */
1233 for_each_sg(sg_table->sgl, sg, mvb->dma_desc_nent, i) {
1234 desc->dma_addr = sg_dma_address(sg); 1229 desc->dma_addr = sg_dma_address(sg);
1235 desc->segment_len = sg_dma_len(sg); 1230 desc->segment_len = sg_dma_len(sg);
1236 desc++; 1231 desc++;
@@ -1238,16 +1233,6 @@ static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
1238 return 0; 1233 return 0;
1239} 1234}
1240 1235
1241static void mcam_vb_sg_buf_finish(struct vb2_buffer *vb)
1242{
1243 struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
1244 struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
1245
1246 if (sg_table)
1247 dma_unmap_sg(cam->dev, sg_table->sgl,
1248 sg_table->nents, DMA_FROM_DEVICE);
1249}
1250
1251static void mcam_vb_sg_buf_cleanup(struct vb2_buffer *vb) 1236static void mcam_vb_sg_buf_cleanup(struct vb2_buffer *vb)
1252{ 1237{
1253 struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue); 1238 struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
@@ -1264,7 +1249,6 @@ static const struct vb2_ops mcam_vb2_sg_ops = {
1264 .buf_init = mcam_vb_sg_buf_init, 1249 .buf_init = mcam_vb_sg_buf_init,
1265 .buf_prepare = mcam_vb_sg_buf_prepare, 1250 .buf_prepare = mcam_vb_sg_buf_prepare,
1266 .buf_queue = mcam_vb_buf_queue, 1251 .buf_queue = mcam_vb_buf_queue,
1267 .buf_finish = mcam_vb_sg_buf_finish,
1268 .buf_cleanup = mcam_vb_sg_buf_cleanup, 1252 .buf_cleanup = mcam_vb_sg_buf_cleanup,
1269 .start_streaming = mcam_vb_start_streaming, 1253 .start_streaming = mcam_vb_start_streaming,
1270 .stop_streaming = mcam_vb_stop_streaming, 1254 .stop_streaming = mcam_vb_stop_streaming,
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 2bf13dc4df34..346e39b2aae8 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -96,6 +96,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
96{ 96{
97 struct vb2_dma_sg_conf *conf = alloc_ctx; 97 struct vb2_dma_sg_conf *conf = alloc_ctx;
98 struct vb2_dma_sg_buf *buf; 98 struct vb2_dma_sg_buf *buf;
99 struct sg_table *sgt;
99 int ret; 100 int ret;
100 int num_pages; 101 int num_pages;
101 102
@@ -128,6 +129,12 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
128 129
129 /* Prevent the device from being released while the buffer is used */ 130 /* Prevent the device from being released while the buffer is used */
130 buf->dev = get_device(conf->dev); 131 buf->dev = get_device(conf->dev);
132
133 sgt = &buf->sg_table;
134 if (dma_map_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir) == 0)
135 goto fail_map;
136 dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
137
131 buf->handler.refcount = &buf->refcount; 138 buf->handler.refcount = &buf->refcount;
132 buf->handler.put = vb2_dma_sg_put; 139 buf->handler.put = vb2_dma_sg_put;
133 buf->handler.arg = buf; 140 buf->handler.arg = buf;
@@ -138,6 +145,9 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
138 __func__, buf->num_pages); 145 __func__, buf->num_pages);
139 return buf; 146 return buf;
140 147
148fail_map:
149 put_device(buf->dev);
150 sg_free_table(sgt);
141fail_table_alloc: 151fail_table_alloc:
142 num_pages = buf->num_pages; 152 num_pages = buf->num_pages;
143 while (num_pages--) 153 while (num_pages--)
@@ -152,11 +162,13 @@ fail_pages_array_alloc:
152static void vb2_dma_sg_put(void *buf_priv) 162static void vb2_dma_sg_put(void *buf_priv)
153{ 163{
154 struct vb2_dma_sg_buf *buf = buf_priv; 164 struct vb2_dma_sg_buf *buf = buf_priv;
165 struct sg_table *sgt = &buf->sg_table;
155 int i = buf->num_pages; 166 int i = buf->num_pages;
156 167
157 if (atomic_dec_and_test(&buf->refcount)) { 168 if (atomic_dec_and_test(&buf->refcount)) {
158 dprintk(1, "%s: Freeing buffer of %d pages\n", __func__, 169 dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
159 buf->num_pages); 170 buf->num_pages);
171 dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
160 if (buf->vaddr) 172 if (buf->vaddr)
161 vm_unmap_ram(buf->vaddr, buf->num_pages); 173 vm_unmap_ram(buf->vaddr, buf->num_pages);
162 sg_free_table(&buf->sg_table); 174 sg_free_table(&buf->sg_table);
@@ -168,6 +180,22 @@ static void vb2_dma_sg_put(void *buf_priv)
168 } 180 }
169} 181}
170 182
183static void vb2_dma_sg_prepare(void *buf_priv)
184{
185 struct vb2_dma_sg_buf *buf = buf_priv;
186 struct sg_table *sgt = &buf->sg_table;
187
188 dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
189}
190
191static void vb2_dma_sg_finish(void *buf_priv)
192{
193 struct vb2_dma_sg_buf *buf = buf_priv;
194 struct sg_table *sgt = &buf->sg_table;
195
196 dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
197}
198
171static inline int vma_is_io(struct vm_area_struct *vma) 199static inline int vma_is_io(struct vm_area_struct *vma)
172{ 200{
173 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP)); 201 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
@@ -177,16 +205,19 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
177 unsigned long size, 205 unsigned long size,
178 enum dma_data_direction dma_dir) 206 enum dma_data_direction dma_dir)
179{ 207{
208 struct vb2_dma_sg_conf *conf = alloc_ctx;
180 struct vb2_dma_sg_buf *buf; 209 struct vb2_dma_sg_buf *buf;
181 unsigned long first, last; 210 unsigned long first, last;
182 int num_pages_from_user; 211 int num_pages_from_user;
183 struct vm_area_struct *vma; 212 struct vm_area_struct *vma;
213 struct sg_table *sgt;
184 214
185 buf = kzalloc(sizeof *buf, GFP_KERNEL); 215 buf = kzalloc(sizeof *buf, GFP_KERNEL);
186 if (!buf) 216 if (!buf)
187 return NULL; 217 return NULL;
188 218
189 buf->vaddr = NULL; 219 buf->vaddr = NULL;
220 buf->dev = conf->dev;
190 buf->dma_dir = dma_dir; 221 buf->dma_dir = dma_dir;
191 buf->offset = vaddr & ~PAGE_MASK; 222 buf->offset = vaddr & ~PAGE_MASK;
192 buf->size = size; 223 buf->size = size;
@@ -246,8 +277,14 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
246 buf->num_pages, buf->offset, size, 0)) 277 buf->num_pages, buf->offset, size, 0))
247 goto userptr_fail_alloc_table_from_pages; 278 goto userptr_fail_alloc_table_from_pages;
248 279
280 sgt = &buf->sg_table;
281 if (dma_map_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir) == 0)
282 goto userptr_fail_map;
283 dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
249 return buf; 284 return buf;
250 285
286userptr_fail_map:
287 sg_free_table(&buf->sg_table);
251userptr_fail_alloc_table_from_pages: 288userptr_fail_alloc_table_from_pages:
252userptr_fail_get_user_pages: 289userptr_fail_get_user_pages:
253 dprintk(1, "get_user_pages requested/got: %d/%d]\n", 290 dprintk(1, "get_user_pages requested/got: %d/%d]\n",
@@ -270,10 +307,12 @@ userptr_fail_alloc_pages:
270static void vb2_dma_sg_put_userptr(void *buf_priv) 307static void vb2_dma_sg_put_userptr(void *buf_priv)
271{ 308{
272 struct vb2_dma_sg_buf *buf = buf_priv; 309 struct vb2_dma_sg_buf *buf = buf_priv;
310 struct sg_table *sgt = &buf->sg_table;
273 int i = buf->num_pages; 311 int i = buf->num_pages;
274 312
275 dprintk(1, "%s: Releasing userspace buffer of %d pages\n", 313 dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
276 __func__, buf->num_pages); 314 __func__, buf->num_pages);
315 dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
277 if (buf->vaddr) 316 if (buf->vaddr)
278 vm_unmap_ram(buf->vaddr, buf->num_pages); 317 vm_unmap_ram(buf->vaddr, buf->num_pages);
279 sg_free_table(&buf->sg_table); 318 sg_free_table(&buf->sg_table);
@@ -360,6 +399,8 @@ const struct vb2_mem_ops vb2_dma_sg_memops = {
360 .put = vb2_dma_sg_put, 399 .put = vb2_dma_sg_put,
361 .get_userptr = vb2_dma_sg_get_userptr, 400 .get_userptr = vb2_dma_sg_get_userptr,
362 .put_userptr = vb2_dma_sg_put_userptr, 401 .put_userptr = vb2_dma_sg_put_userptr,
402 .prepare = vb2_dma_sg_prepare,
403 .finish = vb2_dma_sg_finish,
363 .vaddr = vb2_dma_sg_vaddr, 404 .vaddr = vb2_dma_sg_vaddr,
364 .mmap = vb2_dma_sg_mmap, 405 .mmap = vb2_dma_sg_mmap,
365 .num_users = vb2_dma_sg_num_users, 406 .num_users = vb2_dma_sg_num_users,