author	Junghak Sung <jh1009.sung@samsung.com>	2015-09-22 09:30:30 -0400
committer	Mauro Carvalho Chehab <mchehab@osg.samsung.com>	2015-10-01 08:04:43 -0400
commit	2d7007153f0c9b1dd00c01894df7d26ddc32b79f (patch)
tree	8320f9d22f45dd7dcea64088b50ff706bb0082b2 /drivers/media/platform/xilinx/xilinx-dma.c
parent	c139990e842d550db2f59bd4f5993bba90f140e0 (diff)
[media] media: videobuf2: Restructure vb2_buffer
Remove the v4l2-specific parts - v4l2_buf and v4l2_plane - from struct
vb2_buffer. Add new member variables - bytesused, length, offset, userptr,
fd and data_offset - to struct vb2_plane so that it carries all the
information of v4l2_plane:

struct vb2_plane {
	<snip>
	unsigned int		bytesused;
	unsigned int		length;
	union {
		unsigned int	offset;
		unsigned long	userptr;
		int		fd;
	} m;
	unsigned int		data_offset;
};

Replace v4l2_buf with new member variables - index, type, memory - which
are common fields for buffer management:

struct vb2_buffer {
	<snip>
	unsigned int		index;
	unsigned int		type;
	unsigned int		memory;
	unsigned int		num_planes;
	struct vb2_plane	planes[VIDEO_MAX_PLANES];
	<snip>
};

The v4l2-specific fields - flags, field, timestamp, timecode and
sequence - are moved to struct vb2_v4l2_buffer in videobuf2-v4l2.c:

struct vb2_v4l2_buffer {
	struct vb2_buffer	vb2_buf;

	__u32			flags;
	__u32			field;
	struct timeval		timestamp;
	struct v4l2_timecode	timecode;
	__u32			sequence;
};

Signed-off-by: Junghak Sung <jh1009.sung@samsung.com>
Signed-off-by: Geunyoung Kim <nenggun.kim@samsung.com>
Acked-by: Seung-Woo Kim <sw0312.kim@samsung.com>
Acked-by: Inki Dae <inki.dae@samsung.com>
Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
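For drivers converted by this series, the resulting access pattern looks
roughly like the sketch below. This is an illustration, not code from the
patch: xyz_buffer, to_xyz_buffer and xyz_buffer_queue are hypothetical
driver names; to_vb2_v4l2_buffer() is the real helper from
videobuf2-v4l2.h.

struct xyz_buffer {
	struct vb2_v4l2_buffer buf;	/* wraps the vb2_buffer core */
	struct list_head queue;
};

#define to_xyz_buffer(vbuf) \
	container_of(vbuf, struct xyz_buffer, buf)

static void xyz_buffer_queue(struct vb2_buffer *vb)
{
	/* the vb2 core still hands drivers a plain vb2_buffer */
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct xyz_buffer *buf = to_xyz_buffer(vbuf);

	/* v4l2-specific fields now live in vb2_v4l2_buffer... */
	vbuf->field = V4L2_FIELD_NONE;

	/* ...while buffer-management calls take the embedded core */
	vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
}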
Diffstat (limited to 'drivers/media/platform/xilinx/xilinx-dma.c')
-rw-r--r--	drivers/media/platform/xilinx/xilinx-dma.c | 24 +++++++++++++-----------
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
index d9dcd4be2792..5af66c20475b 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.c
+++ b/drivers/media/platform/xilinx/xilinx-dma.c
@@ -285,7 +285,7 @@ done:
  * @dma: DMA channel that uses the buffer
  */
 struct xvip_dma_buffer {
-	struct vb2_buffer buf;
+	struct vb2_v4l2_buffer buf;
 	struct list_head queue;
 	struct xvip_dma *dma;
 };
@@ -301,11 +301,11 @@ static void xvip_dma_complete(void *param)
 	list_del(&buf->queue);
 	spin_unlock(&dma->queued_lock);
 
-	buf->buf.v4l2_buf.field = V4L2_FIELD_NONE;
-	buf->buf.v4l2_buf.sequence = dma->sequence++;
-	v4l2_get_timestamp(&buf->buf.v4l2_buf.timestamp);
-	vb2_set_plane_payload(&buf->buf, 0, dma->format.sizeimage);
-	vb2_buffer_done(&buf->buf, VB2_BUF_STATE_DONE);
+	buf->buf.field = V4L2_FIELD_NONE;
+	buf->buf.sequence = dma->sequence++;
+	v4l2_get_timestamp(&buf->buf.timestamp);
+	vb2_set_plane_payload(&buf->buf.vb2_buf, 0, dma->format.sizeimage);
+	vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
 }
 
 static int
@@ -329,8 +329,9 @@ xvip_dma_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
 
 static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
 {
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 	struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
-	struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vb);
+	struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
 
 	buf->dma = dma;
 
@@ -339,8 +340,9 @@ static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
 
 static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
 {
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 	struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
-	struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vb);
+	struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
 	struct dma_async_tx_descriptor *desc;
 	dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
 	u32 flags;
@@ -367,7 +369,7 @@ static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
 	desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
 	if (!desc) {
 		dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
-		vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
+		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
 		return;
 	}
 	desc->callback = xvip_dma_complete;
@@ -434,7 +436,7 @@ error:
 	/* Give back all queued buffers to videobuf2. */
 	spin_lock_irq(&dma->queued_lock);
 	list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
-		vb2_buffer_done(&buf->buf, VB2_BUF_STATE_QUEUED);
+		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_QUEUED);
 		list_del(&buf->queue);
 	}
 	spin_unlock_irq(&dma->queued_lock);
@@ -461,7 +463,7 @@ static void xvip_dma_stop_streaming(struct vb2_queue *vq)
 	/* Give back all queued buffers to videobuf2. */
 	spin_lock_irq(&dma->queued_lock);
 	list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
-		vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
+		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
 		list_del(&buf->queue);
 	}
 	spin_unlock_irq(&dma->queued_lock);
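Note that to_xvip_dma_buffer() itself needs no textual change here - the
diffstat's 13 insertions and 11 deletions are fully accounted for by the
hunks above. Assuming the usual container_of()-based helper, along the
lines of

#define to_xvip_dma_buffer(vb) \
	container_of(vb, struct xvip_dma_buffer, buf)

the macro keeps working unmodified: the 'buf' member it resolves against
is now a struct vb2_v4l2_buffer, so callers simply pass the vbuf wrapper
pointer instead of the raw vb.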