aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMauro Carvalho Chehab <mchehab@s-opensource.com>2016-09-08 09:16:27 -0400
committerMauro Carvalho Chehab <mchehab@s-opensource.com>2016-09-09 09:05:32 -0400
commit4781646c1e13b23ed31eb12f7e314824d98ce066 (patch)
treee1a60c3f9d443a1bb145b7144590d66fbdcebd18
parent82631b5bb268f670613db110177483ae3e85f913 (diff)
[media] v4l2-mem2mem.h: move descriptions from .c file
Several routines are somewhat documented in the v4l2-mem2mem.c file. Move what's there to the header file. Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c128
-rw-r--r--include/media/v4l2-mem2mem.h132
2 files changed, 133 insertions, 127 deletions
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 61d56c940f80..6bc27e7b2a33 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -76,9 +76,6 @@ static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
76 return &m2m_ctx->cap_q_ctx; 76 return &m2m_ctx->cap_q_ctx;
77} 77}
78 78
79/**
80 * v4l2_m2m_get_vq() - return vb2_queue for the given type
81 */
82struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx, 79struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
83 enum v4l2_buf_type type) 80 enum v4l2_buf_type type)
84{ 81{
@@ -92,9 +89,6 @@ struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
92} 89}
93EXPORT_SYMBOL(v4l2_m2m_get_vq); 90EXPORT_SYMBOL(v4l2_m2m_get_vq);
94 91
95/**
96 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
97 */
98void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx) 92void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
99{ 93{
100 struct v4l2_m2m_buffer *b; 94 struct v4l2_m2m_buffer *b;
@@ -113,10 +107,6 @@ void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
113} 107}
114EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf); 108EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
115 109
116/**
117 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
118 * return it
119 */
120void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx) 110void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
121{ 111{
122 struct v4l2_m2m_buffer *b; 112 struct v4l2_m2m_buffer *b;
@@ -140,10 +130,6 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
140 * Scheduling handlers 130 * Scheduling handlers
141 */ 131 */
142 132
143/**
144 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
145 * running instance or NULL if no instance is running
146 */
147void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev) 133void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
148{ 134{
149 unsigned long flags; 135 unsigned long flags;
@@ -188,26 +174,6 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
188 m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv); 174 m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
189} 175}
190 176
191/**
192 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
193 * the pending job queue and add it if so.
194 * @m2m_ctx: m2m context assigned to the instance to be checked
195 *
196 * There are three basic requirements an instance has to meet to be able to run:
197 * 1) at least one source buffer has to be queued,
198 * 2) at least one destination buffer has to be queued,
199 * 3) streaming has to be on.
200 *
201 * If a queue is buffered (for example a decoder hardware ringbuffer that has
202 * to be drained before doing streamoff), allow scheduling without v4l2 buffers
203 * on that queue.
204 *
205 * There may also be additional, custom requirements. In such case the driver
206 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
207 * return 1 if the instance is ready.
208 * An example of the above could be an instance that requires more than one
209 * src/dst buffer per transaction.
210 */
211void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx) 177void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
212{ 178{
213 struct v4l2_m2m_dev *m2m_dev; 179 struct v4l2_m2m_dev *m2m_dev;
@@ -311,18 +277,6 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
311 } 277 }
312} 278}
313 279
314/**
315 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
316 * and have it clean up
317 *
318 * Called by a driver to yield back the device after it has finished with it.
319 * Should be called as soon as possible after reaching a state which allows
320 * other instances to take control of the device.
321 *
322 * This function has to be called only after device_run() callback has been
323 * called on the driver. To prevent recursion, it should not be called directly
324 * from the device_run() callback though.
325 */
326void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, 280void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
327 struct v4l2_m2m_ctx *m2m_ctx) 281 struct v4l2_m2m_ctx *m2m_ctx)
328{ 282{
@@ -350,9 +304,6 @@ void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
350} 304}
351EXPORT_SYMBOL(v4l2_m2m_job_finish); 305EXPORT_SYMBOL(v4l2_m2m_job_finish);
352 306
353/**
354 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
355 */
356int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 307int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
357 struct v4l2_requestbuffers *reqbufs) 308 struct v4l2_requestbuffers *reqbufs)
358{ 309{
@@ -370,11 +321,6 @@ int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
370} 321}
371EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs); 322EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
372 323
373/**
374 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
375 *
376 * See v4l2_m2m_mmap() documentation for details.
377 */
378int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 324int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
379 struct v4l2_buffer *buf) 325 struct v4l2_buffer *buf)
380{ 326{
@@ -400,10 +346,6 @@ int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
400} 346}
401EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf); 347EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
402 348
403/**
404 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
405 * the type
406 */
407int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 349int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
408 struct v4l2_buffer *buf) 350 struct v4l2_buffer *buf)
409{ 351{
@@ -419,10 +361,6 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
419} 361}
420EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf); 362EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
421 363
422/**
423 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
424 * the type
425 */
426int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 364int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
427 struct v4l2_buffer *buf) 365 struct v4l2_buffer *buf)
428{ 366{
@@ -433,10 +371,6 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
433} 371}
434EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf); 372EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
435 373
436/**
437 * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on
438 * the type
439 */
440int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 374int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
441 struct v4l2_buffer *buf) 375 struct v4l2_buffer *buf)
442{ 376{
@@ -452,10 +386,6 @@ int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
452} 386}
453EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf); 387EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
454 388
455/**
456 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
457 * on the type
458 */
459int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 389int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
460 struct v4l2_create_buffers *create) 390 struct v4l2_create_buffers *create)
461{ 391{
@@ -466,10 +396,6 @@ int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
466} 396}
467EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs); 397EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);
468 398
469/**
470 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
471 * the type
472 */
473int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 399int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
474 struct v4l2_exportbuffer *eb) 400 struct v4l2_exportbuffer *eb)
475{ 401{
@@ -479,9 +405,7 @@ int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
479 return vb2_expbuf(vq, eb); 405 return vb2_expbuf(vq, eb);
480} 406}
481EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf); 407EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);
482/** 408
483 * v4l2_m2m_streamon() - turn on streaming for a video queue
484 */
485int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 409int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
486 enum v4l2_buf_type type) 410 enum v4l2_buf_type type)
487{ 411{
@@ -497,9 +421,6 @@ int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
497} 421}
498EXPORT_SYMBOL_GPL(v4l2_m2m_streamon); 422EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
499 423
500/**
501 * v4l2_m2m_streamoff() - turn off streaming for a video queue
502 */
503int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 424int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
504 enum v4l2_buf_type type) 425 enum v4l2_buf_type type)
505{ 426{
@@ -540,14 +461,6 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
540} 461}
541EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff); 462EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
542 463
543/**
544 * v4l2_m2m_poll() - poll replacement, for destination buffers only
545 *
546 * Call from the driver's poll() function. Will poll both queues. If a buffer
547 * is available to dequeue (with dqbuf) from the source queue, this will
548 * indicate that a non-blocking write can be performed, while read will be
549 * returned in case of the destination queue.
550 */
551unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 464unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
552 struct poll_table_struct *wait) 465 struct poll_table_struct *wait)
553{ 466{
@@ -626,16 +539,6 @@ end:
626} 539}
627EXPORT_SYMBOL_GPL(v4l2_m2m_poll); 540EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
628 541
629/**
630 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
631 *
632 * Call from driver's mmap() function. Will handle mmap() for both queues
633 * seamlessly for videobuffer, which will receive normal per-queue offsets and
634 * proper videobuf queue pointers. The differentiation is made outside videobuf
635 * by adding a predefined offset to buffers from one of the queues and
636 * subtracting it before passing it back to videobuf. Only drivers (and
637 * thus applications) receive modified offsets.
638 */
639int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 542int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
640 struct vm_area_struct *vma) 543 struct vm_area_struct *vma)
641{ 544{
@@ -653,11 +556,6 @@ int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
653} 556}
654EXPORT_SYMBOL(v4l2_m2m_mmap); 557EXPORT_SYMBOL(v4l2_m2m_mmap);
655 558
656/**
657 * v4l2_m2m_init() - initialize per-driver m2m data
658 *
659 * Usually called from driver's probe() function.
660 */
661struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops) 559struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
662{ 560{
663 struct v4l2_m2m_dev *m2m_dev; 561 struct v4l2_m2m_dev *m2m_dev;
@@ -679,26 +577,12 @@ struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
679} 577}
680EXPORT_SYMBOL_GPL(v4l2_m2m_init); 578EXPORT_SYMBOL_GPL(v4l2_m2m_init);
681 579
682/**
683 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
684 *
685 * Usually called from driver's remove() function.
686 */
687void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev) 580void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
688{ 581{
689 kfree(m2m_dev); 582 kfree(m2m_dev);
690} 583}
691EXPORT_SYMBOL_GPL(v4l2_m2m_release); 584EXPORT_SYMBOL_GPL(v4l2_m2m_release);
692 585
693/**
694 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
695 * @priv - driver's instance private data
696 * @m2m_dev - a previously initialized m2m_dev struct
697 * @vq_init - a callback for queue type-specific initialization function to be
698 * used for initializing videobuf_queues
699 *
700 * Usually called from driver's open() function.
701 */
702struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev, 586struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
703 void *drv_priv, 587 void *drv_priv,
704 int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)) 588 int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
@@ -744,11 +628,6 @@ err:
744} 628}
745EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init); 629EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
746 630
747/**
748 * v4l2_m2m_ctx_release() - release m2m context
749 *
750 * Usually called from driver's release() function.
751 */
752void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx) 631void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
753{ 632{
754 /* wait until the current context is dequeued from job_queue */ 633 /* wait until the current context is dequeued from job_queue */
@@ -761,11 +640,6 @@ void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
761} 640}
762EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release); 641EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
763 642
764/**
765 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
766 *
767 * Call from buf_queue(), videobuf_queue_ops callback.
768 */
769void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, 643void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
770 struct vb2_v4l2_buffer *vbuf) 644 struct vb2_v4l2_buffer *vbuf)
771{ 645{
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 5a9597dd1ee0..e5449a2c8475 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -94,13 +94,52 @@ struct v4l2_m2m_buffer {
94 struct list_head list; 94 struct list_head list;
95}; 95};
96 96
97/**
98 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
99 * running instance or NULL if no instance is running
100 */
97void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev); 101void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev);
98 102
103/**
104 * v4l2_m2m_get_vq() - return vb2_queue for the given type
105 */
99struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx, 106struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
100 enum v4l2_buf_type type); 107 enum v4l2_buf_type type);
101 108
109/**
110 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
111 * the pending job queue and add it if so.
112 * @m2m_ctx: m2m context assigned to the instance to be checked
113 *
114 * There are three basic requirements an instance has to meet to be able to run:
115 * 1) at least one source buffer has to be queued,
116 * 2) at least one destination buffer has to be queued,
117 * 3) streaming has to be on.
118 *
119 * If a queue is buffered (for example a decoder hardware ringbuffer that has
120 * to be drained before doing streamoff), allow scheduling without v4l2 buffers
121 * on that queue.
122 *
123 * There may also be additional, custom requirements. In such case the driver
124 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
125 * return 1 if the instance is ready.
126 * An example of the above could be an instance that requires more than one
127 * src/dst buffer per transaction.
128 */
102void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx); 129void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx);
103 130
131/**
132 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
133 * and have it clean up
134 *
135 * Called by a driver to yield back the device after it has finished with it.
136 * Should be called as soon as possible after reaching a state which allows
137 * other instances to take control of the device.
138 *
139 * This function has to be called only after device_run() callback has been
140 * called on the driver. To prevent recursion, it should not be called directly
141 * from the device_run() callback though.
142 */
104void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, 143void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
105 struct v4l2_m2m_ctx *m2m_ctx); 144 struct v4l2_m2m_ctx *m2m_ctx);
106 145
@@ -110,38 +149,114 @@ v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
110 vb2_buffer_done(&buf->vb2_buf, state); 149 vb2_buffer_done(&buf->vb2_buf, state);
111} 150}
112 151
152/**
153 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
154 */
113int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 155int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
114 struct v4l2_requestbuffers *reqbufs); 156 struct v4l2_requestbuffers *reqbufs);
115 157
158/**
159 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
160 *
161 * See v4l2_m2m_mmap() documentation for details.
162 */
116int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 163int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
117 struct v4l2_buffer *buf); 164 struct v4l2_buffer *buf);
118 165
166/**
167 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
168 * the type
169 */
119int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 170int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
120 struct v4l2_buffer *buf); 171 struct v4l2_buffer *buf);
172
173/**
174 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
175 * the type
176 */
121int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 177int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
122 struct v4l2_buffer *buf); 178 struct v4l2_buffer *buf);
179
180/**
181 * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on
182 * the type
183 */
123int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 184int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
124 struct v4l2_buffer *buf); 185 struct v4l2_buffer *buf);
186
187/**
188 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
189 * on the type
190 */
125int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 191int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
126 struct v4l2_create_buffers *create); 192 struct v4l2_create_buffers *create);
127 193
194/**
195 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
196 * the type
197 */
128int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 198int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
129 struct v4l2_exportbuffer *eb); 199 struct v4l2_exportbuffer *eb);
130 200
201/**
202 * v4l2_m2m_streamon() - turn on streaming for a video queue
203 */
131int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 204int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
132 enum v4l2_buf_type type); 205 enum v4l2_buf_type type);
206
207/**
208 * v4l2_m2m_streamoff() - turn off streaming for a video queue
209 */
133int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 210int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
134 enum v4l2_buf_type type); 211 enum v4l2_buf_type type);
135 212
213/**
214 * v4l2_m2m_poll() - poll replacement, for destination buffers only
215 *
216 * Call from the driver's poll() function. Will poll both queues. If a buffer
217 * is available to dequeue (with dqbuf) from the source queue, this will
218 * indicate that a non-blocking write can be performed, while read will be
219 * returned in case of the destination queue.
220 */
136unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 221unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
137 struct poll_table_struct *wait); 222 struct poll_table_struct *wait);
138 223
224/**
225 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
226 *
227 * Call from driver's mmap() function. Will handle mmap() for both queues
228 * seamlessly for videobuffer, which will receive normal per-queue offsets and
229 * proper videobuf queue pointers. The differentiation is made outside videobuf
230 * by adding a predefined offset to buffers from one of the queues and
231 * subtracting it before passing it back to videobuf. Only drivers (and
232 * thus applications) receive modified offsets.
233 */
139int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 234int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
140 struct vm_area_struct *vma); 235 struct vm_area_struct *vma);
141 236
237/**
238 * v4l2_m2m_init() - initialize per-driver m2m data
239 *
240 * Usually called from driver's probe() function.
241 */
142struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops); 242struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops);
243
244/**
245 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
246 *
247 * Usually called from driver's remove() function.
248 */
143void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev); 249void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);
144 250
251/**
252 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
253 * @priv - driver's instance private data
254 * @m2m_dev - a previously initialized m2m_dev struct
255 * @vq_init - a callback for queue type-specific initialization function to be
256 * used for initializing videobuf_queues
257 *
258 * Usually called from driver's open() function.
259 */
145struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev, 260struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
146 void *drv_priv, 261 void *drv_priv,
147 int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)); 262 int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq));
@@ -158,8 +273,18 @@ static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
158 m2m_ctx->cap_q_ctx.buffered = buffered; 273 m2m_ctx->cap_q_ctx.buffered = buffered;
159} 274}
160 275
276/**
277 * v4l2_m2m_ctx_release() - release m2m context
278 *
279 * Usually called from driver's release() function.
280 */
161void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx); 281void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);
162 282
283/**
284 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
285 *
286 * Call from buf_queue(), videobuf_queue_ops callback.
287 */
163void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, 288void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
164 struct vb2_v4l2_buffer *vbuf); 289 struct vb2_v4l2_buffer *vbuf);
165 290
@@ -187,6 +312,9 @@ unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
187 return m2m_ctx->cap_q_ctx.num_rdy; 312 return m2m_ctx->cap_q_ctx.num_rdy;
188} 313}
189 314
315/**
316 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
317 */
190void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx); 318void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);
191 319
192/** 320/**
@@ -233,6 +361,10 @@ struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
233 return &m2m_ctx->cap_q_ctx.q; 361 return &m2m_ctx->cap_q_ctx.q;
234} 362}
235 363
364/**
365 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
366 * return it
367 */
236void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx); 368void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);
237 369
238/** 370/**