diff options
author | Mauro Carvalho Chehab <mchehab@s-opensource.com> | 2016-09-08 09:16:27 -0400 |
---|---|---|
committer | Mauro Carvalho Chehab <mchehab@s-opensource.com> | 2016-09-09 09:05:32 -0400 |
commit | 4781646c1e13b23ed31eb12f7e314824d98ce066 (patch) | |
tree | e1a60c3f9d443a1bb145b7144590d66fbdcebd18 | |
parent | 82631b5bb268f670613db110177483ae3e85f913 (diff) |
[media] v4l2-mem2mem.h: move descriptions from .c file
Several routines are somewhat documented in the v4l2-mem2mem.c
file. Move what's there to the header file.
Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
-rw-r--r-- | drivers/media/v4l2-core/v4l2-mem2mem.c | 128 | ||||
-rw-r--r-- | include/media/v4l2-mem2mem.h | 132 |
2 files changed, 133 insertions, 127 deletions
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c index 61d56c940f80..6bc27e7b2a33 100644 --- a/drivers/media/v4l2-core/v4l2-mem2mem.c +++ b/drivers/media/v4l2-core/v4l2-mem2mem.c | |||
@@ -76,9 +76,6 @@ static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx, | |||
76 | return &m2m_ctx->cap_q_ctx; | 76 | return &m2m_ctx->cap_q_ctx; |
77 | } | 77 | } |
78 | 78 | ||
79 | /** | ||
80 | * v4l2_m2m_get_vq() - return vb2_queue for the given type | ||
81 | */ | ||
82 | struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx, | 79 | struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx, |
83 | enum v4l2_buf_type type) | 80 | enum v4l2_buf_type type) |
84 | { | 81 | { |
@@ -92,9 +89,6 @@ struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx, | |||
92 | } | 89 | } |
93 | EXPORT_SYMBOL(v4l2_m2m_get_vq); | 90 | EXPORT_SYMBOL(v4l2_m2m_get_vq); |
94 | 91 | ||
95 | /** | ||
96 | * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers | ||
97 | */ | ||
98 | void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx) | 92 | void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx) |
99 | { | 93 | { |
100 | struct v4l2_m2m_buffer *b; | 94 | struct v4l2_m2m_buffer *b; |
@@ -113,10 +107,6 @@ void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx) | |||
113 | } | 107 | } |
114 | EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf); | 108 | EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf); |
115 | 109 | ||
116 | /** | ||
117 | * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and | ||
118 | * return it | ||
119 | */ | ||
120 | void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx) | 110 | void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx) |
121 | { | 111 | { |
122 | struct v4l2_m2m_buffer *b; | 112 | struct v4l2_m2m_buffer *b; |
@@ -140,10 +130,6 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove); | |||
140 | * Scheduling handlers | 130 | * Scheduling handlers |
141 | */ | 131 | */ |
142 | 132 | ||
143 | /** | ||
144 | * v4l2_m2m_get_curr_priv() - return driver private data for the currently | ||
145 | * running instance or NULL if no instance is running | ||
146 | */ | ||
147 | void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev) | 133 | void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev) |
148 | { | 134 | { |
149 | unsigned long flags; | 135 | unsigned long flags; |
@@ -188,26 +174,6 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev) | |||
188 | m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv); | 174 | m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv); |
189 | } | 175 | } |
190 | 176 | ||
191 | /** | ||
192 | * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to | ||
193 | * the pending job queue and add it if so. | ||
194 | * @m2m_ctx: m2m context assigned to the instance to be checked | ||
195 | * | ||
196 | * There are three basic requirements an instance has to meet to be able to run: | ||
197 | * 1) at least one source buffer has to be queued, | ||
198 | * 2) at least one destination buffer has to be queued, | ||
199 | * 3) streaming has to be on. | ||
200 | * | ||
201 | * If a queue is buffered (for example a decoder hardware ringbuffer that has | ||
202 | * to be drained before doing streamoff), allow scheduling without v4l2 buffers | ||
203 | * on that queue. | ||
204 | * | ||
205 | * There may also be additional, custom requirements. In such case the driver | ||
206 | * should supply a custom callback (job_ready in v4l2_m2m_ops) that should | ||
207 | * return 1 if the instance is ready. | ||
208 | * An example of the above could be an instance that requires more than one | ||
209 | * src/dst buffer per transaction. | ||
210 | */ | ||
211 | void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx) | 177 | void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx) |
212 | { | 178 | { |
213 | struct v4l2_m2m_dev *m2m_dev; | 179 | struct v4l2_m2m_dev *m2m_dev; |
@@ -311,18 +277,6 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx) | |||
311 | } | 277 | } |
312 | } | 278 | } |
313 | 279 | ||
314 | /** | ||
315 | * v4l2_m2m_job_finish() - inform the framework that a job has been finished | ||
316 | * and have it clean up | ||
317 | * | ||
318 | * Called by a driver to yield back the device after it has finished with it. | ||
319 | * Should be called as soon as possible after reaching a state which allows | ||
320 | * other instances to take control of the device. | ||
321 | * | ||
322 | * This function has to be called only after device_run() callback has been | ||
323 | * called on the driver. To prevent recursion, it should not be called directly | ||
324 | * from the device_run() callback though. | ||
325 | */ | ||
326 | void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, | 280 | void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, |
327 | struct v4l2_m2m_ctx *m2m_ctx) | 281 | struct v4l2_m2m_ctx *m2m_ctx) |
328 | { | 282 | { |
@@ -350,9 +304,6 @@ void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, | |||
350 | } | 304 | } |
351 | EXPORT_SYMBOL(v4l2_m2m_job_finish); | 305 | EXPORT_SYMBOL(v4l2_m2m_job_finish); |
352 | 306 | ||
353 | /** | ||
354 | * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer | ||
355 | */ | ||
356 | int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 307 | int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
357 | struct v4l2_requestbuffers *reqbufs) | 308 | struct v4l2_requestbuffers *reqbufs) |
358 | { | 309 | { |
@@ -370,11 +321,6 @@ int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | |||
370 | } | 321 | } |
371 | EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs); | 322 | EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs); |
372 | 323 | ||
373 | /** | ||
374 | * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer | ||
375 | * | ||
376 | * See v4l2_m2m_mmap() documentation for details. | ||
377 | */ | ||
378 | int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 324 | int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
379 | struct v4l2_buffer *buf) | 325 | struct v4l2_buffer *buf) |
380 | { | 326 | { |
@@ -400,10 +346,6 @@ int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | |||
400 | } | 346 | } |
401 | EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf); | 347 | EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf); |
402 | 348 | ||
403 | /** | ||
404 | * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on | ||
405 | * the type | ||
406 | */ | ||
407 | int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 349 | int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
408 | struct v4l2_buffer *buf) | 350 | struct v4l2_buffer *buf) |
409 | { | 351 | { |
@@ -419,10 +361,6 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | |||
419 | } | 361 | } |
420 | EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf); | 362 | EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf); |
421 | 363 | ||
422 | /** | ||
423 | * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on | ||
424 | * the type | ||
425 | */ | ||
426 | int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 364 | int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
427 | struct v4l2_buffer *buf) | 365 | struct v4l2_buffer *buf) |
428 | { | 366 | { |
@@ -433,10 +371,6 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | |||
433 | } | 371 | } |
434 | EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf); | 372 | EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf); |
435 | 373 | ||
436 | /** | ||
437 | * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on | ||
438 | * the type | ||
439 | */ | ||
440 | int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 374 | int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
441 | struct v4l2_buffer *buf) | 375 | struct v4l2_buffer *buf) |
442 | { | 376 | { |
@@ -452,10 +386,6 @@ int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | |||
452 | } | 386 | } |
453 | EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf); | 387 | EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf); |
454 | 388 | ||
455 | /** | ||
456 | * v4l2_m2m_create_bufs() - create a source or destination buffer, depending | ||
457 | * on the type | ||
458 | */ | ||
459 | int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 389 | int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
460 | struct v4l2_create_buffers *create) | 390 | struct v4l2_create_buffers *create) |
461 | { | 391 | { |
@@ -466,10 +396,6 @@ int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | |||
466 | } | 396 | } |
467 | EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs); | 397 | EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs); |
468 | 398 | ||
469 | /** | ||
470 | * v4l2_m2m_expbuf() - export a source or destination buffer, depending on | ||
471 | * the type | ||
472 | */ | ||
473 | int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 399 | int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
474 | struct v4l2_exportbuffer *eb) | 400 | struct v4l2_exportbuffer *eb) |
475 | { | 401 | { |
@@ -479,9 +405,7 @@ int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | |||
479 | return vb2_expbuf(vq, eb); | 405 | return vb2_expbuf(vq, eb); |
480 | } | 406 | } |
481 | EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf); | 407 | EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf); |
482 | /** | 408 | |
483 | * v4l2_m2m_streamon() - turn on streaming for a video queue | ||
484 | */ | ||
485 | int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 409 | int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
486 | enum v4l2_buf_type type) | 410 | enum v4l2_buf_type type) |
487 | { | 411 | { |
@@ -497,9 +421,6 @@ int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | |||
497 | } | 421 | } |
498 | EXPORT_SYMBOL_GPL(v4l2_m2m_streamon); | 422 | EXPORT_SYMBOL_GPL(v4l2_m2m_streamon); |
499 | 423 | ||
500 | /** | ||
501 | * v4l2_m2m_streamoff() - turn off streaming for a video queue | ||
502 | */ | ||
503 | int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 424 | int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
504 | enum v4l2_buf_type type) | 425 | enum v4l2_buf_type type) |
505 | { | 426 | { |
@@ -540,14 +461,6 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | |||
540 | } | 461 | } |
541 | EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff); | 462 | EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff); |
542 | 463 | ||
543 | /** | ||
544 | * v4l2_m2m_poll() - poll replacement, for destination buffers only | ||
545 | * | ||
546 | * Call from the driver's poll() function. Will poll both queues. If a buffer | ||
547 | * is available to dequeue (with dqbuf) from the source queue, this will | ||
548 | * indicate that a non-blocking write can be performed, while read will be | ||
549 | * returned in case of the destination queue. | ||
550 | */ | ||
551 | unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 464 | unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
552 | struct poll_table_struct *wait) | 465 | struct poll_table_struct *wait) |
553 | { | 466 | { |
@@ -626,16 +539,6 @@ end: | |||
626 | } | 539 | } |
627 | EXPORT_SYMBOL_GPL(v4l2_m2m_poll); | 540 | EXPORT_SYMBOL_GPL(v4l2_m2m_poll); |
628 | 541 | ||
629 | /** | ||
630 | * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer | ||
631 | * | ||
632 | * Call from driver's mmap() function. Will handle mmap() for both queues | ||
633 | * seamlessly for videobuffer, which will receive normal per-queue offsets and | ||
634 | * proper videobuf queue pointers. The differentiation is made outside videobuf | ||
635 | * by adding a predefined offset to buffers from one of the queues and | ||
636 | * subtracting it before passing it back to videobuf. Only drivers (and | ||
637 | * thus applications) receive modified offsets. | ||
638 | */ | ||
639 | int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 542 | int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
640 | struct vm_area_struct *vma) | 543 | struct vm_area_struct *vma) |
641 | { | 544 | { |
@@ -653,11 +556,6 @@ int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | |||
653 | } | 556 | } |
654 | EXPORT_SYMBOL(v4l2_m2m_mmap); | 557 | EXPORT_SYMBOL(v4l2_m2m_mmap); |
655 | 558 | ||
656 | /** | ||
657 | * v4l2_m2m_init() - initialize per-driver m2m data | ||
658 | * | ||
659 | * Usually called from driver's probe() function. | ||
660 | */ | ||
661 | struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops) | 559 | struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops) |
662 | { | 560 | { |
663 | struct v4l2_m2m_dev *m2m_dev; | 561 | struct v4l2_m2m_dev *m2m_dev; |
@@ -679,26 +577,12 @@ struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops) | |||
679 | } | 577 | } |
680 | EXPORT_SYMBOL_GPL(v4l2_m2m_init); | 578 | EXPORT_SYMBOL_GPL(v4l2_m2m_init); |
681 | 579 | ||
682 | /** | ||
683 | * v4l2_m2m_release() - cleans up and frees a m2m_dev structure | ||
684 | * | ||
685 | * Usually called from driver's remove() function. | ||
686 | */ | ||
687 | void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev) | 580 | void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev) |
688 | { | 581 | { |
689 | kfree(m2m_dev); | 582 | kfree(m2m_dev); |
690 | } | 583 | } |
691 | EXPORT_SYMBOL_GPL(v4l2_m2m_release); | 584 | EXPORT_SYMBOL_GPL(v4l2_m2m_release); |
692 | 585 | ||
693 | /** | ||
694 | * v4l2_m2m_ctx_init() - allocate and initialize a m2m context | ||
695 | * @priv - driver's instance private data | ||
696 | * @m2m_dev - a previously initialized m2m_dev struct | ||
697 | * @vq_init - a callback for queue type-specific initialization function to be | ||
698 | * used for initializing videobuf_queues | ||
699 | * | ||
700 | * Usually called from driver's open() function. | ||
701 | */ | ||
702 | struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev, | 586 | struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev, |
703 | void *drv_priv, | 587 | void *drv_priv, |
704 | int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)) | 588 | int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)) |
@@ -744,11 +628,6 @@ err: | |||
744 | } | 628 | } |
745 | EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init); | 629 | EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init); |
746 | 630 | ||
747 | /** | ||
748 | * v4l2_m2m_ctx_release() - release m2m context | ||
749 | * | ||
750 | * Usually called from driver's release() function. | ||
751 | */ | ||
752 | void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx) | 631 | void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx) |
753 | { | 632 | { |
754 | /* wait until the current context is dequeued from job_queue */ | 633 | /* wait until the current context is dequeued from job_queue */ |
@@ -761,11 +640,6 @@ void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx) | |||
761 | } | 640 | } |
762 | EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release); | 641 | EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release); |
763 | 642 | ||
764 | /** | ||
765 | * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list. | ||
766 | * | ||
767 | * Call from buf_queue(), videobuf_queue_ops callback. | ||
768 | */ | ||
769 | void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, | 643 | void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, |
770 | struct vb2_v4l2_buffer *vbuf) | 644 | struct vb2_v4l2_buffer *vbuf) |
771 | { | 645 | { |
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h index 5a9597dd1ee0..e5449a2c8475 100644 --- a/include/media/v4l2-mem2mem.h +++ b/include/media/v4l2-mem2mem.h | |||
@@ -94,13 +94,52 @@ struct v4l2_m2m_buffer { | |||
94 | struct list_head list; | 94 | struct list_head list; |
95 | }; | 95 | }; |
96 | 96 | ||
97 | /** | ||
98 | * v4l2_m2m_get_curr_priv() - return driver private data for the currently | ||
99 | * running instance or NULL if no instance is running | ||
100 | */ | ||
97 | void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev); | 101 | void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev); |
98 | 102 | ||
103 | /** | ||
104 | * v4l2_m2m_get_vq() - return vb2_queue for the given type | ||
105 | */ | ||
99 | struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx, | 106 | struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx, |
100 | enum v4l2_buf_type type); | 107 | enum v4l2_buf_type type); |
101 | 108 | ||
109 | /** | ||
110 | * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to | ||
111 | * the pending job queue and add it if so. | ||
112 | * @m2m_ctx: m2m context assigned to the instance to be checked | ||
113 | * | ||
114 | * There are three basic requirements an instance has to meet to be able to run: | ||
115 | * 1) at least one source buffer has to be queued, | ||
116 | * 2) at least one destination buffer has to be queued, | ||
117 | * 3) streaming has to be on. | ||
118 | * | ||
119 | * If a queue is buffered (for example a decoder hardware ringbuffer that has | ||
120 | * to be drained before doing streamoff), allow scheduling without v4l2 buffers | ||
121 | * on that queue. | ||
122 | * | ||
123 | * There may also be additional, custom requirements. In such case the driver | ||
124 | * should supply a custom callback (job_ready in v4l2_m2m_ops) that should | ||
125 | * return 1 if the instance is ready. | ||
126 | * An example of the above could be an instance that requires more than one | ||
127 | * src/dst buffer per transaction. | ||
128 | */ | ||
102 | void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx); | 129 | void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx); |
103 | 130 | ||
131 | /** | ||
132 | * v4l2_m2m_job_finish() - inform the framework that a job has been finished | ||
133 | * and have it clean up | ||
134 | * | ||
135 | * Called by a driver to yield back the device after it has finished with it. | ||
136 | * Should be called as soon as possible after reaching a state which allows | ||
137 | * other instances to take control of the device. | ||
138 | * | ||
139 | * This function has to be called only after device_run() callback has been | ||
140 | * called on the driver. To prevent recursion, it should not be called directly | ||
141 | * from the device_run() callback though. | ||
142 | */ | ||
104 | void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, | 143 | void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, |
105 | struct v4l2_m2m_ctx *m2m_ctx); | 144 | struct v4l2_m2m_ctx *m2m_ctx); |
106 | 145 | ||
@@ -110,38 +149,114 @@ v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state) | |||
110 | vb2_buffer_done(&buf->vb2_buf, state); | 149 | vb2_buffer_done(&buf->vb2_buf, state); |
111 | } | 150 | } |
112 | 151 | ||
152 | /** | ||
153 | * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer | ||
154 | */ | ||
113 | int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 155 | int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
114 | struct v4l2_requestbuffers *reqbufs); | 156 | struct v4l2_requestbuffers *reqbufs); |
115 | 157 | ||
158 | /** | ||
159 | * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer | ||
160 | * | ||
161 | * See v4l2_m2m_mmap() documentation for details. | ||
162 | */ | ||
116 | int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 163 | int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
117 | struct v4l2_buffer *buf); | 164 | struct v4l2_buffer *buf); |
118 | 165 | ||
166 | /** | ||
167 | * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on | ||
168 | * the type | ||
169 | */ | ||
119 | int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 170 | int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
120 | struct v4l2_buffer *buf); | 171 | struct v4l2_buffer *buf); |
172 | |||
173 | /** | ||
174 | * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on | ||
175 | * the type | ||
176 | */ | ||
121 | int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 177 | int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
122 | struct v4l2_buffer *buf); | 178 | struct v4l2_buffer *buf); |
179 | |||
180 | /** | ||
181 | * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on | ||
182 | * the type | ||
183 | */ | ||
123 | int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 184 | int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
124 | struct v4l2_buffer *buf); | 185 | struct v4l2_buffer *buf); |
186 | |||
187 | /** | ||
188 | * v4l2_m2m_create_bufs() - create a source or destination buffer, depending | ||
189 | * on the type | ||
190 | */ | ||
125 | int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 191 | int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
126 | struct v4l2_create_buffers *create); | 192 | struct v4l2_create_buffers *create); |
127 | 193 | ||
194 | /** | ||
195 | * v4l2_m2m_expbuf() - export a source or destination buffer, depending on | ||
196 | * the type | ||
197 | */ | ||
128 | int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 198 | int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
129 | struct v4l2_exportbuffer *eb); | 199 | struct v4l2_exportbuffer *eb); |
130 | 200 | ||
201 | /** | ||
202 | * v4l2_m2m_streamon() - turn on streaming for a video queue | ||
203 | */ | ||
131 | int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 204 | int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
132 | enum v4l2_buf_type type); | 205 | enum v4l2_buf_type type); |
206 | |||
207 | /** | ||
208 | * v4l2_m2m_streamoff() - turn off streaming for a video queue | ||
209 | */ | ||
133 | int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 210 | int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
134 | enum v4l2_buf_type type); | 211 | enum v4l2_buf_type type); |
135 | 212 | ||
213 | /** | ||
214 | * v4l2_m2m_poll() - poll replacement, for destination buffers only | ||
215 | * | ||
216 | * Call from the driver's poll() function. Will poll both queues. If a buffer | ||
217 | * is available to dequeue (with dqbuf) from the source queue, this will | ||
218 | * indicate that a non-blocking write can be performed, while read will be | ||
219 | * returned in case of the destination queue. | ||
220 | */ | ||
136 | unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 221 | unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
137 | struct poll_table_struct *wait); | 222 | struct poll_table_struct *wait); |
138 | 223 | ||
224 | /** | ||
225 | * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer | ||
226 | * | ||
227 | * Call from driver's mmap() function. Will handle mmap() for both queues | ||
228 | * seamlessly for videobuffer, which will receive normal per-queue offsets and | ||
229 | * proper videobuf queue pointers. The differentiation is made outside videobuf | ||
230 | * by adding a predefined offset to buffers from one of the queues and | ||
231 | * subtracting it before passing it back to videobuf. Only drivers (and | ||
232 | * thus applications) receive modified offsets. | ||
233 | */ | ||
139 | int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 234 | int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
140 | struct vm_area_struct *vma); | 235 | struct vm_area_struct *vma); |
141 | 236 | ||
237 | /** | ||
238 | * v4l2_m2m_init() - initialize per-driver m2m data | ||
239 | * | ||
240 | * Usually called from driver's probe() function. | ||
241 | */ | ||
142 | struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops); | 242 | struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops); |
243 | |||
244 | /** | ||
245 | * v4l2_m2m_release() - cleans up and frees a m2m_dev structure | ||
246 | * | ||
247 | * Usually called from driver's remove() function. | ||
248 | */ | ||
143 | void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev); | 249 | void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev); |
144 | 250 | ||
251 | /** | ||
252 | * v4l2_m2m_ctx_init() - allocate and initialize a m2m context | ||
253 | * @priv - driver's instance private data | ||
254 | * @m2m_dev - a previously initialized m2m_dev struct | ||
255 | * @vq_init - a callback for queue type-specific initialization function to be | ||
256 | * used for initializing videobuf_queues | ||
257 | * | ||
258 | * Usually called from driver's open() function. | ||
259 | */ | ||
145 | struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev, | 260 | struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev, |
146 | void *drv_priv, | 261 | void *drv_priv, |
147 | int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)); | 262 | int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)); |
@@ -158,8 +273,18 @@ static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx, | |||
158 | m2m_ctx->cap_q_ctx.buffered = buffered; | 273 | m2m_ctx->cap_q_ctx.buffered = buffered; |
159 | } | 274 | } |
160 | 275 | ||
276 | /** | ||
277 | * v4l2_m2m_ctx_release() - release m2m context | ||
278 | * | ||
279 | * Usually called from driver's release() function. | ||
280 | */ | ||
161 | void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx); | 281 | void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx); |
162 | 282 | ||
283 | /** | ||
284 | * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list. | ||
285 | * | ||
286 | * Call from buf_queue(), videobuf_queue_ops callback. | ||
287 | */ | ||
163 | void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, | 288 | void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, |
164 | struct vb2_v4l2_buffer *vbuf); | 289 | struct vb2_v4l2_buffer *vbuf); |
165 | 290 | ||
@@ -187,6 +312,9 @@ unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) | |||
187 | return m2m_ctx->cap_q_ctx.num_rdy; | 312 | return m2m_ctx->cap_q_ctx.num_rdy; |
188 | } | 313 | } |
189 | 314 | ||
315 | /** | ||
316 | * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers | ||
317 | */ | ||
190 | void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx); | 318 | void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx); |
191 | 319 | ||
192 | /** | 320 | /** |
@@ -233,6 +361,10 @@ struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx) | |||
233 | return &m2m_ctx->cap_q_ctx.q; | 361 | return &m2m_ctx->cap_q_ctx.q; |
234 | } | 362 | } |
235 | 363 | ||
364 | /** | ||
365 | * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and | ||
366 | * return it | ||
367 | */ | ||
236 | void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx); | 368 | void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx); |
237 | 369 | ||
238 | /** | 370 | /** |