author	Shaik Ameer Basha <shaik.ameer@samsung.com>	2013-08-13 01:58:07 -0400
committer	Mauro Carvalho Chehab <m.chehab@samsung.com>	2013-08-24 03:04:41 -0400
commit	fea564a5f6a4ed6d3241aa90714ae2f0894a768a (patch)
tree	c77981df3bfb51d2a9e63b0602bf810f14f9698d
parent	976f375df1730dd16aa7c101298ec47bdd338d79 (diff)
[media] v4l2-mem2mem: clear m2m context from job_queue before ctx streamoff
When streamoff is called on a context that is still on the job_queue:

1] device_run may receive empty vb2 buffers, because v4l2_m2m_streamoff()
   drops the ready queue.
2] v4l2_m2m_job_finish() may not succeed, because m2m_dev->curr_ctx is set
   to NULL in v4l2_m2m_streamoff().

Either of the above can stall the execution of the other queued contexts.

This patch makes sure that, before streamoff is executed on any context,
that context is neither running nor queued in the job_queue:

1] If the context is currently running, job_abort is called and the
   framework waits for the job to finish.
2] If the context is queued, it is removed from the job_queue.

Signed-off-by: Shaik Ameer Basha <shaik.ameer@samsung.com>
Signed-off-by: Kamil Debski <k.debski@samsung.com>
Signed-off-by: Mauro Carvalho Chehab <m.chehab@samsung.com>
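For readers less familiar with the mem2mem framework, the sketch below shows the driver side that the new v4l2_m2m_cancel_job() relies on: the driver's job_abort callback asks the hardware to stop, and when the transaction completes the driver calls v4l2_m2m_job_finish(), which clears TRANS_RUNNING and wakes the wait_event() added by this patch. This is only an illustrative sketch, not part of the patch; the my_dev/my_ctx structures and the my_* functions are hypothetical, while the v4l2_m2m_* and vb2_* calls are the existing framework API.

#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-core.h>

/* Hypothetical per-device and per-context state, for illustration only. */
struct my_dev {
	struct v4l2_m2m_dev *m2m_dev;
};

struct my_ctx {
	struct v4l2_m2m_ctx *m2m_ctx;
	struct my_dev *dev;
	bool aborting;
};

static void my_device_run(void *priv)
{
	struct my_ctx *ctx = priv;
	struct vb2_buffer *src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	struct vb2_buffer *dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);

	/* Program the hardware with src/dst and start the transaction. */
}

static void my_job_abort(void *priv)
{
	struct my_ctx *ctx = priv;

	/*
	 * Ask the hardware to stop as soon as possible; the completion
	 * path below still runs and calls v4l2_m2m_job_finish(), which
	 * clears TRANS_RUNNING and wakes the wait_event() in
	 * v4l2_m2m_cancel_job().
	 */
	ctx->aborting = true;
}

/* Completion path, e.g. called from the device's interrupt handler. */
static void my_job_done(struct my_ctx *ctx)
{
	struct vb2_buffer *src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	struct vb2_buffer *dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

	vb2_buffer_done(src, VB2_BUF_STATE_DONE);
	vb2_buffer_done(dst, VB2_BUF_STATE_DONE);
	v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->m2m_ctx);
}

static const struct v4l2_m2m_ops my_m2m_ops = {
	.device_run	= my_device_run,
	.job_abort	= my_job_abort,
};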
-rw-r--r--	drivers/media/v4l2-core/v4l2-mem2mem.c | 59
1 file changed, 38 insertions(+), 21 deletions(-)
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 89b90672088c..7c4371288215 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -266,6 +266,39 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
 }
 
 /**
+ * v4l2_m2m_cancel_job() - cancel pending jobs for the context
+ *
+ * In case of streamoff or release called on any context,
+ * 1] If the context is currently running, then abort job will be called
+ * 2] If the context is queued, then the context will be removed from
+ *    the job_queue
+ */
+static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
+{
+	struct v4l2_m2m_dev *m2m_dev;
+	unsigned long flags;
+
+	m2m_dev = m2m_ctx->m2m_dev;
+	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+	if (m2m_ctx->job_flags & TRANS_RUNNING) {
+		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
+		dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
+		wait_event(m2m_ctx->finished,
+				!(m2m_ctx->job_flags & TRANS_RUNNING));
+	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
+		list_del(&m2m_ctx->queue);
+		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
+		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+		dprintk("m2m_ctx: %p had been on queue and was removed\n",
+			m2m_ctx);
+	} else {
+		/* Do nothing, was not on queue/running */
+		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+	}
+}
+
+/**
  * v4l2_m2m_job_finish() - inform the framework that a job has been finished
  *  and have it clean up
  *
@@ -436,6 +469,9 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 	unsigned long flags_job, flags;
 	int ret;
 
+	/* wait until the current context is dequeued from job_queue */
+	v4l2_m2m_cancel_job(m2m_ctx);
+
 	q_ctx = get_queue_ctx(m2m_ctx, type);
 	ret = vb2_streamoff(&q_ctx->q, type);
 	if (ret)
@@ -658,27 +694,8 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
  */
 void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
 {
-	struct v4l2_m2m_dev *m2m_dev;
-	unsigned long flags;
-
-	m2m_dev = m2m_ctx->m2m_dev;
-
-	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
-	if (m2m_ctx->job_flags & TRANS_RUNNING) {
-		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
-		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
-		dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
-		wait_event(m2m_ctx->finished, !(m2m_ctx->job_flags & TRANS_RUNNING));
-	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
-		list_del(&m2m_ctx->queue);
-		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
-		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
-		dprintk("m2m_ctx: %p had been on queue and was removed\n",
-			m2m_ctx);
-	} else {
-		/* Do nothing, was not on queue/running */
-		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
-	}
+	/* wait until the current context is dequeued from job_queue */
+	v4l2_m2m_cancel_job(m2m_ctx);
 
 	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
 	vb2_queue_release(&m2m_ctx->out_q_ctx.q);