-rw-r--r--	drivers/media/v4l2-core/v4l2-mem2mem.c	10
-rw-r--r--	include/media/v4l2-mem2mem.h	13
2 files changed, 21 insertions, 2 deletions
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index e96497f7c3ed..89b90672088c 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -196,6 +196,10 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
  * 2) at least one destination buffer has to be queued,
  * 3) streaming has to be on.
  *
+ * If a queue is buffered (for example a decoder hardware ringbuffer that has
+ * to be drained before doing streamoff), allow scheduling without v4l2 buffers
+ * on that queue.
+ *
  * There may also be additional, custom requirements. In such case the driver
  * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
  * return 1 if the instance is ready.
@@ -224,7 +228,8 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
 	}
 
 	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
-	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
+	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
+	    && !m2m_ctx->out_q_ctx.buffered) {
 		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
 					flags_out);
 		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
@@ -232,7 +237,8 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
 		return;
 	}
 	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
-	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
+	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
+	    && !m2m_ctx->cap_q_ctx.buffered) {
 		spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
 					flags_cap);
 		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
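
In short: after this change a queue blocks job scheduling only if it is both
empty and not marked buffered. As a rough sketch of the per-queue test
(locking, the abort handling and the driver's job_ready callback in the real
function are omitted; queue_permits_scheduling() is a hypothetical helper for
illustration, not part of the patch):

	/* A queue permits scheduling a job if it has at least one ready
	 * buffer, or if it is marked buffered (e.g. the hardware holds
	 * pending data in an internal ring buffer). */
	static bool queue_permits_scheduling(struct v4l2_m2m_queue_ctx *q_ctx)
	{
		return !list_empty(&q_ctx->rdy_queue) || q_ctx->buffered;
	}
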
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 0f4555b2a31b..44542a20ab81 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -60,6 +60,7 @@ struct v4l2_m2m_queue_ctx {
 	struct list_head rdy_queue;
 	spinlock_t rdy_spinlock;
 	u8 num_rdy;
+	bool buffered;
 };
 
 struct v4l2_m2m_ctx {
@@ -134,6 +135,18 @@ struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
 		void *drv_priv,
 		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq));
 
+static inline void v4l2_m2m_set_src_buffered(struct v4l2_m2m_ctx *m2m_ctx,
+					     bool buffered)
+{
+	m2m_ctx->out_q_ctx.buffered = buffered;
+}
+
+static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
+					     bool buffered)
+{
+	m2m_ctx->cap_q_ctx.buffered = buffered;
+}
+
 void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);
 
 void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb);
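
A driver opting in would typically set these flags right after creating its
m2m context. A minimal sketch of an open() handler, assuming hypothetical
mydrv_* names (only v4l2_m2m_ctx_init() and the two setters above come from
this patch):

	static int mydrv_open(struct file *file)
	{
		struct mydrv_dev *dev = video_drvdata(file);
		struct mydrv_ctx *ctx;

		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
						 mydrv_queue_init);
		if (IS_ERR(ctx->m2m_ctx)) {
			kfree(ctx);
			return PTR_ERR(ctx->m2m_ctx);
		}

		/* The decoder drains a hardware ring buffer, so let jobs be
		 * scheduled even while the OUTPUT queue is momentarily empty. */
		v4l2_m2m_set_src_buffered(ctx->m2m_ctx, true);

		return 0;
	}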