author	Philipp Zabel <p.zabel@pengutronix.de>	2013-06-03 03:23:48 -0400
committer	Mauro Carvalho Chehab <m.chehab@samsung.com>	2013-07-26 09:00:33 -0400
commit	33bdd5a88a0fb7fbd08947261b243fcec4ff089d (patch)
tree	60d667ab66261be2cf361c648a7eff54ba0b920a
parent	c859e6ef33ac0c9a5e9e934fe11a2232752b4e96 (diff)
[media] mem2mem: add support for hardware buffered queue
On mem2mem decoders with a hardware bitstream ringbuffer, to drain the
buffer at the end of the stream, remaining frames might need to be decoded
from the bitstream buffer without additional input buffers being provided.

To achieve this, allow a queue to be marked as buffered by the driver, and
allow scheduling of device_runs when buffered ready queues are empty. This
also allows a driver to copy input buffers into its bitstream ringbuffer
and immediately mark them as done to be dequeued.

The motivation for this patch is hardware-assisted h.264 reordering support
in the coda driver. For high profile streams, the coda can hold back
out-of-order frames, causing a few mem2mem device runs at the beginning
that don't produce any decompressed buffer on the v4l2 capture side. At the
same time, the last few frames can be decoded from the bitstream with
mem2mem device runs that don't need a new input buffer on the v4l2 output
side. The decoder command ioctl can be used to put the decoder into this
ringbuffer-draining end-of-stream mode.

Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
Acked-by: Sylwester Nawrocki <s.nawrocki@samsung.com>
Signed-off-by: Kamil Debski <k.debski@samsung.com>
Signed-off-by: Mauro Carvalho Chehab <m.chehab@samsung.com>
-rw-r--r--	drivers/media/v4l2-core/v4l2-mem2mem.c	10
-rw-r--r--	include/media/v4l2-mem2mem.h	13
2 files changed, 21 insertions, 2 deletions
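As a rough illustration of the new interface (not part of this patch): a
driver feeding a hardware bitstream ringbuffer could mark its OUTPUT queue
buffered right after context setup and return input buffers as soon as
their payload has been copied into the ring. In the sketch below only
v4l2_m2m_ctx_init(), v4l2_m2m_set_src_buffered(), v4l2_m2m_src_buf_remove()
and the vb2 helpers are existing API; my_ctx, my_queue_init, my_ring_write()
and my_start_decode() are hypothetical driver code.

	/* in the driver's open(): mark the OUTPUT (source) queue buffered so
	 * device_run can still be scheduled while its rdy_queue is empty */
	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &my_queue_init);
	if (IS_ERR(ctx->m2m_ctx))
		return PTR_ERR(ctx->m2m_ctx);
	v4l2_m2m_set_src_buffered(ctx->m2m_ctx, true);

	static void my_device_run(void *priv)
	{
		struct my_ctx *ctx = priv;
		struct vb2_buffer *src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);

		/* src is NULL when the buffered queue is empty, i.e. the
		 * remaining frames are decoded out of the ringbuffer */
		if (src) {
			my_ring_write(ctx, vb2_plane_vaddr(src, 0),
				      vb2_get_plane_payload(src, 0));
			/* give the input buffer back immediately */
			vb2_buffer_done(src, VB2_BUF_STATE_DONE);
		}
		my_start_decode(ctx);
	}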
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index e96497f7c3ed..89b90672088c 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -196,6 +196,10 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
  * 2) at least one destination buffer has to be queued,
  * 3) streaming has to be on.
  *
+ * If a queue is buffered (for example a decoder hardware ringbuffer that has
+ * to be drained before doing streamoff), allow scheduling without v4l2 buffers
+ * on that queue.
+ *
  * There may also be additional, custom requirements. In such case the driver
  * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
  * return 1 if the instance is ready.
@@ -224,7 +228,8 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
 	}
 
 	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
-	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
+	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
+	    && !m2m_ctx->out_q_ctx.buffered) {
 		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
 					flags_out);
 		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
@@ -232,7 +237,8 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
 		return;
 	}
 	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
-	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
+	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
+	    && !m2m_ctx->cap_q_ctx.buffered) {
 		spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
 					flags_cap);
 		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 0f4555b2a31b..44542a20ab81 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -60,6 +60,7 @@ struct v4l2_m2m_queue_ctx {
 	struct list_head	rdy_queue;
 	spinlock_t		rdy_spinlock;
 	u8			num_rdy;
+	bool			buffered;
 };
 
 struct v4l2_m2m_ctx {
@@ -134,6 +135,18 @@ struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
 		void *drv_priv,
 		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq));
 
+static inline void v4l2_m2m_set_src_buffered(struct v4l2_m2m_ctx *m2m_ctx,
+					     bool buffered)
+{
+	m2m_ctx->out_q_ctx.buffered = buffered;
+}
+
+static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
+					     bool buffered)
+{
+	m2m_ctx->cap_q_ctx.buffered = buffered;
+}
+
 void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);
 
 void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb);
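The draining mode itself is entered from userspace via the decoder command
ioctl mentioned in the commit message. A minimal sketch, assuming fd is an
open file descriptor of the m2m decoder device node (how the driver signals
that the last frame has been decoded, e.g. via a V4L2_EVENT_EOS event, is
device-specific):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	static int drain_decoder(int fd)
	{
		struct v4l2_decoder_cmd cmd;

		memset(&cmd, 0, sizeof(cmd));
		cmd.cmd = V4L2_DEC_CMD_STOP;
		/* tells the driver that no further OUTPUT buffers will be
		 * queued; remaining frames come out of the ringbuffer */
		return ioctl(fd, VIDIOC_DECODER_CMD, &cmd);
	}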