 drivers/media/video/Kconfig        |  14
 drivers/media/video/Makefile       |   2
 drivers/media/video/v4l2-mem2mem.c | 633
 include/media/v4l2-mem2mem.h       | 201
 4 files changed, 850 insertions(+), 0 deletions(-)
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 747f106e7b53..f8cbd4adebfd 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -45,6 +45,10 @@ config VIDEO_TUNER
 	tristate
 	depends on MEDIA_TUNER
 
+config V4L2_MEM2MEM_DEV
+	tristate
+	depends on VIDEOBUF_GEN
+
 #
 # Multimedia Video device configuration
 #
@@ -1121,3 +1125,13 @@ config USB_S2255
 
 endif # V4L_USB_DRIVERS
 endif # VIDEO_CAPTURE_DRIVERS
+
+menuconfig V4L_MEM2MEM_DRIVERS
+	bool "Memory-to-memory multimedia devices"
+	depends on VIDEO_V4L2
+	default n
+	---help---
+	  Say Y here to enable selecting drivers for V4L devices that
+	  use system memory for both source and destination buffers, as opposed
+	  to capture and output drivers, which use memory buffers for just
+	  one of those.
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 4555f5f31ff0..b57be2f542e8 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -117,6 +117,8 @@ obj-$(CONFIG_VIDEOBUF_VMALLOC) += videobuf-vmalloc.o
 obj-$(CONFIG_VIDEOBUF_DVB) += videobuf-dvb.o
 obj-$(CONFIG_VIDEO_BTCX) += btcx-risc.o
 
+obj-$(CONFIG_V4L2_MEM2MEM_DEV) += v4l2-mem2mem.o
+
 obj-$(CONFIG_VIDEO_M32R_AR_M64278) += arv.o
 
 obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
diff --git a/drivers/media/video/v4l2-mem2mem.c b/drivers/media/video/v4l2-mem2mem.c
new file mode 100644
index 000000000000..f45f9405ea39
--- /dev/null
+++ b/drivers/media/video/v4l2-mem2mem.c
@@ -0,0 +1,633 @@
/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <p.osciak@samsung.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf-core.h>
#include <media/v4l2-mem2mem.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <p.osciak@samsung.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED	(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING	(1 << 1)


/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)


/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;

	struct list_head	job_queue;
	spinlock_t		job_spinlock;

	struct v4l2_m2m_ops	*m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	switch (type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		return &m2m_ctx->cap_q_ctx;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		return &m2m_ctx->out_q_ctx;
	default:
		printk(KERN_ERR "Invalid buffer type\n");
		return NULL;
	}
}

/**
 * v4l2_m2m_get_vq() - return videobuf_queue for the given type
 */
struct videobuf_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				       enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 */
void *v4l2_m2m_next_buf(struct v4l2_m2m_ctx *m2m_ctx, enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;
	struct videobuf_buffer *vb = NULL;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	spin_lock_irqsave(q_ctx->q.irqlock, flags);

	if (list_empty(&q_ctx->rdy_queue))
		goto end;

	vb = list_entry(q_ctx->rdy_queue.next, struct videobuf_buffer, queue);
	vb->state = VIDEOBUF_ACTIVE;

end:
	spin_unlock_irqrestore(q_ctx->q.irqlock, flags);
	return vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
 * return it
 */
void *v4l2_m2m_buf_remove(struct v4l2_m2m_ctx *m2m_ctx, enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;
	struct videobuf_buffer *vb = NULL;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	spin_lock_irqsave(q_ctx->q.irqlock, flags);
	if (!list_empty(&q_ctx->rdy_queue)) {
		vb = list_entry(q_ctx->rdy_queue.next, struct videobuf_buffer,
				queue);
		list_del(&vb->queue);
		q_ctx->num_rdy--;
	}
	spin_unlock_irqrestore(q_ctx->q.irqlock, flags);

	return vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

/*
 * Scheduling handlers
 */

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (NULL != m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_entry(m2m_dev->job_queue.next,
				       struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so
 * @m2m_ctx:	m2m context assigned to the instance to be checked
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * There may also be additional, custom requirements. In such a case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 * return 1 if the instance is ready.
 * An example of the above could be an instance that requires more than one
 * src/dst buffer per transaction (see the sketch below).
 */
static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags_job, flags;

	m2m_dev = m2m_ctx->m2m_dev;
	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("On job queue already\n");
		return;
	}

	spin_lock_irqsave(m2m_ctx->out_q_ctx.q.irqlock, flags);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
		spin_unlock_irqrestore(m2m_ctx->out_q_ctx.q.irqlock, flags);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No input buffers available\n");
		return;
	}
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
		spin_unlock_irqrestore(m2m_ctx->out_q_ctx.q.irqlock, flags);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No output buffers available\n");
		return;
	}
	spin_unlock_irqrestore(m2m_ctx->out_q_ctx.q.irqlock, flags);

	if (m2m_dev->m2m_ops->job_ready
	    && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Driver not ready\n");
		return;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	v4l2_m2m_try_run(m2m_dev);
}

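/*
 * The sketches below are illustration only and not part of this patch:
 * "struct my_dev", "struct my_ctx" and all "my_*" names are hypothetical,
 * standing in for a driver using this framework.
 */
struct my_dev {
	struct v4l2_m2m_dev	*m2m_dev;
};

struct my_ctx {
	struct my_dev		*dev;
	struct v4l2_m2m_ctx	*m2m_ctx;
};

/*
 * A job_ready callback for the custom-requirements case described above:
 * this hypothetical instance only becomes schedulable once two source
 * buffers and one destination buffer have been queued.
 */
static int my_job_ready(void *priv)
{
	struct my_ctx *ctx = priv;

	return v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) >= 2
	       && v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) >= 1;
}
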
/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after the device_run() callback has been
 * called on the driver. To prevent recursion, it should not be called directly
 * from the device_run() callback though.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/* This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes. */
	v4l2_m2m_try_schedule(m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);

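/*
 * Sketch of a typical v4l2_m2m_job_finish() call site (illustration only;
 * assumes <linux/interrupt.h> and the hypothetical my_dev/my_ctx above):
 * the driver's interrupt handler takes the finished source/destination pair
 * off the ready lists, marks both done, wakes up any waiters and only then
 * yields the device back to the framework.
 */
static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;
	struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
	struct videobuf_buffer *src_vb, *dst_vb;

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

	src_vb->state = VIDEOBUF_DONE;
	dst_vb->state = VIDEOBUF_DONE;
	wake_up(&src_vb->done);
	wake_up(&dst_vb->done);

	v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);

	return IRQ_HANDLED;
}
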
/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct videobuf_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	return videobuf_reqbufs(vq, reqbufs);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct videobuf_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = videobuf_querybuf(vq, buf);

	if (buf->memory == V4L2_MEMORY_MMAP
	    && vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		buf->m.offset += DST_QUEUE_OFF_BASE;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct videobuf_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = videobuf_qbuf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct videobuf_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return videobuf_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct videobuf_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = videobuf_streamon(vq);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct videobuf_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	return videobuf_streamoff(vq);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

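/*
 * Sketch of how a driver's ioctl handlers can delegate to the multiplexers
 * above (illustration only; the remaining ioctls follow the same pattern).
 * file->private_data is assumed to hold the instance's my_ctx, as set up in
 * the open() sketch further below.
 */
static int my_vidioc_qbuf(struct file *file, void *priv,
			  struct v4l2_buffer *buf)
{
	struct my_ctx *ctx = file->private_data;

	return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
}
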
/**
 * v4l2_m2m_poll() - poll replacement, polling both the source and the
 * destination queue
 *
 * Call from the driver's poll() function. Polls both queues. If a buffer on
 * the source queue is available to dequeue (with dqbuf), POLLOUT | POLLWRNORM
 * is reported to indicate that a non-blocking write is possible; a
 * dequeueable buffer on the destination queue is reported as
 * POLLIN | POLLRDNORM for a non-blocking read.
 */
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait)
{
	struct videobuf_queue *src_q, *dst_q;
	struct videobuf_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	mutex_lock(&src_q->vb_lock);
	mutex_lock(&dst_q->vb_lock);

	if (src_q->streaming && !list_empty(&src_q->stream))
		src_vb = list_first_entry(&src_q->stream,
					  struct videobuf_buffer, stream);
	if (dst_q->streaming && !list_empty(&dst_q->stream))
		dst_vb = list_first_entry(&dst_q->stream,
					  struct videobuf_buffer, stream);

	if (!src_vb && !dst_vb) {
		rc = POLLERR;
		goto end;
	}

	if (src_vb) {
		poll_wait(file, &src_vb->done, wait);
		if (src_vb->state == VIDEOBUF_DONE
		    || src_vb->state == VIDEOBUF_ERROR)
			rc |= POLLOUT | POLLWRNORM;
	}
	if (dst_vb) {
		poll_wait(file, &dst_vb->done, wait);
		if (dst_vb->state == VIDEOBUF_DONE
		    || dst_vb->state == VIDEOBUF_ERROR)
			rc |= POLLIN | POLLRDNORM;
	}

end:
	mutex_unlock(&dst_q->vb_lock);
	mutex_unlock(&src_q->vb_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

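/*
 * Sketch of a driver poll() file operation delegating to v4l2_m2m_poll()
 * (illustration only, reusing the hypothetical my_ctx convention):
 */
static unsigned int my_poll(struct file *file, struct poll_table_struct *wait)
{
	struct my_ctx *ctx = file->private_data;

	return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
}
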
/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * Call from the driver's mmap() function. Handles mmap() for both queues
 * seamlessly for the videobuf layer, which sees normal per-queue offsets and
 * proper videobuf queue pointers. The differentiation is made outside
 * videobuf by adding a predefined offset to buffers from one of the queues
 * and subtracting it before passing it back to videobuf. Only drivers (and
 * thus applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct videobuf_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return videobuf_mmap_mapper(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

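/*
 * Sketch of the matching mmap() file operation (illustration only): the
 * offset-based queue selection above stays completely hidden from the
 * driver.
 */
static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_ctx *ctx = file->private_data;

	return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
}
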
/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * Usually called from driver's probe() function.
 */
struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops)
		return ERR_PTR(-EINVAL);

	BUG_ON(!m2m_ops->device_run);
	BUG_ON(!m2m_ops->job_abort);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);

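/*
 * Sketch of per-driver setup (illustration only): device_run starts the
 * hardware on the next ready source/destination pair and job_abort asks it
 * to stop. my_hw_start() and my_hw_abort() are hypothetical stand-ins for
 * driver-specific device programming; my_job_ready() is the sketch from
 * earlier in this file.
 */
static void my_device_run(void *priv)
{
	struct my_ctx *ctx = priv;
	struct videobuf_buffer *src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	struct videobuf_buffer *dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);

	my_hw_start(ctx->dev, src_vb, dst_vb);
}

static void my_job_abort(void *priv)
{
	struct my_ctx *ctx = priv;

	/* Ask the hardware to stop; my_irq_handler() will finish the job */
	my_hw_abort(ctx->dev);
}

static struct v4l2_m2m_ops my_m2m_ops = {
	.device_run	= my_device_run,
	.job_ready	= my_job_ready,
	.job_abort	= my_job_abort,
};

/* Called from the driver's probe() */
static int my_setup(struct my_dev *dev)
{
	dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
	if (IS_ERR(dev->m2m_dev))
		return PTR_ERR(dev->m2m_dev);

	return 0;
}
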
/**
 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
 *
 * Usually called from driver's remove() function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
 * @priv:	driver's instance private data
 * @m2m_dev:	a previously initialized m2m_dev struct
 * @vq_init:	a callback for queue type-specific initialization function to
 *		be used for initializing videobuf_queues
 *
 * Usually called from driver's open() function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(void *priv, struct v4l2_m2m_dev *m2m_dev,
		void (*vq_init)(void *priv, struct videobuf_queue *,
				enum v4l2_buf_type))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;

	if (!vq_init)
		return ERR_PTR(-EINVAL);

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = priv;
	m2m_ctx->m2m_dev = m2m_dev;

	out_q_ctx = get_queue_ctx(m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
	cap_q_ctx = get_queue_ctx(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	vq_init(priv, &out_q_ctx->q, V4L2_BUF_TYPE_VIDEO_OUTPUT);
	vq_init(priv, &cap_q_ctx->q, V4L2_BUF_TYPE_VIDEO_CAPTURE);
	out_q_ctx->q.priv_data = cap_q_ctx->q.priv_data = priv;

	return m2m_ctx;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);

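/*
 * Sketch of an open() file operation (illustration only): allocate the
 * driver's per-instance context and hand it to v4l2_m2m_ctx_init() together
 * with a queue-initialization callback. The callback body is driver-specific
 * and would normally call one of the videobuf_queue_*_init() helpers with
 * type-specific videobuf_queue_ops; it is left empty here.
 */
static void my_vq_init(void *priv, struct videobuf_queue *vq,
		       enum v4l2_buf_type type)
{
	/* e.g. videobuf_queue_dma_contig_init(vq, ..., type, ...) */
}

static int my_open(struct file *file)
{
	struct my_dev *dev = video_drvdata(file);
	struct my_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;
	ctx->m2m_ctx = v4l2_m2m_ctx_init(ctx, dev->m2m_dev, my_vq_init);
	if (IS_ERR(ctx->m2m_ctx)) {
		int ret = PTR_ERR(ctx->m2m_ctx);

		kfree(ctx);
		return ret;
	}

	file->private_data = ctx;

	return 0;
}
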
/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct videobuf_buffer *vb;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		vb = v4l2_m2m_next_dst_buf(m2m_ctx);
		BUG_ON(NULL == vb);
		wait_event(vb->done, vb->state != VIDEOBUF_ACTIVE
				     && vb->state != VIDEOBUF_QUEUED);
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}

	videobuf_stop(&m2m_ctx->cap_q_ctx.q);
	videobuf_stop(&m2m_ctx->out_q_ctx.q);

	videobuf_mmap_free(&m2m_ctx->cap_q_ctx.q);
	videobuf_mmap_free(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

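/*
 * Sketch of the matching release() file operation (illustration only),
 * undoing the open() sketch above:
 */
static int my_release(struct file *file)
{
	struct my_ctx *ctx = file->private_data;

	v4l2_m2m_ctx_release(ctx->m2m_ctx);
	kfree(ctx);

	return 0;
}
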
/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list
 *
 * Call from the buf_queue() videobuf_queue_ops callback.
 *
 * Locking: Caller holds q->irqlock (taken by videobuf before calling the
 * buf_queue callback in the driver).
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct videobuf_queue *vq,
			struct videobuf_buffer *vb)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, vq->type);
	if (!q_ctx)
		return;

	list_add_tail(&vb->queue, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;

	vb->state = VIDEOBUF_QUEUED;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
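
/*
 * Sketch of the driver's buf_queue videobuf_queue_ops callback delegating to
 * v4l2_m2m_buf_queue() (illustration only); vq->priv_data was set to the
 * driver context by v4l2_m2m_ctx_init().
 */
static void my_buf_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
{
	struct my_ctx *ctx = vq->priv_data;

	v4l2_m2m_buf_queue(ctx->m2m_ctx, vq, vb);
}
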
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
new file mode 100644
index 000000000000..8d149f1c58d0
--- /dev/null
+++ b/include/media/v4l2-mem2mem.h
@@ -0,0 +1,201 @@
/*
 * Memory-to-memory device framework for Video for Linux 2.
 *
 * Helper functions for devices that use memory buffers for both source
 * and destination.
 *
 * Copyright (c) 2009 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <p.osciak@samsung.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#ifndef _MEDIA_V4L2_MEM2MEM_H
#define _MEDIA_V4L2_MEM2MEM_H

#include <media/videobuf-core.h>

/**
 * struct v4l2_m2m_ops - mem-to-mem device driver callbacks
 * @device_run:	required. Begin the actual job (transaction) inside this
 *		callback.
 *		The job does NOT have to end before this callback returns
 *		(and it will be the usual case). When the job finishes,
 *		v4l2_m2m_job_finish() has to be called.
 * @job_ready:	optional. Should return 0 if the driver does not have a job
 *		fully prepared to run yet (i.e. it will not be able to finish a
 *		transaction without sleeping). If not provided, it will be
 *		assumed that one source and one destination buffer are all
 *		that is required for the driver to perform one full transaction.
 *		This method may not sleep.
 * @job_abort:	required. Informs the driver that it has to abort the currently
 *		running transaction as soon as possible (i.e. as soon as it can
 *		stop the device safely; e.g. in the next interrupt handler),
 *		even if the transaction would not have been finished by then.
 *		After the driver performs the necessary steps, it has to call
 *		v4l2_m2m_job_finish() (as if the transaction ended normally).
 *		This function does not have to (and will usually not) wait
 *		until the device enters a state when it can be stopped.
 */
struct v4l2_m2m_ops {
	void (*device_run)(void *priv);
	int (*job_ready)(void *priv);
	void (*job_abort)(void *priv);
};

struct v4l2_m2m_dev;

struct v4l2_m2m_queue_ctx {
/* private: internal use only */
	struct videobuf_queue	q;

	/* Queue for buffers ready to be processed as soon as this
	 * instance receives access to the device */
	struct list_head	rdy_queue;
	u8			num_rdy;
};

struct v4l2_m2m_ctx {
/* private: internal use only */
	struct v4l2_m2m_dev		*m2m_dev;

	/* Capture (output to memory) queue context */
	struct v4l2_m2m_queue_ctx	cap_q_ctx;

	/* Output (input from memory) queue context */
	struct v4l2_m2m_queue_ctx	out_q_ctx;

	/* For device job queue */
	struct list_head		queue;
	unsigned long			job_flags;

	/* Instance private data */
	void				*priv;
};

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev);

struct videobuf_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				       enum v4l2_buf_type type);

void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx);

int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs);

int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf);

int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf);
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf);

int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type);
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type);

unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait);

int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma);

struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops);
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);

struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(void *priv, struct v4l2_m2m_dev *m2m_dev,
		void (*vq_init)(void *priv, struct videobuf_queue *,
				enum v4l2_buf_type));
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);

void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct videobuf_queue *vq,
			struct videobuf_buffer *vb);

/**
 * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready
 * for use
 */
static inline
unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->out_q_ctx.num_rdy;
}

/**
 * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers
 * ready for use
 */
static inline
unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->cap_q_ctx.num_rdy;
}

void *v4l2_m2m_next_buf(struct v4l2_m2m_ctx *m2m_ctx, enum v4l2_buf_type type);

/**
 * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
 * buffers
 */
static inline void *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_next_buf(m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
}

/**
 * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of
 * ready buffers
 */
static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_next_buf(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
}

/**
 * v4l2_m2m_get_src_vq() - return videobuf_queue for source buffers
 */
static inline
struct videobuf_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
}

/**
 * v4l2_m2m_get_dst_vq() - return videobuf_queue for destination buffers
 */
static inline
struct videobuf_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
}

void *v4l2_m2m_buf_remove(struct v4l2_m2m_ctx *m2m_ctx,
			  enum v4l2_buf_type type);

/**
 * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
 * buffers and return it
 */
static inline void *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
}

/**
 * v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of
 * ready buffers and return it
 */
static inline void *v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
}

#endif /* _MEDIA_V4L2_MEM2MEM_H */