aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorHans Verkuil <hans.verkuil@cisco.com>2014-01-29 09:53:25 -0500
committerMauro Carvalho Chehab <m.chehab@samsung.com>2014-03-11 05:56:37 -0400
commitb5b4541eef8eac83f5c0d166d8e494f7c9fff202 (patch)
treef4bc458dfff48a3bd2236f235fa3249c356e2db8
parent952c9ee2900de152c4999d94da5c4bd846ae52e8 (diff)
[media] vb2: add debugging code to check for unbalanced ops
When a vb2_queue is freed check if all the mem_ops and queue ops were balanced. So the number of calls to e.g. buf_finish has to match the number of calls to buf_prepare, etc. This code is only enabled if CONFIG_VIDEO_ADV_DEBUG is set. Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com> Acked-by: Pawel Osciak <pawel@osciak.com> Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com> Signed-off-by: Mauro Carvalho Chehab <m.chehab@samsung.com>
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c233
-rw-r--r--include/media/videobuf2-core.h43
2 files changed, 226 insertions, 50 deletions
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 1dc11eda85e7..917b1cbb5cbf 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -33,12 +33,63 @@ module_param(debug, int, 0644);
33 printk(KERN_DEBUG "vb2: " fmt, ## arg); \ 33 printk(KERN_DEBUG "vb2: " fmt, ## arg); \
34 } while (0) 34 } while (0)
35 35
36#define call_memop(q, op, args...) \ 36#ifdef CONFIG_VIDEO_ADV_DEBUG
37 (((q)->mem_ops->op) ? \ 37
38 ((q)->mem_ops->op(args)) : 0) 38/*
39 * If advanced debugging is on, then count how often each op is called,
40 * which can either be per-buffer or per-queue.
41 *
42 * If the op failed then the 'fail_' variant is called to decrease the
43 * counter. That makes it easy to check that the 'init' and 'cleanup'
44 * (and variations thereof) stay balanced.
45 */
46
47#define call_memop(vb, op, args...) \
48({ \
49 struct vb2_queue *_q = (vb)->vb2_queue; \
50 dprintk(2, "call_memop(%p, %d, %s)%s\n", \
51 _q, (vb)->v4l2_buf.index, #op, \
52 _q->mem_ops->op ? "" : " (nop)"); \
53 (vb)->cnt_mem_ ## op++; \
54 _q->mem_ops->op ? _q->mem_ops->op(args) : 0; \
55})
56#define fail_memop(vb, op) ((vb)->cnt_mem_ ## op--)
57
58#define call_qop(q, op, args...) \
59({ \
60 dprintk(2, "call_qop(%p, %s)%s\n", q, #op, \
61 (q)->ops->op ? "" : " (nop)"); \
62 (q)->cnt_ ## op++; \
63 (q)->ops->op ? (q)->ops->op(args) : 0; \
64})
65#define fail_qop(q, op) ((q)->cnt_ ## op--)
66
67#define call_vb_qop(vb, op, args...) \
68({ \
69 struct vb2_queue *_q = (vb)->vb2_queue; \
70 dprintk(2, "call_vb_qop(%p, %d, %s)%s\n", \
71 _q, (vb)->v4l2_buf.index, #op, \
72 _q->ops->op ? "" : " (nop)"); \
73 (vb)->cnt_ ## op++; \
74 _q->ops->op ? _q->ops->op(args) : 0; \
75})
76#define fail_vb_qop(vb, op) ((vb)->cnt_ ## op--)
77
78#else
79
80#define call_memop(vb, op, args...) \
81 ((vb)->vb2_queue->mem_ops->op ? (vb)->vb2_queue->mem_ops->op(args) : 0)
82#define fail_memop(vb, op)
39 83
40#define call_qop(q, op, args...) \ 84#define call_qop(q, op, args...) \
41 (((q)->ops->op) ? ((q)->ops->op(args)) : 0) 85 ((q)->ops->op ? (q)->ops->op(args) : 0)
86#define fail_qop(q, op)
87
88#define call_vb_qop(vb, op, args...) \
89 ((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)
90#define fail_vb_qop(vb, op)
91
92#endif
42 93
43/* Flags that are set by the vb2 core */ 94/* Flags that are set by the vb2 core */
44#define V4L2_BUFFER_MASK_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \ 95#define V4L2_BUFFER_MASK_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
@@ -65,7 +116,7 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
65 for (plane = 0; plane < vb->num_planes; ++plane) { 116 for (plane = 0; plane < vb->num_planes; ++plane) {
66 unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]); 117 unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]);
67 118
68 mem_priv = call_memop(q, alloc, q->alloc_ctx[plane], 119 mem_priv = call_memop(vb, alloc, q->alloc_ctx[plane],
69 size, q->gfp_flags); 120 size, q->gfp_flags);
70 if (IS_ERR_OR_NULL(mem_priv)) 121 if (IS_ERR_OR_NULL(mem_priv))
71 goto free; 122 goto free;
@@ -77,9 +128,10 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
77 128
78 return 0; 129 return 0;
79free: 130free:
131 fail_memop(vb, alloc);
80 /* Free already allocated memory if one of the allocations failed */ 132 /* Free already allocated memory if one of the allocations failed */
81 for (; plane > 0; --plane) { 133 for (; plane > 0; --plane) {
82 call_memop(q, put, vb->planes[plane - 1].mem_priv); 134 call_memop(vb, put, vb->planes[plane - 1].mem_priv);
83 vb->planes[plane - 1].mem_priv = NULL; 135 vb->planes[plane - 1].mem_priv = NULL;
84 } 136 }
85 137
@@ -91,11 +143,10 @@ free:
91 */ 143 */
92static void __vb2_buf_mem_free(struct vb2_buffer *vb) 144static void __vb2_buf_mem_free(struct vb2_buffer *vb)
93{ 145{
94 struct vb2_queue *q = vb->vb2_queue;
95 unsigned int plane; 146 unsigned int plane;
96 147
97 for (plane = 0; plane < vb->num_planes; ++plane) { 148 for (plane = 0; plane < vb->num_planes; ++plane) {
98 call_memop(q, put, vb->planes[plane].mem_priv); 149 call_memop(vb, put, vb->planes[plane].mem_priv);
99 vb->planes[plane].mem_priv = NULL; 150 vb->planes[plane].mem_priv = NULL;
100 dprintk(3, "Freed plane %d of buffer %d\n", plane, 151 dprintk(3, "Freed plane %d of buffer %d\n", plane,
101 vb->v4l2_buf.index); 152 vb->v4l2_buf.index);
@@ -108,12 +159,11 @@ static void __vb2_buf_mem_free(struct vb2_buffer *vb)
108 */ 159 */
109static void __vb2_buf_userptr_put(struct vb2_buffer *vb) 160static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
110{ 161{
111 struct vb2_queue *q = vb->vb2_queue;
112 unsigned int plane; 162 unsigned int plane;
113 163
114 for (plane = 0; plane < vb->num_planes; ++plane) { 164 for (plane = 0; plane < vb->num_planes; ++plane) {
115 if (vb->planes[plane].mem_priv) 165 if (vb->planes[plane].mem_priv)
116 call_memop(q, put_userptr, vb->planes[plane].mem_priv); 166 call_memop(vb, put_userptr, vb->planes[plane].mem_priv);
117 vb->planes[plane].mem_priv = NULL; 167 vb->planes[plane].mem_priv = NULL;
118 } 168 }
119} 169}
@@ -122,15 +172,15 @@ static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
122 * __vb2_plane_dmabuf_put() - release memory associated with 172 * __vb2_plane_dmabuf_put() - release memory associated with
123 * a DMABUF shared plane 173 * a DMABUF shared plane
124 */ 174 */
125static void __vb2_plane_dmabuf_put(struct vb2_queue *q, struct vb2_plane *p) 175static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
126{ 176{
127 if (!p->mem_priv) 177 if (!p->mem_priv)
128 return; 178 return;
129 179
130 if (p->dbuf_mapped) 180 if (p->dbuf_mapped)
131 call_memop(q, unmap_dmabuf, p->mem_priv); 181 call_memop(vb, unmap_dmabuf, p->mem_priv);
132 182
133 call_memop(q, detach_dmabuf, p->mem_priv); 183 call_memop(vb, detach_dmabuf, p->mem_priv);
134 dma_buf_put(p->dbuf); 184 dma_buf_put(p->dbuf);
135 memset(p, 0, sizeof(*p)); 185 memset(p, 0, sizeof(*p));
136} 186}
@@ -141,11 +191,10 @@ static void __vb2_plane_dmabuf_put(struct vb2_queue *q, struct vb2_plane *p)
141 */ 191 */
142static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb) 192static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
143{ 193{
144 struct vb2_queue *q = vb->vb2_queue;
145 unsigned int plane; 194 unsigned int plane;
146 195
147 for (plane = 0; plane < vb->num_planes; ++plane) 196 for (plane = 0; plane < vb->num_planes; ++plane)
148 __vb2_plane_dmabuf_put(q, &vb->planes[plane]); 197 __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
149} 198}
150 199
151/** 200/**
@@ -250,10 +299,11 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
250 * callback, if given. An error in initialization 299 * callback, if given. An error in initialization
251 * results in queue setup failure. 300 * results in queue setup failure.
252 */ 301 */
253 ret = call_qop(q, buf_init, vb); 302 ret = call_vb_qop(vb, buf_init, vb);
254 if (ret) { 303 if (ret) {
255 dprintk(1, "Buffer %d %p initialization" 304 dprintk(1, "Buffer %d %p initialization"
256 " failed\n", buffer, vb); 305 " failed\n", buffer, vb);
306 fail_vb_qop(vb, buf_init);
257 __vb2_buf_mem_free(vb); 307 __vb2_buf_mem_free(vb);
258 kfree(vb); 308 kfree(vb);
259 break; 309 break;
@@ -325,18 +375,77 @@ static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
325 } 375 }
326 376
327 /* Call driver-provided cleanup function for each buffer, if provided */ 377 /* Call driver-provided cleanup function for each buffer, if provided */
328 if (q->ops->buf_cleanup) { 378 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
329 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; 379 ++buffer) {
330 ++buffer) { 380 if (q->bufs[buffer])
331 if (NULL == q->bufs[buffer]) 381 call_vb_qop(q->bufs[buffer], buf_cleanup, q->bufs[buffer]);
332 continue;
333 q->ops->buf_cleanup(q->bufs[buffer]);
334 }
335 } 382 }
336 383
337 /* Release video buffer memory */ 384 /* Release video buffer memory */
338 __vb2_free_mem(q, buffers); 385 __vb2_free_mem(q, buffers);
339 386
387#ifdef CONFIG_VIDEO_ADV_DEBUG
388 /*
389	 * Check that all the calls were balanced during the life-time of this
390 * queue. If not (or if the debug level is 1 or up), then dump the
391 * counters to the kernel log.
392 */
393 if (q->num_buffers) {
394 bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
395 q->cnt_wait_prepare != q->cnt_wait_finish;
396
397 if (unbalanced || debug) {
398 pr_info("vb2: counters for queue %p:%s\n", q,
399 unbalanced ? " UNBALANCED!" : "");
400 pr_info("vb2: setup: %u start_streaming: %u stop_streaming: %u\n",
401 q->cnt_queue_setup, q->cnt_start_streaming,
402 q->cnt_stop_streaming);
403 pr_info("vb2: wait_prepare: %u wait_finish: %u\n",
404 q->cnt_wait_prepare, q->cnt_wait_finish);
405 }
406 q->cnt_queue_setup = 0;
407 q->cnt_wait_prepare = 0;
408 q->cnt_wait_finish = 0;
409 q->cnt_start_streaming = 0;
410 q->cnt_stop_streaming = 0;
411 }
412 for (buffer = 0; buffer < q->num_buffers; ++buffer) {
413 struct vb2_buffer *vb = q->bufs[buffer];
414 bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
415 vb->cnt_mem_prepare != vb->cnt_mem_finish ||
416 vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
417 vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
418 vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
419 vb->cnt_buf_queue != vb->cnt_buf_done ||
420 vb->cnt_buf_prepare != vb->cnt_buf_finish ||
421 vb->cnt_buf_init != vb->cnt_buf_cleanup;
422
423 if (unbalanced || debug) {
424 pr_info("vb2: counters for queue %p, buffer %d:%s\n",
425 q, buffer, unbalanced ? " UNBALANCED!" : "");
426 pr_info("vb2: buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
427 vb->cnt_buf_init, vb->cnt_buf_cleanup,
428 vb->cnt_buf_prepare, vb->cnt_buf_finish);
429 pr_info("vb2: buf_queue: %u buf_done: %u\n",
430 vb->cnt_buf_queue, vb->cnt_buf_done);
431 pr_info("vb2: alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
432 vb->cnt_mem_alloc, vb->cnt_mem_put,
433 vb->cnt_mem_prepare, vb->cnt_mem_finish,
434 vb->cnt_mem_mmap);
435 pr_info("vb2: get_userptr: %u put_userptr: %u\n",
436 vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
437 pr_info("vb2: attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n",
438 vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf,
439 vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
440 pr_info("vb2: get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n",
441 vb->cnt_mem_get_dmabuf,
442 vb->cnt_mem_num_users,
443 vb->cnt_mem_vaddr,
444 vb->cnt_mem_cookie);
445 }
446 }
447#endif
448
340 /* Free videobuf buffers */ 449 /* Free videobuf buffers */
341 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; 450 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
342 ++buffer) { 451 ++buffer) {
@@ -428,7 +537,7 @@ static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
428 * case anyway. If num_users() returns more than 1, 537 * case anyway. If num_users() returns more than 1,
429 * we are not the only user of the plane's memory. 538 * we are not the only user of the plane's memory.
430 */ 539 */
431 if (mem_priv && call_memop(q, num_users, mem_priv) > 1) 540 if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)
432 return true; 541 return true;
433 } 542 }
434 return false; 543 return false;
@@ -716,8 +825,10 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
716 */ 825 */
717 ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes, 826 ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes,
718 q->plane_sizes, q->alloc_ctx); 827 q->plane_sizes, q->alloc_ctx);
719 if (ret) 828 if (ret) {
829 fail_qop(q, queue_setup);
720 return ret; 830 return ret;
831 }
721 832
722 /* Finally, allocate buffers and video memory */ 833 /* Finally, allocate buffers and video memory */
723 ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes); 834 ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes);
@@ -736,6 +847,8 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
736 847
737 ret = call_qop(q, queue_setup, q, NULL, &num_buffers, 848 ret = call_qop(q, queue_setup, q, NULL, &num_buffers,
738 &num_planes, q->plane_sizes, q->alloc_ctx); 849 &num_planes, q->plane_sizes, q->alloc_ctx);
850 if (ret)
851 fail_qop(q, queue_setup);
739 852
740 if (!ret && allocated_buffers < num_buffers) 853 if (!ret && allocated_buffers < num_buffers)
741 ret = -ENOMEM; 854 ret = -ENOMEM;
@@ -816,8 +929,10 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create
816 */ 929 */
817 ret = call_qop(q, queue_setup, q, &create->format, &num_buffers, 930 ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
818 &num_planes, q->plane_sizes, q->alloc_ctx); 931 &num_planes, q->plane_sizes, q->alloc_ctx);
819 if (ret) 932 if (ret) {
933 fail_qop(q, queue_setup);
820 return ret; 934 return ret;
935 }
821 936
822 /* Finally, allocate buffers and video memory */ 937 /* Finally, allocate buffers and video memory */
823 ret = __vb2_queue_alloc(q, create->memory, num_buffers, 938 ret = __vb2_queue_alloc(q, create->memory, num_buffers,
@@ -841,6 +956,8 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create
841 */ 956 */
842 ret = call_qop(q, queue_setup, q, &create->format, &num_buffers, 957 ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
843 &num_planes, q->plane_sizes, q->alloc_ctx); 958 &num_planes, q->plane_sizes, q->alloc_ctx);
959 if (ret)
960 fail_qop(q, queue_setup);
844 961
845 if (!ret && allocated_buffers < num_buffers) 962 if (!ret && allocated_buffers < num_buffers)
846 ret = -ENOMEM; 963 ret = -ENOMEM;
@@ -895,12 +1012,10 @@ EXPORT_SYMBOL_GPL(vb2_create_bufs);
895 */ 1012 */
896void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no) 1013void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
897{ 1014{
898 struct vb2_queue *q = vb->vb2_queue;
899
900 if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv) 1015 if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
901 return NULL; 1016 return NULL;
902 1017
903 return call_memop(q, vaddr, vb->planes[plane_no].mem_priv); 1018 return call_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
904 1019
905} 1020}
906EXPORT_SYMBOL_GPL(vb2_plane_vaddr); 1021EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
@@ -918,12 +1033,10 @@ EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
918 */ 1033 */
919void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no) 1034void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
920{ 1035{
921 struct vb2_queue *q = vb->vb2_queue;
922
923 if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv) 1036 if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
924 return NULL; 1037 return NULL;
925 1038
926 return call_memop(q, cookie, vb->planes[plane_no].mem_priv); 1039 return call_memop(vb, cookie, vb->planes[plane_no].mem_priv);
927} 1040}
928EXPORT_SYMBOL_GPL(vb2_plane_cookie); 1041EXPORT_SYMBOL_GPL(vb2_plane_cookie);
929 1042
@@ -951,12 +1064,19 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
951 if (state != VB2_BUF_STATE_DONE && state != VB2_BUF_STATE_ERROR) 1064 if (state != VB2_BUF_STATE_DONE && state != VB2_BUF_STATE_ERROR)
952 return; 1065 return;
953 1066
1067#ifdef CONFIG_VIDEO_ADV_DEBUG
1068 /*
1069 * Although this is not a callback, it still does have to balance
1070 * with the buf_queue op. So update this counter manually.
1071 */
1072 vb->cnt_buf_done++;
1073#endif
954 dprintk(4, "Done processing on buffer %d, state: %d\n", 1074 dprintk(4, "Done processing on buffer %d, state: %d\n",
955 vb->v4l2_buf.index, state); 1075 vb->v4l2_buf.index, state);
956 1076
957 /* sync buffers */ 1077 /* sync buffers */
958 for (plane = 0; plane < vb->num_planes; ++plane) 1078 for (plane = 0; plane < vb->num_planes; ++plane)
959 call_memop(q, finish, vb->planes[plane].mem_priv); 1079 call_memop(vb, finish, vb->planes[plane].mem_priv);
960 1080
961 /* Add the buffer to the done buffers list */ 1081 /* Add the buffer to the done buffers list */
962 spin_lock_irqsave(&q->done_lock, flags); 1082 spin_lock_irqsave(&q->done_lock, flags);
@@ -1102,19 +1222,20 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1102 1222
1103 /* Release previously acquired memory if present */ 1223 /* Release previously acquired memory if present */
1104 if (vb->planes[plane].mem_priv) 1224 if (vb->planes[plane].mem_priv)
1105 call_memop(q, put_userptr, vb->planes[plane].mem_priv); 1225 call_memop(vb, put_userptr, vb->planes[plane].mem_priv);
1106 1226
1107 vb->planes[plane].mem_priv = NULL; 1227 vb->planes[plane].mem_priv = NULL;
1108 vb->v4l2_planes[plane].m.userptr = 0; 1228 vb->v4l2_planes[plane].m.userptr = 0;
1109 vb->v4l2_planes[plane].length = 0; 1229 vb->v4l2_planes[plane].length = 0;
1110 1230
1111 /* Acquire each plane's memory */ 1231 /* Acquire each plane's memory */
1112 mem_priv = call_memop(q, get_userptr, q->alloc_ctx[plane], 1232 mem_priv = call_memop(vb, get_userptr, q->alloc_ctx[plane],
1113 planes[plane].m.userptr, 1233 planes[plane].m.userptr,
1114 planes[plane].length, write); 1234 planes[plane].length, write);
1115 if (IS_ERR_OR_NULL(mem_priv)) { 1235 if (IS_ERR_OR_NULL(mem_priv)) {
1116 dprintk(1, "qbuf: failed acquiring userspace " 1236 dprintk(1, "qbuf: failed acquiring userspace "
1117 "memory for plane %d\n", plane); 1237 "memory for plane %d\n", plane);
1238 fail_memop(vb, get_userptr);
1118 ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL; 1239 ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;
1119 goto err; 1240 goto err;
1120 } 1241 }
@@ -1125,9 +1246,10 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1125 * Call driver-specific initialization on the newly acquired buffer, 1246 * Call driver-specific initialization on the newly acquired buffer,
1126 * if provided. 1247 * if provided.
1127 */ 1248 */
1128 ret = call_qop(q, buf_init, vb); 1249 ret = call_vb_qop(vb, buf_init, vb);
1129 if (ret) { 1250 if (ret) {
1130 dprintk(1, "qbuf: buffer initialization failed\n"); 1251 dprintk(1, "qbuf: buffer initialization failed\n");
1252 fail_vb_qop(vb, buf_init);
1131 goto err; 1253 goto err;
1132 } 1254 }
1133 1255
@@ -1143,7 +1265,7 @@ err:
1143 /* In case of errors, release planes that were already acquired */ 1265 /* In case of errors, release planes that were already acquired */
1144 for (plane = 0; plane < vb->num_planes; ++plane) { 1266 for (plane = 0; plane < vb->num_planes; ++plane) {
1145 if (vb->planes[plane].mem_priv) 1267 if (vb->planes[plane].mem_priv)
1146 call_memop(q, put_userptr, vb->planes[plane].mem_priv); 1268 call_memop(vb, put_userptr, vb->planes[plane].mem_priv);
1147 vb->planes[plane].mem_priv = NULL; 1269 vb->planes[plane].mem_priv = NULL;
1148 vb->v4l2_planes[plane].m.userptr = 0; 1270 vb->v4l2_planes[plane].m.userptr = 0;
1149 vb->v4l2_planes[plane].length = 0; 1271 vb->v4l2_planes[plane].length = 0;
@@ -1208,14 +1330,15 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1208 dprintk(1, "qbuf: buffer for plane %d changed\n", plane); 1330 dprintk(1, "qbuf: buffer for plane %d changed\n", plane);
1209 1331
1210 /* Release previously acquired memory if present */ 1332 /* Release previously acquired memory if present */
1211 __vb2_plane_dmabuf_put(q, &vb->planes[plane]); 1333 __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
1212 memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane)); 1334 memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
1213 1335
1214 /* Acquire each plane's memory */ 1336 /* Acquire each plane's memory */
1215 mem_priv = call_memop(q, attach_dmabuf, q->alloc_ctx[plane], 1337 mem_priv = call_memop(vb, attach_dmabuf, q->alloc_ctx[plane],
1216 dbuf, planes[plane].length, write); 1338 dbuf, planes[plane].length, write);
1217 if (IS_ERR(mem_priv)) { 1339 if (IS_ERR(mem_priv)) {
1218 dprintk(1, "qbuf: failed to attach dmabuf\n"); 1340 dprintk(1, "qbuf: failed to attach dmabuf\n");
1341 fail_memop(vb, attach_dmabuf);
1219 ret = PTR_ERR(mem_priv); 1342 ret = PTR_ERR(mem_priv);
1220 dma_buf_put(dbuf); 1343 dma_buf_put(dbuf);
1221 goto err; 1344 goto err;
@@ -1230,10 +1353,11 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1230 * the buffer(s).. 1353 * the buffer(s)..
1231 */ 1354 */
1232 for (plane = 0; plane < vb->num_planes; ++plane) { 1355 for (plane = 0; plane < vb->num_planes; ++plane) {
1233 ret = call_memop(q, map_dmabuf, vb->planes[plane].mem_priv); 1356 ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
1234 if (ret) { 1357 if (ret) {
1235 dprintk(1, "qbuf: failed to map dmabuf for plane %d\n", 1358 dprintk(1, "qbuf: failed to map dmabuf for plane %d\n",
1236 plane); 1359 plane);
1360 fail_memop(vb, map_dmabuf);
1237 goto err; 1361 goto err;
1238 } 1362 }
1239 vb->planes[plane].dbuf_mapped = 1; 1363 vb->planes[plane].dbuf_mapped = 1;
@@ -1243,9 +1367,10 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1243 * Call driver-specific initialization on the newly acquired buffer, 1367 * Call driver-specific initialization on the newly acquired buffer,
1244 * if provided. 1368 * if provided.
1245 */ 1369 */
1246 ret = call_qop(q, buf_init, vb); 1370 ret = call_vb_qop(vb, buf_init, vb);
1247 if (ret) { 1371 if (ret) {
1248 dprintk(1, "qbuf: buffer initialization failed\n"); 1372 dprintk(1, "qbuf: buffer initialization failed\n");
1373 fail_vb_qop(vb, buf_init);
1249 goto err; 1374 goto err;
1250 } 1375 }
1251 1376
@@ -1277,9 +1402,9 @@ static void __enqueue_in_driver(struct vb2_buffer *vb)
1277 1402
1278 /* sync buffers */ 1403 /* sync buffers */
1279 for (plane = 0; plane < vb->num_planes; ++plane) 1404 for (plane = 0; plane < vb->num_planes; ++plane)
1280 call_memop(q, prepare, vb->planes[plane].mem_priv); 1405 call_memop(vb, prepare, vb->planes[plane].mem_priv);
1281 1406
1282 q->ops->buf_queue(vb); 1407 call_vb_qop(vb, buf_queue, vb);
1283} 1408}
1284 1409
1285static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b) 1410static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
@@ -1334,8 +1459,11 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1334 ret = -EINVAL; 1459 ret = -EINVAL;
1335 } 1460 }
1336 1461
1337 if (!ret) 1462 if (!ret) {
1338 ret = call_qop(q, buf_prepare, vb); 1463 ret = call_vb_qop(vb, buf_prepare, vb);
1464 if (ret)
1465 fail_vb_qop(vb, buf_prepare);
1466 }
1339 if (ret) 1467 if (ret)
1340 dprintk(1, "qbuf: buffer preparation failed: %d\n", ret); 1468 dprintk(1, "qbuf: buffer preparation failed: %d\n", ret);
1341 vb->state = ret ? VB2_BUF_STATE_DEQUEUED : VB2_BUF_STATE_PREPARED; 1469 vb->state = ret ? VB2_BUF_STATE_DEQUEUED : VB2_BUF_STATE_PREPARED;
@@ -1432,6 +1560,8 @@ static int vb2_start_streaming(struct vb2_queue *q)
1432 1560
1433 /* Tell the driver to start streaming */ 1561 /* Tell the driver to start streaming */
1434 ret = call_qop(q, start_streaming, q, atomic_read(&q->queued_count)); 1562 ret = call_qop(q, start_streaming, q, atomic_read(&q->queued_count));
1563 if (ret)
1564 fail_qop(q, start_streaming);
1435 1565
1436 /* 1566 /*
1437 * If there are not enough buffers queued to start streaming, then 1567 * If there are not enough buffers queued to start streaming, then
@@ -1686,7 +1816,7 @@ static void __vb2_dqbuf(struct vb2_buffer *vb)
1686 for (i = 0; i < vb->num_planes; ++i) { 1816 for (i = 0; i < vb->num_planes; ++i) {
1687 if (!vb->planes[i].dbuf_mapped) 1817 if (!vb->planes[i].dbuf_mapped)
1688 continue; 1818 continue;
1689 call_memop(q, unmap_dmabuf, vb->planes[i].mem_priv); 1819 call_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
1690 vb->planes[i].dbuf_mapped = 0; 1820 vb->planes[i].dbuf_mapped = 0;
1691 } 1821 }
1692} 1822}
@@ -1704,7 +1834,7 @@ static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool n
1704 if (ret < 0) 1834 if (ret < 0)
1705 return ret; 1835 return ret;
1706 1836
1707 ret = call_qop(q, buf_finish, vb); 1837 ret = call_vb_qop(vb, buf_finish, vb);
1708 if (ret) { 1838 if (ret) {
1709 dprintk(1, "dqbuf: buffer finish failed\n"); 1839 dprintk(1, "dqbuf: buffer finish failed\n");
1710 return ret; 1840 return ret;
@@ -2002,10 +2132,11 @@ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
2002 2132
2003 vb_plane = &vb->planes[eb->plane]; 2133 vb_plane = &vb->planes[eb->plane];
2004 2134
2005 dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE); 2135 dbuf = call_memop(vb, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE);
2006 if (IS_ERR_OR_NULL(dbuf)) { 2136 if (IS_ERR_OR_NULL(dbuf)) {
2007 dprintk(1, "Failed to export buffer %d, plane %d\n", 2137 dprintk(1, "Failed to export buffer %d, plane %d\n",
2008 eb->index, eb->plane); 2138 eb->index, eb->plane);
2139 fail_memop(vb, get_dmabuf);
2009 return -EINVAL; 2140 return -EINVAL;
2010 } 2141 }
2011 2142
@@ -2097,9 +2228,11 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
2097 return -EINVAL; 2228 return -EINVAL;
2098 } 2229 }
2099 2230
2100 ret = call_memop(q, mmap, vb->planes[plane].mem_priv, vma); 2231 ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
2101 if (ret) 2232 if (ret) {
2233 fail_memop(vb, mmap);
2102 return ret; 2234 return ret;
2235 }
2103 2236
2104 dprintk(3, "Buffer %d, plane %d successfully mapped\n", buffer, plane); 2237 dprintk(3, "Buffer %d, plane %d successfully mapped\n", buffer, plane);
2105 return 0; 2238 return 0;
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index bf6859ee46c3..2fdb08a78b95 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -203,6 +203,37 @@ struct vb2_buffer {
203 struct list_head done_entry; 203 struct list_head done_entry;
204 204
205 struct vb2_plane planes[VIDEO_MAX_PLANES]; 205 struct vb2_plane planes[VIDEO_MAX_PLANES];
206
207#ifdef CONFIG_VIDEO_ADV_DEBUG
208 /*
209 * Counters for how often these buffer-related ops are
210 * called. Used to check for unbalanced ops.
211 */
212 u32 cnt_mem_alloc;
213 u32 cnt_mem_put;
214 u32 cnt_mem_get_dmabuf;
215 u32 cnt_mem_get_userptr;
216 u32 cnt_mem_put_userptr;
217 u32 cnt_mem_prepare;
218 u32 cnt_mem_finish;
219 u32 cnt_mem_attach_dmabuf;
220 u32 cnt_mem_detach_dmabuf;
221 u32 cnt_mem_map_dmabuf;
222 u32 cnt_mem_unmap_dmabuf;
223 u32 cnt_mem_vaddr;
224 u32 cnt_mem_cookie;
225 u32 cnt_mem_num_users;
226 u32 cnt_mem_mmap;
227
228 u32 cnt_buf_init;
229 u32 cnt_buf_prepare;
230 u32 cnt_buf_finish;
231 u32 cnt_buf_cleanup;
232 u32 cnt_buf_queue;
233
234 /* This counts the number of calls to vb2_buffer_done() */
235 u32 cnt_buf_done;
236#endif
206}; 237};
207 238
208/** 239/**
@@ -366,6 +397,18 @@ struct vb2_queue {
366 unsigned int retry_start_streaming:1; 397 unsigned int retry_start_streaming:1;
367 398
368 struct vb2_fileio_data *fileio; 399 struct vb2_fileio_data *fileio;
400
401#ifdef CONFIG_VIDEO_ADV_DEBUG
402 /*
403 * Counters for how often these queue-related ops are
404 * called. Used to check for unbalanced ops.
405 */
406 u32 cnt_queue_setup;
407 u32 cnt_wait_prepare;
408 u32 cnt_wait_finish;
409 u32 cnt_start_streaming;
410 u32 cnt_stop_streaming;
411#endif
369}; 412};
370 413
371void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no); 414void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no);