 drivers/media/v4l2-core/videobuf2-core.c | 211
 1 file changed, 130 insertions(+), 81 deletions(-)
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index f9059bb73840..98ddeb6c05c1 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -36,58 +36,133 @@ module_param(debug, int, 0644);
 #ifdef CONFIG_VIDEO_ADV_DEBUG
 
 /*
- * If advanced debugging is on, then count how often each op is called,
- * which can either be per-buffer or per-queue.
+ * If advanced debugging is on, then count how often each op is called
+ * successfully, which can either be per-buffer or per-queue.
  *
- * If the op failed then the 'fail_' variant is called to decrease the
- * counter. That makes it easy to check that the 'init' and 'cleanup'
+ * This makes it easy to check that the 'init' and 'cleanup'
  * (and variations thereof) stay balanced.
  */
 
+#define log_memop(vb, op) \
+	dprintk(2, "call_memop(%p, %d, %s)%s\n", \
+		(vb)->vb2_queue, (vb)->v4l2_buf.index, #op, \
+		(vb)->vb2_queue->mem_ops->op ? "" : " (nop)")
+
 #define call_memop(vb, op, args...) \
 ({ \
 	struct vb2_queue *_q = (vb)->vb2_queue; \
-	dprintk(2, "call_memop(%p, %d, %s)%s\n", \
-		_q, (vb)->v4l2_buf.index, #op, \
-		_q->mem_ops->op ? "" : " (nop)"); \
+	int err; \
+	\
+	log_memop(vb, op); \
+	err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0; \
+	if (!err) \
+		(vb)->cnt_mem_ ## op++; \
+	err; \
+})
+
+#define call_ptr_memop(vb, op, args...) \
+({ \
+	struct vb2_queue *_q = (vb)->vb2_queue; \
+	void *ptr; \
+	\
+	log_memop(vb, op); \
+	ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL; \
+	if (!IS_ERR_OR_NULL(ptr)) \
+		(vb)->cnt_mem_ ## op++; \
+	ptr; \
+})
+
+#define call_void_memop(vb, op, args...) \
+({ \
+	struct vb2_queue *_q = (vb)->vb2_queue; \
+	\
+	log_memop(vb, op); \
+	if (_q->mem_ops->op) \
+		_q->mem_ops->op(args); \
 	(vb)->cnt_mem_ ## op++; \
-	_q->mem_ops->op ? _q->mem_ops->op(args) : 0; \
 })
-#define fail_memop(vb, op) ((vb)->cnt_mem_ ## op--)
+
+#define log_qop(q, op) \
+	dprintk(2, "call_qop(%p, %s)%s\n", q, #op, \
+		(q)->ops->op ? "" : " (nop)")
 
 #define call_qop(q, op, args...) \
 ({ \
-	dprintk(2, "call_qop(%p, %s)%s\n", q, #op, \
-		(q)->ops->op ? "" : " (nop)"); \
+	int err; \
+	\
+	log_qop(q, op); \
+	err = (q)->ops->op ? (q)->ops->op(args) : 0; \
+	if (!err) \
+		(q)->cnt_ ## op++; \
+	err; \
+})
+
+#define call_void_qop(q, op, args...) \
+({ \
+	log_qop(q, op); \
+	if ((q)->ops->op) \
+		(q)->ops->op(args); \
 	(q)->cnt_ ## op++; \
-	(q)->ops->op ? (q)->ops->op(args) : 0; \
 })
-#define fail_qop(q, op) ((q)->cnt_ ## op--)
+
+#define log_vb_qop(vb, op, args...) \
+	dprintk(2, "call_vb_qop(%p, %d, %s)%s\n", \
+		(vb)->vb2_queue, (vb)->v4l2_buf.index, #op, \
+		(vb)->vb2_queue->ops->op ? "" : " (nop)")
 
 #define call_vb_qop(vb, op, args...) \
 ({ \
-	struct vb2_queue *_q = (vb)->vb2_queue; \
-	dprintk(2, "call_vb_qop(%p, %d, %s)%s\n", \
-		_q, (vb)->v4l2_buf.index, #op, \
-		_q->ops->op ? "" : " (nop)"); \
+	int err; \
+	\
+	log_vb_qop(vb, op); \
+	err = (vb)->vb2_queue->ops->op ? \
+		(vb)->vb2_queue->ops->op(args) : 0; \
+	if (!err) \
+		(vb)->cnt_ ## op++; \
+	err; \
+})
+
+#define call_void_vb_qop(vb, op, args...) \
+({ \
+	log_vb_qop(vb, op); \
+	if ((vb)->vb2_queue->ops->op) \
+		(vb)->vb2_queue->ops->op(args); \
 	(vb)->cnt_ ## op++; \
-	_q->ops->op ? _q->ops->op(args) : 0; \
 })
-#define fail_vb_qop(vb, op) ((vb)->cnt_ ## op--)
 
 #else
 
 #define call_memop(vb, op, args...) \
-	((vb)->vb2_queue->mem_ops->op ? (vb)->vb2_queue->mem_ops->op(args) : 0)
-#define fail_memop(vb, op)
+	((vb)->vb2_queue->mem_ops->op ? \
+		(vb)->vb2_queue->mem_ops->op(args) : 0)
+
+#define call_ptr_memop(vb, op, args...) \
+	((vb)->vb2_queue->mem_ops->op ? \
+		(vb)->vb2_queue->mem_ops->op(args) : NULL)
+
+#define call_void_memop(vb, op, args...) \
+	do { \
+		if ((vb)->vb2_queue->mem_ops->op) \
+			(vb)->vb2_queue->mem_ops->op(args); \
+	} while (0)
 
 #define call_qop(q, op, args...) \
 	((q)->ops->op ? (q)->ops->op(args) : 0)
-#define fail_qop(q, op)
+
+#define call_void_qop(q, op, args...) \
+	do { \
+		if ((q)->ops->op) \
+			(q)->ops->op(args); \
+	} while (0)
 
 #define call_vb_qop(vb, op, args...) \
 	((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)
-#define fail_vb_qop(vb, op)
+
+#define call_void_vb_qop(vb, op, args...) \
+	do { \
+		if ((vb)->vb2_queue->ops->op) \
+			(vb)->vb2_queue->ops->op(args); \
+	} while (0)
 
 #endif
 
@@ -118,7 +193,7 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
 	for (plane = 0; plane < vb->num_planes; ++plane) {
 		unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]);
 
-		mem_priv = call_memop(vb, alloc, q->alloc_ctx[plane],
+		mem_priv = call_ptr_memop(vb, alloc, q->alloc_ctx[plane],
 				      size, q->gfp_flags);
 		if (IS_ERR_OR_NULL(mem_priv))
 			goto free;
@@ -130,10 +205,9 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
 
 	return 0;
 free:
-	fail_memop(vb, alloc);
 	/* Free already allocated memory if one of the allocations failed */
 	for (; plane > 0; --plane) {
-		call_memop(vb, put, vb->planes[plane - 1].mem_priv);
+		call_void_memop(vb, put, vb->planes[plane - 1].mem_priv);
 		vb->planes[plane - 1].mem_priv = NULL;
 	}
 
@@ -148,7 +222,7 @@ static void __vb2_buf_mem_free(struct vb2_buffer *vb)
 	unsigned int plane;
 
 	for (plane = 0; plane < vb->num_planes; ++plane) {
-		call_memop(vb, put, vb->planes[plane].mem_priv);
+		call_void_memop(vb, put, vb->planes[plane].mem_priv);
 		vb->planes[plane].mem_priv = NULL;
 		dprintk(3, "Freed plane %d of buffer %d\n", plane,
 			vb->v4l2_buf.index);
@@ -165,7 +239,7 @@ static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
 
 	for (plane = 0; plane < vb->num_planes; ++plane) {
 		if (vb->planes[plane].mem_priv)
-			call_memop(vb, put_userptr, vb->planes[plane].mem_priv);
+			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
 		vb->planes[plane].mem_priv = NULL;
 	}
 }
@@ -180,9 +254,9 @@ static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
 		return;
 
 	if (p->dbuf_mapped)
-		call_memop(vb, unmap_dmabuf, p->mem_priv);
+		call_void_memop(vb, unmap_dmabuf, p->mem_priv);
 
-	call_memop(vb, detach_dmabuf, p->mem_priv);
+	call_void_memop(vb, detach_dmabuf, p->mem_priv);
 	dma_buf_put(p->dbuf);
 	memset(p, 0, sizeof(*p));
 }
@@ -305,7 +379,6 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
 			if (ret) {
 				dprintk(1, "Buffer %d %p initialization"
 					" failed\n", buffer, vb);
-				fail_vb_qop(vb, buf_init);
 				__vb2_buf_mem_free(vb);
 				kfree(vb);
 				break;
@@ -382,7 +455,7 @@ static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
 		struct vb2_buffer *vb = q->bufs[buffer];
 
 		if (vb && vb->planes[0].mem_priv)
-			call_vb_qop(vb, buf_cleanup, vb);
+			call_void_vb_qop(vb, buf_cleanup, vb);
 	}
 
 	/* Release video buffer memory */
@@ -837,10 +910,8 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
 	 */
 	ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes,
 		       q->plane_sizes, q->alloc_ctx);
-	if (ret) {
-		fail_qop(q, queue_setup);
+	if (ret)
 		return ret;
-	}
 
 	/* Finally, allocate buffers and video memory */
 	allocated_buffers = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes);
@@ -864,8 +935,6 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
 
 		ret = call_qop(q, queue_setup, q, NULL, &num_buffers,
 			       &num_planes, q->plane_sizes, q->alloc_ctx);
-		if (ret)
-			fail_qop(q, queue_setup);
 
 		if (!ret && allocated_buffers < num_buffers)
 			ret = -ENOMEM;
@@ -950,10 +1019,8 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create
 	 */
 	ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
 		       &num_planes, q->plane_sizes, q->alloc_ctx);
-	if (ret) {
-		fail_qop(q, queue_setup);
+	if (ret)
 		return ret;
-	}
 
 	/* Finally, allocate buffers and video memory */
 	allocated_buffers = __vb2_queue_alloc(q, create->memory, num_buffers,
@@ -975,8 +1042,6 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create
 		 */
 		ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
 			       &num_planes, q->plane_sizes, q->alloc_ctx);
-		if (ret)
-			fail_qop(q, queue_setup);
 
 		if (!ret && allocated_buffers < num_buffers)
 			ret = -ENOMEM;
@@ -1038,7 +1103,7 @@ void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
 	if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
 		return NULL;
 
-	return call_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
+	return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
 
 }
 EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
@@ -1059,7 +1124,7 @@ void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
 	if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
 		return NULL;
 
-	return call_memop(vb, cookie, vb->planes[plane_no].mem_priv);
+	return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv);
 }
 EXPORT_SYMBOL_GPL(vb2_plane_cookie);
 
@@ -1112,7 +1177,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
 
 	/* sync buffers */
 	for (plane = 0; plane < vb->num_planes; ++plane)
-		call_memop(vb, finish, vb->planes[plane].mem_priv);
+		call_void_memop(vb, finish, vb->planes[plane].mem_priv);
 
 	/* Add the buffer to the done buffers list */
 	spin_lock_irqsave(&q->done_lock, flags);
@@ -1265,22 +1330,21 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 		if (vb->planes[plane].mem_priv) {
 			if (!reacquired) {
 				reacquired = true;
-				call_vb_qop(vb, buf_cleanup, vb);
+				call_void_vb_qop(vb, buf_cleanup, vb);
 			}
-			call_memop(vb, put_userptr, vb->planes[plane].mem_priv);
+			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
 		}
 
 		vb->planes[plane].mem_priv = NULL;
 		memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
 
 		/* Acquire each plane's memory */
-		mem_priv = call_memop(vb, get_userptr, q->alloc_ctx[plane],
+		mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane],
 				      planes[plane].m.userptr,
 				      planes[plane].length, write);
 		if (IS_ERR_OR_NULL(mem_priv)) {
 			dprintk(1, "qbuf: failed acquiring userspace "
 						"memory for plane %d\n", plane);
-			fail_memop(vb, get_userptr);
 			ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;
 			goto err;
 		}
@@ -1303,7 +1367,6 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 		ret = call_vb_qop(vb, buf_init, vb);
 		if (ret) {
 			dprintk(1, "qbuf: buffer initialization failed\n");
-			fail_vb_qop(vb, buf_init);
 			goto err;
 		}
 	}
@@ -1311,8 +1374,7 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 	ret = call_vb_qop(vb, buf_prepare, vb);
 	if (ret) {
 		dprintk(1, "qbuf: buffer preparation failed\n");
-		fail_vb_qop(vb, buf_prepare);
-		call_vb_qop(vb, buf_cleanup, vb);
+		call_void_vb_qop(vb, buf_cleanup, vb);
 		goto err;
 	}
 
@@ -1321,7 +1383,7 @@ err:
 	/* In case of errors, release planes that were already acquired */
 	for (plane = 0; plane < vb->num_planes; ++plane) {
 		if (vb->planes[plane].mem_priv)
-			call_memop(vb, put_userptr, vb->planes[plane].mem_priv);
+			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
 		vb->planes[plane].mem_priv = NULL;
 		vb->v4l2_planes[plane].m.userptr = 0;
 		vb->v4l2_planes[plane].length = 0;
@@ -1335,13 +1397,8 @@ err:
  */
 static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 {
-	int ret;
-
 	__fill_vb2_buffer(vb, b, vb->v4l2_planes);
-	ret = call_vb_qop(vb, buf_prepare, vb);
-	if (ret)
-		fail_vb_qop(vb, buf_prepare);
-	return ret;
+	return call_vb_qop(vb, buf_prepare, vb);
 }
 
 /**
@@ -1393,7 +1450,7 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 
 		if (!reacquired) {
 			reacquired = true;
-			call_vb_qop(vb, buf_cleanup, vb);
+			call_void_vb_qop(vb, buf_cleanup, vb);
 		}
 
 		/* Release previously acquired memory if present */
@@ -1401,11 +1458,10 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 		memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
 
 		/* Acquire each plane's memory */
-		mem_priv = call_memop(vb, attach_dmabuf, q->alloc_ctx[plane],
+		mem_priv = call_ptr_memop(vb, attach_dmabuf, q->alloc_ctx[plane],
 					dbuf, planes[plane].length, write);
 		if (IS_ERR(mem_priv)) {
 			dprintk(1, "qbuf: failed to attach dmabuf\n");
-			fail_memop(vb, attach_dmabuf);
 			ret = PTR_ERR(mem_priv);
 			dma_buf_put(dbuf);
 			goto err;
@@ -1424,7 +1480,6 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 		if (ret) {
 			dprintk(1, "qbuf: failed to map dmabuf for plane %d\n",
 				plane);
-			fail_memop(vb, map_dmabuf);
 			goto err;
 		}
 		vb->planes[plane].dbuf_mapped = 1;
@@ -1445,7 +1500,6 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 		ret = call_vb_qop(vb, buf_init, vb);
 		if (ret) {
 			dprintk(1, "qbuf: buffer initialization failed\n");
-			fail_vb_qop(vb, buf_init);
 			goto err;
 		}
 	}
@@ -1453,8 +1507,7 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 	ret = call_vb_qop(vb, buf_prepare, vb);
 	if (ret) {
 		dprintk(1, "qbuf: buffer preparation failed\n");
-		fail_vb_qop(vb, buf_prepare);
-		call_vb_qop(vb, buf_cleanup, vb);
+		call_void_vb_qop(vb, buf_cleanup, vb);
 		goto err;
 	}
 
@@ -1479,9 +1532,9 @@ static void __enqueue_in_driver(struct vb2_buffer *vb)
 
 	/* sync buffers */
 	for (plane = 0; plane < vb->num_planes; ++plane)
-		call_memop(vb, prepare, vb->planes[plane].mem_priv);
+		call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
 
-	call_vb_qop(vb, buf_queue, vb);
+	call_void_vb_qop(vb, buf_queue, vb);
 }
 
 static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
@@ -1520,9 +1573,9 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 		 * mmap_sem and then takes the driver's lock again.
 		 */
 		mmap_sem = &current->mm->mmap_sem;
-		call_qop(q, wait_prepare, q);
+		call_void_qop(q, wait_prepare, q);
 		down_read(mmap_sem);
-		call_qop(q, wait_finish, q);
+		call_void_qop(q, wait_finish, q);
 
 		ret = __qbuf_userptr(vb, b);
 
@@ -1647,7 +1700,6 @@ static int vb2_start_streaming(struct vb2_queue *q)
 	if (!ret)
 		return 0;
 
-	fail_qop(q, start_streaming);
 	dprintk(1, "qbuf: driver refused to start streaming\n");
 	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
 		unsigned i;
@@ -1812,7 +1864,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
 		 * become ready or for streamoff. Driver's lock is released to
 		 * allow streamoff or qbuf to be called while waiting.
 		 */
-		call_qop(q, wait_prepare, q);
+		call_void_qop(q, wait_prepare, q);
 
 		/*
 		 * All locks have been released, it is safe to sleep now.
@@ -1825,7 +1877,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
 		 * We need to reevaluate both conditions again after reacquiring
 		 * the locks or return an error if one occurred.
 		 */
-		call_qop(q, wait_finish, q);
+		call_void_qop(q, wait_finish, q);
 		if (ret) {
 			dprintk(1, "Sleep was interrupted\n");
 			return ret;
@@ -1911,7 +1963,7 @@ static void __vb2_dqbuf(struct vb2_buffer *vb)
 		for (i = 0; i < vb->num_planes; ++i) {
 			if (!vb->planes[i].dbuf_mapped)
 				continue;
-			call_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
+			call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
 			vb->planes[i].dbuf_mapped = 0;
 		}
 	}
@@ -1941,7 +1993,7 @@ static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool n
 		return -EINVAL;
 	}
 
-	call_vb_qop(vb, buf_finish, vb);
+	call_void_vb_qop(vb, buf_finish, vb);
 
 	/* Fill buffer information for the userspace */
 	__fill_v4l2_buffer(vb, b);
@@ -2042,7 +2094,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
 
 		if (vb->state != VB2_BUF_STATE_DEQUEUED) {
 			vb->state = VB2_BUF_STATE_PREPARED;
-			call_vb_qop(vb, buf_finish, vb);
+			call_void_vb_qop(vb, buf_finish, vb);
 		}
 		__vb2_dqbuf(vb);
 	}
@@ -2244,11 +2296,10 @@ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
 
 	vb_plane = &vb->planes[eb->plane];
 
-	dbuf = call_memop(vb, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE);
+	dbuf = call_ptr_memop(vb, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE);
 	if (IS_ERR_OR_NULL(dbuf)) {
 		dprintk(1, "Failed to export buffer %d, plane %d\n",
 			eb->index, eb->plane);
-		fail_memop(vb, get_dmabuf);
 		return -EINVAL;
 	}
 
@@ -2341,10 +2392,8 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
 	}
 
 	ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
-	if (ret) {
-		fail_memop(vb, mmap);
+	if (ret)
 		return ret;
-	}
 
 	dprintk(3, "Buffer %d, plane %d successfully mapped\n", buffer, plane);
 	return 0;