diff options
author | Tomasz Stanislawski <t.stanislaws@samsung.com> | 2012-06-14 10:32:25 -0400 |
---|---|---|
committer | Mauro Carvalho Chehab <mchehab@redhat.com> | 2012-11-25 14:21:12 -0500 |
commit | 9ef2cbeb819213520a8be77d4c3f7330a1a06ac5 (patch) | |
tree | 19a6c33995de50bedf00716ae6d40f378080dca7 /drivers/media | |
parent | 83ae7c5a1b5b5cd4380ff70797e4c5dcfb61a70d (diff) |
[media] v4l: vb2-dma-contig: add support for DMABUF exporting
This patch adds support for exporting a dma-contig buffer using
DMABUF interface.
Signed-off-by: Tomasz Stanislawski <t.stanislaws@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Hans Verkuil <hans.verkuil@cisco.com>
Tested-by: Mauro Carvalho Chehab <mchehab@redhat.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
Diffstat (limited to 'drivers/media')
-rw-r--r-- | drivers/media/v4l2-core/videobuf2-dma-contig.c | 200 |
1 file changed, 200 insertions, 0 deletions
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c index 7fc71a0906db..78c281c55ad7 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c | |||
@@ -36,6 +36,7 @@ struct vb2_dc_buf { | |||
36 | /* MMAP related */ | 36 | /* MMAP related */ |
37 | struct vb2_vmarea_handler handler; | 37 | struct vb2_vmarea_handler handler; |
38 | atomic_t refcount; | 38 | atomic_t refcount; |
39 | struct sg_table *sgt_base; | ||
39 | 40 | ||
40 | /* USERPTR related */ | 41 | /* USERPTR related */ |
41 | struct vm_area_struct *vma; | 42 | struct vm_area_struct *vma; |
@@ -142,6 +143,10 @@ static void vb2_dc_put(void *buf_priv) | |||
142 | if (!atomic_dec_and_test(&buf->refcount)) | 143 | if (!atomic_dec_and_test(&buf->refcount)) |
143 | return; | 144 | return; |
144 | 145 | ||
146 | if (buf->sgt_base) { | ||
147 | sg_free_table(buf->sgt_base); | ||
148 | kfree(buf->sgt_base); | ||
149 | } | ||
145 | dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr); | 150 | dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr); |
146 | kfree(buf); | 151 | kfree(buf); |
147 | } | 152 | } |
@@ -213,6 +218,200 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma) | |||
213 | } | 218 | } |
214 | 219 | ||
215 | /*********************************************/ | 220 | /*********************************************/ |
221 | /* DMABUF ops for exporters */ | ||
222 | /*********************************************/ | ||
223 | |||
/*
 * Per-attachment state for an exported DMABUF: a private copy of the
 * exporter's scatterlist plus the DMA direction it is currently mapped
 * with (DMA_NONE while unmapped).  Stored in dma_buf_attachment->priv.
 */
struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
};
228 | |||
229 | static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev, | ||
230 | struct dma_buf_attachment *dbuf_attach) | ||
231 | { | ||
232 | struct vb2_dc_attachment *attach; | ||
233 | unsigned int i; | ||
234 | struct scatterlist *rd, *wr; | ||
235 | struct sg_table *sgt; | ||
236 | struct vb2_dc_buf *buf = dbuf->priv; | ||
237 | int ret; | ||
238 | |||
239 | attach = kzalloc(sizeof(*attach), GFP_KERNEL); | ||
240 | if (!attach) | ||
241 | return -ENOMEM; | ||
242 | |||
243 | sgt = &attach->sgt; | ||
244 | /* Copy the buf->base_sgt scatter list to the attachment, as we can't | ||
245 | * map the same scatter list to multiple attachments at the same time. | ||
246 | */ | ||
247 | ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL); | ||
248 | if (ret) { | ||
249 | kfree(attach); | ||
250 | return -ENOMEM; | ||
251 | } | ||
252 | |||
253 | rd = buf->sgt_base->sgl; | ||
254 | wr = sgt->sgl; | ||
255 | for (i = 0; i < sgt->orig_nents; ++i) { | ||
256 | sg_set_page(wr, sg_page(rd), rd->length, rd->offset); | ||
257 | rd = sg_next(rd); | ||
258 | wr = sg_next(wr); | ||
259 | } | ||
260 | |||
261 | attach->dir = DMA_NONE; | ||
262 | dbuf_attach->priv = attach; | ||
263 | |||
264 | return 0; | ||
265 | } | ||
266 | |||
267 | static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf, | ||
268 | struct dma_buf_attachment *db_attach) | ||
269 | { | ||
270 | struct vb2_dc_attachment *attach = db_attach->priv; | ||
271 | struct sg_table *sgt; | ||
272 | |||
273 | if (!attach) | ||
274 | return; | ||
275 | |||
276 | sgt = &attach->sgt; | ||
277 | |||
278 | /* release the scatterlist cache */ | ||
279 | if (attach->dir != DMA_NONE) | ||
280 | dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, | ||
281 | attach->dir); | ||
282 | sg_free_table(sgt); | ||
283 | kfree(attach); | ||
284 | db_attach->priv = NULL; | ||
285 | } | ||
286 | |||
287 | static struct sg_table *vb2_dc_dmabuf_ops_map( | ||
288 | struct dma_buf_attachment *db_attach, enum dma_data_direction dir) | ||
289 | { | ||
290 | struct vb2_dc_attachment *attach = db_attach->priv; | ||
291 | /* stealing dmabuf mutex to serialize map/unmap operations */ | ||
292 | struct mutex *lock = &db_attach->dmabuf->lock; | ||
293 | struct sg_table *sgt; | ||
294 | int ret; | ||
295 | |||
296 | mutex_lock(lock); | ||
297 | |||
298 | sgt = &attach->sgt; | ||
299 | /* return previously mapped sg table */ | ||
300 | if (attach->dir == dir) { | ||
301 | mutex_unlock(lock); | ||
302 | return sgt; | ||
303 | } | ||
304 | |||
305 | /* release any previous cache */ | ||
306 | if (attach->dir != DMA_NONE) { | ||
307 | dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, | ||
308 | attach->dir); | ||
309 | attach->dir = DMA_NONE; | ||
310 | } | ||
311 | |||
312 | /* mapping to the client with new direction */ | ||
313 | ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir); | ||
314 | if (ret <= 0) { | ||
315 | pr_err("failed to map scatterlist\n"); | ||
316 | mutex_unlock(lock); | ||
317 | return ERR_PTR(-EIO); | ||
318 | } | ||
319 | |||
320 | attach->dir = dir; | ||
321 | |||
322 | mutex_unlock(lock); | ||
323 | |||
324 | return sgt; | ||
325 | } | ||
326 | |||
/*
 * DMABUF unmap callback: intentionally a no-op.  The device mapping is
 * cached in the attachment by the map callback and only torn down in
 * vb2_dc_dmabuf_ops_detach(), so repeated map/unmap cycles are cheap.
 */
static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	/* nothing to be done here */
}
332 | |||
/*
 * DMABUF release callback: called when the dma_buf itself goes away.
 * Drops the vb2 buffer reference taken in vb2_dc_get_dmabuf(), which
 * may free the underlying buffer via vb2_dc_put().
 */
static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}
338 | |||
/*
 * DMABUF kmap/kmap_atomic callback: return the kernel address of page
 * @pgnum.  Plain pointer arithmetic suffices because the buffer is
 * contiguous and buf->vaddr maps its start; no per-page mapping is set
 * up, so there is nothing to undo on kunmap.
 */
static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}
345 | |||
/*
 * DMABUF vmap callback: the whole buffer already has a kernel virtual
 * address (buf->vaddr), so hand it out directly — no new mapping is
 * created here.
 */
static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}
352 | |||
/*
 * DMABUF mmap callback: map the buffer into an importer's user address
 * space by delegating to the regular vb2 dma-contig mmap implementation.
 */
static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}
358 | |||
/*
 * DMABUF operations implemented by this exporter.  kmap and kmap_atomic
 * share one handler: the buffer has a permanent kernel mapping, so both
 * reduce to address arithmetic and need no atomic-context special case.
 */
static struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.kmap = vb2_dc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};
370 | |||
371 | static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf) | ||
372 | { | ||
373 | int ret; | ||
374 | struct sg_table *sgt; | ||
375 | |||
376 | sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); | ||
377 | if (!sgt) { | ||
378 | dev_err(buf->dev, "failed to alloc sg table\n"); | ||
379 | return NULL; | ||
380 | } | ||
381 | |||
382 | ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr, | ||
383 | buf->size); | ||
384 | if (ret < 0) { | ||
385 | dev_err(buf->dev, "failed to get scatterlist from DMA API\n"); | ||
386 | kfree(sgt); | ||
387 | return NULL; | ||
388 | } | ||
389 | |||
390 | return sgt; | ||
391 | } | ||
392 | |||
/*
 * Export the vb2 buffer as a DMABUF object (vb2_mem_ops.get_dmabuf).
 *
 * The base scatterlist (buf->sgt_base) is created lazily on the first
 * export and cached for later exports; it is released together with
 * the buffer in vb2_dc_put().  On success a reference to the vb2
 * buffer is taken, which vb2_dc_dmabuf_ops_release() drops when the
 * dma_buf is destroyed.
 *
 * Returns the new dma_buf, or NULL on failure.
 */
static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;

	/* Lazily build and cache the scatterlist describing the buffer. */
	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}
413 | |||
414 | /*********************************************/ | ||
216 | /* callbacks for USERPTR buffers */ | 415 | /* callbacks for USERPTR buffers */ |
217 | /*********************************************/ | 416 | /*********************************************/ |
218 | 417 | ||
@@ -519,6 +718,7 @@ static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf, | |||
519 | const struct vb2_mem_ops vb2_dma_contig_memops = { | 718 | const struct vb2_mem_ops vb2_dma_contig_memops = { |
520 | .alloc = vb2_dc_alloc, | 719 | .alloc = vb2_dc_alloc, |
521 | .put = vb2_dc_put, | 720 | .put = vb2_dc_put, |
721 | .get_dmabuf = vb2_dc_get_dmabuf, | ||
522 | .cookie = vb2_dc_cookie, | 722 | .cookie = vb2_dc_cookie, |
523 | .vaddr = vb2_dc_vaddr, | 723 | .vaddr = vb2_dc_vaddr, |
524 | .mmap = vb2_dc_mmap, | 724 | .mmap = vb2_dc_mmap, |