author		Hans Verkuil <hansverk@cisco.com>	2014-11-18 07:51:04 -0500
committer	Mauro Carvalho Chehab <mchehab@osg.samsung.com>	2014-11-25 06:06:01 -0500
commit		041c7b6ac74ee7a4375faf80e2864fc2bce78edc (patch)
tree		cd357f774915ea17e5362dcdb2f9b3088b984d72
parent		e078b79d8aa70a48fb3fa684e6a6548c5127646b (diff)
[media] vb2-dma-sg: add support for dmabuf exports
Add DMABUF export support to vb2-dma-sg.
Signed-off-by: Hans Verkuil <hansverk@cisco.com>
Acked-by: Pawel Osciak <pawel@osciak.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
-rw-r--r--	drivers/media/v4l2-core/videobuf2-dma-sg.c	| 170
1 file changed, 170 insertions(+), 0 deletions(-)
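The export path added below is driven from userspace through the standard VIDIOC_EXPBUF ioctl, which hands out a vb2 buffer as a dmabuf file descriptor that other devices can import. A minimal userspace sketch, not part of this patch; the buffer type, index, and a prior VIDIOC_REQBUFS setup are assumptions:

/*
 * Hypothetical userspace helper: export capture buffer "index",
 * previously allocated with VIDIOC_REQBUFS, as a dmabuf fd.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int export_buffer(int video_fd, unsigned int index)
{
	struct v4l2_exportbuffer expbuf;

	memset(&expbuf, 0, sizeof(expbuf));
	expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	expbuf.index = index;		/* buffer from a prior VIDIOC_REQBUFS */
	expbuf.flags = O_CLOEXEC | O_RDWR;

	if (ioctl(video_fd, VIDIOC_EXPBUF, &expbuf) < 0)
		return -1;

	return expbuf.fd;		/* dmabuf fd, importable by other devices */
}

On success, expbuf.fd is backed by the exporter ops this patch adds and can be handed to another subsystem (a DRM device, another V4L2 device, and so on).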
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 81e88a067882..0566e94a5a1d 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -408,6 +408,175 @@ static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
 }
 
 /*********************************************/
+/* DMABUF ops for exporters */
+/*********************************************/
+
+struct vb2_dma_sg_attachment {
+	struct sg_table sgt;
+	enum dma_data_direction dma_dir;
+};
+
+static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+	struct dma_buf_attachment *dbuf_attach)
+{
+	struct vb2_dma_sg_attachment *attach;
+	unsigned int i;
+	struct scatterlist *rd, *wr;
+	struct sg_table *sgt;
+	struct vb2_dma_sg_buf *buf = dbuf->priv;
+	int ret;
+
+	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+	if (!attach)
+		return -ENOMEM;
+
+	sgt = &attach->sgt;
+	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
+	 * map the same scatter list to multiple attachments at the same time.
+	 */
+	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
+	if (ret) {
+		kfree(attach);
+		return -ENOMEM;
+	}
+
+	rd = buf->dma_sgt->sgl;
+	wr = sgt->sgl;
+	for (i = 0; i < sgt->orig_nents; ++i) {
+		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+		rd = sg_next(rd);
+		wr = sg_next(wr);
+	}
+
+	attach->dma_dir = DMA_NONE;
+	dbuf_attach->priv = attach;
+
+	return 0;
+}
+
+static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
+	struct dma_buf_attachment *db_attach)
+{
+	struct vb2_dma_sg_attachment *attach = db_attach->priv;
+	struct sg_table *sgt;
+
+	if (!attach)
+		return;
+
+	sgt = &attach->sgt;
+
+	/* release the scatterlist cache */
+	if (attach->dma_dir != DMA_NONE)
+		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+			attach->dma_dir);
+	sg_free_table(sgt);
+	kfree(attach);
+	db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
+	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
+{
+	struct vb2_dma_sg_attachment *attach = db_attach->priv;
+	/* stealing dmabuf mutex to serialize map/unmap operations */
+	struct mutex *lock = &db_attach->dmabuf->lock;
+	struct sg_table *sgt;
+	int ret;
+
+	mutex_lock(lock);
+
+	sgt = &attach->sgt;
+	/* return previously mapped sg table */
+	if (attach->dma_dir == dma_dir) {
+		mutex_unlock(lock);
+		return sgt;
+	}
+
+	/* release any previous cache */
+	if (attach->dma_dir != DMA_NONE) {
+		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+			attach->dma_dir);
+		attach->dma_dir = DMA_NONE;
+	}
+
+	/* mapping to the client with new direction */
+	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
+	if (ret <= 0) {
+		pr_err("failed to map scatterlist\n");
+		mutex_unlock(lock);
+		return ERR_PTR(-EIO);
+	}
+
+	attach->dma_dir = dma_dir;
+
+	mutex_unlock(lock);
+
+	return sgt;
+}
+
+static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+	struct sg_table *sgt, enum dma_data_direction dma_dir)
+{
+	/* nothing to be done here */
+}
+
+static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+	/* drop reference obtained in vb2_dma_sg_get_dmabuf */
+	vb2_dma_sg_put(dbuf->priv);
+}
+
+static void *vb2_dma_sg_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+{
+	struct vb2_dma_sg_buf *buf = dbuf->priv;
+
+	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
+}
+
+static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
+{
+	struct vb2_dma_sg_buf *buf = dbuf->priv;
+
+	return vb2_dma_sg_vaddr(buf);
+}
+
+static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
+	struct vm_area_struct *vma)
+{
+	return vb2_dma_sg_mmap(dbuf->priv, vma);
+}
+
+static struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
+	.attach = vb2_dma_sg_dmabuf_ops_attach,
+	.detach = vb2_dma_sg_dmabuf_ops_detach,
+	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
+	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
+	.kmap = vb2_dma_sg_dmabuf_ops_kmap,
+	.kmap_atomic = vb2_dma_sg_dmabuf_ops_kmap,
+	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
+	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
+	.release = vb2_dma_sg_dmabuf_ops_release,
+};
+
+static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
+{
+	struct vb2_dma_sg_buf *buf = buf_priv;
+	struct dma_buf *dbuf;
+
+	if (WARN_ON(!buf->dma_sgt))
+		return NULL;
+
+	dbuf = dma_buf_export(buf, &vb2_dma_sg_dmabuf_ops, buf->size, flags, NULL);
+	if (IS_ERR(dbuf))
+		return NULL;
+
+	/* dmabuf keeps reference to vb2 buffer */
+	atomic_inc(&buf->refcount);
+
+	return dbuf;
+}
+
+/*********************************************/
 /* callbacks for DMABUF buffers */
 /*********************************************/
 
@@ -523,6 +692,7 @@ const struct vb2_mem_ops vb2_dma_sg_memops = {
 	.vaddr = vb2_dma_sg_vaddr,
 	.mmap = vb2_dma_sg_mmap,
 	.num_users = vb2_dma_sg_num_users,
+	.get_dmabuf = vb2_dma_sg_get_dmabuf,
 	.map_dmabuf = vb2_dma_sg_map_dmabuf,
 	.unmap_dmabuf = vb2_dma_sg_unmap_dmabuf,
 	.attach_dmabuf = vb2_dma_sg_attach_dmabuf,
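On the kernel side, the attach op above deliberately gives each attachment its own copy of the scatterlist, since a single sg_table cannot be DMA-mapped for several devices at once; the map op then caches the mapping per attachment and per direction under the dmabuf mutex. A hedged sketch of how an importing driver would consume such a buffer through the core dma-buf API; the helper name and trimmed error handling are illustrative, not from this patch:

/*
 * Hypothetical importer-side helper: attach the importing device "dev"
 * to a dmabuf fd and map it for DMA. dma_buf_attach() ends up in
 * vb2_dma_sg_dmabuf_ops_attach() above, dma_buf_map_attachment() in
 * vb2_dma_sg_dmabuf_ops_map().
 */
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static struct sg_table *import_vb2_dmabuf(struct device *dev, int fd,
					  struct dma_buf_attachment **attachp)
{
	struct dma_buf *dbuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	dbuf = dma_buf_get(fd);			/* takes a dmabuf reference */
	if (IS_ERR(dbuf))
		return ERR_CAST(dbuf);

	attach = dma_buf_attach(dbuf, dev);	/* per-device sg copy is made here */
	if (IS_ERR(attach)) {
		dma_buf_put(dbuf);
		return ERR_CAST(attach);
	}

	/* maps the attachment's scatterlist for dev in the given direction */
	sgt = dma_buf_map_attachment(attach, DMA_FROM_DEVICE);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dbuf, attach);
		dma_buf_put(dbuf);
		return sgt;
	}

	*attachp = attach;
	return sgt;
}

When the importer is done, it unwinds with dma_buf_unmap_attachment(), dma_buf_detach() and dma_buf_put(); the release op then drops the buffer reference taken in vb2_dma_sg_get_dmabuf(), so the vb2 buffer stays alive as long as any exported fd does.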