author	Hans Verkuil <hans.verkuil@cisco.com>	2014-11-18 07:51:03 -0500
committer	Mauro Carvalho Chehab <mchehab@osg.samsung.com>	2014-11-25 06:04:47 -0500
commit	e078b79d8aa70a48fb3fa684e6a6548c5127646b (patch)
tree	5c43c0470bfe466e9f98a92c88fc43c8ab4430c4
parent	d790b7eda953df474f470169ebdf111c02fa7a2d (diff)
[media] vb2-dma-sg: add dmabuf import support
Add support for importing dmabuf to videobuf2-dma-sg.

Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Acked-by: Pawel Osciak <pawel@osciak.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
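For context, this is what the change buys a driver: once vb2-dma-sg provides the attach/detach/map/unmap callbacks below, any driver built on these memops can advertise DMABUF alongside MMAP and USERPTR when it initializes its vb2 queue, and the vb2 core routes V4L2_MEMORY_DMABUF buffers to the new callbacks automatically. A minimal sketch of such a queue setup (the my_driver name and surrounding setup are hypothetical; VB2_DMABUF, vb2_queue_init() and vb2_dma_sg_memops are the real vb2 symbols):

	/* hypothetical queue init in a driver that uses vb2-dma-sg */
	static int my_driver_init_vb2_queue(struct vb2_queue *q)
	{
		q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		/* DMABUF can now be offered next to MMAP/USERPTR */
		q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
		q->mem_ops = &vb2_dma_sg_memops;
		/* q->ops, q->drv_priv, q->buf_struct_size etc. as before */
		return vb2_queue_init(q);
	}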
 drivers/media/v4l2-core/videobuf2-dma-sg.c | 151 ++++++++++++++++++++++++----
 1 file changed, 137 insertions(+), 14 deletions(-)
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 346e39b2aae8..81e88a067882 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -41,11 +41,19 @@ struct vb2_dma_sg_buf {
 	int				offset;
 	enum dma_data_direction		dma_dir;
 	struct sg_table			sg_table;
+	/*
+	 * This will point to sg_table when used with the MMAP or USERPTR
+	 * memory model, and to the dma_buf sglist when used with the
+	 * DMABUF memory model.
+	 */
+	struct sg_table			*dma_sgt;
 	size_t				size;
 	unsigned int			num_pages;
 	atomic_t			refcount;
 	struct vb2_vmarea_handler	handler;
 	struct vm_area_struct		*vma;
+
+	struct dma_buf_attachment	*db_attach;
 };
 
 static void vb2_dma_sg_put(void *buf_priv);
@@ -112,6 +120,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
 	buf->size = size;
 	/* size is already page aligned */
 	buf->num_pages = size >> PAGE_SHIFT;
+	buf->dma_sgt = &buf->sg_table;
 
 	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
 			     GFP_KERNEL);
@@ -122,7 +131,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
 	if (ret)
 		goto fail_pages_alloc;
 
-	ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
+	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
 			buf->num_pages, 0, size, GFP_KERNEL);
 	if (ret)
 		goto fail_table_alloc;
@@ -147,7 +156,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
 
 fail_map:
 	put_device(buf->dev);
-	sg_free_table(sgt);
+	sg_free_table(buf->dma_sgt);
 fail_table_alloc:
 	num_pages = buf->num_pages;
 	while (num_pages--)
@@ -171,7 +180,7 @@ static void vb2_dma_sg_put(void *buf_priv)
 	dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
 	if (buf->vaddr)
 		vm_unmap_ram(buf->vaddr, buf->num_pages);
-	sg_free_table(&buf->sg_table);
+	sg_free_table(buf->dma_sgt);
 	while (--i >= 0)
 		__free_page(buf->pages[i]);
 	kfree(buf->pages);
@@ -183,7 +192,11 @@ static void vb2_dma_sg_put(void *buf_priv)
 static void vb2_dma_sg_prepare(void *buf_priv)
 {
 	struct vb2_dma_sg_buf *buf = buf_priv;
-	struct sg_table *sgt = &buf->sg_table;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	/* DMABUF exporter will flush the cache for us */
+	if (buf->db_attach)
+		return;
 
 	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
 }
@@ -191,7 +204,11 @@ static void vb2_dma_sg_prepare(void *buf_priv)
 static void vb2_dma_sg_finish(void *buf_priv)
 {
 	struct vb2_dma_sg_buf *buf = buf_priv;
-	struct sg_table *sgt = &buf->sg_table;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	/* DMABUF exporter will flush the cache for us */
+	if (buf->db_attach)
+		return;
 
 	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
 }
@@ -221,6 +238,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	buf->dma_dir = dma_dir;
 	buf->offset = vaddr & ~PAGE_MASK;
 	buf->size = size;
+	buf->dma_sgt = &buf->sg_table;
 
 	first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
 	last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
@@ -273,7 +291,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	if (num_pages_from_user != buf->num_pages)
 		goto userptr_fail_get_user_pages;
 
-	if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
+	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
 			buf->num_pages, buf->offset, size, 0))
 		goto userptr_fail_alloc_table_from_pages;
 
@@ -315,7 +333,7 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
 	dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
 	if (buf->vaddr)
 		vm_unmap_ram(buf->vaddr, buf->num_pages);
-	sg_free_table(&buf->sg_table);
+	sg_free_table(buf->dma_sgt);
 	while (--i >= 0) {
 		if (buf->dma_dir == DMA_FROM_DEVICE)
 			set_page_dirty_lock(buf->pages[i]);
@@ -333,14 +351,16 @@ static void *vb2_dma_sg_vaddr(void *buf_priv)
 
 	BUG_ON(!buf);
 
-	if (!buf->vaddr)
-		buf->vaddr = vm_map_ram(buf->pages,
-					buf->num_pages,
-					-1,
-					PAGE_KERNEL);
+	if (!buf->vaddr) {
+		if (buf->db_attach)
+			buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
+		else
+			buf->vaddr = vm_map_ram(buf->pages,
+					buf->num_pages, -1, PAGE_KERNEL);
+	}
 
 	/* add offset in case userptr is not page-aligned */
-	return buf->vaddr + buf->offset;
+	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
 }
 
 static unsigned int vb2_dma_sg_num_users(void *buf_priv)
@@ -387,11 +407,110 @@ static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
 	return 0;
 }
 
+/*********************************************/
+/*       callbacks for DMABUF buffers        */
+/*********************************************/
+
+static int vb2_dma_sg_map_dmabuf(void *mem_priv)
+{
+	struct vb2_dma_sg_buf *buf = mem_priv;
+	struct sg_table *sgt;
+
+	if (WARN_ON(!buf->db_attach)) {
+		pr_err("trying to pin a non attached buffer\n");
+		return -EINVAL;
+	}
+
+	if (WARN_ON(buf->dma_sgt)) {
+		pr_err("dmabuf buffer is already pinned\n");
+		return 0;
+	}
+
+	/* get the associated scatterlist for this buffer */
+	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
+	if (IS_ERR(sgt)) {
+		pr_err("Error getting dmabuf scatterlist\n");
+		return -EINVAL;
+	}
+
+	buf->dma_sgt = sgt;
+	buf->vaddr = NULL;
+
+	return 0;
+}
+
+static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
+{
+	struct vb2_dma_sg_buf *buf = mem_priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	if (WARN_ON(!buf->db_attach)) {
+		pr_err("trying to unpin a not attached buffer\n");
+		return;
+	}
+
+	if (WARN_ON(!sgt)) {
+		pr_err("dmabuf buffer is already unpinned\n");
+		return;
+	}
+
+	if (buf->vaddr) {
+		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
+		buf->vaddr = NULL;
+	}
+	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+
+	buf->dma_sgt = NULL;
+}
+
+static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
+{
+	struct vb2_dma_sg_buf *buf = mem_priv;
+
+	/* if vb2 works correctly you should never detach mapped buffer */
+	if (WARN_ON(buf->dma_sgt))
+		vb2_dma_sg_unmap_dmabuf(buf);
+
+	/* detach this attachment */
+	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
+	kfree(buf);
+}
+
+static void *vb2_dma_sg_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
+	unsigned long size, enum dma_data_direction dma_dir)
+{
+	struct vb2_dma_sg_conf *conf = alloc_ctx;
+	struct vb2_dma_sg_buf *buf;
+	struct dma_buf_attachment *dba;
+
+	if (dbuf->size < size)
+		return ERR_PTR(-EFAULT);
+
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	buf->dev = conf->dev;
+	/* create attachment for the dmabuf with the user device */
+	dba = dma_buf_attach(dbuf, buf->dev);
+	if (IS_ERR(dba)) {
+		pr_err("failed to attach dmabuf\n");
+		kfree(buf);
+		return dba;
+	}
+
+	buf->dma_dir = dma_dir;
+	buf->size = size;
+	buf->db_attach = dba;
+
+	return buf;
+}
+
 static void *vb2_dma_sg_cookie(void *buf_priv)
 {
 	struct vb2_dma_sg_buf *buf = buf_priv;
 
-	return &buf->sg_table;
+	return buf->dma_sgt;
 }
 
 const struct vb2_mem_ops vb2_dma_sg_memops = {
@@ -404,6 +523,10 @@ const struct vb2_mem_ops vb2_dma_sg_memops = {
 	.vaddr = vb2_dma_sg_vaddr,
 	.mmap = vb2_dma_sg_mmap,
 	.num_users = vb2_dma_sg_num_users,
+	.map_dmabuf = vb2_dma_sg_map_dmabuf,
+	.unmap_dmabuf = vb2_dma_sg_unmap_dmabuf,
+	.attach_dmabuf = vb2_dma_sg_attach_dmabuf,
+	.detach_dmabuf = vb2_dma_sg_detach_dmabuf,
 	.cookie = vb2_dma_sg_cookie,
 };
 EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
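For completeness, a rough sketch of the userspace side this enables: a buffer exported as a dmabuf fd by another device (a GPU, for instance) can now be queued into a vb2-dma-sg based driver through the standard V4L2 DMABUF memory type. The queue_dmabuf() helper and both fd parameters are hypothetical; the ioctls, structures, and V4L2_MEMORY_DMABUF are standard V4L2 UAPI:

	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	/* video_fd: an open V4L2 device node backed by vb2-dma-sg;
	 * dmabuf_fd: a dmabuf exported by another subsystem */
	static int queue_dmabuf(int video_fd, int dmabuf_fd)
	{
		struct v4l2_requestbuffers req = {
			.count  = 1,
			.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
			.memory = V4L2_MEMORY_DMABUF,
		};
		struct v4l2_buffer buf = {
			.index  = 0,
			.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
			.memory = V4L2_MEMORY_DMABUF,
		};

		if (ioctl(video_fd, VIDIOC_REQBUFS, &req) < 0)
			return -1;
		buf.m.fd = dmabuf_fd;	/* vb2 attaches/maps it via the new memops */
		return ioctl(video_fd, VIDIOC_QBUF, &buf);
	}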