author    Joonyoung Shim <jy0922.shim@samsung.com>    2013-06-28 01:24:55 -0400
committer Dave Airlie <airlied@redhat.com>            2013-07-05 01:44:54 -0400
commit    6d35dea107834eb549c1fba28fea6ec39c81d0ba (patch)
tree      bbada84a6400a79ec71d47df6264c55e1485c7fa /drivers/gpu/drm/drm_gem_cma_helper.c
parent    78467dc5f70fb9bee4a32c0c3714c99b0b5465c7 (diff)
drm/cma: remove GEM CMA specific dma_buf functionality
We can use prime helpers instead.

Signed-off-by: Joonyoung Shim <jy0922.shim@samsung.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
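For reference, the sketch below shows roughly what the replacement looks like on the driver side; it is illustrative only and not part of this commit. The "foo" driver name is hypothetical, while the callbacks are the generic DRM PRIME helpers plus the CMA low-level hooks (drm_gem_cma_prime_get_sg_table() and friends) that this file continues to provide after the removal. Wiring them into struct drm_driver takes the place of the dedicated drm_gem_cma_dmabuf_export()/drm_gem_cma_dmabuf_import() entry points deleted below.

/* Illustrative sketch, not from this commit: a CMA-based driver ("foo" is a
 * placeholder name) using the common PRIME helpers instead of the removed
 * CMA-specific dma_buf code, as in kernels of the v3.11 era. */
#include <drm/drmP.h>
#include <drm/drm_gem_cma_helper.h>

static struct drm_driver foo_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_PRIME,

        /* Generic PRIME plumbing replaces drm_gem_cma_dmabuf_export/import. */
        .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
        .gem_prime_export       = drm_gem_prime_export,
        .gem_prime_import       = drm_gem_prime_import,

        /* CMA low-level hooks consumed by the PRIME helpers. */
        .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
        .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
        .gem_prime_vmap         = drm_gem_cma_prime_vmap,
        .gem_prime_vunmap       = drm_gem_cma_prime_vunmap,
        .gem_prime_mmap         = drm_gem_cma_prime_mmap,

        /* Usual CMA GEM object handling. */
        .gem_free_object        = drm_gem_cma_free_object,
        .gem_vm_ops             = &drm_gem_cma_vm_ops,
        .dumb_create            = drm_gem_cma_dumb_create,
        .dumb_map_offset        = drm_gem_cma_dumb_map_offset,
        .dumb_destroy           = drm_gem_cma_dumb_destroy,
};

Export and import then go through drm_gem_prime_handle_to_fd()/drm_gem_prime_fd_to_handle(), which call back into the CMA hooks above, so the CMA helper no longer needs its own dma_buf_ops implementation.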
Diffstat (limited to 'drivers/gpu/drm/drm_gem_cma_helper.c')
-rw-r--r--    drivers/gpu/drm/drm_gem_cma_helper.c    286
1 files changed, 0 insertions, 286 deletions
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 83a45e5cf04e..ece72a8ac245 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -317,292 +317,6 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m
 EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
 #endif
 
-/* -----------------------------------------------------------------------------
- * DMA-BUF
- */
-
-struct drm_gem_cma_dmabuf_attachment {
-        struct sg_table sgt;
-        enum dma_data_direction dir;
-};
-
-static int drm_gem_cma_dmabuf_attach(struct dma_buf *dmabuf, struct device *dev,
-                                     struct dma_buf_attachment *attach)
-{
-        struct drm_gem_cma_dmabuf_attachment *cma_attach;
-
-        cma_attach = kzalloc(sizeof(*cma_attach), GFP_KERNEL);
-        if (!cma_attach)
-                return -ENOMEM;
-
-        cma_attach->dir = DMA_NONE;
-        attach->priv = cma_attach;
-
-        return 0;
-}
-
-static void drm_gem_cma_dmabuf_detach(struct dma_buf *dmabuf,
-                                      struct dma_buf_attachment *attach)
-{
-        struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
-        struct sg_table *sgt;
-
-        if (cma_attach == NULL)
-                return;
-
-        sgt = &cma_attach->sgt;
-
-        if (cma_attach->dir != DMA_NONE)
-                dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
-                             cma_attach->dir);
-
-        sg_free_table(sgt);
-        kfree(cma_attach);
-        attach->priv = NULL;
-}
-
-static struct sg_table *
-drm_gem_cma_dmabuf_map(struct dma_buf_attachment *attach,
-                       enum dma_data_direction dir)
-{
-        struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
-        struct drm_gem_cma_object *cma_obj = attach->dmabuf->priv;
-        struct drm_device *drm = cma_obj->base.dev;
-        struct scatterlist *rd, *wr;
-        struct sg_table *sgt;
-        unsigned int i;
-        int nents, ret;
-
-        DRM_DEBUG_PRIME("\n");
-
-        if (WARN_ON(dir == DMA_NONE))
-                return ERR_PTR(-EINVAL);
-
-        /* Return the cached mapping when possible. */
-        if (cma_attach->dir == dir)
-                return &cma_attach->sgt;
-
-        /* Two mappings with different directions for the same attachment are
-         * not allowed.
-         */
-        if (WARN_ON(cma_attach->dir != DMA_NONE))
-                return ERR_PTR(-EBUSY);
-
-        sgt = &cma_attach->sgt;
-
-        ret = sg_alloc_table(sgt, cma_obj->sgt->orig_nents, GFP_KERNEL);
-        if (ret) {
-                DRM_ERROR("failed to alloc sgt.\n");
-                return ERR_PTR(-ENOMEM);
-        }
-
-        mutex_lock(&drm->struct_mutex);
-
-        rd = cma_obj->sgt->sgl;
-        wr = sgt->sgl;
-        for (i = 0; i < sgt->orig_nents; ++i) {
-                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
-                rd = sg_next(rd);
-                wr = sg_next(wr);
-        }
-
-        nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
-        if (!nents) {
-                DRM_ERROR("failed to map sgl with iommu.\n");
-                sg_free_table(sgt);
-                sgt = ERR_PTR(-EIO);
-                goto done;
-        }
-
-        cma_attach->dir = dir;
-        attach->priv = cma_attach;
-
-        DRM_DEBUG_PRIME("buffer size = %zu\n", cma_obj->base.size);
-
-done:
-        mutex_unlock(&drm->struct_mutex);
-        return sgt;
-}
-
-static void drm_gem_cma_dmabuf_unmap(struct dma_buf_attachment *attach,
-                                     struct sg_table *sgt,
-                                     enum dma_data_direction dir)
-{
-        /* Nothing to do. */
-}
-
-static void drm_gem_cma_dmabuf_release(struct dma_buf *dmabuf)
-{
-        struct drm_gem_cma_object *cma_obj = dmabuf->priv;
-
-        DRM_DEBUG_PRIME("%s\n", __FILE__);
-
-        /*
-         * drm_gem_cma_dmabuf_release() call means that file object's
-         * f_count is 0 and it calls drm_gem_object_handle_unreference()
-         * to drop the references that these values had been increased
-         * at drm_prime_handle_to_fd()
-         */
-        if (cma_obj->base.export_dma_buf == dmabuf) {
-                cma_obj->base.export_dma_buf = NULL;
-
-                /*
-                 * drop this gem object refcount to release allocated buffer
-                 * and resources.
-                 */
-                drm_gem_object_unreference_unlocked(&cma_obj->base);
-        }
-}
-
-static void *drm_gem_cma_dmabuf_kmap_atomic(struct dma_buf *dmabuf,
-                                            unsigned long page_num)
-{
-        /* TODO */
-
-        return NULL;
-}
-
-static void drm_gem_cma_dmabuf_kunmap_atomic(struct dma_buf *dmabuf,
-                                             unsigned long page_num, void *addr)
-{
-        /* TODO */
-}
-
-static void *drm_gem_cma_dmabuf_kmap(struct dma_buf *dmabuf,
-                                     unsigned long page_num)
-{
-        /* TODO */
-
-        return NULL;
-}
-
-static void drm_gem_cma_dmabuf_kunmap(struct dma_buf *dmabuf,
-                                      unsigned long page_num, void *addr)
-{
-        /* TODO */
-}
-
-static int drm_gem_cma_dmabuf_mmap(struct dma_buf *dmabuf,
-                                   struct vm_area_struct *vma)
-{
-        struct drm_gem_cma_object *cma_obj = dmabuf->priv;
-        struct drm_gem_object *gem_obj = &cma_obj->base;
-        struct drm_device *dev = gem_obj->dev;
-        int ret;
-
-        mutex_lock(&dev->struct_mutex);
-        ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
-        mutex_unlock(&dev->struct_mutex);
-        if (ret < 0)
-                return ret;
-
-        return drm_gem_cma_mmap_obj(cma_obj, vma);
-}
-
-static void *drm_gem_cma_dmabuf_vmap(struct dma_buf *dmabuf)
-{
-        struct drm_gem_cma_object *cma_obj = dmabuf->priv;
-
-        return cma_obj->vaddr;
-}
-
-static struct dma_buf_ops drm_gem_cma_dmabuf_ops = {
-        .attach         = drm_gem_cma_dmabuf_attach,
-        .detach         = drm_gem_cma_dmabuf_detach,
-        .map_dma_buf    = drm_gem_cma_dmabuf_map,
-        .unmap_dma_buf  = drm_gem_cma_dmabuf_unmap,
-        .kmap           = drm_gem_cma_dmabuf_kmap,
-        .kmap_atomic    = drm_gem_cma_dmabuf_kmap_atomic,
-        .kunmap         = drm_gem_cma_dmabuf_kunmap,
-        .kunmap_atomic  = drm_gem_cma_dmabuf_kunmap_atomic,
-        .mmap           = drm_gem_cma_dmabuf_mmap,
-        .vmap           = drm_gem_cma_dmabuf_vmap,
-        .release        = drm_gem_cma_dmabuf_release,
-};
-
-struct dma_buf *drm_gem_cma_dmabuf_export(struct drm_device *drm,
-                                          struct drm_gem_object *obj, int flags)
-{
-        struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
-
-        return dma_buf_export(cma_obj, &drm_gem_cma_dmabuf_ops,
-                              cma_obj->base.size, flags);
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_export);
-
-struct drm_gem_object *drm_gem_cma_dmabuf_import(struct drm_device *drm,
-                                                 struct dma_buf *dma_buf)
-{
-        struct drm_gem_cma_object *cma_obj;
-        struct dma_buf_attachment *attach;
-        struct sg_table *sgt;
-        int ret;
-
-        DRM_DEBUG_PRIME("%s\n", __FILE__);
-
-        /* is this one of own objects? */
-        if (dma_buf->ops == &drm_gem_cma_dmabuf_ops) {
-                struct drm_gem_object *obj;
-
-                cma_obj = dma_buf->priv;
-                obj = &cma_obj->base;
-
-                /* is it from our device? */
-                if (obj->dev == drm) {
-                        /*
-                         * Importing dmabuf exported from out own gem increases
-                         * refcount on gem itself instead of f_count of dmabuf.
-                         */
-                        drm_gem_object_reference(obj);
-                        dma_buf_put(dma_buf);
-                        return obj;
-                }
-        }
-
-        /* Create a CMA GEM buffer. */
-        cma_obj = __drm_gem_cma_create(drm, dma_buf->size);
-        if (IS_ERR(cma_obj))
-                return ERR_PTR(PTR_ERR(cma_obj));
-
-        /* Attach to the buffer and map it. Make sure the mapping is contiguous
-         * on the device memory bus, as that's all we support.
-         */
-        attach = dma_buf_attach(dma_buf, drm->dev);
-        if (IS_ERR(attach)) {
-                ret = -EINVAL;
-                goto error_gem_free;
-        }
-
-        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
-        if (IS_ERR_OR_NULL(sgt)) {
-                ret = sgt ? PTR_ERR(sgt) : -ENOMEM;
-                goto error_buf_detach;
-        }
-
-        if (sgt->nents != 1) {
-                ret = -EINVAL;
-                goto error_buf_unmap;
-        }
-
-        cma_obj->base.import_attach = attach;
-        cma_obj->paddr = sg_dma_address(sgt->sgl);
-        cma_obj->sgt = sgt;
-
-        DRM_DEBUG_PRIME("dma_addr = 0x%x, size = %zu\n", cma_obj->paddr,
-                        dma_buf->size);
-
-        return &cma_obj->base;
-
-error_buf_unmap:
-        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
-error_buf_detach:
-        dma_buf_detach(dma_buf, attach);
-error_gem_free:
-        drm_gem_cma_free_object(&cma_obj->base);
-        return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_import);
-
 /* low-level interface prime helpers */
 struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
 {