aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorThomas Hellstrom <thellstrom@vmware.com>2013-11-08 05:30:50 -0500
committerThomas Hellstrom <thellstrom@vmware.com>2013-11-18 07:12:24 -0500
commitc486d4f894d7c7d0e4148426360aa354384f6dc8 (patch)
tree1804cffda7110bbd58bcd22561544c1b8fcebdd5
parent79e5f810032cd166bc71580ca01401ff212688ed (diff)
drm/vmwgfx: Make vmwgfx dma buffers prime aware
Should we need to share dma buffers using prime, let's make them prime aware.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c45
1 file changed, 25 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 83be7093c715..efe2b74c5eb1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -35,7 +35,7 @@
35#define VMW_RES_EVICT_ERR_COUNT 10 35#define VMW_RES_EVICT_ERR_COUNT 10
36 36
37struct vmw_user_dma_buffer { 37struct vmw_user_dma_buffer {
38 struct ttm_base_object base; 38 struct ttm_prime_object prime;
39 struct vmw_dma_buffer dma; 39 struct vmw_dma_buffer dma;
40}; 40};
41 41
@@ -387,7 +387,7 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
387{ 387{
388 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); 388 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
389 389
390 ttm_base_object_kfree(vmw_user_bo, base); 390 ttm_prime_object_kfree(vmw_user_bo, prime);
391} 391}
392 392
393static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) 393static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
@@ -401,7 +401,8 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
401 if (unlikely(base == NULL)) 401 if (unlikely(base == NULL))
402 return; 402 return;
403 403
404 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base); 404 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
405 prime.base);
405 bo = &vmw_user_bo->dma.base; 406 bo = &vmw_user_bo->dma.base;
406 ttm_bo_unref(&bo); 407 ttm_bo_unref(&bo);
407} 408}
@@ -442,18 +443,19 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
442 return ret; 443 return ret;
443 444
444 tmp = ttm_bo_reference(&user_bo->dma.base); 445 tmp = ttm_bo_reference(&user_bo->dma.base);
445 ret = ttm_base_object_init(tfile, 446 ret = ttm_prime_object_init(tfile,
446 &user_bo->base, 447 size,
447 shareable, 448 &user_bo->prime,
448 ttm_buffer_type, 449 shareable,
449 &vmw_user_dmabuf_release, NULL); 450 ttm_buffer_type,
451 &vmw_user_dmabuf_release, NULL);
450 if (unlikely(ret != 0)) { 452 if (unlikely(ret != 0)) {
451 ttm_bo_unref(&tmp); 453 ttm_bo_unref(&tmp);
452 goto out_no_base_object; 454 goto out_no_base_object;
453 } 455 }
454 456
455 *p_dma_buf = &user_bo->dma; 457 *p_dma_buf = &user_bo->dma;
456 *handle = user_bo->base.hash.key; 458 *handle = user_bo->prime.base.hash.key;
457 459
458out_no_base_object: 460out_no_base_object:
459 return ret; 461 return ret;
@@ -475,8 +477,8 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
475 return -EPERM; 477 return -EPERM;
476 478
477 vmw_user_bo = vmw_user_dma_buffer(bo); 479 vmw_user_bo = vmw_user_dma_buffer(bo);
478 return (vmw_user_bo->base.tfile == tfile || 480 return (vmw_user_bo->prime.base.tfile == tfile ||
479 vmw_user_bo->base.shareable) ? 0 : -EPERM; 481 vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
480} 482}
481 483
482int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, 484int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
@@ -538,14 +540,15 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
538 return -ESRCH; 540 return -ESRCH;
539 } 541 }
540 542
541 if (unlikely(base->object_type != ttm_buffer_type)) { 543 if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
542 ttm_base_object_unref(&base); 544 ttm_base_object_unref(&base);
543 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n", 545 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
544 (unsigned long)handle); 546 (unsigned long)handle);
545 return -EINVAL; 547 return -EINVAL;
546 } 548 }
547 549
548 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base); 550 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
551 prime.base);
549 (void)ttm_bo_reference(&vmw_user_bo->dma.base); 552 (void)ttm_bo_reference(&vmw_user_bo->dma.base);
550 ttm_base_object_unref(&base); 553 ttm_base_object_unref(&base);
551 *out = &vmw_user_bo->dma; 554 *out = &vmw_user_bo->dma;
@@ -562,7 +565,8 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
562 return -EINVAL; 565 return -EINVAL;
563 566
564 user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma); 567 user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
565 return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL); 568 return ttm_ref_object_add(tfile, &user_bo->prime.base,
569 TTM_REF_USAGE, NULL);
566} 570}
567 571
568/* 572/*
@@ -807,15 +811,16 @@ int vmw_dumb_create(struct drm_file *file_priv,
807 goto out_no_dmabuf; 811 goto out_no_dmabuf;
808 812
809 tmp = ttm_bo_reference(&vmw_user_bo->dma.base); 813 tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
810 ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile, 814 ret = ttm_prime_object_init(vmw_fpriv(file_priv)->tfile,
811 &vmw_user_bo->base, 815 args->size,
812 false, 816 &vmw_user_bo->prime,
813 ttm_buffer_type, 817 false,
814 &vmw_user_dmabuf_release, NULL); 818 ttm_buffer_type,
819 &vmw_user_dmabuf_release, NULL);
815 if (unlikely(ret != 0)) 820 if (unlikely(ret != 0))
816 goto out_no_base_object; 821 goto out_no_base_object;
817 822
818 args->handle = vmw_user_bo->base.hash.key; 823 args->handle = vmw_user_bo->prime.base.hash.key;
819 824
820out_no_base_object: 825out_no_base_object:
821 ttm_bo_unref(&tmp); 826 ttm_bo_unref(&tmp);