Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_resource.c')
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_resource.c	209
1 file changed, 203 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 9b5ea2ac7ddf..9757b57f8388 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -88,6 +88,11 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
 	return res;
 }
 
+struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res)
+{
+	return kref_get_unless_zero(&res->kref) ? res : NULL;
+}
 
 /**
  * vmw_resource_release_id - release a resource id to the id manager.
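Note on the hunk above: the new helper only takes a reference when the refcount has not already dropped to zero, which is what a lookup path needs if it can race with vmw_resource_release(). A hypothetical caller sketch, not part of this patch (the table, its lock and the find helper are illustrative placeholders):

/* Hypothetical sketch, not part of this patch. */
struct example_table {
	spinlock_t lock;
	/* ...some key -> struct vmw_resource mapping... */
};

static struct vmw_resource *
example_lookup_resource(struct example_table *tbl, u32 key)
{
	struct vmw_resource *res;

	spin_lock(&tbl->lock);
	res = example_table_find(tbl, key);	/* placeholder lookup */
	if (res != NULL)
		/* Returns NULL if the final kref_put() already ran, so a
		 * half-destroyed resource is never handed out. */
		res = vmw_resource_reference_unless_doomed(res);
	spin_unlock(&tbl->lock);

	return res;
}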
@@ -136,8 +141,12 @@ static void vmw_resource_release(struct kref *kref)
 		vmw_dmabuf_unreference(&res->backup);
 	}
 
-	if (likely(res->hw_destroy != NULL))
+	if (likely(res->hw_destroy != NULL)) {
 		res->hw_destroy(res);
+		mutex_lock(&dev_priv->binding_mutex);
+		vmw_context_binding_res_list_kill(&res->binding_head);
+		mutex_unlock(&dev_priv->binding_mutex);
+	}
 
 	id = res->id;
 	if (res->res_free != NULL)
@@ -215,6 +224,7 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
 	res->func = func;
 	INIT_LIST_HEAD(&res->lru_head);
 	INIT_LIST_HEAD(&res->mob_head);
+	INIT_LIST_HEAD(&res->binding_head);
 	res->id = -1;
 	res->backup = NULL;
 	res->backup_offset = 0;
@@ -417,8 +427,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
 	INIT_LIST_HEAD(&vmw_bo->res_list);
 
 	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
-			  (user) ? ttm_bo_type_device :
-			  ttm_bo_type_kernel, placement,
+			  ttm_bo_type_device, placement,
 			  0, interruptible,
 			  NULL, acc_size, NULL, bo_free);
 	return ret;
@@ -441,6 +450,21 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
 	ttm_bo_unref(&bo);
 }
 
+static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
+					    enum ttm_ref_type ref_type)
+{
+	struct vmw_user_dma_buffer *user_bo;
+	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
+
+	switch (ref_type) {
+	case TTM_REF_SYNCCPU_WRITE:
+		ttm_bo_synccpu_write_release(&user_bo->dma.base);
+		break;
+	default:
+		BUG();
+	}
+}
+
 /**
  * vmw_user_dmabuf_alloc - Allocate a user dma buffer
  *
@@ -471,6 +495,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 	}
 
 	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
+			      (dev_priv->has_mob) ?
+			      &vmw_sys_placement :
 			      &vmw_vram_sys_placement, true,
 			      &vmw_user_dmabuf_destroy);
 	if (unlikely(ret != 0))
@@ -482,7 +508,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 				    &user_bo->prime,
 				    shareable,
 				    ttm_buffer_type,
-				    &vmw_user_dmabuf_release, NULL);
+				    &vmw_user_dmabuf_release,
+				    &vmw_user_dmabuf_ref_obj_release);
 	if (unlikely(ret != 0)) {
 		ttm_bo_unref(&tmp);
 		goto out_no_base_object;
@@ -515,6 +542,130 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
 		vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
 }
 
+/**
+ * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
+ * access, idling previous GPU operations on the buffer and optionally
+ * blocking it for further command submissions.
+ *
+ * @user_bo: Pointer to the buffer object being grabbed for CPU access
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating how the grab should be performed.
+ *
+ * A blocking grab will be automatically released when @tfile is closed.
+ */
+static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
+					struct ttm_object_file *tfile,
+					uint32_t flags)
+{
+	struct ttm_buffer_object *bo = &user_bo->dma.base;
+	bool existed;
+	int ret;
+
+	if (flags & drm_vmw_synccpu_allow_cs) {
+		struct ttm_bo_device *bdev = bo->bdev;
+
+		spin_lock(&bdev->fence_lock);
+		ret = ttm_bo_wait(bo, false, true,
+				  !!(flags & drm_vmw_synccpu_dontblock));
+		spin_unlock(&bdev->fence_lock);
+		return ret;
+	}
+
+	ret = ttm_bo_synccpu_write_grab
+		(bo, !!(flags & drm_vmw_synccpu_dontblock));
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
+				 TTM_REF_SYNCCPU_WRITE, &existed);
+	if (ret != 0 || existed)
+		ttm_bo_synccpu_write_release(&user_bo->dma.base);
+
+	return ret;
+}
+
+/**
+ * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
+ * and unblock command submission on the buffer if blocked.
+ *
+ * @handle: Handle identifying the buffer object.
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating the type of release.
+ */
+static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
+					   struct ttm_object_file *tfile,
+					   uint32_t flags)
+{
+	if (!(flags & drm_vmw_synccpu_allow_cs))
+		return ttm_ref_object_base_unref(tfile, handle,
+						 TTM_REF_SYNCCPU_WRITE);
+
+	return 0;
+}
+
+/**
+ * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
+ * functionality.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ *
+ * This function checks the ioctl arguments for validity and calls the
+ * relevant synccpu functions.
+ */
+int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv)
+{
+	struct drm_vmw_synccpu_arg *arg =
+		(struct drm_vmw_synccpu_arg *) data;
+	struct vmw_dma_buffer *dma_buf;
+	struct vmw_user_dma_buffer *user_bo;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	int ret;
+
+	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
+	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
+			       drm_vmw_synccpu_dontblock |
+			       drm_vmw_synccpu_allow_cs)) != 0) {
+		DRM_ERROR("Illegal synccpu flags.\n");
+		return -EINVAL;
+	}
+
+	switch (arg->op) {
+	case drm_vmw_synccpu_grab:
+		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
+		if (unlikely(ret != 0))
+			return ret;
+
+		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
+				       dma);
+		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
+		vmw_dmabuf_unreference(&dma_buf);
+		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
+			     ret != -EBUSY)) {
+			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
+				  (unsigned int) arg->handle);
+			return ret;
+		}
+		break;
+	case drm_vmw_synccpu_release:
+		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
+						      arg->flags);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
+				  (unsigned int) arg->handle);
+			return ret;
+		}
+		break;
+	default:
+		DRM_ERROR("Invalid synccpu operation.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv)
 {
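Note on the hunk above: the grab and release paths bracket a window for CPU access to a dma buffer. A grab without drm_vmw_synccpu_allow_cs idles the GPU on the buffer and blocks further command submission referencing it, while an allow_cs grab only waits for idle. A rough user-space usage sketch, assuming the companion vmwgfx_drm.h uapi change (DRM_VMW_SYNCCPU, struct drm_vmw_synccpu_arg and the drm_vmw_synccpu_* enums, none of which are shown in this diff) and libdrm's drmCommandWrite():

/* Hypothetical user-space sketch, not part of this patch. Assumes the
 * DRM_VMW_SYNCCPU command and struct drm_vmw_synccpu_arg from the
 * companion vmwgfx_drm.h change. Error handling trimmed. */
#include <string.h>
#include <stdint.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

static int example_cpu_write(int fd, uint32_t handle,
			     void *map, const void *src, size_t size)
{
	struct drm_vmw_synccpu_arg arg;
	int ret;

	/* Grab: idle the GPU on the buffer and block further command
	 * submission referencing it (no allow_cs flag). */
	memset(&arg, 0, sizeof(arg));
	arg.op = drm_vmw_synccpu_grab;
	arg.handle = handle;
	arg.flags = drm_vmw_synccpu_write;
	ret = drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
	if (ret != 0)
		return ret;

	memcpy(map, src, size);		/* CPU access window */

	/* Release: unblock command submission on the buffer again. */
	memset(&arg, 0, sizeof(arg));
	arg.op = drm_vmw_synccpu_release;
	arg.handle = handle;
	arg.flags = drm_vmw_synccpu_write;
	return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
}

A blocking grab that user-space forgets to release is cleaned up when the file is closed, via the TTM_REF_SYNCCPU_WRITE reference and the vmw_user_dmabuf_ref_obj_release() callback added earlier in this patch.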
@@ -591,7 +742,8 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 }
 
 int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
-			      struct vmw_dma_buffer *dma_buf)
+			      struct vmw_dma_buffer *dma_buf,
+			      uint32_t *handle)
 {
 	struct vmw_user_dma_buffer *user_bo;
 
@@ -599,6 +751,8 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
 		return -EINVAL;
 
 	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
+
+	*handle = user_bo->prime.base.hash.key;
 	return ttm_ref_object_add(tfile, &user_bo->prime.base,
 				  TTM_REF_USAGE, NULL);
 }
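Note on the hunk above: with the new handle output parameter, a caller that hands an existing buffer back to user-space gets the usage reference and the object handle in one call instead of needing a separate lookup. A hypothetical caller sketch, not part of this patch (the helper and the __user pointer are illustrative only):

/* Hypothetical caller sketch, not part of this patch. */
static int example_ref_and_report_handle(struct ttm_object_file *tfile,
					 struct vmw_dma_buffer *dma_buf,
					 uint32_t __user *out_handle)
{
	uint32_t handle;
	int ret;

	/* Takes a TTM_REF_USAGE reference and reports the object handle. */
	ret = vmw_user_dmabuf_reference(tfile, dma_buf, &handle);
	if (unlikely(ret != 0))
		return ret;

	/* put_user() returns 0 on success, -EFAULT otherwise. */
	return put_user(handle, out_handle);
}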
@@ -1291,11 +1445,54 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
  * @mem: The struct ttm_mem_reg indicating to what memory
  * region the move is taking place.
  *
- * For now does nothing.
+ * Evicts the Guest Backed hardware resource if the backup
+ * buffer is being moved out of MOB memory.
+ * Note that this function should not race with the resource
+ * validation code as long as it accesses only members of struct
+ * resource that remain static while bo::res is !NULL and
+ * while we have @bo reserved. struct resource::backup is *not* a
+ * static member. The resource validation code will take care
+ * to set @bo::res to NULL, while having @bo reserved when the
+ * buffer is no longer bound to the resource, so @bo::res can be
+ * used to determine whether there is a need to unbind and whether
+ * it is safe to unbind.
  */
 void vmw_resource_move_notify(struct ttm_buffer_object *bo,
 			      struct ttm_mem_reg *mem)
 {
+	struct vmw_dma_buffer *dma_buf;
+
+	if (mem == NULL)
+		return;
+
+	if (bo->destroy != vmw_dmabuf_bo_free &&
+	    bo->destroy != vmw_user_dmabuf_destroy)
+		return;
+
+	dma_buf = container_of(bo, struct vmw_dma_buffer, base);
+
+	if (mem->mem_type != VMW_PL_MOB) {
+		struct vmw_resource *res, *n;
+		struct ttm_bo_device *bdev = bo->bdev;
+		struct ttm_validate_buffer val_buf;
+
+		val_buf.bo = bo;
+
+		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
+
+			if (unlikely(res->func->unbind == NULL))
+				continue;
+
+			(void) res->func->unbind(res, true, &val_buf);
+			res->backup_dirty = true;
+			res->res_dirty = false;
+			list_del_init(&res->mob_head);
+		}
+
+		spin_lock(&bdev->fence_lock);
+		(void) ttm_bo_wait(bo, false, false, false);
+		spin_unlock(&bdev->fence_lock);
+	}
 }
 
 /**
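Note on the hunk above: the unbind loop in vmw_resource_move_notify() relies on res->mob_head to tell whether a resource is still bound to the buffer being moved out of MOB memory; both this loop and the validation code it must not race with only test or clear that link while the backup buffer is reserved. A small hypothetical helper expressing that invariant (illustrative only, not part of this patch):

/* Hypothetical helper, not part of this patch: a resource counts as
 * bound to its backup MOB exactly while it is linked into the backup
 * buffer's res_list via res->mob_head. */
static bool example_res_bound_to_mob(const struct vmw_resource *res)
{
	return !list_empty(&res->mob_head);
}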