Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_resource.c')
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_resource.c	| 50
1 file changed, 24 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a432c0db257c..026de7cea0f6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -133,6 +133,7 @@ static void vmw_resource_release(struct kref *kref)
 			struct ttm_validate_buffer val_buf;
 
 			val_buf.bo = bo;
+			val_buf.shared = false;
 			res->func->unbind(res, false, &val_buf);
 		}
 		res->backup_dirty = false;
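
Note: the val_buf.shared = false assignments added throughout this patch initialize a new `shared` member of struct ttm_validate_buffer, which selects whether the execbuf utils attach a shared (read) fence or an exclusive (write) fence to the reserved buffer; false preserves the old exclusive-fence behaviour. A minimal sketch of the structure as it presumably looks after this series (field order assumed, not verified against the exact tree):

/* include/drm/ttm/ttm_execbuf_util.h -- sketch, post-series layout assumed */
struct ttm_validate_buffer {
	struct list_head head;		/* list entry consumed by ttm_eu_reserve_buffers() */
	struct ttm_buffer_object *bo;	/* buffer to reserve/validate/fence */
	bool shared;			/* true: add a shared fence; false: exclusive fence */
};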
@@ -429,7 +430,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
 	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
 			  ttm_bo_type_device, placement,
 			  0, interruptible,
-			  NULL, acc_size, NULL, bo_free);
+			  NULL, acc_size, NULL, NULL, bo_free);
 	return ret;
 }
 
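
Note: the extra NULL inserted before bo_free lines up with a reservation-object parameter that ttm_bo_init() gains in the same series; passing NULL keeps TTM allocating a reservation object of its own for the buffer. Prototype sketch, with the post-series argument order assumed:

/* include/drm/ttm/ttm_bo_api.h -- sketch, argument order assumed */
int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
		unsigned long size, enum ttm_bo_type type,
		struct ttm_placement *placement, uint32_t page_alignment,
		bool interruptible, struct file *persistent_swap_storage,
		size_t acc_size, struct sg_table *sg,
		struct reservation_object *resv,	/* new; NULL = allocate internally */
		void (*destroy)(struct ttm_buffer_object *));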
@@ -567,13 +568,18 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
 	int ret;
 
 	if (flags & drm_vmw_synccpu_allow_cs) {
-		struct ttm_bo_device *bdev = bo->bdev;
+		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
+		long lret;
 
-		spin_lock(&bdev->fence_lock);
-		ret = ttm_bo_wait(bo, false, true,
-				  !!(flags & drm_vmw_synccpu_dontblock));
-		spin_unlock(&bdev->fence_lock);
-		return ret;
+		if (nonblock)
+			return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY;
+
+		lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT);
+		if (!lret)
+			return -EBUSY;
+		else if (lret < 0)
+			return lret;
+		return 0;
 	}
 
 	ret = ttm_bo_synccpu_write_grab
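
Note: the allow_cs path now queries the buffer's reservation object directly instead of taking bdev->fence_lock and calling ttm_bo_wait(). A self-contained sketch of the same pattern (the wrapper function and its name are illustrative, not part of the patch):

#include <linux/sched.h>		/* MAX_SCHEDULE_TIMEOUT */
#include <linux/reservation.h>
#include <drm/ttm/ttm_bo_api.h>

/* Wait for every fence on bo->resv, mirroring the drm_vmw_synccpu_allow_cs branch. */
static int example_wait_bo_idle(struct ttm_buffer_object *bo, bool nonblock)
{
	long lret;

	if (nonblock)	/* poll only: -EBUSY if any fence is still unsignaled */
		return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY;

	/* wait_all = true, interruptible = true; returns < 0 on error,
	 * 0 on timeout, otherwise the remaining timeout in jiffies. */
	lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
						   MAX_SCHEDULE_TIMEOUT);
	if (!lret)
		return -EBUSY;
	if (lret < 0)
		return lret;
	return 0;
}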
@@ -1214,8 +1220,9 @@ vmw_resource_check_buffer(struct vmw_resource *res,
 
 	INIT_LIST_HEAD(&val_list);
 	val_buf->bo = ttm_bo_reference(&res->backup->base);
+	val_buf->shared = false;
 	list_add_tail(&val_buf->head, &val_list);
-	ret = ttm_eu_reserve_buffers(NULL, &val_list);
+	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible);
 	if (unlikely(ret != 0))
 		goto out_no_reserve;
 
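
Note: ttm_eu_reserve_buffers() now takes the interruptible flag from its caller rather than always reserving uninterruptibly. Prototype sketch, assuming the three-argument form used at this point in the series:

/* include/drm/ttm/ttm_execbuf_util.h -- sketch */
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,	/* NULL: no ww ticket */
			   struct list_head *list,		/* ttm_validate_buffer entries */
			   bool intr);				/* new: interruptible waits */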
@@ -1307,6 +1314,7 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
 	BUG_ON(!func->may_evict);
 
 	val_buf.bo = NULL;
+	val_buf.shared = false;
 	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
 	if (unlikely(ret != 0))
 		return ret;
@@ -1352,6 +1360,7 @@ int vmw_resource_validate(struct vmw_resource *res)
 		return 0;
 
 	val_buf.bo = NULL;
+	val_buf.shared = false;
 	if (res->backup)
 		val_buf.bo = &res->backup->base;
 	do {
@@ -1419,25 +1428,16 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
 			 struct vmw_fence_obj *fence)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_driver *driver = bdev->driver;
-	struct vmw_fence_obj *old_fence_obj;
+
 	struct vmw_private *dev_priv =
 		container_of(bdev, struct vmw_private, bdev);
 
-	if (fence == NULL)
+	if (fence == NULL) {
 		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
-	else
-		driver->sync_obj_ref(fence);
-
-	spin_lock(&bdev->fence_lock);
-
-	old_fence_obj = bo->sync_obj;
-	bo->sync_obj = fence;
-
-	spin_unlock(&bdev->fence_lock);
-
-	if (old_fence_obj)
-		vmw_fence_obj_unreference(&old_fence_obj);
+		reservation_object_add_excl_fence(bo->resv, &fence->base);
+		fence_put(&fence->base);
+	} else
+		reservation_object_add_excl_fence(bo->resv, &fence->base);
 }
 
 /**
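
Note: rather than swapping bo->sync_obj under bdev->fence_lock, the fence is now attached as the exclusive fence of the buffer's reservation object. reservation_object_add_excl_fence() takes its own reference, which is why the locally created fence is released with fence_put() while a caller-supplied fence is left untouched. A reduced restatement of the two branches:

/* fence created here: attach it, then drop the local reference */
vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
reservation_object_add_excl_fence(bo->resv, &fence->base);
fence_put(&fence->base);

/* fence owned by the caller: attach only, the caller keeps its reference */
reservation_object_add_excl_fence(bo->resv, &fence->base);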
@@ -1475,10 +1475,10 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
 
 	if (mem->mem_type != VMW_PL_MOB) {
 		struct vmw_resource *res, *n;
-		struct ttm_bo_device *bdev = bo->bdev;
 		struct ttm_validate_buffer val_buf;
 
 		val_buf.bo = bo;
+		val_buf.shared = false;
 
 		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
 
@@ -1491,9 +1491,7 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
 			list_del_init(&res->mob_head);
 		}
 
-		spin_lock(&bdev->fence_lock);
 		(void) ttm_bo_wait(bo, false, false, false);
-		spin_unlock(&bdev->fence_lock);
 	}
 }
 
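
Note: with the fences tracked in the reservation object, ttm_bo_wait() no longer needs bdev->fence_lock; it only expects the buffer to be reserved by the caller. Era prototype for reference (parameter names assumed):

/* include/drm/ttm/ttm_bo_api.h -- sketch */
int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait);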