author	Thomas Hellstrom <thellstrom@vmware.com>	2018-06-19 09:02:16 -0400
committer	Thomas Hellstrom <thellstrom@vmware.com>	2018-07-03 14:33:30 -0400
commit	f1d34bfd70b1b4543a139ea28bad4c001c5f413d (patch)
tree	0d3fb3ee166a2d81f4f7e7e2338dd3c625929554 /drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
parent	07c13bb78c8b8a9cb6ee169659528945038d5e85 (diff)
drm/vmwgfx: Replace vmw_dma_buffer with vmw_buffer_object
Initially vmware buffer objects were only used as DMA buffers, so the name DMA buffer was a natural one. However, they are now also used as dumb buffers and as MOBs backing guest-backed objects, so renaming them to buffer objects is logical, particularly since there is a dma-buf subsystem in the kernel where a DMA buffer means something completely different.

This also renames the corresponding user-space API structures and IOCTL names, but the old names remain defined for now and the ABI hasn't changed.

There are a couple of minor style changes to make checkpatch happy.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com>
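The rename is mechanical: every old identifier maps one-for-one onto a new one. As a quick orientation, here is a minimal before/after sketch of a typical lookup/release sequence (a hypothetical caller; tfile and handle stand in for whatever the real call sites pass, and only renames actually visible in this patch are used):

	/* Before this patch (hypothetical caller): */
	struct vmw_dma_buffer *bo = NULL;
	int ret = vmw_user_dmabuf_lookup(tfile, handle, &bo, NULL);
	if (ret == 0)
		vmw_dmabuf_unreference(&bo);

	/* After this patch, the same sequence reads: */
	struct vmw_buffer_object *bo = NULL;
	int ret = vmw_user_bo_lookup(tfile, handle, &bo, NULL);
	if (ret == 0)
		vmw_bo_unreference(&bo);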
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c')
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 86 ++++++++++++++++----------------
 1 file changed, 42 insertions(+), 44 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index c9d5cc237124..a8b194655c40 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -92,7 +92,7 @@ struct vmw_resource_val_node {
 	struct list_head head;
 	struct drm_hash_item hash;
 	struct vmw_resource *res;
-	struct vmw_dma_buffer *new_backup;
+	struct vmw_buffer_object *new_backup;
 	struct vmw_ctx_binding_state *staged_bindings;
 	unsigned long new_backup_offset;
 	u32 first_usage : 1;
@@ -126,9 +126,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 				 struct vmw_sw_context *sw_context,
 				 SVGAMobId *id,
-				 struct vmw_dma_buffer **vmw_bo_p);
+				 struct vmw_buffer_object **vmw_bo_p);
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-				   struct vmw_dma_buffer *vbo,
+				   struct vmw_buffer_object *vbo,
 				   bool validate_as_mob,
 				   uint32_t *p_val_node);
 /**
@@ -185,7 +185,7 @@ static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
 		}
 		vmw_resource_unreserve(res, switch_backup, val->new_backup,
 				       val->new_backup_offset);
-		vmw_dmabuf_unreference(&val->new_backup);
+		vmw_bo_unreference(&val->new_backup);
 	}
 }
 
@@ -423,7 +423,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 	}
 
 	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
-		struct vmw_dma_buffer *dx_query_mob;
+		struct vmw_buffer_object *dx_query_mob;
 
 		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
 		if (dx_query_mob)
@@ -544,7 +544,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
  * submission is reached.
  */
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-				   struct vmw_dma_buffer *vbo,
+				   struct vmw_buffer_object *vbo,
 				   bool validate_as_mob,
 				   uint32_t *p_val_node)
 {
@@ -616,7 +616,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 			return ret;
 
 		if (res->backup) {
-			struct vmw_dma_buffer *vbo = res->backup;
+			struct vmw_buffer_object *vbo = res->backup;
 
 			ret = vmw_bo_to_validate_list
 				(sw_context, vbo,
@@ -628,7 +628,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 	}
 
 	if (sw_context->dx_query_mob) {
-		struct vmw_dma_buffer *expected_dx_query_mob;
+		struct vmw_buffer_object *expected_dx_query_mob;
 
 		expected_dx_query_mob =
 			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
@@ -657,7 +657,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 
 	list_for_each_entry(val, &sw_context->resource_list, head) {
 		struct vmw_resource *res = val->res;
-		struct vmw_dma_buffer *backup = res->backup;
+		struct vmw_buffer_object *backup = res->backup;
 
 		ret = vmw_resource_validate(res);
 		if (unlikely(ret != 0)) {
@@ -668,7 +668,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 
 		/* Check if the resource switched backup buffer */
 		if (backup && res->backup && (backup != res->backup)) {
-			struct vmw_dma_buffer *vbo = res->backup;
+			struct vmw_buffer_object *vbo = res->backup;
 
 			ret = vmw_bo_to_validate_list
 				(sw_context, vbo,
@@ -821,7 +821,7 @@ out_no_reloc:
 static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
 {
 	struct vmw_private *dev_priv = ctx_res->dev_priv;
-	struct vmw_dma_buffer *dx_query_mob;
+	struct vmw_buffer_object *dx_query_mob;
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDXBindAllQuery body;
@@ -1152,7 +1152,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
  * command batch.
  */
 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
-				       struct vmw_dma_buffer *new_query_bo,
+				       struct vmw_buffer_object *new_query_bo,
 				       struct vmw_sw_context *sw_context)
 {
 	struct vmw_res_cache_entry *ctx_entry =
@@ -1234,7 +1234,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
 		if (dev_priv->pinned_bo) {
 			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
-			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+			vmw_bo_unreference(&dev_priv->pinned_bo);
 		}
 
 		if (!sw_context->needs_post_query_barrier) {
@@ -1256,7 +1256,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 			dev_priv->query_cid = sw_context->last_query_ctx->id;
 			dev_priv->query_cid_valid = true;
 			dev_priv->pinned_bo =
-				vmw_dmabuf_reference(sw_context->cur_query_bo);
+				vmw_bo_reference(sw_context->cur_query_bo);
 		}
 	}
 }
@@ -1282,15 +1282,14 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 				 struct vmw_sw_context *sw_context,
 				 SVGAMobId *id,
-				 struct vmw_dma_buffer **vmw_bo_p)
+				 struct vmw_buffer_object **vmw_bo_p)
 {
-	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct vmw_buffer_object *vmw_bo = NULL;
 	uint32_t handle = *id;
 	struct vmw_relocation *reloc;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
-				     NULL);
+	ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use MOB buffer.\n");
 		ret = -EINVAL;
@@ -1316,7 +1315,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 	return 0;
 
 out_no_reloc:
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	*vmw_bo_p = NULL;
 	return ret;
 }
@@ -1343,15 +1342,14 @@ out_no_reloc:
 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 				   struct vmw_sw_context *sw_context,
 				   SVGAGuestPtr *ptr,
-				   struct vmw_dma_buffer **vmw_bo_p)
+				   struct vmw_buffer_object **vmw_bo_p)
 {
-	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct vmw_buffer_object *vmw_bo = NULL;
 	uint32_t handle = ptr->gmrId;
 	struct vmw_relocation *reloc;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
-				     NULL);
+	ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use GMR region.\n");
 		ret = -EINVAL;
@@ -1376,7 +1374,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	return 0;
 
 out_no_reloc:
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	*vmw_bo_p = NULL;
 	return ret;
 }
@@ -1447,7 +1445,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
 		SVGA3dCmdDXBindQuery q;
 	} *cmd;
 
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	int ret;
 
 
@@ -1466,7 +1464,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
 	sw_context->dx_query_mob = vmw_bo;
 	sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
 
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 
 	return ret;
 }
@@ -1549,7 +1547,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
 				struct vmw_sw_context *sw_context,
 				SVGA3dCmdHeader *header)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	struct vmw_query_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdEndGBQuery q;
@@ -1569,7 +1567,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
 
 	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
 
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	return ret;
 }
 
@@ -1584,7 +1582,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
 			     struct vmw_sw_context *sw_context,
 			     SVGA3dCmdHeader *header)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	struct vmw_query_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdEndQuery q;
@@ -1623,7 +1621,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
 
 	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
 
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	return ret;
 }
 
@@ -1638,7 +1636,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
 				 struct vmw_sw_context *sw_context,
 				 SVGA3dCmdHeader *header)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	struct vmw_query_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdWaitForGBQuery q;
@@ -1656,7 +1654,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	return 0;
 }
 
@@ -1671,7 +1669,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
 			      struct vmw_sw_context *sw_context,
 			      SVGA3dCmdHeader *header)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	struct vmw_query_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdWaitForQuery q;
@@ -1708,7 +1706,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	return 0;
 }
 
@@ -1716,7 +1714,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 			struct vmw_sw_context *sw_context,
 			SVGA3dCmdHeader *header)
 {
-	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct vmw_buffer_object *vmw_bo = NULL;
 	struct vmw_surface *srf = NULL;
 	struct vmw_dma_cmd {
 		SVGA3dCmdHeader header;
@@ -1768,7 +1766,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 			     header);
 
 out_no_surface:
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	return ret;
 }
 
@@ -1887,7 +1885,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
 					   struct vmw_sw_context *sw_context,
 					   void *buf)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	int ret;
 
 	struct {
@@ -1901,7 +1899,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 
 	return ret;
 }
@@ -1928,7 +1926,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
 				     uint32_t *buf_id,
 				     unsigned long backup_offset)
 {
-	struct vmw_dma_buffer *dma_buf;
+	struct vmw_buffer_object *dma_buf;
 	int ret;
 
 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
@@ -1939,7 +1937,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
 	if (val_node->first_usage)
 		val_node->no_buffer_needed = true;
 
-	vmw_dmabuf_unreference(&val_node->new_backup);
+	vmw_bo_unreference(&val_node->new_backup);
 	val_node->new_backup = dma_buf;
 	val_node->new_backup_offset = backup_offset;
 
@@ -3701,8 +3699,8 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 			       bool interruptible,
 			       bool validate_as_mob)
 {
-	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
-						  base);
+	struct vmw_buffer_object *vbo =
+		container_of(bo, struct vmw_buffer_object, base);
 	struct ttm_operation_ctx ctx = { interruptible, true };
 	int ret;
 
@@ -4423,7 +4421,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 
 	ttm_bo_unref(&query_val.bo);
 	ttm_bo_unref(&pinned_val.bo);
-	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+	vmw_bo_unreference(&dev_priv->pinned_bo);
 out_unlock:
 	return;
 
@@ -4432,7 +4430,7 @@ out_no_emit:
 out_no_reserve:
 	ttm_bo_unref(&query_val.bo);
 	ttm_bo_unref(&pinned_val.bo);
-	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+	vmw_bo_unreference(&dev_priv->pinned_bo);
 }
 
 /**