author     Sinclair Yeh <syeh@vmware.com>              2015-08-10 13:56:15 -0400
committer  Thomas Hellstrom <thellstrom@vmware.com>    2015-08-12 13:06:51 -0400
commit     fd11a3c0bd39162547e8abe44e1aaa11059c15f5
tree       b9e897a1f74efa43cc76fa9b78bb1c92f6406ef8
parent     0fca749e9a085ac4623a807ab12c37fc09851e3c
drm/vmwgfx: Add DX query support. Various fixes.
Add support for vgpu10 queries. Functional and formatting fixes.
Signed-off-by: Sinclair Yeh <syeh@vmware.com>
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
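
The sketch below is an editor's illustration of how the two context helpers introduced by this patch, vmw_context_bind_dx_query() and vmw_context_get_dx_query_mob(), are meant to be used together. It is not part of the patch; it assumes a caller inside the vmwgfx driver that already holds binding_mutex, as the kerneldoc added in vmwgfx_context.c requires, and the function name is hypothetical.

/*
 * Illustration only -- not part of this patch. Assumes vmwgfx-internal
 * types and the helpers added below, and that binding_mutex is held.
 */
static int example_query_mob_usage(struct vmw_resource *ctx_res,
                                   struct vmw_dma_buffer *mob)
{
        int ret;

        /* Associate the query MOB with the context; fails with -EINVAL
         * if a different MOB is already bound to this context. */
        ret = vmw_context_bind_dx_query(ctx_res, mob);
        if (ret != 0)
                return ret;

        /* The association can later be looked up without taking an
         * extra reference ... */
        WARN_ON(vmw_context_get_dx_query_mob(ctx_res) != mob);

        /* ... and passing NULL drops it again, releasing the counted
         * reference the context held on the MOB. */
        return vmw_context_bind_dx_query(ctx_res, NULL);
}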
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c   |   9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c  |  70
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h      |  10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  | 209
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 101

5 files changed, 373 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 3b349fd2d12d..469a7042037d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -817,9 +817,9 @@ static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 /**
  * vmw_move_notify - TTM move_notify_callback
  *
  * @bo: The TTM buffer object about to move.
- * @mem: The truct ttm_mem_reg indicating to what memory
+ * @mem: The struct ttm_mem_reg indicating to what memory
  * region the move is taking place.
  *
  * Calls move_notify for all subsystems needing it.
  * (currently only resources).
@@ -828,13 +828,14 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
                             struct ttm_mem_reg *mem)
 {
         vmw_resource_move_notify(bo, mem);
+        vmw_query_move_notify(bo, mem);
 }
 
 
 /**
  * vmw_swap_notify - TTM move_notify_callback
  *
  * @bo: The TTM buffer object about to be swapped out.
  */
 static void vmw_swap_notify(struct ttm_buffer_object *bo)
 {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index b14583d6f387..7b3356fed205 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -121,7 +121,9 @@ static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
                 res = uctx->cotables[i];
                 uctx->cotables[i] = NULL;
                 spin_unlock(&uctx->cotable_lock);
-                vmw_resource_unreference(&res);
+
+                if (res)
+                        vmw_resource_unreference(&res);
         }
 }
 
@@ -585,6 +587,8 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
         struct vmw_private *dev_priv = res->dev_priv;
         struct ttm_buffer_object *bo = val_buf->bo;
         struct vmw_fence_obj *fence;
+        struct vmw_user_context *uctx =
+                container_of(res, struct vmw_user_context, res);
 
         struct {
                 SVGA3dCmdHeader header;
@@ -603,6 +607,13 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
         mutex_lock(&dev_priv->binding_mutex);
         vmw_dx_context_scrub_cotables(res, readback);
 
+        if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
+            readback) {
+                WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
+                if (vmw_query_readback_all(uctx->dx_query_mob))
+                        DRM_ERROR("Failed to read back query states\n");
+        }
+
         submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
 
         cmd = vmw_fifo_reserve(dev_priv, submit_size);
@@ -692,6 +703,9 @@ static void vmw_user_context_free(struct vmw_resource *res)
 
         if (ctx->cbs)
                 vmw_binding_state_free(ctx->cbs);
+
+        (void) vmw_context_bind_dx_query(res, NULL);
+
         ttm_base_object_kfree(ctx, base);
         ttm_mem_global_free(vmw_mem_glob(dev_priv),
                             vmw_user_context_size);
@@ -867,3 +881,57 @@ vmw_context_binding_state(struct vmw_resource *ctx)
 {
         return container_of(ctx, struct vmw_user_context, res)->cbs;
 }
+
+/**
+ * vmw_context_bind_dx_query -
+ * Sets query MOB for the context. If @mob is NULL, then this function will
+ * remove the association between the MOB and the context. This function
+ * assumes the binding_mutex is held.
+ *
+ * @ctx_res: The context resource
+ * @mob: a reference to the query MOB
+ *
+ * Returns -EINVAL if a MOB has already been set and does not match the one
+ * specified in the parameter. 0 otherwise.
+ */
+int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
+                              struct vmw_dma_buffer *mob)
+{
+        struct vmw_user_context *uctx =
+                container_of(ctx_res, struct vmw_user_context, res);
+
+        if (mob == NULL) {
+                if (uctx->dx_query_mob) {
+                        uctx->dx_query_mob->dx_query_ctx = NULL;
+                        vmw_dmabuf_unreference(&uctx->dx_query_mob);
+                        uctx->dx_query_mob = NULL;
+                }
+
+                return 0;
+        }
+
+        /* Can only have one MOB per context for queries */
+        if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
+                return -EINVAL;
+
+        mob->dx_query_ctx = ctx_res;
+
+        if (!uctx->dx_query_mob)
+                uctx->dx_query_mob = vmw_dmabuf_reference(mob);
+
+        return 0;
+}
+
+/**
+ * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
+ *
+ * @ctx_res: The context resource
+ */
+struct vmw_dma_buffer *
+vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
+{
+        struct vmw_user_context *uctx =
+                container_of(ctx_res, struct vmw_user_context, res);
+
+        return uctx->dx_query_mob;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index b88ea50b7d95..0e18dfb28ad5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -88,6 +88,8 @@ struct vmw_dma_buffer {
         struct ttm_buffer_object base;
         struct list_head res_list;
         s32 pin_count;
+        /* Not ref-counted. Protected by binding_mutex */
+        struct vmw_resource *dx_query_ctx;
 };
 
 /**
@@ -658,6 +660,9 @@ extern void vmw_resource_unreserve(struct vmw_resource *res,
                                    unsigned long new_backup_offset);
 extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
                                      struct ttm_mem_reg *mem);
+extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
+                                  struct ttm_mem_reg *mem);
+extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
 extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
                                 struct vmw_fence_obj *fence);
 extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
@@ -1011,6 +1016,11 @@ extern struct vmw_ctx_binding_state *
 vmw_context_binding_state(struct vmw_resource *ctx);
 extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
                                           bool readback);
+extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
+                                     struct vmw_dma_buffer *mob);
+extern struct vmw_dma_buffer *
+vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
+
 
 /*
  * Surface management - vmwgfx_surface.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 2b7ac4918855..b56565457c96 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -101,19 +101,32 @@ struct vmw_cmd_entry {
 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         struct vmw_resource *ctx);
+static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+                                 struct vmw_sw_context *sw_context,
+                                 SVGAMobId *id,
+                                 struct vmw_dma_buffer **vmw_bo_p);
+static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
+                                   struct vmw_dma_buffer *vbo,
+                                   bool validate_as_mob,
+                                   uint32_t *p_val_node);
+
 
 /**
- * vmw_resource_unreserve - unreserve resources previously reserved for
+ * vmw_resources_unreserve - unreserve resources previously reserved for
  * command submission.
  *
- * @list_head: list of resources to unreserve.
+ * @sw_context: pointer to the software context
  * @backoff: Whether command submission failed.
  */
-static void vmw_resource_list_unreserve(struct vmw_sw_context *sw_context,
-                                        struct list_head *list,
-                                        bool backoff)
+static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
+                                    bool backoff)
 {
         struct vmw_resource_val_node *val;
+        struct list_head *list = &sw_context->resource_list;
+
+        if (sw_context->dx_query_mob && !backoff)
+                vmw_context_bind_dx_query(sw_context->dx_query_ctx,
+                                          sw_context->dx_query_mob);
 
         list_for_each_entry(val, list, head) {
                 struct vmw_resource *res = val->res;
@@ -376,6 +389,16 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                         break;
         }
 
+        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+                struct vmw_dma_buffer *dx_query_mob;
+
+                dx_query_mob = vmw_context_get_dx_query_mob(ctx);
+                if (dx_query_mob)
+                        ret = vmw_bo_to_validate_list(sw_context,
+                                                      dx_query_mob,
+                                                      true, NULL);
+        }
+
         mutex_unlock(&dev_priv->binding_mutex);
         return ret;
 }
@@ -533,7 +556,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 {
         struct vmw_resource_val_node *val;
-        int ret;
+        int ret = 0;
 
         list_for_each_entry(val, &sw_context->resource_list, head) {
                 struct vmw_resource *res = val->res;
@@ -554,7 +577,18 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
                 }
         }
 
-        return 0;
+        if (sw_context->dx_query_mob) {
+                struct vmw_dma_buffer *expected_dx_query_mob;
+
+                expected_dx_query_mob =
+                        vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
+                if (expected_dx_query_mob &&
+                    expected_dx_query_mob != sw_context->dx_query_mob) {
+                        ret = -EINVAL;
+                }
+        }
+
+        return ret;
 }
 
 /**
@@ -725,6 +759,46 @@ out_no_reloc:
 }
 
 /**
+ * vmw_rebind_dx_query - Rebind DX query associated with the context
+ *
+ * @ctx_res: context the query belongs to
+ *
+ * This function assumes binding_mutex is held.
+ */
+static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
+{
+        struct vmw_private *dev_priv = ctx_res->dev_priv;
+        struct vmw_dma_buffer *dx_query_mob;
+        struct {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdDXBindAllQuery body;
+        } *cmd;
+
+
+        dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
+
+        if (!dx_query_mob || dx_query_mob->dx_query_ctx)
+                return 0;
+
+        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
+
+        if (cmd == NULL) {
+                DRM_ERROR("Failed to rebind queries.\n");
+                return -ENOMEM;
+        }
+
+        cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
+        cmd->header.size = sizeof(cmd->body);
+        cmd->body.cid = ctx_res->id;
+        cmd->body.mobid = dx_query_mob->base.mem.start;
+        vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+        vmw_context_bind_dx_query(ctx_res, dx_query_mob);
+
+        return 0;
+}
+
+/**
  * vmw_rebind_contexts - Rebind all resources previously bound to
  * referenced contexts.
  *
@@ -748,6 +822,10 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
                         DRM_ERROR("Failed to rebind context.\n");
                         return ret;
                 }
+
+                ret = vmw_rebind_all_dx_query(val->res);
+                if (ret != 0)
+                        return ret;
         }
 
         return 0;
@@ -1248,6 +1326,98 @@ out_no_reloc:
         return ret;
 }
 
+
+
+/**
+ * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * This function adds the new query into the query COTABLE
+ */
+static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
+                                   struct vmw_sw_context *sw_context,
+                                   SVGA3dCmdHeader *header)
+{
+        struct vmw_dx_define_query_cmd {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdDXDefineQuery q;
+        } *cmd;
+
+        int ret;
+        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+        struct vmw_resource *cotable_res;
+
+
+        if (ctx_node == NULL) {
+                DRM_ERROR("DX Context not set for query.\n");
+                return -EINVAL;
+        }
+
+        cmd = container_of(header, struct vmw_dx_define_query_cmd, header);
+
+        if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
+            cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
+                return -EINVAL;
+
+        cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
+        ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
+        vmw_resource_unreference(&cotable_res);
+
+        return ret;
+}
+
+
+
+/**
+ * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * The query bind operation will eventually associate the query ID
+ * with its backing MOB. In this function, we take the user mode
+ * MOB ID and use vmw_translate_mob_ptr() to translate it to its
+ * kernel mode equivalent.
+ */
+static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
+                                 struct vmw_sw_context *sw_context,
+                                 SVGA3dCmdHeader *header)
+{
+        struct vmw_dx_bind_query_cmd {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdDXBindQuery q;
+        } *cmd;
+
+        struct vmw_dma_buffer *vmw_bo;
+        int ret;
+
+
+        cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
+
+        /*
+         * Look up the buffer pointed to by q.mobid, put it on the relocation
+         * list so its kernel mode MOB ID can be filled in later
+         */
+        ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
+                                    &vmw_bo);
+
+        if (ret != 0)
+                return ret;
+
+        sw_context->dx_query_mob = vmw_bo;
+        sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
+
+        vmw_dmabuf_unreference(&vmw_bo);
+
+        return ret;
+}
+
+
+
 /**
  * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
  *
@@ -2975,6 +3145,8 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
                     false, false, true),
         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
                     false, false, true),
+        VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
+                    false, false, true),
         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
                     false, false, true),
         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
@@ -3097,15 +3269,17 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
                     &vmw_cmd_dx_cid_check, true, false, true),
         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
                     &vmw_cmd_dx_cid_check, true, false, true),
-        VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_invalid,
+        VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
                     true, false, true),
-        VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_invalid,
+        VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok,
                     true, false, true),
-        VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_invalid,
+        VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
                     true, false, true),
-        VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_invalid,
+        VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
+                    &vmw_cmd_ok, true, false, true),
+        VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok,
                     true, false, true),
-        VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_invalid,
+        VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok,
                     true, false, true),
         VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
                     true, false, true),
@@ -3780,6 +3954,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
         sw_context->last_query_ctx = NULL;
         sw_context->needs_post_query_barrier = false;
         sw_context->dx_ctx_node = NULL;
+        sw_context->dx_query_mob = NULL;
+        sw_context->dx_query_ctx = NULL;
         memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
         INIT_LIST_HEAD(&sw_context->validate_nodes);
         INIT_LIST_HEAD(&sw_context->res_relocations);
@@ -3803,7 +3979,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 
         ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
                                 command_size);
-
         /*
          * Merge the resource lists before checking the return status
          * from vmd_cmd_check_all so that all the open hashtabs will
@@ -3869,8 +4044,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
         if (ret != 0)
                 DRM_ERROR("Fence submission error. Syncing.\n");
 
-        vmw_resource_list_unreserve(sw_context, &sw_context->resource_list,
-                                    false);
+        vmw_resources_unreserve(sw_context, false);
 
         ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
                                     (void *) fence);
@@ -3908,8 +4082,7 @@ out_unlock_binding:
 out_err:
         ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
 out_err_nores:
-        vmw_resource_list_unreserve(sw_context, &sw_context->resource_list,
-                                    true);
+        vmw_resources_unreserve(sw_context, true);
         vmw_resource_relocations_free(&sw_context->res_relocations);
         vmw_free_relocations(sw_context);
         vmw_clear_validations(sw_context);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 6186e859dab0..bcd342dd8b96 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1451,9 +1451,9 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
 /**
  * vmw_resource_move_notify - TTM move_notify_callback
  *
  * @bo: The TTM buffer object about to move.
- * @mem: The truct ttm_mem_reg indicating to what memory
+ * @mem: The struct ttm_mem_reg indicating to what memory
  * region the move is taking place.
  *
  * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
@@ -1503,6 +1503,101 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
         }
 }
 
+
+
+/**
+ * vmw_query_readback_all - Read back cached query states
+ *
+ * @dx_query_mob: Buffer containing the DX query MOB
+ *
+ * Read back cached states from the device if they exist. This function
+ * assumes binding_mutex is held.
+ */
+int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
+{
+        struct vmw_resource *dx_query_ctx;
+        struct vmw_private *dev_priv;
+        struct {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdDXReadbackAllQuery body;
+        } *cmd;
+
+
+        /* No query bound, so do nothing */
+        if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
+                return 0;
+
+        dx_query_ctx = dx_query_mob->dx_query_ctx;
+        dev_priv = dx_query_ctx->dev_priv;
+
+        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
+        if (unlikely(cmd == NULL)) {
+                DRM_ERROR("Failed reserving FIFO space for "
+                          "query MOB read back.\n");
+                return -ENOMEM;
+        }
+
+        cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
+        cmd->header.size = sizeof(cmd->body);
+        cmd->body.cid = dx_query_ctx->id;
+
+        vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+        /* Triggers a rebind the next time affected context is bound */
+        dx_query_mob->dx_query_ctx = NULL;
+
+        return 0;
+}
+
+
+
+/**
+ * vmw_query_move_notify - Read back cached query states
+ *
+ * @bo: The TTM buffer object about to move.
+ * @mem: The memory region @bo is moving to.
+ *
+ * Called before the query MOB is swapped out to read back cached query
+ * states from the device.
+ */
+void vmw_query_move_notify(struct ttm_buffer_object *bo,
+                           struct ttm_mem_reg *mem)
+{
+        struct vmw_dma_buffer *dx_query_mob;
+        struct ttm_bo_device *bdev = bo->bdev;
+        struct vmw_private *dev_priv;
+
+
+        dev_priv = container_of(bdev, struct vmw_private, bdev);
+
+        mutex_lock(&dev_priv->binding_mutex);
+
+        dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
+        if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
+                mutex_unlock(&dev_priv->binding_mutex);
+                return;
+        }
+
+        /* If BO is being moved from MOB to system memory */
+        if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
+                struct vmw_fence_obj *fence;
+
+                (void) vmw_query_readback_all(dx_query_mob);
+                mutex_unlock(&dev_priv->binding_mutex);
+
+                /* Create a fence and attach the BO to it */
+                (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+                vmw_fence_single_bo(bo, fence);
+
+                if (fence != NULL)
+                        vmw_fence_obj_unreference(&fence);
+
+                (void) ttm_bo_wait(bo, false, false, false);
+        } else
+                mutex_unlock(&dev_priv->binding_mutex);
+
+}
+
 /**
  * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
  *
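
Editor's note on how the pieces above fit together: when TTM moves the query MOB out of device (MOB) memory, vmw_query_move_notify() flushes the cached query states with SVGA_3D_CMD_DX_READBACK_ALL_QUERY and clears the context association; the next command submission that references the context then re-issues SVGA_3D_CMD_DX_BIND_ALL_QUERY via vmw_rebind_all_dx_query(). The condensed sketch below paraphrases that cycle; it is illustrative only, the function name is hypothetical, and locking is omitted for brevity (the real code holds binding_mutex).

/*
 * Illustration only -- a condensed paraphrase of the eviction/rebind
 * cycle implemented by this patch, not code from the patch itself.
 */
static void example_query_mob_eviction(struct ttm_buffer_object *bo,
                                       struct ttm_mem_reg *new_mem)
{
        struct vmw_dma_buffer *mob =
                container_of(bo, struct vmw_dma_buffer, base);

        /* MOB leaving device memory: read back cached query states and
         * drop the back-pointer, so the device no longer relies on the
         * old MOB location (see vmw_query_move_notify() above). */
        if (new_mem->mem_type == TTM_PL_SYSTEM &&
            bo->mem.mem_type == VMW_PL_MOB)
                (void) vmw_query_readback_all(mob);

        /*
         * On the next execbuf that references the owning context,
         * vmw_rebind_all_dx_query() issues SVGA_3D_CMD_DX_BIND_ALL_QUERY
         * so the device picks the MOB up again at its new location.
         */
}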