path: root/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 872
 1 file changed, 796 insertions(+), 76 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 599f6469a1eb..7a5f1eb55c5a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -54,6 +54,8 @@ struct vmw_resource_relocation {
  * @res: Ref-counted pointer to the resource.
  * @switch_backup: Boolean whether to switch backup buffer on unreserve.
  * @new_backup: Refcounted pointer to the new backup buffer.
+ * @staged_bindings: If @res is a context, tracks bindings set up during
+ * the command batch. Otherwise NULL.
  * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
  * @first_usage: Set to true the first time the resource is referenced in
  * the command stream.
@@ -65,12 +67,32 @@ struct vmw_resource_val_node {
 	struct drm_hash_item hash;
 	struct vmw_resource *res;
 	struct vmw_dma_buffer *new_backup;
+	struct vmw_ctx_binding_state *staged_bindings;
 	unsigned long new_backup_offset;
 	bool first_usage;
 	bool no_buffer_needed;
 };
 
 /**
+ * struct vmw_cmd_entry - Describe a command for the verifier
+ *
+ * @user_allow: Whether allowed from the execbuf ioctl.
+ * @gb_disable: Whether disabled if guest-backed objects are available.
+ * @gb_enable: Whether enabled iff guest-backed objects are available.
+ */
+struct vmw_cmd_entry {
+	int (*func) (struct vmw_private *, struct vmw_sw_context *,
+		     SVGA3dCmdHeader *);
+	bool user_allow;
+	bool gb_disable;
+	bool gb_enable;
+};
+
+#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
+	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
+				       (_gb_disable), (_gb_enable)}
+
+/**
  * vmw_resource_unreserve - unreserve resources previously reserved for
  * command submission.
  *
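
The VMW_CMD_DEF macro introduced above relies on C99 designated array initializers: entries can be listed in any order, keyed by command id, and any slot not mentioned is zero-filled (a NULL func, all flags false), which is what makes unknown commands rejectable by a simple table lookup. A minimal stand-alone sketch of the same pattern; all names below are illustrative, not taken from the driver:

#include <stdio.h>

/* Hypothetical command id range, mirroring SVGA_3D_CMD_BASE/..._MAX. */
enum { CMD_BASE = 1040, CMD_BLIT = 1041, CMD_DRAW = 1042, CMD_MAX = 1050 };

struct cmd_entry {
	int (*func)(void);	/* verifier callback, NULL if unsupported */
	int user_allow;		/* allowed from the unprivileged ioctl?   */
};

static int check_blit(void) { return 0; }

/* Same trick as VMW_CMD_DEF: index the table by (id - base). */
#define CMD_DEF(_cmd, _func, _allow) \
	[(_cmd) - CMD_BASE] = { (_func), (_allow) }

static const struct cmd_entry cmd_entries[CMD_MAX - CMD_BASE] = {
	CMD_DEF(CMD_BLIT, check_blit, 1),
	/* CMD_DRAW omitted: its slot is zero-initialized, i.e. invalid. */
};

int main(void)
{
	const struct cmd_entry *e = &cmd_entries[CMD_BLIT - CMD_BASE];

	printf("blit allowed: %d, draw has func: %d\n",
	       e->user_allow, cmd_entries[CMD_DRAW - CMD_BASE].func != NULL);
	return 0;
}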
@@ -87,6 +109,16 @@ static void vmw_resource_list_unreserve(struct list_head *list,
 		struct vmw_dma_buffer *new_backup =
 			backoff ? NULL : val->new_backup;
 
+		/*
+		 * Transfer staged context bindings to the
+		 * persistent context binding tracker.
+		 */
+		if (unlikely(val->staged_bindings)) {
+			vmw_context_binding_state_transfer
+				(val->res, val->staged_bindings);
+			kfree(val->staged_bindings);
+			val->staged_bindings = NULL;
+		}
 		vmw_resource_unreserve(res, new_backup,
 				       val->new_backup_offset);
 		vmw_dmabuf_unreference(&val->new_backup);
@@ -224,6 +256,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
  *
  * @sw_context: The software context used for this command submission batch.
  * @bo: The buffer object to add.
+ * @validate_as_mob: Validate this buffer as a MOB.
  * @p_val_node: If non-NULL, will be updated with the validate node number
  * on return.
  *
@@ -232,6 +265,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
  */
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 				   struct ttm_buffer_object *bo,
+				   bool validate_as_mob,
 				   uint32_t *p_val_node)
 {
 	uint32_t val_node;
@@ -244,6 +278,10 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 			    &hash) == 0)) {
 		vval_buf = container_of(hash, struct vmw_validate_buffer,
 					hash);
+		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
+			DRM_ERROR("Inconsistent buffer usage.\n");
+			return -EINVAL;
+		}
 		val_buf = &vval_buf->base;
 		val_node = vval_buf - sw_context->val_bufs;
 	} else {
@@ -266,6 +304,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 		val_buf->bo = ttm_bo_reference(bo);
 		val_buf->reserved = false;
 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
+		vval_buf->validate_as_mob = validate_as_mob;
 	}
 
 	sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
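
vmw_bo_to_validate_list deduplicates buffers through a hash table keyed on the BO, so a buffer referenced by many commands gets a single validate node; the new validate_as_mob field must therefore be consistent across all references in one submission, which is exactly what the "Inconsistent buffer usage" check above enforces. The same idea in miniature (hypothetical, array-backed instead of a hash table):

#include <stdio.h>

struct node { const void *bo; int as_mob; };

static struct node nodes[32];
static int nnodes;

/* Return the node index, or -1 on inconsistent usage (as in the diff). */
static int to_validate_list(const void *bo, int as_mob)
{
	int i;

	for (i = 0; i < nnodes; ++i) {
		if (nodes[i].bo != bo)
			continue;
		if (nodes[i].as_mob != as_mob)
			return -1;	/* "Inconsistent buffer usage." */
		return i;		/* already listed exactly once  */
	}
	nodes[nnodes].bo = bo;
	nodes[nnodes].as_mob = as_mob;
	return nnodes++;
}

int main(void)
{
	int a = 0;

	printf("%d %d %d\n",
	       to_validate_list(&a, 0),	/* 0: new node       */
	       to_validate_list(&a, 0),	/* 0: deduplicated   */
	       to_validate_list(&a, 1));	/* -1: inconsistent  */
	return 0;
}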
@@ -302,7 +341,8 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 			struct ttm_buffer_object *bo = &res->backup->base;
 
 			ret = vmw_bo_to_validate_list
-				(sw_context, bo, NULL);
+				(sw_context, bo,
+				 vmw_resource_needs_backup(res), NULL);
 
 			if (unlikely(ret != 0))
 				return ret;
@@ -362,8 +402,15 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
 	struct vmw_resource_val_node *node;
 	int ret;
 
-	if (*id == SVGA3D_INVALID_ID)
+	if (*id == SVGA3D_INVALID_ID) {
+		if (p_val)
+			*p_val = NULL;
+		if (res_type == vmw_res_context) {
+			DRM_ERROR("Illegal context invalid id.\n");
+			return -EINVAL;
+		}
 		return 0;
+	}
 
 	/*
 	 * Fastpath in case of repeated commands referencing the same
@@ -411,6 +458,18 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
 	rcache->node = node;
 	if (p_val)
 		*p_val = node;
+
+	if (node->first_usage && res_type == vmw_res_context) {
+		node->staged_bindings =
+			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
+		if (node->staged_bindings == NULL) {
+			DRM_ERROR("Failed to allocate context binding "
+				  "information.\n");
+			goto out_no_reloc;
+		}
+		INIT_LIST_HEAD(&node->staged_bindings->list);
+	}
+
 	vmw_resource_unreference(&res);
 	return 0;
 
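
This is the allocation half of a stage/commit scheme: the first time a context shows up in a batch it gets a throwaway vmw_ctx_binding_state, the command validators record bindings into it, and vmw_resource_list_unreserve (earlier in this diff) transfers the staged set into the context's persistent tracker and frees the staging copy. The shape of that lifecycle, reduced to a user-space sketch with hypothetical types:

#include <stdlib.h>
#include <string.h>

struct binding_state { int rt[4], tex[8], shader[2]; };

struct context {
	struct binding_state persistent; /* survives across submissions  */
	struct binding_state *staged;    /* lives for one batch, or NULL */
};

/* First use in a batch: set up the staging area (cf. vmw_cmd_res_check). */
static int context_first_use(struct context *ctx)
{
	ctx->staged = calloc(1, sizeof(*ctx->staged));
	return ctx->staged ? 0 : -12;	/* -ENOMEM */
}

/* Unreserve: commit the staged bindings, then drop the staging copy. */
static void context_unreserve(struct context *ctx)
{
	if (ctx->staged) {
		memcpy(&ctx->persistent, ctx->staged,
		       sizeof(ctx->persistent));
		free(ctx->staged);
		ctx->staged = NULL;
	}
}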
@@ -453,17 +512,35 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 		SVGA3dCmdHeader header;
 		SVGA3dCmdSetRenderTarget body;
 	} *cmd;
+	struct vmw_resource_val_node *ctx_node;
+	struct vmw_resource_val_node *res_node;
 	int ret;
 
-	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	cmd = container_of(header, struct vmw_sid_cmd, header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->body.cid,
+				&ctx_node);
 	if (unlikely(ret != 0))
 		return ret;
 
-	cmd = container_of(header, struct vmw_sid_cmd, header);
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				user_surface_converter,
-				&cmd->body.target.sid, NULL);
-	return ret;
+				&cmd->body.target.sid, &res_node);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (dev_priv->has_mob) {
+		struct vmw_ctx_bindinfo bi;
+
+		bi.ctx = ctx_node->res;
+		bi.res = res_node ? res_node->res : NULL;
+		bi.bt = vmw_ctx_binding_rt;
+		bi.i1.rt_type = cmd->body.type;
+		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+	}
+
+	return 0;
 }
 
 static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
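
The binding descriptor filled in above is a small tagged record: bt selects the binding type and the i1 union carries the per-type slot (the render-target type here, a texture stage or shader type in later hunks). A stripped-down version of the shape being filled; this is a hypothetical reduction, not the driver's actual struct vmw_ctx_bindinfo:

enum binding_type { BINDING_RT, BINDING_TEX, BINDING_SHADER };

struct bindinfo {
	const void *ctx;	/* owning context resource      */
	const void *res;	/* resource being bound, or NULL */
	enum binding_type bt;	/* selects which union member    */
	union {
		int rt_type;		/* valid when bt == BINDING_RT     */
		int texture_stage;	/* valid when bt == BINDING_TEX    */
		int shader_type;	/* valid when bt == BINDING_SHADER */
	} i1;
};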
@@ -519,11 +596,6 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
 
-	if (unlikely(!sw_context->kernel)) {
-		DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
-		return -EPERM;
-	}
-
 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				 user_surface_converter,
 				 &cmd->body.srcImage.sid, NULL);
@@ -541,11 +613,6 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
 
-	if (unlikely(!sw_context->kernel)) {
-		DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
-		return -EPERM;
-	}
-
 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				 user_surface_converter, &cmd->body.sid,
 				 NULL);
@@ -586,7 +653,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 		sw_context->needs_post_query_barrier = true;
 		ret = vmw_bo_to_validate_list(sw_context,
 					      sw_context->cur_query_bo,
-					      NULL);
+					      dev_priv->has_mob, NULL);
 		if (unlikely(ret != 0))
 			return ret;
 	}
@@ -594,7 +661,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 
 		ret = vmw_bo_to_validate_list(sw_context,
 					      dev_priv->dummy_query_bo,
-					      NULL);
+					      dev_priv->has_mob, NULL);
 		if (unlikely(ret != 0))
 			return ret;
 
@@ -672,6 +739,66 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 }
 
 /**
+ * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
+ * handle to a MOB id.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: The software context used for this command batch validation.
+ * @id: Pointer to the user-space handle to be translated.
+ * @vmw_bo_p: Points to a location that, on successful return, will carry
+ * a reference-counted pointer to the DMA buffer identified by the
+ * user-space handle in @id.
+ *
+ * This function saves information needed to translate a user-space buffer
+ * handle to a MOB id. The translation does not take place immediately, but
+ * during a call to vmw_apply_relocations(). This function builds a relocation
+ * list and a list of buffers to validate. The former needs to be freed using
+ * either vmw_apply_relocations() or vmw_free_relocations(). The latter
+ * needs to be freed using vmw_clear_validations.
+ */
+static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGAMobId *id,
+				 struct vmw_dma_buffer **vmw_bo_p)
+{
+	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct ttm_buffer_object *bo;
+	uint32_t handle = *id;
+	struct vmw_relocation *reloc;
+	int ret;
+
+	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Could not find or use MOB buffer.\n");
+		return -EINVAL;
+	}
+	bo = &vmw_bo->base;
+
+	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
+		DRM_ERROR("Max number relocations per submission"
+			  " exceeded\n");
+		ret = -EINVAL;
+		goto out_no_reloc;
+	}
+
+	reloc = &sw_context->relocs[sw_context->cur_reloc++];
+	reloc->mob_loc = id;
+	reloc->location = NULL;
+
+	ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
+	if (unlikely(ret != 0))
+		goto out_no_reloc;
+
+	*vmw_bo_p = vmw_bo;
+	return 0;
+
+out_no_reloc:
+	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_p = NULL;
+	return ret;
+}
+
+/**
  * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
  * handle to a valid SVGAGuestPtr
  *
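
vmw_translate_mob_ptr records where in the command stream the MOB id must eventually be written (reloc->mob_loc) but defers the write until vmw_apply_relocations(), after the buffer has been validated and placed (see the VMW_PL_MOB case later in this diff). The same record-then-patch pattern in miniature, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

/* A relocation remembers where a final address/id must be patched in. */
struct reloc {
	uint32_t *loc;		/* location inside the command buffer */
	int buf_idx;		/* which buffer the id will come from */
};

static struct reloc relocs[16];
static int nrelocs;

/* Validation time: just record the slot, don't resolve anything yet. */
static void reloc_add(uint32_t *loc, int buf_idx)
{
	relocs[nrelocs].loc = loc;
	relocs[nrelocs].buf_idx = buf_idx;
	nrelocs++;
}

/* After placement: patch every recorded location with the final id. */
static void apply_relocations(const uint32_t *final_ids)
{
	int i;

	for (i = 0; i < nrelocs; ++i)
		*relocs[i].loc = final_ids[relocs[i].buf_idx];
}

int main(void)
{
	uint32_t cmd[2] = { 0xdead, 0xbeef };	/* fake command stream     */
	uint32_t ids[1] = { 42 };		/* assigned at "validation" */

	reloc_add(&cmd[1], 0);
	apply_relocations(ids);
	printf("patched mob id: %u\n", cmd[1]);	/* prints 42 */
	return 0;
}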
@@ -718,7 +845,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
 	reloc->location = ptr;
 
-	ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index);
+	ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
 
@@ -732,6 +859,30 @@ out_no_reloc:
 }
 
 /**
+ * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct vmw_begin_gb_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBeginGBQuery q;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
+			   header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				 user_context_converter, &cmd->q.cid,
+				 NULL);
+}
+
+/**
  * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
  *
  * @dev_priv: Pointer to a device private struct.
@@ -750,12 +901,64 @@ static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
 	cmd = container_of(header, struct vmw_begin_query_cmd,
 			   header);
 
+	if (unlikely(dev_priv->has_mob)) {
+		struct {
+			SVGA3dCmdHeader header;
+			SVGA3dCmdBeginGBQuery q;
+		} gb_cmd;
+
+		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
+		gb_cmd.header.size = cmd->header.size;
+		gb_cmd.q.cid = cmd->q.cid;
+		gb_cmd.q.type = cmd->q.type;
+
+		memcpy(cmd, &gb_cmd, sizeof(*cmd));
+		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
+	}
+
 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 				 user_context_converter, &cmd->q.cid,
 				 NULL);
 }
 
 /**
+ * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
+				struct vmw_sw_context *sw_context,
+				SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdEndGBQuery q;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_query_cmd, header);
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
+				    &cmd->q.mobid,
+				    &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
+
+	vmw_dmabuf_unreference(&vmw_bo);
+	return ret;
+}
+
+/**
  * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
  *
  * @dev_priv: Pointer to a device private struct.
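
vmw_cmd_begin_query above rewrites the legacy command into its guest-backed equivalent in place; the BUG_ON documents the size invariant that makes the in-place memcpy safe before the rewritten header is re-dispatched through the GB validator. The same invariant could be checked at compile time; a sketch using a C11 static assertion, where the struct layouts are placeholders standing in for the SVGA3d command structs:

#include <assert.h>
#include <stdint.h>

struct hdr { uint32_t id, size; };
struct legacy_q { uint32_t cid, type; };
struct gb_q     { uint32_t cid, type; };

struct legacy_cmd { struct hdr header; struct legacy_q q; };
struct gb_cmd     { struct hdr header; struct gb_q q; };

/*
 * The in-place rewrite is only legal because both encodings occupy
 * exactly the same number of bytes in the command stream.
 */
static_assert(sizeof(struct legacy_cmd) == sizeof(struct gb_cmd),
	      "legacy and GB query commands must be the same size");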
@@ -774,6 +977,25 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
 	int ret;
 
 	cmd = container_of(header, struct vmw_query_cmd, header);
+	if (dev_priv->has_mob) {
+		struct {
+			SVGA3dCmdHeader header;
+			SVGA3dCmdEndGBQuery q;
+		} gb_cmd;
+
+		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
+		gb_cmd.header.size = cmd->header.size;
+		gb_cmd.q.cid = cmd->q.cid;
+		gb_cmd.q.type = cmd->q.type;
+		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
+		gb_cmd.q.offset = cmd->q.guestResult.offset;
+
+		memcpy(cmd, &gb_cmd, sizeof(*cmd));
+		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
+	}
+
 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
 	if (unlikely(ret != 0))
 		return ret;
@@ -790,7 +1012,40 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
 	return ret;
 }
 
-/*
+/**
+ * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdWaitForGBQuery q;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_query_cmd, header);
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
+				    &cmd->q.mobid,
+				    &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_dmabuf_unreference(&vmw_bo);
+	return 0;
+}
+
+/**
  * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
  *
  * @dev_priv: Pointer to a device private struct.
@@ -809,6 +1064,25 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
 	int ret;
 
 	cmd = container_of(header, struct vmw_query_cmd, header);
+	if (dev_priv->has_mob) {
+		struct {
+			SVGA3dCmdHeader header;
+			SVGA3dCmdWaitForGBQuery q;
+		} gb_cmd;
+
+		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
+		gb_cmd.header.size = cmd->header.size;
+		gb_cmd.q.cid = cmd->q.cid;
+		gb_cmd.q.type = cmd->q.type;
+		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
+		gb_cmd.q.offset = cmd->q.guestResult.offset;
+
+		memcpy(cmd, &gb_cmd, sizeof(*cmd));
+		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
+	}
+
 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
 	if (unlikely(ret != 0))
 		return ret;
@@ -921,15 +1195,22 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 	struct vmw_tex_state_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdSetTextureState state;
-	};
+	} *cmd;
 
 	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
 		((unsigned long) header + header->size + sizeof(header));
 	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
 		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
+	struct vmw_resource_val_node *ctx_node;
+	struct vmw_resource_val_node *res_node;
 	int ret;
 
-	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	cmd = container_of(header, struct vmw_tex_state_cmd,
+			   header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->state.cid,
+				&ctx_node);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -939,9 +1220,20 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 
 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 					user_surface_converter,
-					&cur_state->value, NULL);
+					&cur_state->value, &res_node);
 		if (unlikely(ret != 0))
 			return ret;
+
+		if (dev_priv->has_mob) {
+			struct vmw_ctx_bindinfo bi;
+
+			bi.ctx = ctx_node->res;
+			bi.res = res_node ? res_node->res : NULL;
+			bi.bt = vmw_ctx_binding_tex;
+			bi.i1.texture_stage = cur_state->stage;
+			vmw_context_binding_add(ctx_node->staged_bindings,
+						&bi);
+		}
 	}
 
 	return 0;
@@ -971,6 +1263,222 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
 }
 
 /**
+ * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @res_type: The resource type.
+ * @converter: Information about user-space binding for this resource type.
+ * @res_id: Pointer to the user-space resource handle in the command stream.
+ * @buf_id: Pointer to the user-space backup buffer handle in the command
+ * stream.
+ * @backup_offset: Offset of backup into MOB.
+ *
+ * This function prepares for registering a switch of backup buffers
+ * in the resource metadata just prior to unreserving.
+ */
+static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 enum vmw_res_type res_type,
+				 const struct vmw_user_resource_conv
+				 *converter,
+				 uint32_t *res_id,
+				 uint32_t *buf_id,
+				 unsigned long backup_offset)
+{
+	int ret;
+	struct vmw_dma_buffer *dma_buf;
+	struct vmw_resource_val_node *val_node;
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
+				converter, res_id, &val_node);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (val_node->first_usage)
+		val_node->no_buffer_needed = true;
+
+	vmw_dmabuf_unreference(&val_node->new_backup);
+	val_node->new_backup = dma_buf;
+	val_node->new_backup_offset = backup_offset;
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGA3dCmdHeader *header)
+{
+	struct vmw_bind_gb_surface_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBSurface body;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
+
+	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
+				     user_surface_converter,
+				     &cmd->body.sid, &cmd->body.mobid,
+				     0);
+}
+
+/**
+ * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGA3dCmdHeader *header)
+{
+	struct vmw_gb_surface_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdUpdateGBImage body;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.image.sid, NULL);
+}
+
+/**
+ * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     SVGA3dCmdHeader *header)
+{
+	struct vmw_gb_surface_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdUpdateGBSurface body;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.sid, NULL);
+}
+
+/**
+ * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     SVGA3dCmdHeader *header)
+{
+	struct vmw_gb_surface_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdReadbackGBImage body;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.image.sid, NULL);
+}
+
+/**
+ * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
+				       struct vmw_sw_context *sw_context,
+				       SVGA3dCmdHeader *header)
+{
+	struct vmw_gb_surface_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdReadbackGBSurface body;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.sid, NULL);
+}
+
+/**
+ * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
+				       struct vmw_sw_context *sw_context,
+				       SVGA3dCmdHeader *header)
+{
+	struct vmw_gb_surface_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdInvalidateGBImage body;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.image.sid, NULL);
+}
+
+/**
+ * vmw_cmd_invalidate_gb_surface - Validate an
+ * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
+					 struct vmw_sw_context *sw_context,
+					 SVGA3dCmdHeader *header)
+{
+	struct vmw_gb_surface_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdInvalidateGBSurface body;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.sid, NULL);
+}
+
+/**
  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
  * command
  *
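
vmw_cmd_switch_backup is deliberately generic: vmw_cmd_bind_gb_surface above and vmw_cmd_bind_gb_shader (later in this diff) both funnel into it, differing only in resource type, converter, and backup offset. The essential state change is small and deferred to unreserve time; roughly, per validation node (a simplified sketch with stand-in types, not the driver's API):

#include <stddef.h>

struct dma_buf;				/* opaque, stand-in type      */
void dma_buf_put(struct dma_buf *buf);	/* assumed refcount drop here */

/* Reduced form of the validation-node fields the diff manipulates. */
struct val_node {
	struct dma_buf *new_backup;	/* staged, applied at unreserve */
	unsigned long new_backup_offset;
	int first_usage;
	int no_buffer_needed;
};

static void stage_backup_switch(struct val_node *node,
				struct dma_buf *buf,
				unsigned long offset)
{
	if (node->first_usage)
		node->no_buffer_needed = 1;	/* skip default allocation */

	if (node->new_backup)
		dma_buf_put(node->new_backup);	/* drop a prior staged buf */
	node->new_backup = buf;			/* takes over caller's ref */
	node->new_backup_offset = offset;
}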
@@ -986,18 +1494,64 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
 		SVGA3dCmdHeader header;
 		SVGA3dCmdSetShader body;
 	} *cmd;
+	struct vmw_resource_val_node *ctx_node;
 	int ret;
 
 	cmd = container_of(header, struct vmw_set_shader_cmd,
 			   header);
 
-	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->body.cid,
+				&ctx_node);
 	if (unlikely(ret != 0))
 		return ret;
 
+	if (dev_priv->has_mob) {
+		struct vmw_ctx_bindinfo bi;
+		struct vmw_resource_val_node *res_node;
+
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
+					user_shader_converter,
+					&cmd->body.shid, &res_node);
+		if (unlikely(ret != 0))
+			return ret;
+
+		bi.ctx = ctx_node->res;
+		bi.res = res_node ? res_node->res : NULL;
+		bi.bt = vmw_ctx_binding_shader;
+		bi.i1.shader_type = cmd->body.type;
+		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+	}
+
 	return 0;
 }
 
+/**
+ * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct vmw_bind_gb_shader_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBShader body;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
+			   header);
+
+	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
+				     user_shader_converter,
+				     &cmd->body.shid, &cmd->body.mobid,
+				     cmd->body.offsetInBytes);
+}
+
 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
 				struct vmw_sw_context *sw_context,
 				void *buf, uint32_t *size)
@@ -1041,50 +1595,173 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
 	return 0;
 }
 
-typedef int (*vmw_cmd_func) (struct vmw_private *,
-			     struct vmw_sw_context *,
-			     SVGA3dCmdHeader *);
-
-#define VMW_CMD_DEF(cmd, func) \
-	[cmd - SVGA_3D_CMD_BASE] = func
-
-static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
-	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
-	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
-	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
-	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
-	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
-	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
+static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
+		    true, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
-		    &vmw_cmd_set_render_target_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
-	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
-	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
-	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
-	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
-	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
-	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
+		    &vmw_cmd_set_render_target_check, true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check,
+		    true, true, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check,
+		    true, true, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check,
+		    true, true, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
+		    true, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
-		    &vmw_cmd_blt_surf_screen_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
-	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
-	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
-	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
+		    &vmw_cmd_blt_surf_screen_check, false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
+		    &vmw_cmd_update_gb_surface, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
+		    &vmw_cmd_readback_gb_image, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
+		    &vmw_cmd_readback_gb_surface, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
+		    &vmw_cmd_invalidate_gb_image, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
+		    &vmw_cmd_invalidate_gb_surface, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
+		    true, false, true)
 };
 
 static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -1095,6 +1772,8 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
 	uint32_t size_remaining = *size;
 	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
 	int ret;
+	const struct vmw_cmd_entry *entry;
+	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
 
 	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
 	/* Handle any non-3D commands */
@@ -1107,18 +1786,40 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
 
 	cmd_id -= SVGA_3D_CMD_BASE;
 	if (unlikely(*size > size_remaining))
-		goto out_err;
+		goto out_invalid;
 
 	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
-		goto out_err;
+		goto out_invalid;
+
+	entry = &vmw_cmd_entries[cmd_id];
+	if (unlikely(!entry->user_allow && !sw_context->kernel))
+		goto out_privileged;
 
-	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
+	if (unlikely(entry->gb_disable && gb))
+		goto out_old;
+
+	if (unlikely(entry->gb_enable && !gb))
+		goto out_new;
+
+	ret = entry->func(dev_priv, sw_context, header);
 	if (unlikely(ret != 0))
-		goto out_err;
+		goto out_invalid;
 
 	return 0;
-out_err:
-	DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
+out_invalid:
+	DRM_ERROR("Invalid SVGA3D command: %d\n",
+		  cmd_id + SVGA_3D_CMD_BASE);
+	return -EINVAL;
+out_privileged:
+	DRM_ERROR("Privileged SVGA3D command: %d\n",
+		  cmd_id + SVGA_3D_CMD_BASE);
+	return -EPERM;
+out_old:
+	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
+		  cmd_id + SVGA_3D_CMD_BASE);
+	return -EINVAL;
+out_new:
+	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
 		  cmd_id + SVGA_3D_CMD_BASE);
 	return -EINVAL;
 }
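
The reworked vmw_cmd_check turns three independent questions (is the caller privileged enough? is the command obsolete once guest-backed objects exist? does it require them?) into per-entry flags checked before dispatch. The decision logic, extracted into a pure function for clarity; this is a sketch, where the flag names follow the diff and the rest is illustrative:

#include <stdbool.h>

struct cmd_entry {
	bool user_allow;	/* allowed from the unprivileged execbuf ioctl */
	bool gb_disable;	/* disabled when guest-backed objects exist    */
	bool gb_enable;		/* requires guest-backed objects               */
};

/* Returns 0 if dispatch may proceed, a negative errno-style code if not. */
static int cmd_gate(const struct cmd_entry *e, bool kernel_caller, bool gb)
{
	if (!e->user_allow && !kernel_caller)
		return -1;	/* -EPERM in the driver          */
	if (e->gb_disable && gb)
		return -22;	/* deprecated command: -EINVAL   */
	if (e->gb_enable && !gb)
		return -22;	/* unsupported by hw: -EINVAL    */
	return 0;
}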
@@ -1174,6 +1875,9 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
 		case VMW_PL_GMR:
 			reloc->location->gmrId = bo->mem.start;
 			break;
+		case VMW_PL_MOB:
+			*reloc->mob_loc = bo->mem.start;
+			break;
 		default:
 			BUG();
 		}
@@ -1198,6 +1902,8 @@ static void vmw_resource_list_unreference(struct list_head *list)
 	list_for_each_entry_safe(val, val_next, list, head) {
 		list_del_init(&val->head);
 		vmw_resource_unreference(&val->res);
+		if (unlikely(val->staged_bindings))
+			kfree(val->staged_bindings);
 		kfree(val);
 	}
 }
@@ -1224,7 +1930,8 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context)
 }
 
 static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
-				      struct ttm_buffer_object *bo)
+				      struct ttm_buffer_object *bo,
+				      bool validate_as_mob)
 {
 	int ret;
 
@@ -1238,6 +1945,9 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 			dev_priv->dummy_query_bo_pinned))
 		return 0;
 
+	if (validate_as_mob)
+		return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
+
 	/**
 	 * Put BO in VRAM if there is space, otherwise as a GMR.
 	 * If there is no space in VRAM and GMR ids are all used up,
@@ -1259,7 +1969,6 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 	return ret;
 }
 
-
 static int vmw_validate_buffers(struct vmw_private *dev_priv,
 				struct vmw_sw_context *sw_context)
 {
@@ -1267,7 +1976,8 @@ static int vmw_validate_buffers(struct vmw_private *dev_priv,
 	int ret;
 
 	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
-		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo);
+		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
+						 entry->validate_as_mob);
 		if (unlikely(ret != 0))
 			return ret;
 	}
@@ -1509,11 +2219,17 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 		goto out_err;
 	}
 
+	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
+	if (unlikely(ret != 0)) {
+		ret = -ERESTARTSYS;
+		goto out_err;
+	}
+
 	cmd = vmw_fifo_reserve(dev_priv, command_size);
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving fifo space for commands.\n");
 		ret = -ENOMEM;
-		goto out_err;
+		goto out_unlock_binding;
 	}
 
 	vmw_apply_relocations(sw_context);
@@ -1538,6 +2254,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 		DRM_ERROR("Fence submission error. Syncing.\n");
 
 	vmw_resource_list_unreserve(&sw_context->resource_list, false);
+	mutex_unlock(&dev_priv->binding_mutex);
+
 	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
 				    (void *) fence);
 
@@ -1568,6 +2286,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 
 	return 0;
 
+out_unlock_binding:
+	mutex_unlock(&dev_priv->binding_mutex);
 out_err:
 	vmw_resource_relocations_free(&sw_context->res_relocations);
 	vmw_free_relocations(sw_context);
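
The new binding_mutex is held across fifo reservation, relocation fixup, and submission, and is released both on the success path and through the new out_unlock_binding label, which sits above out_err so that unwinding happens in reverse acquisition order. A schematic of that unwind shape, as a user-space sketch using pthreads (the lock name follows the diff, everything else is illustrative):

#include <pthread.h>

static pthread_mutex_t binding_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Schematic of the execbuf error unwinding added in this patch. */
static int submit(int fail_reserve)
{
	int ret = 0;

	pthread_mutex_lock(&binding_mutex);

	if (fail_reserve) {		/* fifo reservation failed */
		ret = -12;		/* -ENOMEM */
		goto out_unlock_binding;
	}

	/* ... apply relocations, submit commands, fence buffers ... */

	pthread_mutex_unlock(&binding_mutex);
	return 0;

out_unlock_binding:
	pthread_mutex_unlock(&binding_mutex);
	return ret;
}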