author		Thomas Hellstrom <thellstrom@vmware.com>	2012-11-21 05:26:55 -0500
committer	Thomas Hellstrom <thellstrom@vmware.com>	2014-01-17 01:52:23 -0500
commit		ddcda24e3bec1d4c8bcc37e85d1b1b37bf0fecac (patch)
tree		73fe64722ed25d0b13b4bb5108299828bf7e1134 /drivers/gpu
parent		96c5f0df22aaf1f20075bc6ad3bdd7656e49cf4d (diff)
drm/vmwgfx: Hook up guest-backed queries
Perform a translation of legacy query commands should they occur
in the command stream.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
Reviewed-by: Zack Rusin <zackr@vmware.com>
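The translation works in place: each legacy query command struct is the same
size as its guest-backed counterpart, so when dev_priv->has_mob is set the
validator overwrites the legacy command in the command stream with the
equivalent GB command and re-dispatches it to the GB handler. Below is a
minimal standalone sketch of that pattern; hdr, legacy_q, gb_q and the
command ids are simplified stand-ins, not the real SVGA3dCmd* definitions
from svga3d_reg.h.

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	struct hdr { uint32_t id, size; };		/* stand-in for SVGA3dCmdHeader */
	struct legacy_q { uint32_t cid, type, gmr_id, offset; };
	struct gb_q { uint32_t cid, type, mobid, offset; };

	enum { CMD_END_QUERY = 1, CMD_END_GB_QUERY = 2 }; /* hypothetical ids */

	static void translate_end_query(struct hdr *header)
	{
		struct { struct hdr h; struct legacy_q q; } *cmd = (void *)header;
		struct { struct hdr h; struct gb_q q; } gb;

		/* The in-place rewrite is only legal because the sizes match. */
		assert(sizeof(gb) == sizeof(*cmd));

		gb.h.id = CMD_END_GB_QUERY;
		gb.h.size = cmd->h.size;
		gb.q.cid = cmd->q.cid;
		gb.q.type = cmd->q.type;
		gb.q.mobid = cmd->q.gmr_id;	/* still a handle; relocated later */
		gb.q.offset = cmd->q.offset;

		/* Overwrite the legacy command in the command stream; the
		 * caller then re-validates it through the GB path. */
		memcpy(cmd, &gb, sizeof(*cmd));
	}

In the patch itself the size invariant is asserted with BUG_ON() in each
translation path, and the MOB id written at translation time is still a
user-space buffer handle: vmw_translate_mob_ptr() records its location in a
relocation list so vmw_apply_relocations() can patch in the real MOB id
after validation.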
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_drv.h	1
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c	215
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c	90
3 files changed, 292 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index a9a0d6949ca2..3d672adf0ea4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -167,6 +167,7 @@ struct vmw_fifo_state {
 };
 
 struct vmw_relocation {
+	SVGAMobId *mob_loc;
 	SVGAGuestPtr *location;
 	uint32_t index;
 };
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 9d7e49d3801b..6583dd34ed18 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -680,6 +680,66 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 }
 
 /**
+ * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
+ * handle to a MOB id.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: The software context used for this command batch validation.
+ * @id: Pointer to the user-space handle to be translated.
+ * @vmw_bo_p: Points to a location that, on successful return, will carry
+ * a reference-counted pointer to the DMA buffer identified by the
+ * user-space handle in @id.
+ *
+ * This function saves information needed to translate a user-space buffer
+ * handle to a MOB id. The translation does not take place immediately, but
+ * during a call to vmw_apply_relocations(). This function builds a relocation
+ * list and a list of buffers to validate. The former needs to be freed using
+ * either vmw_apply_relocations() or vmw_free_relocations(). The latter
+ * needs to be freed using vmw_clear_validations().
+ */
+static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGAMobId *id,
+				 struct vmw_dma_buffer **vmw_bo_p)
+{
+	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct ttm_buffer_object *bo;
+	uint32_t handle = *id;
+	struct vmw_relocation *reloc;
+	int ret;
+
+	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Could not find or use MOB buffer.\n");
+		return -EINVAL;
+	}
+	bo = &vmw_bo->base;
+
+	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
+		DRM_ERROR("Max number of relocations per submission"
+			  " exceeded\n");
+		ret = -EINVAL;
+		goto out_no_reloc;
+	}
+
+	reloc = &sw_context->relocs[sw_context->cur_reloc++];
+	reloc->mob_loc = id;
+	reloc->location = NULL;
+
+	ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
+	if (unlikely(ret != 0))
+		goto out_no_reloc;
+
+	*vmw_bo_p = vmw_bo;
+	return 0;
+
+out_no_reloc:
+	vmw_dmabuf_unreference(&vmw_bo);
+	*vmw_bo_p = NULL;
+	return ret;
+}
+
+/**
  * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
  * handle to a valid SVGAGuestPtr
  *
@@ -740,6 +800,30 @@ out_no_reloc:
 }
 
 /**
+ * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct vmw_begin_gb_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBeginGBQuery q;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
+			   header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				 user_context_converter, &cmd->q.cid,
+				 NULL);
+}
+
+/**
  * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
  *
  * @dev_priv: Pointer to a device private struct.
@@ -758,12 +842,64 @@ static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
 	cmd = container_of(header, struct vmw_begin_query_cmd,
 			   header);
 
+	if (unlikely(dev_priv->has_mob)) {
+		struct {
+			SVGA3dCmdHeader header;
+			SVGA3dCmdBeginGBQuery q;
+		} gb_cmd;
+
+		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
+		gb_cmd.header.size = cmd->header.size;
+		gb_cmd.q.cid = cmd->q.cid;
+		gb_cmd.q.type = cmd->q.type;
+
+		memcpy(cmd, &gb_cmd, sizeof(*cmd));
+		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
+	}
+
 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 				 user_context_converter, &cmd->q.cid,
 				 NULL);
 }
 
 /**
+ * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
+				struct vmw_sw_context *sw_context,
+				SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdEndGBQuery q;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_query_cmd, header);
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
+				    &cmd->q.mobid,
+				    &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
+
+	vmw_dmabuf_unreference(&vmw_bo);
+	return ret;
+}
+
+/**
  * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
  *
  * @dev_priv: Pointer to a device private struct.
@@ -782,6 +918,25 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
 	int ret;
 
 	cmd = container_of(header, struct vmw_query_cmd, header);
+	if (dev_priv->has_mob) {
+		struct {
+			SVGA3dCmdHeader header;
+			SVGA3dCmdEndGBQuery q;
+		} gb_cmd;
+
+		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
+		gb_cmd.header.size = cmd->header.size;
+		gb_cmd.q.cid = cmd->q.cid;
+		gb_cmd.q.type = cmd->q.type;
+		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
+		gb_cmd.q.offset = cmd->q.guestResult.offset;
+
+		memcpy(cmd, &gb_cmd, sizeof(*cmd));
+		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
+	}
+
 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
 	if (unlikely(ret != 0))
 		return ret;
@@ -798,7 +953,40 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
 	return ret;
 }
 
-/*
+/**
+ * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdWaitForGBQuery q;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_query_cmd, header);
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
+				    &cmd->q.mobid,
+				    &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_dmabuf_unreference(&vmw_bo);
+	return 0;
+}
+
+/**
  * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
  *
  * @dev_priv: Pointer to a device private struct.
@@ -817,6 +1005,25 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
 	int ret;
 
 	cmd = container_of(header, struct vmw_query_cmd, header);
+	if (dev_priv->has_mob) {
+		struct {
+			SVGA3dCmdHeader header;
+			SVGA3dCmdWaitForGBQuery q;
+		} gb_cmd;
+
+		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
+		gb_cmd.header.size = cmd->header.size;
+		gb_cmd.q.cid = cmd->q.cid;
+		gb_cmd.q.type = cmd->q.type;
+		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
+		gb_cmd.q.offset = cmd->q.guestResult.offset;
+
+		memcpy(cmd, &gb_cmd, sizeof(*cmd));
+		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
+	}
+
 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
 	if (unlikely(ret != 0))
 		return ret;
@@ -1093,6 +1300,9 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
 	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
 	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
 	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
+	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query),
+	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query),
+	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query),
 };
 
 static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -1182,6 +1392,9 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
 	case VMW_PL_GMR:
 		reloc->location->gmrId = bo->mem.start;
 		break;
+	case VMW_PL_MOB:
+		*reloc->mob_loc = bo->mem.start;
+		break;
 	default:
 		BUG();
 	}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 3eb148667d63..01baffd90549 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -511,24 +511,16 @@ out_err:
 }
 
 /**
- * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo.
+ * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
+ * legacy query commands.
  *
  * @dev_priv: The device private structure.
  * @cid: The hardware context id used for the query.
  *
- * This function is used to emit a dummy occlusion query with
- * no primitives rendered between query begin and query end.
- * It's used to provide a query barrier, in order to know that when
- * this query is finished, all preceding queries are also finished.
- *
- * A Query results structure should have been initialized at the start
- * of the dev_priv->dummy_query_bo buffer object. And that buffer object
- * must also be either reserved or pinned when this function is called.
- *
- * Returns -ENOMEM on failure to reserve fifo space.
+ * See the vmw_fifo_emit_dummy_query documentation.
  */
-int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
-			      uint32_t cid)
+static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
+					    uint32_t cid)
 {
 	/*
 	 * A query wait without a preceding query end will
@@ -566,3 +558,75 @@ int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
 
 	return 0;
 }
+
+/**
+ * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
+ * guest-backed resource query commands.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context id used for the query.
+ *
+ * See the vmw_fifo_emit_dummy_query documentation.
+ */
+static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
+					uint32_t cid)
+{
+	/*
+	 * A query wait without a preceding query end will
+	 * actually finish all queries for this cid
+	 * without writing to the query result structure.
+	 */
+
+	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdWaitForGBQuery body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Out of fifo space for dummy query.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = cid;
+	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+	cmd->body.mobid = bo->mem.start;
+	cmd->body.offset = 0;
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using
+ * the appropriate resource query commands.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context id used for the query.
+ *
+ * This function is used to emit a dummy occlusion query with
+ * no primitives rendered between query begin and query end.
+ * It's used to provide a query barrier, in order to know that when
+ * this query is finished, all preceding queries are also finished.
+ *
+ * A query result structure should have been initialized at the start
+ * of the dev_priv->dummy_query_bo buffer object, and that buffer object
+ * must also be either reserved or pinned when this function is called.
+ *
+ * Returns -ENOMEM on failure to reserve fifo space.
+ */
+int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
+			      uint32_t cid)
+{
+	if (dev_priv->has_mob)
+		return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);
+
+	return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
+}