Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga3d_reg.h      |  24
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c  | 144
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c      |   7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h      |  35
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  | 330
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c    |  93
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_mob.c      |   1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c |  11
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c   | 467
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c  |   6
10 files changed, 988 insertions, 130 deletions
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
index d95335cb90bd..b645647b7776 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -2583,4 +2583,28 @@ typedef union {
    float f;
 } SVGA3dDevCapResult;
 
+typedef enum {
+   SVGA3DCAPS_RECORD_UNKNOWN        = 0,
+   SVGA3DCAPS_RECORD_DEVCAPS_MIN    = 0x100,
+   SVGA3DCAPS_RECORD_DEVCAPS        = 0x100,
+   SVGA3DCAPS_RECORD_DEVCAPS_MAX    = 0x1ff,
+} SVGA3dCapsRecordType;
+
+typedef
+struct SVGA3dCapsRecordHeader {
+   uint32 length;
+   SVGA3dCapsRecordType type;
+}
+SVGA3dCapsRecordHeader;
+
+typedef
+struct SVGA3dCapsRecord {
+   SVGA3dCapsRecordHeader header;
+   uint32 data[1];
+}
+SVGA3dCapsRecord;
+
+
+typedef uint32 SVGA3dCapPair[2];
+
 #endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 82c41daebc0e..9426c53fb483 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -37,7 +37,7 @@ struct vmw_user_context {
 
 
 
-typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *);
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
 
 static void vmw_user_context_free(struct vmw_resource *res);
 static struct vmw_resource *
@@ -50,9 +50,11 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 			    bool readback,
 			    struct ttm_validate_buffer *val_buf);
 static int vmw_gb_context_destroy(struct vmw_resource *res);
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi);
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi);
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi);
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+					    bool rebind);
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
 static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
 static uint64_t vmw_user_context_size;
 
@@ -111,10 +113,14 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
 
 	if (res->func->destroy == vmw_gb_context_destroy) {
 		mutex_lock(&dev_priv->cmdbuf_mutex);
+		mutex_lock(&dev_priv->binding_mutex);
+		(void) vmw_context_binding_state_kill
+			(&container_of(res, struct vmw_user_context, res)->cbs);
 		(void) vmw_gb_context_destroy(res);
 		if (dev_priv->pinned_bo != NULL &&
 		    !dev_priv->query_cid_valid)
 			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
+		mutex_unlock(&dev_priv->binding_mutex);
 		mutex_unlock(&dev_priv->cmdbuf_mutex);
 		return;
 	}
@@ -328,7 +334,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_state_kill(&uctx->cbs);
+	vmw_context_binding_state_scrub(&uctx->cbs);
 
 	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
 
@@ -378,10 +384,6 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDestroyGBContext body;
 	} *cmd;
-	struct vmw_user_context *uctx =
-		container_of(res, struct vmw_user_context, res);
-
-	BUG_ON(!list_empty(&uctx->cbs.list));
 
 	if (likely(res->id == -1))
 		return 0;
@@ -528,8 +530,9 @@ out_unlock:
  * vmw_context_scrub_shader - scrub a shader binding from a context.
  *
  * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
  */
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
 {
 	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 	struct {
@@ -548,7 +551,8 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = bi->ctx->id;
 	cmd->body.type = bi->i1.shader_type;
-	cmd->body.shid = SVGA3D_INVALID_ID;
+	cmd->body.shid =
+		cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
@@ -559,8 +563,10 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
  * from a context.
  *
  * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
  */
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+					   bool rebind)
 {
 	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 	struct {
@@ -579,7 +585,8 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = bi->ctx->id;
 	cmd->body.type = bi->i1.rt_type;
-	cmd->body.target.sid = SVGA3D_INVALID_ID;
+	cmd->body.target.sid =
+		cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 	cmd->body.target.face = 0;
 	cmd->body.target.mipmap = 0;
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
@@ -591,11 +598,13 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
  * vmw_context_scrub_texture - scrub a texture binding from a context.
  *
  * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
  *
  * TODO: Possibly complement this function with a function that takes
  * a list of texture bindings and combines them to a single command.
  */
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
+				     bool rebind)
 {
 	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 	struct {
@@ -619,7 +628,8 @@ static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
 	cmd->body.c.cid = bi->ctx->id;
 	cmd->body.s1.stage = bi->i1.texture_stage;
 	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
-	cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID;
+	cmd->body.s1.value =
+		cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
@@ -692,6 +702,7 @@ int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
 		vmw_context_binding_drop(loc);
 
 	loc->bi = *bi;
+	loc->bi.scrubbed = false;
 	list_add_tail(&loc->ctx_list, &cbs->list);
 	INIT_LIST_HEAD(&loc->res_list);
 
@@ -727,12 +738,11 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
 	if (loc->bi.ctx != NULL)
 		vmw_context_binding_drop(loc);
 
-	loc->bi = *bi;
-	list_add_tail(&loc->ctx_list, &cbs->list);
-	if (bi->res != NULL)
+	if (bi->res != NULL) {
+		loc->bi = *bi;
+		list_add_tail(&loc->ctx_list, &cbs->list);
 		list_add_tail(&loc->res_list, &bi->res->binding_head);
-	else
-		INIT_LIST_HEAD(&loc->res_list);
+	}
 }
 
 /**
@@ -746,7 +756,10 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
  */
 static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
 {
-	(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi);
+	if (!cb->bi.scrubbed) {
+		(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
+		cb->bi.scrubbed = true;
+	}
 	vmw_context_binding_drop(cb);
 }
 
@@ -768,6 +781,27 @@ static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
 }
 
 /**
+ * vmw_context_binding_state_scrub - Scrub all bindings associated with a
+ * struct vmw_ctx_binding state structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker.
+ */
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_binding *entry;
+
+	list_for_each_entry(entry, &cbs->list, ctx_list) {
+		if (!entry->bi.scrubbed) {
+			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+			entry->bi.scrubbed = true;
+		}
+	}
+}
+
+/**
  * vmw_context_binding_res_list_kill - Kill all bindings on a
  * resource binding list
  *
@@ -785,6 +819,27 @@ void vmw_context_binding_res_list_kill(struct list_head *head)
 }
 
 /**
+ * vmw_context_binding_res_list_scrub - Scrub all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Scrub all bindings associated with a specific resource. Typically
+ * called before the resource is evicted.
+ */
+void vmw_context_binding_res_list_scrub(struct list_head *head)
+{
+	struct vmw_ctx_binding *entry;
+
+	list_for_each_entry(entry, head, res_list) {
+		if (!entry->bi.scrubbed) {
+			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+			entry->bi.scrubbed = true;
+		}
+	}
+}
+
+/**
  * vmw_context_binding_state_transfer - Commit staged binding info
  *
  * @ctx: Pointer to context to commit the staged binding info to.
@@ -803,3 +858,50 @@ void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
 	list_for_each_entry_safe(entry, next, &from->list, ctx_list)
 		vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
 }
+
+/**
+ * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
+ *
+ * @ctx: The context resource
+ *
+ * Walks through the context binding list and rebinds all scrubbed
+ * resources.
+ */
+int vmw_context_rebind_all(struct vmw_resource *ctx)
+{
+	struct vmw_ctx_binding *entry;
+	struct vmw_user_context *uctx =
+		container_of(ctx, struct vmw_user_context, res);
+	struct vmw_ctx_binding_state *cbs = &uctx->cbs;
+	int ret;
+
+	list_for_each_entry(entry, &cbs->list, ctx_list) {
+		if (likely(!entry->bi.scrubbed))
+			continue;
+
+		if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
+			    SVGA3D_INVALID_ID))
+			continue;
+
+		ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
+		if (unlikely(ret != 0))
+			return ret;
+
+		entry->bi.scrubbed = false;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_context_binding_list - Return a list of context bindings
+ *
+ * @ctx: The context resource
+ *
+ * Returns the current list of bindings of the given context. Note that
+ * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ */
+struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
+{
+	return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 9893328f8fdc..3bdc0adc656d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -941,6 +941,7 @@ static void vmw_postclose(struct drm_device *dev,
 		drm_master_put(&vmw_fp->locked_master);
 	}
 
+	vmw_compat_shader_man_destroy(vmw_fp->shman);
 	ttm_object_file_release(&vmw_fp->tfile);
 	kfree(vmw_fp);
 }
@@ -960,11 +961,17 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
 	if (unlikely(vmw_fp->tfile == NULL))
 		goto out_no_tfile;
 
+	vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
+	if (IS_ERR(vmw_fp->shman))
+		goto out_no_shman;
+
 	file_priv->driver_priv = vmw_fp;
 	dev_priv->bdev.dev_mapping = dev->dev_mapping;
 
 	return 0;
 
+out_no_shman:
+	ttm_object_file_release(&vmw_fp->tfile);
 out_no_tfile:
 	kfree(vmw_fp);
 	return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 554e7fa33082..ecaa302a6154 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -75,10 +75,14 @@
 #define VMW_RES_FENCE ttm_driver_type3
 #define VMW_RES_SHADER ttm_driver_type4
 
+struct vmw_compat_shader_manager;
+
 struct vmw_fpriv {
 	struct drm_master *locked_master;
 	struct ttm_object_file *tfile;
 	struct list_head fence_events;
+	bool gb_aware;
+	struct vmw_compat_shader_manager *shman;
 };
 
 struct vmw_dma_buffer {
@@ -272,6 +276,7 @@ struct vmw_ctx_bindinfo {
 	struct vmw_resource *ctx;
 	struct vmw_resource *res;
 	enum vmw_ctx_binding_type bt;
+	bool scrubbed;
 	union {
 		SVGA3dShaderType shader_type;
 		SVGA3dRenderTargetType rt_type;
@@ -318,7 +323,7 @@ struct vmw_sw_context{
 	struct drm_open_hash res_ht;
 	bool res_ht_initialized;
 	bool kernel; /**< is the called made from the kernel */
-	struct ttm_object_file *tfile;
+	struct vmw_fpriv *fp;
 	struct list_head validate_nodes;
 	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
 	uint32_t cur_reloc;
@@ -336,6 +341,7 @@ struct vmw_sw_context{
 	bool needs_post_query_barrier;
 	struct vmw_resource *error_resource;
 	struct vmw_ctx_binding_state staged_bindings;
+	struct list_head staged_shaders;
 };
 
 struct vmw_legacy_display;
@@ -569,6 +575,8 @@ struct vmw_user_resource_conv;
 
 extern void vmw_resource_unreference(struct vmw_resource **p_res);
 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res);
 extern int vmw_resource_validate(struct vmw_resource *res);
 extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
@@ -957,6 +965,9 @@ extern void
 vmw_context_binding_state_transfer(struct vmw_resource *res,
 				   struct vmw_ctx_binding_state *cbs);
 extern void vmw_context_binding_res_list_kill(struct list_head *head);
+extern void vmw_context_binding_res_list_scrub(struct list_head *head);
+extern int vmw_context_rebind_all(struct vmw_resource *ctx);
+extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
 
 /*
  * Surface management - vmwgfx_surface.c
@@ -991,6 +1002,28 @@ extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 				   struct drm_file *file_priv);
 extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv);
+extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+				    SVGA3dShaderType shader_type,
+				    u32 *user_key);
+extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+				      struct list_head *list);
+extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+				      struct list_head *list);
+extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+				    u32 user_key,
+				    SVGA3dShaderType shader_type,
+				    struct list_head *list);
+extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+				 u32 user_key, const void *bytecode,
+				 SVGA3dShaderType shader_type,
+				 size_t size,
+				 struct ttm_object_file *tfile,
+				 struct list_head *list);
+extern struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv);
+extern void
+vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man);
+
 
 /**
  * Inline helper functions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 7a5f1eb55c5a..269b85cc875a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -114,8 +114,10 @@ static void vmw_resource_list_unreserve(struct list_head *list,
 	 * persistent context binding tracker.
 	 */
 	if (unlikely(val->staged_bindings)) {
-		vmw_context_binding_state_transfer
-			(val->res, val->staged_bindings);
+		if (!backoff) {
+			vmw_context_binding_state_transfer
+				(val->res, val->staged_bindings);
+		}
 		kfree(val->staged_bindings);
 		val->staged_bindings = NULL;
 	}
@@ -178,6 +180,44 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
 }
 
 /**
+ * vmw_resource_context_res_add - Put resources previously bound to a context on
+ * the validation list
+ *
+ * @dev_priv: Pointer to a device private structure
+ * @sw_context: Pointer to a software context used for this command submission
+ * @ctx: Pointer to the context resource
+ *
+ * This function puts all resources that were previously bound to @ctx on
+ * the resource validation list. This is part of the context state reemission
+ */
+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+					struct vmw_sw_context *sw_context,
+					struct vmw_resource *ctx)
+{
+	struct list_head *binding_list;
+	struct vmw_ctx_binding *entry;
+	int ret = 0;
+	struct vmw_resource *res;
+
+	mutex_lock(&dev_priv->binding_mutex);
+	binding_list = vmw_context_binding_list(ctx);
+
+	list_for_each_entry(entry, binding_list, ctx_list) {
+		res = vmw_resource_reference_unless_doomed(entry->bi.res);
+		if (unlikely(res == NULL))
+			continue;
+
+		ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
+		vmw_resource_unreference(&res);
+		if (unlikely(ret != 0))
+			break;
+	}
+
+	mutex_unlock(&dev_priv->binding_mutex);
+	return ret;
+}
+
+/**
  * vmw_resource_relocation_add - Add a relocation to the relocation list
  *
  * @list: Pointer to head of relocation list.
@@ -233,8 +273,12 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
 {
 	struct vmw_resource_relocation *rel;
 
-	list_for_each_entry(rel, list, head)
-		cb[rel->offset] = rel->res->id;
+	list_for_each_entry(rel, list, head) {
+		if (likely(rel->res != NULL))
+			cb[rel->offset] = rel->res->id;
+		else
+			cb[rel->offset] = SVGA_3D_CMD_NOP;
+	}
 }
 
 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
@@ -379,22 +423,27 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 }
 
 /**
- * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
  * on the resource validate list unless it's already there.
  *
  * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visisble type specific information.
- * @id: Pointer to the location in the command buffer currently being
+ * @id: user-space resource id handle.
+ * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validalidation node. Populated
+ * on exit.
  */
-static int vmw_cmd_res_check(struct vmw_private *dev_priv,
-			     struct vmw_sw_context *sw_context,
-			     enum vmw_res_type res_type,
-			     const struct vmw_user_resource_conv *converter,
-			     uint32_t *id,
-			     struct vmw_resource_val_node **p_val)
+static int
+vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
+			 struct vmw_sw_context *sw_context,
+			 enum vmw_res_type res_type,
+			 const struct vmw_user_resource_conv *converter,
+			 uint32_t id,
+			 uint32_t *id_loc,
+			 struct vmw_resource_val_node **p_val)
 {
 	struct vmw_res_cache_entry *rcache =
 		&sw_context->res_cache[res_type];
@@ -402,7 +451,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
 	struct vmw_resource_val_node *node;
 	int ret;
 
-	if (*id == SVGA3D_INVALID_ID) {
+	if (id == SVGA3D_INVALID_ID) {
 		if (p_val)
 			*p_val = NULL;
 		if (res_type == vmw_res_context) {
@@ -417,7 +466,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
 	 * resource
 	 */
 
-	if (likely(rcache->valid && *id == rcache->handle)) {
+	if (likely(rcache->valid && id == rcache->handle)) {
 		const struct vmw_resource *res = rcache->res;
 
 		rcache->node->first_usage = false;
@@ -426,28 +475,28 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
 
 		return vmw_resource_relocation_add
 			(&sw_context->res_relocations, res,
-			 id - sw_context->buf_start);
+			 id_loc - sw_context->buf_start);
 	}
 
 	ret = vmw_user_resource_lookup_handle(dev_priv,
-					      sw_context->tfile,
-					      *id,
+					      sw_context->fp->tfile,
+					      id,
 					      converter,
 					      &res);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use resource 0x%08x.\n",
-			  (unsigned) *id);
+			  (unsigned) id);
 		dump_stack();
 		return ret;
 	}
 
 	rcache->valid = true;
 	rcache->res = res;
-	rcache->handle = *id;
+	rcache->handle = id;
 
 	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
 					  res,
-					  id - sw_context->buf_start);
+					  id_loc - sw_context->buf_start);
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
 
@@ -459,7 +508,11 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
 	if (p_val)
 		*p_val = node;
 
-	if (node->first_usage && res_type == vmw_res_context) {
+	if (dev_priv->has_mob && node->first_usage &&
+	    res_type == vmw_res_context) {
+		ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
+		if (unlikely(ret != 0))
+			goto out_no_reloc;
 		node->staged_bindings =
 			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
 		if (node->staged_bindings == NULL) {
@@ -481,6 +534,59 @@ out_no_reloc:
 }
 
 /**
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * on the resource validate list unless it's already there.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @converter: User-space visisble type specific information.
+ * @id_loc: Pointer to the location in the command buffer currently being
+ * parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validalidation node. Populated
+ * on exit.
+ */
+static int
+vmw_cmd_res_check(struct vmw_private *dev_priv,
+		  struct vmw_sw_context *sw_context,
+		  enum vmw_res_type res_type,
+		  const struct vmw_user_resource_conv *converter,
+		  uint32_t *id_loc,
+		  struct vmw_resource_val_node **p_val)
+{
+	return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
+					converter, *id_loc, id_loc, p_val);
+}
+
+/**
+ * vmw_rebind_contexts - Rebind all resources previously bound to
+ * referenced contexts.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Rebind context binding points that have been scrubbed because of eviction.
+ */
+static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
+{
+	struct vmw_resource_val_node *val;
+	int ret;
+
+	list_for_each_entry(val, &sw_context->resource_list, head) {
+		if (likely(!val->staged_bindings))
+			continue;
+
+		ret = vmw_context_rebind_all(val->res);
+		if (unlikely(ret != 0)) {
+			if (ret != -ERESTARTSYS)
+				DRM_ERROR("Failed to rebind context.\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/**
  * vmw_cmd_cid_check - Check a command header for valid context information.
  *
  * @dev_priv: Pointer to a device private structure.
@@ -767,7 +873,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 	struct vmw_relocation *reloc;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use MOB buffer.\n");
 		return -EINVAL;
@@ -828,7 +934,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	struct vmw_relocation *reloc;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use GMR region.\n");
 		return -EINVAL;
@@ -1127,7 +1233,8 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 
 	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
 
-	vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
+	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
+			     header);
 
 out_no_surface:
 	vmw_dmabuf_unreference(&vmw_bo);
@@ -1478,6 +1585,98 @@ static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
 			 &cmd->body.sid, NULL);
 }
 
+
+/**
+ * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGA3dCmdHeader *header)
+{
+	struct vmw_shader_define_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineShader body;
+	} *cmd;
+	int ret;
+	size_t size;
+
+	cmd = container_of(header, struct vmw_shader_define_cmd,
+			   header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->body.cid,
+				NULL);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (unlikely(!dev_priv->has_mob))
+		return 0;
+
+	size = cmd->header.size - sizeof(cmd->body);
+	ret = vmw_compat_shader_add(sw_context->fp->shman,
+				    cmd->body.shid, cmd + 1,
+				    cmd->body.type, size,
+				    sw_context->fp->tfile,
+				    &sw_context->staged_shaders);
+	if (unlikely(ret != 0))
+		return ret;
+
+	return vmw_resource_relocation_add(&sw_context->res_relocations,
+					   NULL, &cmd->header.id -
+					   sw_context->buf_start);
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct vmw_shader_destroy_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyShader body;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_shader_destroy_cmd,
+			   header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->body.cid,
+				NULL);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (unlikely(!dev_priv->has_mob))
+		return 0;
+
+	ret = vmw_compat_shader_remove(sw_context->fp->shman,
+				       cmd->body.shid,
+				       cmd->body.type,
+				       &sw_context->staged_shaders);
+	if (unlikely(ret != 0))
+		return ret;
+
+	return vmw_resource_relocation_add(&sw_context->res_relocations,
+					   NULL, &cmd->header.id -
+					   sw_context->buf_start);
+
+	return 0;
+}
+
 /**
  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
  * command
@@ -1509,10 +1708,18 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
 	if (dev_priv->has_mob) {
 		struct vmw_ctx_bindinfo bi;
 		struct vmw_resource_val_node *res_node;
-
-		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
-					user_shader_converter,
-					&cmd->body.shid, &res_node);
+		u32 shid = cmd->body.shid;
+
+		if (shid != SVGA3D_INVALID_ID)
+			(void) vmw_compat_shader_lookup(sw_context->fp->shman,
+							cmd->body.type,
+							&shid);
+
+		ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
+					       vmw_res_shader,
+					       user_shader_converter,
+					       shid,
+					       &cmd->body.shid, &res_node);
 		if (unlikely(ret != 0))
 			return ret;
 
@@ -1527,6 +1734,39 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
 }
 
 /**
+ * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
+				    struct vmw_sw_context *sw_context,
+				    SVGA3dCmdHeader *header)
+{
+	struct vmw_set_shader_const_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetShaderConst body;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_set_shader_const_cmd,
+			   header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->body.cid,
+				NULL);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (dev_priv->has_mob)
+		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
+
+	return 0;
+}
+
+/**
  * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
  * command
  *
@@ -1634,14 +1874,14 @@ static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 		    true, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
 		    false, false, false),
-	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check,
-		    true, true, false),
-	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check,
-		    true, true, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
+		    true, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
 		    true, false, false),
-	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check,
-		    true, true, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
+		    true, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
 		    true, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
@@ -2171,7 +2411,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	} else
 		sw_context->kernel = true;
 
-	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
+	sw_context->fp = vmw_fpriv(file_priv);
 	sw_context->cur_reloc = 0;
 	sw_context->cur_val_buf = 0;
 	sw_context->fence_flags = 0;
@@ -2188,16 +2428,17 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 			goto out_unlock;
 		sw_context->res_ht_initialized = true;
 	}
+	INIT_LIST_HEAD(&sw_context->staged_shaders);
 
 	INIT_LIST_HEAD(&resource_list);
 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
 				command_size);
 	if (unlikely(ret != 0))
-		goto out_err;
+		goto out_err_nores;
 
 	ret = vmw_resources_reserve(sw_context);
 	if (unlikely(ret != 0))
-		goto out_err;
+		goto out_err_nores;
 
 	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
 	if (unlikely(ret != 0))
@@ -2225,6 +2466,12 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 		goto out_err;
 	}
 
+	if (dev_priv->has_mob) {
+		ret = vmw_rebind_contexts(sw_context);
+		if (unlikely(ret != 0))
+			goto out_err;
+	}
+
 	cmd = vmw_fifo_reserve(dev_priv, command_size);
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving fifo space for commands.\n");
@@ -2276,6 +2523,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	}
 
 	list_splice_init(&sw_context->resource_list, &resource_list);
+	vmw_compat_shaders_commit(sw_context->fp->shman,
+				  &sw_context->staged_shaders);
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 
 	/*
@@ -2289,10 +2538,11 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 out_unlock_binding:
 	mutex_unlock(&dev_priv->binding_mutex);
 out_err:
-	vmw_resource_relocations_free(&sw_context->res_relocations);
-	vmw_free_relocations(sw_context);
 	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
+out_err_nores:
 	vmw_resource_list_unreserve(&sw_context->resource_list, true);
+	vmw_resource_relocations_free(&sw_context->res_relocations);
+	vmw_free_relocations(sw_context);
 	vmw_clear_validations(sw_context);
 	if (unlikely(dev_priv->pinned_bo != NULL &&
 		     !dev_priv->query_cid_valid))
@@ -2301,6 +2551,8 @@ out_unlock:
 	list_splice_init(&sw_context->resource_list, &resource_list);
 	error_resource = sw_context->error_resource;
 	sw_context->error_resource = NULL;
+	vmw_compat_shaders_revert(sw_context->fp->shman,
+				  &sw_context->staged_shaders);
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 
 	/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 116c49736763..f9881f9e62bd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -29,12 +29,18 @@
 #include <drm/vmwgfx_drm.h>
 #include "vmwgfx_kms.h"
 
+struct svga_3d_compat_cap {
+	SVGA3dCapsRecordHeader header;
+	SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
+};
+
 int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	struct drm_vmw_getparam_arg *param =
 		(struct drm_vmw_getparam_arg *)data;
+	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 
 	switch (param->param) {
 	case DRM_VMW_PARAM_NUM_STREAMS:
@@ -60,6 +66,11 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
 		const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
+		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
+			param->value = SVGA3D_HWVERSION_WS8_B1;
+			break;
+		}
+
 		param->value =
 			ioread32(fifo_mem +
 				 ((fifo->capabilities &
@@ -69,17 +80,26 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		break;
 	}
 	case DRM_VMW_PARAM_MAX_SURF_MEMORY:
-		param->value = dev_priv->memory_size;
+		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+		    !vmw_fp->gb_aware)
+			param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
+		else
+			param->value = dev_priv->memory_size;
 		break;
 	case DRM_VMW_PARAM_3D_CAPS_SIZE:
-		if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
-			param->value = SVGA3D_DEVCAP_MAX;
+		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+		    vmw_fp->gb_aware)
+			param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+		else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
+			param->value = sizeof(struct svga_3d_compat_cap) +
+				sizeof(uint32_t);
 		else
 			param->value = (SVGA_FIFO_3D_CAPS_LAST -
-					SVGA_FIFO_3D_CAPS + 1);
-		param->value *= sizeof(uint32_t);
+					SVGA_FIFO_3D_CAPS + 1) *
+				sizeof(uint32_t);
 		break;
 	case DRM_VMW_PARAM_MAX_MOB_MEMORY:
+		vmw_fp->gb_aware = true;
 		param->value = dev_priv->max_mob_pages * PAGE_SIZE;
 		break;
 	default:
@@ -91,6 +111,38 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
+			       size_t size)
+{
+	struct svga_3d_compat_cap *compat_cap =
+		(struct svga_3d_compat_cap *) bounce;
+	unsigned int i;
+	size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
+	unsigned int max_size;
+
+	if (size < pair_offset)
+		return -EINVAL;
+
+	max_size = (size - pair_offset) / sizeof(SVGA3dCapPair);
+
+	if (max_size > SVGA3D_DEVCAP_MAX)
+		max_size = SVGA3D_DEVCAP_MAX;
+
+	compat_cap->header.length =
+		(pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
+	compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
+
+	mutex_lock(&dev_priv->hw_mutex);
+	for (i = 0; i < max_size; ++i) {
+		vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
+		compat_cap->pairs[i][0] = i;
+		compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+	}
+	mutex_unlock(&dev_priv->hw_mutex);
+
+	return 0;
+}
+
 
 int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv)
@@ -104,41 +156,49 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 	void *bounce;
 	int ret;
 	bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
+	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 
 	if (unlikely(arg->pad64 != 0)) {
 		DRM_ERROR("Illegal GET_3D_CAP argument.\n");
 		return -EINVAL;
 	}
 
-	if (gb_objects)
-		size = SVGA3D_DEVCAP_MAX;
+	if (gb_objects && vmw_fp->gb_aware)
+		size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+	else if (gb_objects)
+		size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
 	else
-		size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1);
-
-	size *= sizeof(uint32_t);
+		size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
+			sizeof(uint32_t);
 
 	if (arg->max_size < size)
 		size = arg->max_size;
 
-	bounce = vmalloc(size);
+	bounce = vzalloc(size);
 	if (unlikely(bounce == NULL)) {
 		DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
 		return -ENOMEM;
 	}
 
-	if (gb_objects) {
-		int i;
+	if (gb_objects && vmw_fp->gb_aware) {
+		int i, num;
 		uint32_t *bounce32 = (uint32_t *) bounce;
 
+		num = size / sizeof(uint32_t);
+		if (num > SVGA3D_DEVCAP_MAX)
+			num = SVGA3D_DEVCAP_MAX;
+
 		mutex_lock(&dev_priv->hw_mutex);
-		for (i = 0; i < SVGA3D_DEVCAP_MAX; ++i) {
+		for (i = 0; i < num; ++i) {
 			vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
 			*bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
 		}
 		mutex_unlock(&dev_priv->hw_mutex);
-
+	} else if (gb_objects) {
+		ret = vmw_fill_compat_cap(dev_priv, bounce, size);
+		if (unlikely(ret != 0))
+			goto out_err;
 	} else {
-
 		fifo_mem = dev_priv->mmio_virt;
 		memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
 	}
@@ -146,6 +206,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 	ret = copy_to_user(buffer, bounce, size);
 	if (ret)
 		ret = -EFAULT;
+out_err:
 	vfree(bounce);
 
 	if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 4910e7b81811..d4a5a19cb8c3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -134,6 +134,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
+		ret = -ENOMEM;
 		goto out_no_fifo;
 	}
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 6fdd82d42f65..2aa4bc6a4d60 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -88,6 +88,11 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
 	return res;
 }
 
+struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res)
+{
+	return kref_get_unless_zero(&res->kref) ? res : NULL;
+}
 
 /**
  * vmw_resource_release_id - release a resource id to the id manager.
@@ -136,8 +141,12 @@ static void vmw_resource_release(struct kref *kref)
 		vmw_dmabuf_unreference(&res->backup);
 	}
 
-	if (likely(res->hw_destroy != NULL))
+	if (likely(res->hw_destroy != NULL)) {
 		res->hw_destroy(res);
+		mutex_lock(&dev_priv->binding_mutex);
+		vmw_context_binding_res_list_kill(&res->binding_head);
+		mutex_unlock(&dev_priv->binding_mutex);
+	}
 
 	id = res->id;
 	if (res->res_free != NULL)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 1457ec4b7125..217d941b8176 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -29,6 +29,8 @@
29#include "vmwgfx_resource_priv.h" 29#include "vmwgfx_resource_priv.h"
30#include "ttm/ttm_placement.h" 30#include "ttm/ttm_placement.h"
31 31
32#define VMW_COMPAT_SHADER_HT_ORDER 12
33
32struct vmw_shader { 34struct vmw_shader {
33 struct vmw_resource res; 35 struct vmw_resource res;
34 SVGA3dShaderType type; 36 SVGA3dShaderType type;
@@ -40,6 +42,50 @@ struct vmw_user_shader {
 	struct vmw_shader shader;
 };
 
+/**
+ * enum vmw_compat_shader_state - Staging state for compat shaders
+ */
+enum vmw_compat_shader_state {
+	VMW_COMPAT_COMMITED,
+	VMW_COMPAT_ADD,
+	VMW_COMPAT_DEL
+};
+
+/**
+ * struct vmw_compat_shader - Metadata for compat shaders.
+ *
+ * @handle: The TTM handle of the guest backed shader.
+ * @tfile: The struct ttm_object_file the guest backed shader is registered
+ * with.
+ * @hash: Hash item for lookup.
+ * @head: List head for staging lists or the compat shader manager list.
+ * @state: Staging state.
+ *
+ * The structure is protected by the cmdbuf lock.
+ */
+struct vmw_compat_shader {
+	u32 handle;
+	struct ttm_object_file *tfile;
+	struct drm_hash_item hash;
+	struct list_head head;
+	enum vmw_compat_shader_state state;
+};
+
+/**
+ * struct vmw_compat_shader_manager - Compat shader manager.
+ *
+ * @shaders: Hash table containing staged and commited compat shaders
+ * @list: List of commited shaders.
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * @shaders and @list are protected by the cmdbuf mutex for now.
+ */
+struct vmw_compat_shader_manager {
+	struct drm_open_hash shaders;
+	struct list_head list;
+	struct vmw_private *dev_priv;
+};
+
 static void vmw_user_shader_free(struct vmw_resource *res);
 static struct vmw_resource *
 vmw_user_shader_base_to_res(struct ttm_base_object *base);
@@ -258,7 +304,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
 		return 0;
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_res_list_kill(&res->binding_head);
+	vmw_context_binding_res_list_scrub(&res->binding_head);
 
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
@@ -325,13 +371,81 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
 					 TTM_REF_USAGE);
 }
 
+int vmw_shader_alloc(struct vmw_private *dev_priv,
+		     struct vmw_dma_buffer *buffer,
+		     size_t shader_size,
+		     size_t offset,
+		     SVGA3dShaderType shader_type,
+		     struct ttm_object_file *tfile,
+		     u32 *handle)
+{
+	struct vmw_user_shader *ushader;
+	struct vmw_resource *res, *tmp;
+	int ret;
+
+	/*
+	 * Approximate idr memory usage with 128 bytes. It will be limited
+	 * by maximum number_of shaders anyway.
+	 */
+	if (unlikely(vmw_user_shader_size == 0))
+		vmw_user_shader_size =
+			ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   vmw_user_shader_size,
+				   false, true);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for shader "
+				  "creation.\n");
+		goto out;
+	}
+
+	ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
+	if (unlikely(ushader == NULL)) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv),
+				    vmw_user_shader_size);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	res = &ushader->shader.res;
+	ushader->base.shareable = false;
+	ushader->base.tfile = NULL;
+
+	/*
+	 * From here on, the destructor takes over resource freeing.
+	 */
+
+	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
+				 offset, shader_type, buffer,
+				 vmw_user_shader_free);
+	if (unlikely(ret != 0))
+		goto out;
+
+	tmp = vmw_resource_reference(res);
+	ret = ttm_base_object_init(tfile, &ushader->base, false,
+				   VMW_RES_SHADER,
+				   &vmw_user_shader_base_release, NULL);
+
+	if (unlikely(ret != 0)) {
+		vmw_resource_unreference(&tmp);
+		goto out_err;
+	}
+
+	if (handle)
+		*handle = ushader->base.hash.key;
+out_err:
+	vmw_resource_unreference(&res);
+out:
+	return ret;
+}
+
+
 int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct vmw_user_shader *ushader;
-	struct vmw_resource *res;
-	struct vmw_resource *tmp;
 	struct drm_vmw_shader_create_arg *arg =
 		(struct drm_vmw_shader_create_arg *)data;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
@@ -373,69 +487,324 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
373 goto out_bad_arg; 487 goto out_bad_arg;
374 } 488 }
375 489
376 /* 490 ret = ttm_read_lock(&vmaster->lock, true);
377 * Approximate idr memory usage with 128 bytes. It will be limited 491 if (unlikely(ret != 0))
378 * by the maximum number of shaders anyway. 492 goto out_bad_arg;
379 */
380 493
381 if (unlikely(vmw_user_shader_size == 0)) 494 ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
382 vmw_user_shader_size = ttm_round_pot(sizeof(*ushader)) 495 shader_type, tfile, &arg->shader_handle);
383 + 128;
384 496
385 ret = ttm_read_lock(&vmaster->lock, true); 497 ttm_read_unlock(&vmaster->lock);
498out_bad_arg:
499 vmw_dmabuf_unreference(&buffer);
500 return ret;
501}
502
503/**
504 * vmw_compat_shader_lookup - Look up a compat shader
505 *
506 * @man: Pointer to the compat shader manager.
 507 * @shader_type: The shader type that, combined with the user_key, identifies
 508 * the shader.
 509 * @user_key: On entry, points to the user-space key of the shader.
 510 * On successful exit, it contains the guest-backed shader's TTM handle.
511 *
512 * Returns 0 on success. Non-zero on failure, in which case the value pointed
513 * to by @user_key is unmodified.
514 */
515int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
516 SVGA3dShaderType shader_type,
517 u32 *user_key)
518{
519 struct drm_hash_item *hash;
520 int ret;
521 unsigned long key = *user_key | (shader_type << 24);
522
523 ret = drm_ht_find_item(&man->shaders, key, &hash);
386 if (unlikely(ret != 0)) 524 if (unlikely(ret != 0))
387 return ret; 525 return ret;
388 526
389 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), 527 *user_key = drm_hash_entry(hash, struct vmw_compat_shader,
390 vmw_user_shader_size, 528 hash)->handle;
391 false, true); 529
392 if (unlikely(ret != 0)) { 530 return 0;
393 if (ret != -ERESTARTSYS) 531}
394 DRM_ERROR("Out of graphics memory for shader" 532
395 " creation.\n"); 533/**
396 goto out_unlock; 534 * vmw_compat_shader_free - Free a compat shader.
535 *
536 * @man: Pointer to the compat shader manager.
537 * @entry: Pointer to a struct vmw_compat_shader.
538 *
 539 * Frees a struct vmw_compat_shader entry and drops its reference to the
 540 * guest-backed shader.
541 */
542static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
543 struct vmw_compat_shader *entry)
544{
545 list_del(&entry->head);
546 WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
547 WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
548 TTM_REF_USAGE));
549 kfree(entry);
550}
551
552/**
553 * vmw_compat_shaders_commit - Commit a list of compat shader actions.
554 *
555 * @man: Pointer to the compat shader manager.
556 * @list: Caller's list of compat shader actions.
557 *
558 * This function commits a list of compat shader additions or removals.
559 * It is typically called when the execbuf ioctl call triggering these
 560 * actions has committed the fifo contents to the device.
561 */
562void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
563 struct list_head *list)
564{
565 struct vmw_compat_shader *entry, *next;
566
567 list_for_each_entry_safe(entry, next, list, head) {
568 list_del(&entry->head);
569 switch (entry->state) {
570 case VMW_COMPAT_ADD:
571 entry->state = VMW_COMPAT_COMMITED;
572 list_add_tail(&entry->head, &man->list);
573 break;
574 case VMW_COMPAT_DEL:
575 ttm_ref_object_base_unref(entry->tfile, entry->handle,
576 TTM_REF_USAGE);
577 kfree(entry);
578 break;
579 default:
580 BUG();
581 break;
582 }
397 } 583 }
584}
398 585
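[Editorial aside, not part of the patch: a hedged sketch of the intended success-path usage around command submission. The staged list is filled by vmw_compat_shader_add()/_remove() further down; cmdbuf_mutex is assumed held, and the example_* names are not in the patch.]

/* Sketch of the execbuf success path (heavily simplified). */
static int example_execbuf_shaders(struct vmw_compat_shader_manager *man,
				   u32 user_key)
{
	LIST_HEAD(staged);	/* caller-owned list of staged actions */
	u32 handle = user_key;
	int ret;

	/* Translate a user key into the guest-backed shader's TTM handle. */
	ret = vmw_compat_shader_lookup(man, SVGA3D_SHADERTYPE_PS, &handle);
	if (ret)
		return ret;

	/* ... validate and submit the fifo commands using @handle ... */

	/* The device has the commands; make the staged actions permanent. */
	vmw_compat_shaders_commit(man, &staged);
	return 0;
}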
399 ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); 586/**
400 if (unlikely(ushader == NULL)) { 587 * vmw_compat_shaders_revert - Revert a list of compat shader actions
401 ttm_mem_global_free(vmw_mem_glob(dev_priv), 588 *
402 vmw_user_shader_size); 589 * @man: Pointer to the compat shader manager.
403 ret = -ENOMEM; 590 * @list: Caller's list of compat shader actions.
404 goto out_unlock; 591 *
592 * This function reverts a list of compat shader additions or removals.
593 * It is typically called when the execbuf ioctl call triggering these
594 * actions failed for some reason, and the command stream was never
595 * submitted.
596 */
597void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
598 struct list_head *list)
599{
600 struct vmw_compat_shader *entry, *next;
601 int ret;
602
603 list_for_each_entry_safe(entry, next, list, head) {
604 switch (entry->state) {
605 case VMW_COMPAT_ADD:
606 vmw_compat_shader_free(man, entry);
607 break;
608 case VMW_COMPAT_DEL:
609 ret = drm_ht_insert_item(&man->shaders, &entry->hash);
610 list_del(&entry->head);
611 list_add_tail(&entry->head, &man->list);
612 entry->state = VMW_COMPAT_COMMITED;
613 break;
614 default:
615 BUG();
616 break;
617 }
405 } 618 }
619}
406 620
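[Editorial aside, not part of the patch: the failure-path counterpart of the sketch above. A fragment of how an execbuf error path would unwind the staged actions; the label and variable names are illustrative.]

	/* Command submission failed; nothing reached the device. */
	if (unlikely(ret != 0)) {
		vmw_compat_shaders_revert(man, &staged);	/* undo staging */
		goto out_unlock;	/* hypothetical cleanup label */
	}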
407 res = &ushader->shader.res; 621/**
408 ushader->base.shareable = false; 622 * vmw_compat_shader_remove - Stage a compat shader for removal.
409 ushader->base.tfile = NULL; 623 *
624 * @man: Pointer to the compat shader manager
625 * @user_key: The key that is used to identify the shader. The key is
626 * unique to the shader type.
627 * @shader_type: Shader type.
628 * @list: Caller's list of staged shader actions.
629 *
630 * This function stages a compat shader for removal and removes the key from
631 * the shader manager's hash table. If the shader was previously only staged
 632 * for addition, it is completely removed (but the execbuf code may keep a
 633 * reference if it was bound to a context between addition and removal). If
 634 * it was previously committed to the manager, it is staged for removal.
635 */
636int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
637 u32 user_key, SVGA3dShaderType shader_type,
638 struct list_head *list)
639{
640 struct vmw_compat_shader *entry;
641 struct drm_hash_item *hash;
642 int ret;
410 643
411 /* 644 ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
412 * From here on, the destructor takes over resource freeing. 645 &hash);
413 */ 646 if (likely(ret != 0))
647 return -EINVAL;
414 648
415 ret = vmw_gb_shader_init(dev_priv, res, arg->size, 649 entry = drm_hash_entry(hash, struct vmw_compat_shader, hash);
416 arg->offset, shader_type, buffer, 650
417 vmw_user_shader_free); 651 switch (entry->state) {
652 case VMW_COMPAT_ADD:
653 vmw_compat_shader_free(man, entry);
654 break;
655 case VMW_COMPAT_COMMITED:
656 (void) drm_ht_remove_item(&man->shaders, &entry->hash);
657 list_del(&entry->head);
658 entry->state = VMW_COMPAT_DEL;
659 list_add_tail(&entry->head, list);
660 break;
661 default:
662 BUG();
663 break;
664 }
665
666 return 0;
667}
668
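[Editorial aside, not part of the patch: a hedged sketch of staging the removal of a compat shader, e.g. while translating a legacy shader-destroy command. The example_* name is not in the patch; cmdbuf_mutex is assumed held.]

static int example_stage_destroy(struct vmw_compat_shader_manager *man,
				 u32 user_key, struct list_head *staged)
{
	int ret;

	ret = vmw_compat_shader_remove(man, user_key,
				       SVGA3D_SHADERTYPE_VS, staged);
	if (ret)	/* -EINVAL: no shader registered under that key/type */
		return ret;

	/*
	 * The key is no longer visible to lookups.  The backing shader is
	 * released at commit time, or immediately if it had only been
	 * staged for addition.
	 */
	return 0;
}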
669/**
670 * vmw_compat_shader_add - Create a compat shader and add the
671 * key to the manager
672 *
673 * @man: Pointer to the compat shader manager
674 * @user_key: The key that is used to identify the shader. The key is
675 * unique to the shader type.
676 * @bytecode: Pointer to the bytecode of the shader.
677 * @shader_type: Shader type.
678 * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
679 * to be created with.
680 * @list: Caller's list of staged shader actions.
681 *
682 * Note that only the key is added to the shader manager's hash table.
683 * The shader is not yet added to the shader manager's list of shaders.
684 */
685int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
686 u32 user_key, const void *bytecode,
687 SVGA3dShaderType shader_type,
688 size_t size,
689 struct ttm_object_file *tfile,
690 struct list_head *list)
691{
692 struct vmw_dma_buffer *buf;
693 struct ttm_bo_kmap_obj map;
694 bool is_iomem;
695 struct vmw_compat_shader *compat;
696 u32 handle;
697 int ret;
698
699 if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
700 return -EINVAL;
701
702 /* Allocate and pin a DMA buffer */
703 buf = kzalloc(sizeof(*buf), GFP_KERNEL);
704 if (unlikely(buf == NULL))
705 return -ENOMEM;
706
707 ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
708 true, vmw_dmabuf_bo_free);
418 if (unlikely(ret != 0)) 709 if (unlikely(ret != 0))
419 goto out_unlock; 710 goto out;
420 711
421 tmp = vmw_resource_reference(res); 712 ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
422 ret = ttm_base_object_init(tfile, &ushader->base, false, 713 if (unlikely(ret != 0))
423 VMW_RES_SHADER, 714 goto no_reserve;
424 &vmw_user_shader_base_release, NULL);
425 715
716 /* Map and copy shader bytecode. */
717 ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
718 &map);
426 if (unlikely(ret != 0)) { 719 if (unlikely(ret != 0)) {
427 vmw_resource_unreference(&tmp); 720 ttm_bo_unreserve(&buf->base);
428 goto out_err; 721 goto no_reserve;
429 } 722 }
430 723
431 arg->shader_handle = ushader->base.hash.key; 724 memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
432out_err: 725 WARN_ON(is_iomem);
433 vmw_resource_unreference(&res); 726
434out_unlock: 727 ttm_bo_kunmap(&map);
435 ttm_read_unlock(&vmaster->lock); 728 ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
436out_bad_arg: 729 WARN_ON(ret != 0);
437 vmw_dmabuf_unreference(&buffer); 730 ttm_bo_unreserve(&buf->base);
731
732 /* Create a guest-backed shader container backed by the dma buffer */
733 ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
734 tfile, &handle);
735 vmw_dmabuf_unreference(&buf);
736 if (unlikely(ret != 0))
737 goto no_reserve;
738 /*
739 * Create a compat shader structure and stage it for insertion
740 * in the manager
741 */
742 compat = kzalloc(sizeof(*compat), GFP_KERNEL);
743 if (compat == NULL)
744 goto no_compat;
745
746 compat->hash.key = user_key | (shader_type << 24);
747 ret = drm_ht_insert_item(&man->shaders, &compat->hash);
748 if (unlikely(ret != 0))
749 goto out_invalid_key;
750
751 compat->state = VMW_COMPAT_ADD;
752 compat->handle = handle;
753 compat->tfile = tfile;
754 list_add_tail(&compat->head, list);
438 755
756 return 0;
757
758out_invalid_key:
759 kfree(compat);
760no_compat:
761 ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
762no_reserve:
763out:
439 return ret; 764 return ret;
765}
766
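[Editorial aside, not part of the patch: a hedged sketch of turning inline bytecode into a guest-backed compat shader and staging its key for insertion. The example_* name is not in the patch; cmdbuf_mutex is assumed held and user_key must fit in 24 bits.]

static int example_stage_define(struct vmw_compat_shader_manager *man,
				struct ttm_object_file *tfile,
				u32 user_key,
				const void *bytecode, size_t size,
				struct list_head *staged)
{
	return vmw_compat_shader_add(man, user_key, bytecode,
				     SVGA3D_SHADERTYPE_VS, size,
				     tfile, staged);
}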
767/**
768 * vmw_compat_shader_man_create - Create a compat shader manager
769 *
770 * @dev_priv: Pointer to a device private structure.
771 *
 772 * Typically done at file open time. If successful, returns a pointer to a
773 * compat shader manager. Otherwise returns an error pointer.
774 */
775struct vmw_compat_shader_manager *
776vmw_compat_shader_man_create(struct vmw_private *dev_priv)
777{
778 struct vmw_compat_shader_manager *man;
779 int ret;
780
781 man = kzalloc(sizeof(*man), GFP_KERNEL);
782
783 man->dev_priv = dev_priv;
784 INIT_LIST_HEAD(&man->list);
785 ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
786 if (ret == 0)
787 return man;
788
789 kfree(man);
790 return ERR_PTR(ret);
791}
792
793/**
794 * vmw_compat_shader_man_destroy - Destroy a compat shader manager
795 *
796 * @man: Pointer to the shader manager to destroy.
797 *
798 * Typically done at file close time.
799 */
800void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
801{
802 struct vmw_compat_shader *entry, *next;
803
804 mutex_lock(&man->dev_priv->cmdbuf_mutex);
805 list_for_each_entry_safe(entry, next, &man->list, head)
806 vmw_compat_shader_free(man, entry);
440 807
808 mutex_unlock(&man->dev_priv->cmdbuf_mutex);
809 kfree(man);
441} 810}
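[Editorial aside, not part of the patch: a hedged sketch of the per-open-file lifecycle of the manager. The shader_man member name on struct vmw_fpriv is hypothetical; the real hookup lives in vmwgfx_drv.c, which is not shown in this hunk.]

static int example_open(struct vmw_private *dev_priv, struct vmw_fpriv *vmw_fp)
{
	vmw_fp->shader_man = vmw_compat_shader_man_create(dev_priv);
	if (IS_ERR(vmw_fp->shader_man))
		return PTR_ERR(vmw_fp->shader_man);
	return 0;
}

static void example_release(struct vmw_fpriv *vmw_fp)
{
	vmw_compat_shader_man_destroy(vmw_fp->shader_man);
	vmw_fp->shader_man = NULL;
}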
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 979da1c246a5..82468d902915 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -908,8 +908,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
908 rep->size_addr; 908 rep->size_addr;
909 909
910 if (user_sizes) 910 if (user_sizes)
911 ret = copy_to_user(user_sizes, srf->sizes, 911 ret = copy_to_user(user_sizes, &srf->base_size,
912 srf->num_sizes * sizeof(*srf->sizes)); 912 sizeof(srf->base_size));
913 if (unlikely(ret != 0)) { 913 if (unlikely(ret != 0)) {
914 DRM_ERROR("copy_to_user failed %p %u\n", 914 DRM_ERROR("copy_to_user failed %p %u\n",
915 user_sizes, srf->num_sizes); 915 user_sizes, srf->num_sizes);
@@ -1111,7 +1111,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
1111 return 0; 1111 return 0;
1112 1112
1113 mutex_lock(&dev_priv->binding_mutex); 1113 mutex_lock(&dev_priv->binding_mutex);
1114 vmw_context_binding_res_list_kill(&res->binding_head); 1114 vmw_context_binding_res_list_scrub(&res->binding_head);
1115 1115
1116 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 1116 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
1117 if (unlikely(cmd == NULL)) { 1117 if (unlikely(cmd == NULL)) {