author      Thomas Hellstrom <thellstrom@vmware.com>    2013-10-08 05:32:36 -0400
committer   Thomas Hellstrom <thellstrom@vmware.com>    2014-01-17 01:52:36 -0500
commit      173fb7d4e26705a9e8b8e9d197a18ff39bfdad0a
tree        f7797b8cbcd5f6d5be686c4c642f4ef9df59a49d /drivers/gpu/drm
parent      b5c3b1a6bfaf71895d656162f29e979c5c904888
drm/vmwgfx: Persistent tracking of context bindings
Only scrub context bindings when a bound resource is destroyed, or when
the MOB backing the context is unbound.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Zack Rusin <zackr@vmware.com>
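The mechanism is easiest to see in the data structures the patch touches: every tracked binding is now linked in two directions, into its context's binding list and, when it refers to a resource, into that resource's binding_head. A binding can therefore be scrubbed from either end: when the context's MOB is unbound, or when the bound resource is destroyed. Below is a minimal sketch of that double-linking; the sketch_* types are simplified stand-ins, not the driver's structs, and only the list layout mirrors vmw_ctx_binding and vmw_resource.

/*
 * Sketch of the two-way binding tracking (kernel-style C, using
 * <linux/list.h>). Stand-in types; not the vmwgfx driver code.
 */
#include <linux/list.h>

struct sketch_resource {
	struct list_head binding_head;	/* all bindings to this resource */
};

struct sketch_binding {
	struct list_head ctx_list;	/* on the owning context's list */
	struct list_head res_list;	/* on the resource's binding_head,
					 * or self-linked if no resource */
};

/* Unlink a binding from both directions, cf. vmw_context_binding_drop(). */
static void sketch_binding_drop(struct sketch_binding *b)
{
	list_del(&b->ctx_list);
	if (!list_empty(&b->res_list))	/* empty == no bound resource */
		list_del(&b->res_list);
}

/*
 * Scrub everything bound to a dying resource,
 * cf. vmw_context_binding_res_list_kill().
 */
static void sketch_res_list_kill(struct list_head *head)
{
	struct sketch_binding *entry, *next;

	list_for_each_entry_safe(entry, next, head, res_list)
		sketch_binding_drop(entry);
}

The self-initialized res_list is what lets bindings without a bound resource (bi->res == NULL) share the same drop path: their list head stays empty, so the second list_del is simply skipped.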
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c  | 101
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c      |   1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h      |  11
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  |  31
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c |   1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c   |   4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c  |   4
7 files changed, 143 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index b4de756112d4..97aa55159107 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -32,6 +32,7 @@
 struct vmw_user_context {
 	struct ttm_base_object base;
 	struct vmw_resource res;
+	struct vmw_ctx_binding_state cbs;
 };
 
 
@@ -52,7 +53,7 @@ static int vmw_gb_context_destroy(struct vmw_resource *res);
 static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi);
 static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi);
 static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi);
-
+static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
 static uint64_t vmw_user_context_size;
 
 static const struct vmw_user_resource_conv user_context_conv = {
@@ -139,6 +140,8 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
 			       void (*res_free) (struct vmw_resource *res))
 {
 	int ret;
+	struct vmw_user_context *uctx =
+		container_of(res, struct vmw_user_context, res);
 
 	ret = vmw_resource_init(dev_priv, res, true,
 				res_free, &vmw_gb_context_func);
@@ -152,6 +155,9 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
 		return ret;
 	}
 
+	memset(&uctx->cbs, 0, sizeof(uctx->cbs));
+	INIT_LIST_HEAD(&uctx->cbs.list);
+
 	vmw_resource_activate(res, vmw_hw_context_destroy);
 	return 0;
 }
@@ -304,6 +310,8 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct ttm_buffer_object *bo = val_buf->bo;
 	struct vmw_fence_obj *fence;
+	struct vmw_user_context *uctx =
+		container_of(res, struct vmw_user_context, res);
 
 	struct {
 		SVGA3dCmdHeader header;
@@ -319,12 +327,16 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 
 	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
+	mutex_lock(&dev_priv->binding_mutex);
+	vmw_context_binding_state_kill(&uctx->cbs);
+
 	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
 
 	cmd = vmw_fifo_reserve(dev_priv, submit_size);
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving FIFO space for context "
 			  "unbinding.\n");
+		mutex_unlock(&dev_priv->binding_mutex);
 		return -ENOMEM;
 	}
 
@@ -342,6 +354,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 	cmd2->body.mobid = SVGA3D_INVALID_ID;
 
 	vmw_fifo_commit(dev_priv, submit_size);
+	mutex_unlock(&dev_priv->binding_mutex);
 
 	/*
 	 * Create a fence object and fence the backup buffer.
@@ -365,6 +378,10 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDestroyGBContext body;
 	} *cmd;
+	struct vmw_user_context *uctx =
+		container_of(res, struct vmw_user_context, res);
+
+	BUG_ON(!list_empty(&uctx->cbs.list));
 
 	if (likely(res->id == -1))
 		return 0;
@@ -620,6 +637,8 @@ static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
 static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
 {
 	list_del(&cb->ctx_list);
+	if (!list_empty(&cb->res_list))
+		list_del(&cb->res_list);
 	cb->bi.ctx = NULL;
 }
 
@@ -674,11 +693,49 @@ int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
 
 	loc->bi = *bi;
 	list_add_tail(&loc->ctx_list, &cbs->list);
+	INIT_LIST_HEAD(&loc->res_list);
 
 	return 0;
 }
 
 /**
+ * vmw_context_binding_transfer: Transfer a context binding tracking entry.
+ *
+ * @cbs: Pointer to the persistent context binding state tracker.
+ * @bi: Information about the binding to track.
+ *
+ */
+static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
+					 const struct vmw_ctx_bindinfo *bi)
+{
+	struct vmw_ctx_binding *loc;
+
+	switch (bi->bt) {
+	case vmw_ctx_binding_rt:
+		loc = &cbs->render_targets[bi->i1.rt_type];
+		break;
+	case vmw_ctx_binding_tex:
+		loc = &cbs->texture_units[bi->i1.texture_stage];
+		break;
+	case vmw_ctx_binding_shader:
+		loc = &cbs->shaders[bi->i1.shader_type];
+		break;
+	default:
+		BUG();
+	}
+
+	if (loc->bi.ctx != NULL)
+		vmw_context_binding_drop(loc);
+
+	loc->bi = *bi;
+	list_add_tail(&loc->ctx_list, &cbs->list);
+	if (bi->res != NULL)
+		list_add_tail(&loc->res_list, &bi->res->binding_head);
+	else
+		INIT_LIST_HEAD(&loc->res_list);
+}
+
+/**
  * vmw_context_binding_kill - Kill a binding on the device
  * and stop tracking it.
  *
@@ -702,11 +759,47 @@ void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
  * Emits commands to scrub all bindings associated with the
  * context binding state tracker. Then re-initializes the whole structure.
  */
-void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
+static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
 {
 	struct vmw_ctx_binding *entry, *next;
 
-	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list) {
+	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
 		vmw_context_binding_kill(entry);
-	}
+}
+
+/**
+ * vmw_context_binding_res_list_kill - Kill all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Kills all bindings associated with a specific resource. Typically
+ * called before the resource is destroyed.
+ */
+void vmw_context_binding_res_list_kill(struct list_head *head)
+{
+	struct vmw_ctx_binding *entry, *next;
+
+	list_for_each_entry_safe(entry, next, head, res_list)
+		vmw_context_binding_kill(entry);
+}
+
+/**
+ * vmw_context_binding_state_transfer - Commit staged binding info
+ *
+ * @ctx: Pointer to context to commit the staged binding info to.
+ * @from: Staged binding info built during execbuf.
+ *
+ * Transfers binding info from a temporary structure to the persistent
+ * structure in the context. This can be done once commands
+ */
+void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
+					struct vmw_ctx_binding_state *from)
+{
+	struct vmw_user_context *uctx =
+		container_of(ctx, struct vmw_user_context, res);
+	struct vmw_ctx_binding *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &from->list, ctx_list)
+		vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index fb56676ed3ee..9008a56e2a97 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -612,6 +612,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	mutex_init(&dev_priv->hw_mutex);
 	mutex_init(&dev_priv->cmdbuf_mutex);
 	mutex_init(&dev_priv->release_mutex);
+	mutex_init(&dev_priv->binding_mutex);
 	rwlock_init(&dev_priv->resource_lock);
 
 	for (i = vmw_res_context; i < vmw_res_max; ++i) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index a962e4c12a75..18ece4f53c42 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -112,6 +112,7 @@ struct vmw_resource {
 	const struct vmw_res_func *func;
 	struct list_head lru_head; /* Protected by the resource lock */
 	struct list_head mob_head; /* Protected by @backup reserved */
+	struct list_head binding_head; /* Protected by binding_mutex */
 	void (*res_free) (struct vmw_resource *res);
 	void (*hw_destroy) (struct vmw_resource *res);
 };
@@ -260,11 +261,13 @@ enum vmw_ctx_binding_type {
  *
  * @ctx: Pointer to the context structure. NULL means the binding is not
  * active.
+ * @res: Non ref-counted pointer to the bound resource.
  * @bt: The binding type.
  * @i1: Union of information needed to unbind.
  */
 struct vmw_ctx_bindinfo {
 	struct vmw_resource *ctx;
+	struct vmw_resource *res;
 	enum vmw_ctx_binding_type bt;
 	union {
 		SVGA3dShaderType shader_type;
@@ -278,10 +281,12 @@ struct vmw_ctx_bindinfo {
  * - suitable for tracking in a context
  *
  * @ctx_list: List head for context.
+ * @res_list: List head for bound resource.
  * @bi: Binding info
  */
 struct vmw_ctx_binding {
 	struct list_head ctx_list;
+	struct list_head res_list;
 	struct vmw_ctx_bindinfo bi;
 };
 
@@ -450,6 +455,7 @@ struct vmw_private {
 
 	struct vmw_sw_context ctx;
 	struct mutex cmdbuf_mutex;
+	struct mutex binding_mutex;
 
 	/**
 	 * Operating mode.
@@ -940,7 +946,10 @@ extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
 extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
 				   const struct vmw_ctx_bindinfo *ci);
-extern void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
+extern void
+vmw_context_binding_state_transfer(struct vmw_resource *res,
+				   struct vmw_ctx_binding_state *cbs);
+extern void vmw_context_binding_res_list_kill(struct list_head *head);
 
 /*
  * Surface management - vmwgfx_surface.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 8eb87d855781..b924fd6e6edd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -109,8 +109,13 @@ static void vmw_resource_list_unreserve(struct list_head *list,
 		struct vmw_dma_buffer *new_backup =
 			backoff ? NULL : val->new_backup;
 
+		/*
+		 * Transfer staged context bindings to the
+		 * persistent context binding tracker.
+		 */
 		if (unlikely(val->staged_bindings)) {
-			vmw_context_binding_state_kill(val->staged_bindings);
+			vmw_context_binding_state_transfer
+				(val->res, val->staged_bindings);
 			kfree(val->staged_bindings);
 			val->staged_bindings = NULL;
 		}
@@ -508,6 +513,7 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 		SVGA3dCmdSetRenderTarget body;
 	} *cmd;
 	struct vmw_resource_val_node *ctx_node;
+	struct vmw_resource_val_node *res_node;
 	int ret;
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
@@ -520,7 +526,7 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				user_surface_converter,
-				&cmd->body.target.sid, NULL);
+				&cmd->body.target.sid, &res_node);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -528,6 +534,7 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 		struct vmw_ctx_bindinfo bi;
 
 		bi.ctx = ctx_node->res;
+		bi.res = res_node ? res_node->res : NULL;
 		bi.bt = vmw_ctx_binding_rt;
 		bi.i1.rt_type = cmd->body.type;
 		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
@@ -1195,6 +1202,7 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
 		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
 	struct vmw_resource_val_node *ctx_node;
+	struct vmw_resource_val_node *res_node;
 	int ret;
 
 	cmd = container_of(header, struct vmw_tex_state_cmd,
@@ -1212,7 +1220,7 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				user_surface_converter,
-				&cur_state->value, NULL);
+				&cur_state->value, &res_node);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1220,6 +1228,7 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 		struct vmw_ctx_bindinfo bi;
 
 		bi.ctx = ctx_node->res;
+		bi.res = res_node ? res_node->res : NULL;
 		bi.bt = vmw_ctx_binding_tex;
 		bi.i1.texture_stage = cur_state->stage;
 		vmw_context_binding_add(ctx_node->staged_bindings,
@@ -1499,14 +1508,16 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
 
 	if (dev_priv->has_mob) {
 		struct vmw_ctx_bindinfo bi;
+		struct vmw_resource_val_node *res_node;
 
 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
 					user_shader_converter,
-					&cmd->body.shid, NULL);
+					&cmd->body.shid, &res_node);
 		if (unlikely(ret != 0))
 			return ret;
 
 		bi.ctx = ctx_node->res;
+		bi.res = res_node ? res_node->res : NULL;
 		bi.bt = vmw_ctx_binding_shader;
 		bi.i1.shader_type = cmd->body.type;
 		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
@@ -2208,11 +2219,17 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 		goto out_err;
 	}
 
+	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
+	if (unlikely(ret != 0)) {
+		ret = -ERESTARTSYS;
+		goto out_err;
+	}
+
 	cmd = vmw_fifo_reserve(dev_priv, command_size);
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving fifo space for commands.\n");
 		ret = -ENOMEM;
-		goto out_err;
+		goto out_unlock_binding;
 	}
 
 	vmw_apply_relocations(sw_context);
@@ -2237,6 +2254,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 		DRM_ERROR("Fence submission error. Syncing.\n");
 
 	vmw_resource_list_unreserve(&sw_context->resource_list, false);
+	mutex_unlock(&dev_priv->binding_mutex);
+
 	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
 				    (void *) fence);
 
@@ -2267,6 +2286,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 
 	return 0;
 
+out_unlock_binding:
+	mutex_unlock(&dev_priv->binding_mutex);
 out_err:
 	vmw_resource_relocations_free(&sw_context->res_relocations);
 	vmw_free_relocations(sw_context);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 12e68e58d9e4..6fdd82d42f65 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -215,6 +215,7 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
 	res->func = func;
 	INIT_LIST_HEAD(&res->lru_head);
 	INIT_LIST_HEAD(&res->mob_head);
+	INIT_LIST_HEAD(&res->binding_head);
 	res->id = -1;
 	res->backup = NULL;
 	res->backup_offset = 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 76d354101403..813bd0a2abaf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -257,6 +257,9 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
 	if (likely(res->id == -1))
 		return 0;
 
+	mutex_lock(&dev_priv->binding_mutex);
+	vmw_context_binding_res_list_kill(&res->binding_head);
+
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving FIFO space for shader "
@@ -268,6 +271,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.shid = res->id;
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	mutex_unlock(&dev_priv->binding_mutex);
 	vmw_resource_release_id(res);
 	vmw_3d_resource_dec(dev_priv, false);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 739a93dc941e..a729b20ee14d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1100,6 +1100,9 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
 	if (likely(res->id == -1))
 		return 0;
 
+	mutex_lock(&dev_priv->binding_mutex);
+	vmw_context_binding_res_list_kill(&res->binding_head);
+
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
@@ -1111,6 +1114,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.sid = res->id;
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	mutex_unlock(&dev_priv->binding_mutex);
 	vmw_resource_release_id(res);
 	vmw_3d_resource_dec(dev_priv, false);
 
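Note how the two resource-destroy paths above end up with the same shape: scrub the resource's bindings, then emit the device destroy command, all inside one binding_mutex critical section, so the execbuf path cannot commit a new binding to a half-destroyed resource in between. Roughly, as a sketch (the SVGA command body and per-type details are elided; the helper name and size parameter are illustrative, not driver API):

/*
 * Illustrative only -- the common pattern of vmw_gb_shader_destroy()
 * and vmw_gb_surface_destroy() after this patch. Presumes the vmwgfx
 * driver headers (vmwgfx_drv.h) for the types and helpers used.
 */
static int sketch_gb_destroy(struct vmw_private *dev_priv,
			     struct vmw_resource *res, size_t cmd_size)
{
	void *cmd;

	mutex_lock(&dev_priv->binding_mutex);
	vmw_context_binding_res_list_kill(&res->binding_head);

	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}
	/* ... fill in the SVGA3dCmdDestroyGB* header and body ... */
	vmw_fifo_commit(dev_priv, cmd_size);
	mutex_unlock(&dev_priv->binding_mutex);

	vmw_resource_release_id(res);
	vmw_3d_resource_dec(dev_priv, false);
	return 0;
}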