author    Thomas Hellstrom <thellstrom@vmware.com>  2013-10-08 05:27:17 -0400
committer Thomas Hellstrom <thellstrom@vmware.com>  2014-01-17 01:52:35 -0500
commit    b5c3b1a6bfaf71895d656162f29e979c5c904888 (patch)
tree      1ed1f901dad57af4ec25cfcad74e6c67b55c07cf /drivers/gpu
parent    8ba07315d3ffcb7dfbb5143a3be03fe4af079969 (diff)
drm/vmwgfx: Track context bindings and scrub them upon exiting execbuf
The device is no longer capable of scrubbing context bindings of resources
that are still bound when the resources are destroyed. Hence, track the
bindings set up during a command batch and scrub them as execbuf exits.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
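
In outline, the patch keeps per-context storage for everything currently
bound, plus one scrub function per binding type that emits the FIFO command
unbinding it (the vmw_scrub_funcs table below). A minimal userspace sketch
of that dispatch pattern follows; every name in it, and the printf stand-ins
for FIFO commands, are illustrative only, not the driver's API:

#include <stdio.h>

/* Hypothetical stand-ins; the driver indexes by enum vmw_ctx_binding_type. */
enum binding_type { BINDING_SHADER, BINDING_RT, BINDING_TEX, BINDING_MAX };

struct bindinfo {
	int ctx_id;		/* owning context */
	enum binding_type bt;
	unsigned slot;		/* shader type, RT type, or texture stage */
};

typedef int (*scrub_func)(struct bindinfo *);

/* Each scrub function stands in for the FIFO command that unbinds one
 * resource type (SET_SHADER / SETRENDERTARGET / SETTEXTURESTATE). */
static int scrub_shader(struct bindinfo *bi)
{
	printf("SET_SHADER cid=%d type=%u shid=INVALID\n", bi->ctx_id, bi->slot);
	return 0;
}

static int scrub_rt(struct bindinfo *bi)
{
	printf("SETRENDERTARGET cid=%d type=%u sid=INVALID\n", bi->ctx_id, bi->slot);
	return 0;
}

static int scrub_tex(struct bindinfo *bi)
{
	printf("SETTEXTURESTATE cid=%d stage=%u value=INVALID\n", bi->ctx_id, bi->slot);
	return 0;
}

/* One scrub function per binding type, indexed like vmw_scrub_funcs. */
static const scrub_func scrub_funcs[BINDING_MAX] = {
	[BINDING_SHADER] = scrub_shader,
	[BINDING_RT] = scrub_rt,
	[BINDING_TEX] = scrub_tex,
};

/* Kill one tracked binding: emit the scrub command, then stop tracking,
 * like vmw_context_binding_kill. */
static void binding_kill(struct bindinfo *bi)
{
	(void) scrub_funcs[bi->bt](bi);
	bi->ctx_id = -1;
}

int main(void)
{
	struct bindinfo staged[] = {
		{ .ctx_id = 3, .bt = BINDING_RT, .slot = 0 },
		{ .ctx_id = 3, .bt = BINDING_TEX, .slot = 1 },
	};
	unsigned i;

	/* On exiting execbuf, every binding staged during the batch is scrubbed. */
	for (i = 0; i < sizeof(staged) / sizeof(staged[0]); i++)
		binding_kill(&staged[i]);
	return 0;
}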
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c  216
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h       65
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c   95
3 files changed, 365 insertions, 11 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 308e78fdc55e..b4de756112d4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -34,6 +34,10 @@ struct vmw_user_context {
 	struct vmw_resource res;
 };
 
+
+
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *);
+
 static void vmw_user_context_free(struct vmw_resource *res);
 static struct vmw_resource *
 vmw_user_context_base_to_res(struct ttm_base_object *base);
@@ -45,6 +49,9 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 				 bool readback,
 				 struct ttm_validate_buffer *val_buf);
 static int vmw_gb_context_destroy(struct vmw_resource *res);
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi);
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi);
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi);
 
 static uint64_t vmw_user_context_size;
 
@@ -82,6 +89,11 @@ static const struct vmw_res_func vmw_gb_context_func = {
 	.unbind = vmw_gb_context_unbind
 };
 
+static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
+	[vmw_ctx_binding_shader] = vmw_context_scrub_shader,
+	[vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
+	[vmw_ctx_binding_tex] = vmw_context_scrub_texture };
+
 /**
  * Context management:
  */
@@ -494,3 +506,207 @@ out_unlock:
 	return ret;
 
 }
+
+/**
+ * vmw_context_scrub_shader - scrub a shader binding from a context.
+ *
+ * @bi: single binding information.
+ */
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
+{
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetShader body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for shader "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = bi->ctx->id;
+	cmd->body.type = bi->i1.shader_type;
+	cmd->body.shid = SVGA3D_INVALID_ID;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_context_scrub_render_target - scrub a render target binding
+ * from a context.
+ *
+ * @bi: single binding information.
+ */
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
+{
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetRenderTarget body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for render target "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = bi->ctx->id;
+	cmd->body.type = bi->i1.rt_type;
+	cmd->body.target.sid = SVGA3D_INVALID_ID;
+	cmd->body.target.face = 0;
+	cmd->body.target.mipmap = 0;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_context_scrub_texture - scrub a texture binding from a context.
+ *
+ * @bi: single binding information.
+ *
+ * TODO: Possibly complement this function with a function that takes
+ * a list of texture bindings and combines them to a single command.
+ */
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
+{
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		struct {
+			SVGA3dCmdSetTextureState c;
+			SVGA3dTextureState s1;
+		} body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for texture "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+
+	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.c.cid = bi->ctx->id;
+	cmd->body.s1.stage = bi->i1.texture_stage;
+	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
+	cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_context_binding_drop: Stop tracking a context binding
+ *
+ * @cb: Pointer to binding tracker storage.
+ *
+ * Stops tracking a context binding, and re-initializes its storage.
+ * Typically used when the context binding is replaced with a binding to
+ * another (or the same, for that matter) resource.
+ */
+static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
+{
+	list_del(&cb->ctx_list);
+	cb->bi.ctx = NULL;
+}
+
+/**
+ * vmw_context_binding_add: Start tracking a context binding
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ * @bi: Information about the binding to track.
+ *
+ * Performs basic checks on the binding to make sure arguments are within
+ * bounds and then starts tracking the binding in the context binding
+ * state structure @cbs.
+ */
+int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
+			    const struct vmw_ctx_bindinfo *bi)
+{
+	struct vmw_ctx_binding *loc;
+
+	switch (bi->bt) {
+	case vmw_ctx_binding_rt:
+		if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
+			DRM_ERROR("Illegal render target type %u.\n",
+				  (unsigned) bi->i1.rt_type);
+			return -EINVAL;
+		}
+		loc = &cbs->render_targets[bi->i1.rt_type];
+		break;
+	case vmw_ctx_binding_tex:
+		if (unlikely((unsigned)bi->i1.texture_stage >=
+			     SVGA3D_NUM_TEXTURE_UNITS)) {
+			DRM_ERROR("Illegal texture/sampler unit %u.\n",
+				  (unsigned) bi->i1.texture_stage);
+			return -EINVAL;
+		}
+		loc = &cbs->texture_units[bi->i1.texture_stage];
+		break;
+	case vmw_ctx_binding_shader:
+		if (unlikely((unsigned)bi->i1.shader_type >=
+			     SVGA3D_SHADERTYPE_MAX)) {
+			DRM_ERROR("Illegal shader type %u.\n",
+				  (unsigned) bi->i1.shader_type);
+			return -EINVAL;
+		}
+		loc = &cbs->shaders[bi->i1.shader_type];
+		break;
+	default:
+		BUG();
+	}
+
+	if (loc->bi.ctx != NULL)
+		vmw_context_binding_drop(loc);
+
+	loc->bi = *bi;
+	list_add_tail(&loc->ctx_list, &cbs->list);
+
+	return 0;
+}
+
+/**
+ * vmw_context_binding_kill - Kill a binding on the device
+ * and stop tracking it.
+ *
+ * @cb: Pointer to binding tracker storage.
+ *
+ * Emits FIFO commands to scrub a binding represented by @cb.
+ * Then stops tracking the binding and re-initializes its storage.
+ */
+void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
+{
+	(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi);
+	vmw_context_binding_drop(cb);
+}
+
+/**
+ * vmw_context_binding_state_kill - Kill all bindings associated with a
+ * struct vmw_ctx_binding state structure, and re-initialize the structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker. Then re-initializes the whole structure.
+ */
+void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_binding *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list) {
+		vmw_context_binding_kill(entry);
+	}
+}
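
One detail of vmw_context_binding_add() above worth calling out: since @cbs
provides fixed storage per slot, re-binding a slot that is already tracked
first drops the previous entry (vmw_context_binding_drop()) before tracking
the new one. A self-contained toy illustration of that drop-before-track
rule, with hypothetical names and RT_MAX standing in for SVGA3D_RT_MAX:

#include <stdio.h>
#include <string.h>

#define RT_MAX 8	/* hypothetical stand-in for SVGA3D_RT_MAX */

struct binding {
	int ctx_id;	/* -1 means the slot is not tracked */
	unsigned rt_type;
};

/* Fixed storage per slot, no dynamic allocation, mirroring the idea of
 * struct vmw_ctx_binding_state. */
struct binding_state {
	struct binding render_targets[RT_MAX];
};

/* Mirrors the shape of vmw_context_binding_add: validate the slot, drop
 * whatever binding is already tracked there, then track the new one. */
static int binding_add(struct binding_state *cbs, int ctx_id, unsigned rt_type)
{
	struct binding *loc;

	if (rt_type >= RT_MAX) {
		fprintf(stderr, "Illegal render target type %u.\n", rt_type);
		return -1;
	}
	loc = &cbs->render_targets[rt_type];
	if (loc->ctx_id != -1)
		printf("dropping stale binding in RT slot %u\n", rt_type);
	loc->ctx_id = ctx_id;
	loc->rt_type = rt_type;
	return 0;
}

int main(void)
{
	struct binding_state cbs;

	/* All-ones bit pattern reads back as ctx_id == -1 on the usual
	 * two's-complement targets: every slot starts untracked. */
	memset(&cbs, 0xff, sizeof(cbs));

	binding_add(&cbs, 3, 0);	/* first binding to RT 0: tracked */
	binding_add(&cbs, 3, 0);	/* rebind RT 0: old entry dropped first */
	return 0;
}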
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 76751e953834..a962e4c12a75 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -245,6 +245,67 @@ struct vmw_piter {
 	struct page *(*page)(struct vmw_piter *);
 };
 
+/*
+ * enum vmw_ctx_binding_type - abstract resource to context binding types
+ */
+enum vmw_ctx_binding_type {
+	vmw_ctx_binding_shader,
+	vmw_ctx_binding_rt,
+	vmw_ctx_binding_tex,
+	vmw_ctx_binding_max
+};
+
+/**
+ * struct vmw_ctx_bindinfo - structure representing a single context binding
+ *
+ * @ctx: Pointer to the context structure. NULL means the binding is not
+ * active.
+ * @bt: The binding type.
+ * @i1: Union of information needed to unbind.
+ */
+struct vmw_ctx_bindinfo {
+	struct vmw_resource *ctx;
+	enum vmw_ctx_binding_type bt;
+	union {
+		SVGA3dShaderType shader_type;
+		SVGA3dRenderTargetType rt_type;
+		uint32 texture_stage;
+	} i1;
+};
+
+/**
+ * struct vmw_ctx_binding - structure representing a single context binding
+ * - suitable for tracking in a context
+ *
+ * @ctx_list: List head for context.
+ * @bi: Binding info
+ */
+struct vmw_ctx_binding {
+	struct list_head ctx_list;
+	struct vmw_ctx_bindinfo bi;
+};
+
+
+/**
+ * struct vmw_ctx_binding_state - context binding state
+ *
+ * @list: linked list of individual bindings.
+ * @render_targets: Render target bindings.
+ * @texture_units: Texture units/samplers bindings.
+ * @shaders: Shader bindings.
+ *
+ * Note that this structure also provides storage space for the individual
+ * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
+ * for individual bindings.
+ *
+ */
+struct vmw_ctx_binding_state {
+	struct list_head list;
+	struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
+	struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
+	struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX];
+};
+
 struct vmw_sw_context{
 	struct drm_open_hash res_ht;
 	bool res_ht_initialized;
@@ -266,6 +327,7 @@ struct vmw_sw_context{
 	struct vmw_resource *last_query_ctx;
 	bool needs_post_query_barrier;
 	struct vmw_resource *error_resource;
+	struct vmw_ctx_binding_state staged_bindings;
 };
 
 struct vmw_legacy_display;
@@ -876,6 +938,9 @@ extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv);
 extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
+extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
+				   const struct vmw_ctx_bindinfo *ci);
+extern void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
 
 /*
  * Surface management - vmwgfx_surface.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index dd5a9a297845..8eb87d855781 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -54,6 +54,8 @@ struct vmw_resource_relocation {
  * @res: Ref-counted pointer to the resource.
  * @switch_backup: Boolean whether to switch backup buffer on unreserve.
  * @new_backup: Refcounted pointer to the new backup buffer.
+ * @staged_bindings: If @res is a context, tracks bindings set up during
+ * the command batch. Otherwise NULL.
  * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll.
  * @first_usage: Set to true the first time the resource is referenced in
  * the command stream.
@@ -65,6 +67,7 @@ struct vmw_resource_val_node {
 	struct drm_hash_item hash;
 	struct vmw_resource *res;
 	struct vmw_dma_buffer *new_backup;
+	struct vmw_ctx_binding_state *staged_bindings;
 	unsigned long new_backup_offset;
 	bool first_usage;
 	bool no_buffer_needed;
@@ -106,6 +109,11 @@ static void vmw_resource_list_unreserve(struct list_head *list,
 		struct vmw_dma_buffer *new_backup =
 			backoff ? NULL : val->new_backup;
 
+		if (unlikely(val->staged_bindings)) {
+			vmw_context_binding_state_kill(val->staged_bindings);
+			kfree(val->staged_bindings);
+			val->staged_bindings = NULL;
+		}
 		vmw_resource_unreserve(res, new_backup,
 				       val->new_backup_offset);
 		vmw_dmabuf_unreference(&val->new_backup);
@@ -389,8 +397,15 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
 	struct vmw_resource_val_node *node;
 	int ret;
 
-	if (*id == SVGA3D_INVALID_ID)
+	if (*id == SVGA3D_INVALID_ID) {
+		if (p_val)
+			*p_val = NULL;
+		if (res_type == vmw_res_context) {
+			DRM_ERROR("Illegal context invalid id.\n");
+			return -EINVAL;
+		}
 		return 0;
+	}
 
 	/*
 	 * Fastpath in case of repeated commands referencing the same
@@ -438,6 +453,18 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
 	rcache->node = node;
 	if (p_val)
 		*p_val = node;
+
+	if (node->first_usage && res_type == vmw_res_context) {
+		node->staged_bindings =
+			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
+		if (node->staged_bindings == NULL) {
+			DRM_ERROR("Failed to allocate context binding "
+				  "information.\n");
+			goto out_no_reloc;
+		}
+		INIT_LIST_HEAD(&node->staged_bindings->list);
+	}
+
 	vmw_resource_unreference(&res);
 	return 0;
 
@@ -480,17 +507,33 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 		SVGA3dCmdHeader header;
 		SVGA3dCmdSetRenderTarget body;
 	} *cmd;
+	struct vmw_resource_val_node *ctx_node;
 	int ret;
 
-	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	cmd = container_of(header, struct vmw_sid_cmd, header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->body.cid,
+				&ctx_node);
 	if (unlikely(ret != 0))
 		return ret;
 
-	cmd = container_of(header, struct vmw_sid_cmd, header);
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				user_surface_converter,
 				&cmd->body.target.sid, NULL);
-	return ret;
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (dev_priv->has_mob) {
+		struct vmw_ctx_bindinfo bi;
+
+		bi.ctx = ctx_node->res;
+		bi.bt = vmw_ctx_binding_rt;
+		bi.i1.rt_type = cmd->body.type;
+		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+	}
+
+	return 0;
 }
 
 static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
@@ -1145,15 +1188,21 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 	struct vmw_tex_state_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdSetTextureState state;
-	};
+	} *cmd;
 
 	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
 		((unsigned long) header + header->size + sizeof(header));
 	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
 		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
+	struct vmw_resource_val_node *ctx_node;
 	int ret;
 
-	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	cmd = container_of(header, struct vmw_tex_state_cmd,
+			   header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->state.cid,
+				&ctx_node);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1166,6 +1215,16 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 					&cur_state->value, NULL);
 		if (unlikely(ret != 0))
 			return ret;
+
+		if (dev_priv->has_mob) {
+			struct vmw_ctx_bindinfo bi;
+
+			bi.ctx = ctx_node->res;
+			bi.bt = vmw_ctx_binding_tex;
+			bi.i1.texture_stage = cur_state->stage;
+			vmw_context_binding_add(ctx_node->staged_bindings,
+						&bi);
+		}
 	}
 
 	return 0;
@@ -1426,20 +1485,32 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
 		SVGA3dCmdHeader header;
 		SVGA3dCmdSetShader body;
 	} *cmd;
+	struct vmw_resource_val_node *ctx_node;
 	int ret;
 
 	cmd = container_of(header, struct vmw_set_shader_cmd,
 			   header);
 
-	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->body.cid,
+				&ctx_node);
 	if (unlikely(ret != 0))
 		return ret;
 
+	if (dev_priv->has_mob) {
+		struct vmw_ctx_bindinfo bi;
+
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
+					user_shader_converter,
+					&cmd->body.shid, NULL);
+		if (unlikely(ret != 0))
+			return ret;
 
-	if (dev_priv->has_mob)
-		return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
-					 user_shader_converter,
-					 &cmd->body.shid, NULL);
+		bi.ctx = ctx_node->res;
+		bi.bt = vmw_ctx_binding_shader;
+		bi.i1.shader_type = cmd->body.type;
+		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+	}
 
 	return 0;
 }
@@ -1820,6 +1891,8 @@ static void vmw_resource_list_unreference(struct list_head *list)
 	list_for_each_entry_safe(val, val_next, list, head) {
 		list_del_init(&val->head);
 		vmw_resource_unreference(&val->res);
+		if (unlikely(val->staged_bindings))
+			kfree(val->staged_bindings);
 		kfree(val);
 	}
 }
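
Taken together, the three files give the staged bindings a simple lifecycle
across one execbuf call: allocate a tracker the first time a command batch
references a context, record a binding for each SET_* command parsed, and
scrub plus free everything at unreserve time. A toy userspace sketch of that
flow, assuming nothing about the real vmwgfx types (the list handling and
all names below are stand-ins):

#include <stdio.h>
#include <stdlib.h>

struct staged_binding {
	const char *desc;
	struct staged_binding *next;
};

struct staged_state {
	struct staged_binding *head;
};

/* Step 1: first reference to a context in a batch allocates a tracker
 * (mirrors the kzalloc in vmw_cmd_res_check). */
static struct staged_state *state_create(void)
{
	return calloc(1, sizeof(struct staged_state));
}

/* Step 2: each SET_* command parsed records its binding
 * (mirrors vmw_context_binding_add). */
static void state_add(struct staged_state *s, const char *desc)
{
	struct staged_binding *b = malloc(sizeof(*b));

	b->desc = desc;
	b->next = s->head;
	s->head = b;
}

/* Step 3: unreserve scrubs every staged binding and frees the tracker
 * (mirrors vmw_context_binding_state_kill followed by kfree). */
static void state_kill(struct staged_state *s)
{
	while (s->head) {
		struct staged_binding *b = s->head;

		s->head = b->next;
		printf("scrub %s\n", b->desc);	/* stand-in for the FIFO scrub */
		free(b);
	}
	free(s);
}

int main(void)
{
	struct staged_state *staged = state_create();

	state_add(staged, "render target 0");
	state_add(staged, "texture stage 1");
	state_add(staged, "vertex shader");
	state_kill(staged);	/* execbuf exit: nothing stays bound */
	return 0;
}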