Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_context.c')
 drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 531 ++++++++++++++++++++++++++++++
 1 file changed, 531 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 00ae0925aca8..97aa55159107 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -32,12 +32,28 @@
 struct vmw_user_context {
 	struct ttm_base_object base;
 	struct vmw_resource res;
+	struct vmw_ctx_binding_state cbs;
 };
 
+
+
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *);
+
 static void vmw_user_context_free(struct vmw_resource *res);
 static struct vmw_resource *
 vmw_user_context_base_to_res(struct ttm_base_object *base);
 
+static int vmw_gb_context_create(struct vmw_resource *res);
+static int vmw_gb_context_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf);
+static int vmw_gb_context_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf);
+static int vmw_gb_context_destroy(struct vmw_resource *res);
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi);
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi);
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi);
+static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
 static uint64_t vmw_user_context_size;
 
 static const struct vmw_user_resource_conv user_context_conv = {
@@ -62,6 +78,23 @@ static const struct vmw_res_func vmw_legacy_context_func = {
 	.unbind = NULL
 };
 
+static const struct vmw_res_func vmw_gb_context_func = {
+	.res_type = vmw_res_context,
+	.needs_backup = true,
+	.may_evict = true,
+	.type_name = "guest backed contexts",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_gb_context_create,
+	.destroy = vmw_gb_context_destroy,
+	.bind = vmw_gb_context_bind,
+	.unbind = vmw_gb_context_unbind
+};
+
+static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
+	[vmw_ctx_binding_shader] = vmw_context_scrub_shader,
+	[vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
+	[vmw_ctx_binding_tex] = vmw_context_scrub_texture };
+
 /**
  * Context management:
  */
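
[Editor's note] The vmw_scrub_funcs table added above maps each binding type to the FIFO command that clears that binding on the device; vmw_context_binding_kill later in this patch dispatches through it as vmw_scrub_funcs[cb->bi.bt](&cb->bi). A minimal, self-contained sketch of the same designated-initializer dispatch idiom follows; all types and names below are simplified stand-ins, not the driver's own:

/* Hedged sketch of table dispatch on a binding-type enum. */
#include <stdio.h>

enum binding_type { binding_shader, binding_rt, binding_tex, binding_max };

struct bindinfo {
	enum binding_type bt;	/* selects the scrub routine */
};

typedef int (*scrub_func)(struct bindinfo *);

static int scrub_shader(struct bindinfo *bi) { (void)bi; puts("scrub shader"); return 0; }
static int scrub_rt(struct bindinfo *bi)     { (void)bi; puts("scrub render target"); return 0; }
static int scrub_tex(struct bindinfo *bi)    { (void)bi; puts("scrub texture"); return 0; }

/* Designated initializers keep the table correct if the enum is reordered. */
static const scrub_func scrub_funcs[binding_max] = {
	[binding_shader] = scrub_shader,
	[binding_rt]     = scrub_rt,
	[binding_tex]    = scrub_tex,
};

int main(void)
{
	struct bindinfo bi = { .bt = binding_rt };
	return scrub_funcs[bi.bt](&bi);	/* one indexed call, no switch */
}
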
@@ -76,6 +109,16 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
 	} *cmd;
 
 
+	if (res->func->destroy == vmw_gb_context_destroy) {
+		mutex_lock(&dev_priv->cmdbuf_mutex);
+		(void) vmw_gb_context_destroy(res);
+		if (dev_priv->pinned_bo != NULL &&
+		    !dev_priv->query_cid_valid)
+			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
+		mutex_unlock(&dev_priv->cmdbuf_mutex);
+		return;
+	}
+
 	vmw_execbuf_release_pinned_bo(dev_priv);
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
@@ -92,6 +135,33 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
 	vmw_3d_resource_dec(dev_priv, false);
 }
 
+static int vmw_gb_context_init(struct vmw_private *dev_priv,
+			       struct vmw_resource *res,
+			       void (*res_free) (struct vmw_resource *res))
+{
+	int ret;
+	struct vmw_user_context *uctx =
+		container_of(res, struct vmw_user_context, res);
+
+	ret = vmw_resource_init(dev_priv, res, true,
+				res_free, &vmw_gb_context_func);
+	res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
+
+	if (unlikely(ret != 0)) {
+		if (res_free)
+			res_free(res);
+		else
+			kfree(res);
+		return ret;
+	}
+
+	memset(&uctx->cbs, 0, sizeof(uctx->cbs));
+	INIT_LIST_HEAD(&uctx->cbs.list);
+
+	vmw_resource_activate(res, vmw_hw_context_destroy);
+	return 0;
+}
+
 static int vmw_context_init(struct vmw_private *dev_priv,
 			    struct vmw_resource *res,
 			    void (*res_free) (struct vmw_resource *res))
@@ -103,6 +173,9 @@ static int vmw_context_init(struct vmw_private *dev_priv,
 		SVGA3dCmdDefineContext body;
 	} *cmd;
 
+	if (dev_priv->has_mob)
+		return vmw_gb_context_init(dev_priv, res, res_free);
+
 	ret = vmw_resource_init(dev_priv, res, false,
 				res_free, &vmw_legacy_context_func);
 
@@ -154,6 +227,184 @@ struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
 	return (ret == 0) ? res : NULL;
 }
 
+
+static int vmw_gb_context_create(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineGBContext body;
+	} *cmd;
+
+	if (likely(res->id != -1))
+		return 0;
+
+	ret = vmw_resource_alloc_id(res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate a context id.\n");
+		goto out_no_id;
+	}
+
+	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
+		ret = -EBUSY;
+		goto out_no_fifo;
+	}
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "creation.\n");
+		ret = -ENOMEM;
+		goto out_no_fifo;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	(void) vmw_3d_resource_inc(dev_priv, false);
+
+	return 0;
+
+out_no_fifo:
+	vmw_resource_release_id(res);
+out_no_id:
+	return ret;
+}
+
+static int vmw_gb_context_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBContext body;
+	} *cmd;
+	struct ttm_buffer_object *bo = val_buf->bo;
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "binding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	cmd->body.mobid = bo->mem.start;
+	cmd->body.validContents = res->backup_dirty;
+	res->backup_dirty = false;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+static int vmw_gb_context_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = val_buf->bo;
+	struct vmw_fence_obj *fence;
+	struct vmw_user_context *uctx =
+		container_of(res, struct vmw_user_context, res);
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdReadbackGBContext body;
+	} *cmd1;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBContext body;
+	} *cmd2;
+	uint32_t submit_size;
+	uint8_t *cmd;
+
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	mutex_lock(&dev_priv->binding_mutex);
+	vmw_context_binding_state_kill(&uctx->cbs);
+
+	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
+
+	cmd = vmw_fifo_reserve(dev_priv, submit_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "unbinding.\n");
+		mutex_unlock(&dev_priv->binding_mutex);
+		return -ENOMEM;
+	}
+
+	cmd2 = (void *) cmd;
+	if (readback) {
+		cmd1 = (void *) cmd;
+		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
+		cmd1->header.size = sizeof(cmd1->body);
+		cmd1->body.cid = res->id;
+		cmd2 = (void *) (&cmd1[1]);
+	}
+	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
+	cmd2->header.size = sizeof(cmd2->body);
+	cmd2->body.cid = res->id;
+	cmd2->body.mobid = SVGA3D_INVALID_ID;
+
+	vmw_fifo_commit(dev_priv, submit_size);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	/*
+	 * Create a fence object and fence the backup buffer.
+	 */
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
+					  &fence, NULL);
+
+	vmw_fence_single_bo(bo, fence);
+
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
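[Editor's note] vmw_gb_context_unbind above packs an optional READBACK command and the unconditional bind to SVGA3D_INVALID_ID into a single FIFO reservation, sizing it as submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0). A hedged userspace sketch of that variable-length packing pattern, using simplified stand-in command structs rather than the SVGA ones:

/* Hedged sketch; command structs and ids are stand-ins. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cmd_header { uint32_t id; uint32_t size; };
struct cmd_readback { struct cmd_header header; uint32_t cid; };
struct cmd_bind { struct cmd_header header; uint32_t cid; uint32_t mobid; };

#define CMD_READBACK 1u
#define CMD_BIND     2u
#define INVALID_ID   0xffffffffu

/* Pack [optional readback][bind to invalid mob] into one buffer. */
static size_t pack_unbind(uint8_t *buf, uint32_t cid, int readback)
{
	size_t submit_size = sizeof(struct cmd_bind) +
		(readback ? sizeof(struct cmd_readback) : 0);
	uint8_t *p = buf;

	if (readback) {
		struct cmd_readback c1 = {
			.header = { CMD_READBACK, sizeof(uint32_t) },
			.cid = cid,
		};
		memcpy(p, &c1, sizeof(c1));
		p += sizeof(c1);	/* cmd2 = (void *)&cmd1[1] in the driver */
	}

	struct cmd_bind c2 = {
		.header = { CMD_BIND, 2 * sizeof(uint32_t) },
		.cid = cid,
		.mobid = INVALID_ID,	/* detach the backing MOB */
	};
	memcpy(p, &c2, sizeof(c2));

	return submit_size;
}

int main(void)
{
	uint8_t buf[64];

	printf("submit_size with readback: %zu\n", pack_unbind(buf, 3, 1));
	printf("submit_size without:       %zu\n", pack_unbind(buf, 3, 0));
	return 0;
}
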
+static int vmw_gb_context_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyGBContext body;
+	} *cmd;
+	struct vmw_user_context *uctx =
+		container_of(res, struct vmw_user_context, res);
+
+	BUG_ON(!list_empty(&uctx->cbs.list));
+
+	if (likely(res->id == -1))
+		return 0;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "destruction.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	if (dev_priv->query_cid == res->id)
+		dev_priv->query_cid_valid = false;
+	vmw_resource_release_id(res);
+	vmw_3d_resource_dec(dev_priv, false);
+
+	return 0;
+}
+
 /**
  * User-space context management:
  */
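
[Editor's note] The four functions above are exactly what vmw_gb_context_func wires into the resource framework: create allocates a device id and defines the context, bind/unbind attach and detach the MOB backing store as the resource is validated or evicted, and destroy tears the context down. A hedged, self-contained sketch of that vtable-driven lifecycle, with stand-in types rather than the driver's struct vmw_res_func:

/* Hedged sketch of the resource-vtable lifecycle; names are stand-ins. */
#include <stdio.h>

struct res;

struct res_func {
	const char *type_name;
	int (*create)(struct res *res);
	int (*bind)(struct res *res);
	int (*unbind)(struct res *res, int readback);
	int (*destroy)(struct res *res);
};

struct res {
	int id;			/* -1 while not created on the device */
	const struct res_func *func;
};

static int gb_create(struct res *res)
{
	if (res->id != -1)	/* already created: nothing to do */
		return 0;
	res->id = 7;		/* stands in for vmw_resource_alloc_id() */
	puts("DEFINE_GB_CONTEXT");
	return 0;
}

static int gb_bind(struct res *res)
{
	(void)res;
	puts("BIND_GB_CONTEXT mob");
	return 0;
}

static int gb_unbind(struct res *res, int readback)
{
	(void)res;
	if (readback)
		puts("READBACK_GB_CONTEXT");
	puts("BIND_GB_CONTEXT invalid");	/* detach the MOB */
	return 0;
}

static int gb_destroy(struct res *res)
{
	if (res->id == -1)
		return 0;
	puts("DESTROY_GB_CONTEXT");
	res->id = -1;
	return 0;
}

static const struct res_func gb_context_func = {
	"guest backed contexts", gb_create, gb_bind, gb_unbind, gb_destroy,
};

int main(void)
{
	struct res ctx = { .id = -1, .func = &gb_context_func };

	ctx.func->create(&ctx);		/* first validation defines the context */
	ctx.func->bind(&ctx);		/* attach backup before GPU use */
	ctx.func->unbind(&ctx, 1);	/* eviction: read state back first */
	return ctx.func->destroy(&ctx);
}
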
@@ -272,3 +523,283 @@ out_unlock:
 	return ret;
 
 }
+
+/**
+ * vmw_context_scrub_shader - scrub a shader binding from a context.
+ *
+ * @bi: single binding information.
+ */
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
+{
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetShader body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for shader "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = bi->ctx->id;
+	cmd->body.type = bi->i1.shader_type;
+	cmd->body.shid = SVGA3D_INVALID_ID;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_context_scrub_render_target - scrub a render target binding
+ * from a context.
+ *
+ * @bi: single binding information.
+ */
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
+{
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetRenderTarget body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for render target "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = bi->ctx->id;
+	cmd->body.type = bi->i1.rt_type;
+	cmd->body.target.sid = SVGA3D_INVALID_ID;
+	cmd->body.target.face = 0;
+	cmd->body.target.mipmap = 0;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_context_scrub_texture - scrub a texture binding from a context.
+ *
+ * @bi: single binding information.
+ *
+ * TODO: Possibly complement this function with a function that takes
+ * a list of texture bindings and combines them to a single command.
+ */
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
+{
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		struct {
+			SVGA3dCmdSetTextureState c;
+			SVGA3dTextureState s1;
+		} body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for texture "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+
+	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.c.cid = bi->ctx->id;
+	cmd->body.s1.stage = bi->i1.texture_stage;
+	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
+	cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_context_binding_drop: Stop tracking a context binding
+ *
+ * @cb: Pointer to binding tracker storage.
+ *
+ * Stops tracking a context binding, and re-initializes its storage.
+ * Typically used when the context binding is replaced with a binding to
+ * another (or the same, for that matter) resource.
+ */
+static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
+{
+	list_del(&cb->ctx_list);
+	if (!list_empty(&cb->res_list))
+		list_del(&cb->res_list);
+	cb->bi.ctx = NULL;
+}
+
+/**
+ * vmw_context_binding_add: Start tracking a context binding
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ * @bi: Information about the binding to track.
+ *
+ * Performs basic checks on the binding to make sure arguments are within
+ * bounds and then starts tracking the binding in the context binding
+ * state structure @cbs.
+ */
+int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
+			    const struct vmw_ctx_bindinfo *bi)
+{
+	struct vmw_ctx_binding *loc;
+
+	switch (bi->bt) {
+	case vmw_ctx_binding_rt:
+		if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
+			DRM_ERROR("Illegal render target type %u.\n",
+				  (unsigned) bi->i1.rt_type);
+			return -EINVAL;
+		}
+		loc = &cbs->render_targets[bi->i1.rt_type];
+		break;
+	case vmw_ctx_binding_tex:
+		if (unlikely((unsigned)bi->i1.texture_stage >=
+			     SVGA3D_NUM_TEXTURE_UNITS)) {
+			DRM_ERROR("Illegal texture/sampler unit %u.\n",
+				  (unsigned) bi->i1.texture_stage);
+			return -EINVAL;
+		}
+		loc = &cbs->texture_units[bi->i1.texture_stage];
+		break;
+	case vmw_ctx_binding_shader:
+		if (unlikely((unsigned)bi->i1.shader_type >=
+			     SVGA3D_SHADERTYPE_MAX)) {
+			DRM_ERROR("Illegal shader type %u.\n",
+				  (unsigned) bi->i1.shader_type);
+			return -EINVAL;
+		}
+		loc = &cbs->shaders[bi->i1.shader_type];
+		break;
+	default:
+		BUG();
+	}
+
+	if (loc->bi.ctx != NULL)
+		vmw_context_binding_drop(loc);
+
+	loc->bi = *bi;
+	list_add_tail(&loc->ctx_list, &cbs->list);
+	INIT_LIST_HEAD(&loc->res_list);
+
+	return 0;
+}
+
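[Editor's note] A hedged sketch of how a caller, for instance the execbuf command verifier, might stage a render-target binding through vmw_context_binding_add. This is a kernel-style fragment, not standalone code: the helper stage_rt_binding and its argument values are hypothetical, but the struct fields match those used above:

/* Hypothetical caller; only vmw_context_binding_add is from this patch. */
static int stage_rt_binding(struct vmw_ctx_binding_state *staged,
			    struct vmw_resource *ctx,
			    struct vmw_resource *surface,
			    SVGA3dRenderTargetType rt_type)
{
	struct vmw_ctx_bindinfo bi;

	memset(&bi, 0, sizeof(bi));
	bi.ctx = ctx;
	bi.res = surface;		/* resource backing the render target */
	bi.bt = vmw_ctx_binding_rt;
	bi.i1.rt_type = rt_type;	/* bounds-checked against SVGA3D_RT_MAX */

	return vmw_context_binding_add(staged, &bi);	/* -EINVAL if out of range */
}
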
+/**
+ * vmw_context_binding_transfer: Transfer a context binding tracking entry.
+ *
+ * @cbs: Pointer to the persistent context binding state tracker.
+ * @bi: Information about the binding to track.
+ *
+ */
+static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
+					 const struct vmw_ctx_bindinfo *bi)
+{
+	struct vmw_ctx_binding *loc;
+
+	switch (bi->bt) {
+	case vmw_ctx_binding_rt:
+		loc = &cbs->render_targets[bi->i1.rt_type];
+		break;
+	case vmw_ctx_binding_tex:
+		loc = &cbs->texture_units[bi->i1.texture_stage];
+		break;
+	case vmw_ctx_binding_shader:
+		loc = &cbs->shaders[bi->i1.shader_type];
+		break;
+	default:
+		BUG();
+	}
+
+	if (loc->bi.ctx != NULL)
+		vmw_context_binding_drop(loc);
+
+	loc->bi = *bi;
+	list_add_tail(&loc->ctx_list, &cbs->list);
+	if (bi->res != NULL)
+		list_add_tail(&loc->res_list, &bi->res->binding_head);
+	else
+		INIT_LIST_HEAD(&loc->res_list);
+}
+
+/**
+ * vmw_context_binding_kill - Kill a binding on the device
+ * and stop tracking it.
+ *
+ * @cb: Pointer to binding tracker storage.
+ *
+ * Emits FIFO commands to scrub a binding represented by @cb.
+ * Then stops tracking the binding and re-initializes its storage.
+ */
+void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
+{
+	(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi);
+	vmw_context_binding_drop(cb);
+}
+
+/**
+ * vmw_context_binding_state_kill - Kill all bindings associated with a
+ * struct vmw_ctx_binding_state structure, and re-initialize the structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker. Then re-initializes the whole structure.
+ */
+static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_binding *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
+		vmw_context_binding_kill(entry);
+}
+
+/**
+ * vmw_context_binding_res_list_kill - Kill all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Kills all bindings associated with a specific resource. Typically
+ * called before the resource is destroyed.
+ */
+void vmw_context_binding_res_list_kill(struct list_head *head)
+{
+	struct vmw_ctx_binding *entry, *next;
+
+	list_for_each_entry_safe(entry, next, head, res_list)
+		vmw_context_binding_kill(entry);
+}
+
+/**
+ * vmw_context_binding_state_transfer - Commit staged binding info
+ *
+ * @ctx: Pointer to context to commit the staged binding info to.
+ * @from: Staged binding info built during execbuf.
+ *
+ * Transfers binding info from a temporary structure to the persistent
+ * structure in the context. This can be done once commands have been
+ * submitted to hardware.
+ */
+void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
+					struct vmw_ctx_binding_state *from)
+{
+	struct vmw_user_context *uctx =
+		container_of(ctx, struct vmw_user_context, res);
+	struct vmw_ctx_binding *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &from->list, ctx_list)
+		vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
+}