path: drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c

Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c'):
 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | 57 +++++++++++++++-----------------
 1 file changed, 25 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index f283324ce598..9f45d5004cae 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -101,6 +101,7 @@ struct vmw_cmdbuf_context {
  * @handle: DMA address handle for the command buffer space if @using_mob is
  * false. Immutable.
  * @size: The size of the command buffer space. Immutable.
+ * @num_contexts: Number of contexts actually enabled.
  */
 struct vmw_cmdbuf_man {
 	struct mutex cur_mutex;
@@ -128,6 +129,7 @@ struct vmw_cmdbuf_man {
 	bool has_pool;
 	dma_addr_t handle;
 	size_t size;
+	u32 num_contexts;
 };
 
 /**
@@ -185,7 +187,7 @@ struct vmw_cmdbuf_alloc_info {
185 187
186/* Loop over each context in the command buffer manager. */ 188/* Loop over each context in the command buffer manager. */
187#define for_each_cmdbuf_ctx(_man, _i, _ctx) \ 189#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
188 for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \ 190 for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
189 ++(_i), ++(_ctx)) 191 ++(_i), ++(_ctx))
190 192
191static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context, 193static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
@@ -514,6 +516,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
 	struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
 	int i;
 	struct vmw_cmdbuf_context *ctx;
+	bool global_block = false;
 
 	for_each_cmdbuf_ctx(man, i, ctx) {
 		INIT_LIST_HEAD(&restart_head[i]);
@@ -531,6 +534,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
 
 		list_del_init(&entry->list);
 		restart[entry->cb_context] = true;
+		global_block = true;
 
 		if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
 			DRM_ERROR("Unknown command causing device error.\n");
@@ -564,23 +568,21 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
 		cb_hdr->length -= new_start_offset;
 		cb_hdr->errorOffset = 0;
 		cb_hdr->offset = 0;
+
 		list_add_tail(&entry->list, &restart_head[entry->cb_context]);
-		man->ctx[entry->cb_context].block_submission = true;
 	}
+
+	for_each_cmdbuf_ctx(man, i, ctx)
+		man->ctx[i].block_submission = true;
+
 	spin_unlock(&man->lock);
 
-	/* Preempt all contexts with errors */
-	for_each_cmdbuf_ctx(man, i, ctx) {
-		if (ctx->block_submission && vmw_cmdbuf_preempt(man, i))
-			DRM_ERROR("Failed preempting command buffer "
-				  "context %u.\n", i);
-	}
+	/* Preempt all contexts */
+	if (global_block && vmw_cmdbuf_preempt(man, 0))
+		DRM_ERROR("Failed preempting command buffer contexts\n");
 
 	spin_lock(&man->lock);
 	for_each_cmdbuf_ctx(man, i, ctx) {
-		if (!ctx->block_submission)
-			continue;
-
 		/* Move preempted command buffers to the preempted queue. */
 		vmw_cmdbuf_ctx_process(man, ctx, &dummy);
 
@@ -594,19 +596,16 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
 		 * Finally add all command buffers first in the submitted
 		 * queue, to rerun them.
 		 */
-		list_splice_init(&restart_head[i], &ctx->submitted);
 
 		ctx->block_submission = false;
+		list_splice_init(&restart_head[i], &ctx->submitted);
 	}
 
 	vmw_cmdbuf_man_process(man);
 	spin_unlock(&man->lock);
 
-	for_each_cmdbuf_ctx(man, i, ctx) {
-		if (restart[i] && vmw_cmdbuf_startstop(man, i, true))
-			DRM_ERROR("Failed restarting command buffer "
-				  "context %u.\n", i);
-	}
+	if (global_block && vmw_cmdbuf_startstop(man, 0, true))
+		DRM_ERROR("Failed restarting command buffer contexts\n");
 
 	/* Send a new fence in case one was removed */
 	if (send_fence) {
@@ -1307,6 +1306,8 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
 	if (!man)
 		return ERR_PTR(-ENOMEM);
 
+	man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
+		2 : 1;
 	man->headers = dma_pool_create("vmwgfx cmdbuf",
 				       &dev_priv->dev->pdev->dev,
 				       sizeof(SVGACBHeader),
@@ -1341,14 +1342,11 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
 	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
 	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
 			       &dev_priv->error_waiters);
-	for_each_cmdbuf_ctx(man, i, ctx) {
-		ret = vmw_cmdbuf_startstop(man, i, true);
-		if (ret) {
-			DRM_ERROR("Failed starting command buffer "
-				  "context %u.\n", i);
-			vmw_cmdbuf_man_destroy(man);
-			return ERR_PTR(ret);
-		}
+	ret = vmw_cmdbuf_startstop(man, 0, true);
+	if (ret) {
+		DRM_ERROR("Failed starting command buffer contexts\n");
+		vmw_cmdbuf_man_destroy(man);
+		return ERR_PTR(ret);
 	}
 
 	return man;
@@ -1398,16 +1396,11 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
  */
 void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
 {
-	struct vmw_cmdbuf_context *ctx;
-	unsigned int i;
-
 	WARN_ON_ONCE(man->has_pool);
 	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
 
-	for_each_cmdbuf_ctx(man, i, ctx)
-		if (vmw_cmdbuf_startstop(man, i, false))
-			DRM_ERROR("Failed stopping command buffer "
-				  "context %u.\n", i);
+	if (vmw_cmdbuf_startstop(man, 0, false))
+		DRM_ERROR("Failed stopping command buffer contexts.\n");
 
 	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
 				  &man->dev_priv->error_waiters);