author		Thomas Hellstrom <thellstrom@vmware.com>	2015-10-21 15:31:49 -0400
committer	Thomas Hellstrom <thellstrom@vmware.com>	2015-10-21 15:31:49 -0400
commit		09dc1387c9c06cdaf55bc99b35238bd2ec0aed4b (patch)
tree		58442a83eb12933095397bc3c9905866abb3a31a
parent		ed7d78b2da32198ca4c70172e3b63c6b3e2c570b (diff)
drm/vmwgfx: Stabilize the command buffer submission code
This commit addresses some stability problems with the command buffer
submission code recently introduced:

1) Make the vmw_cmdbuf_man_process() function handle reruns internally
   to avoid losing interrupts if the caller forgets to rerun on -EAGAIN.
2) Handle default command buffer allocations using inline command
   buffers. This avoids rare allocation deadlocks.
3) In case of command buffer errors we might lose fence submissions.
   Therefore send a new fence after each command buffer error. This
   will help avoid lengthy fence waits.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
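To make fix 1) concrete, here is the call-site contract before and after
this patch, condensed from the hunks below (not additional driver code):

	/* Before: every call site had to remember the -EAGAIN rerun. */
	if (vmw_cmdbuf_man_process(man) == -EAGAIN)
		vmw_cmdbuf_man_process(man); /* forget this and a just-enabled
					      * IRQ's wakeup can be missed */

	/* After: the rerun happens internally; callers simply call once. */
	vmw_cmdbuf_man_process(man);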
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c	34
1 file changed, 20 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 8a76821177a6..6377e8151000 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -415,16 +415,16 @@ static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
  *
  * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
  * command buffers left that are not submitted to hardware, Make sure
- * IRQ handling is turned on. Otherwise, make sure it's turned off. This
- * function may return -EAGAIN to indicate it should be rerun due to
- * possibly missed IRQs if IRQs has just been turned on.
+ * IRQ handling is turned on. Otherwise, make sure it's turned off.
  */
-static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
+static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
 {
-	int notempty = 0;
+	int notempty;
 	struct vmw_cmdbuf_context *ctx;
 	int i;
 
+retry:
+	notempty = 0;
 	for_each_cmdbuf_ctx(man, i, ctx)
 		vmw_cmdbuf_ctx_process(man, ctx, &notempty);
 
@@ -440,10 +440,8 @@ static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
 		man->irq_on = true;
 
 		/* Rerun in case we just missed an irq. */
-		return -EAGAIN;
+		goto retry;
 	}
-
-	return 0;
 }
 
 /**
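For reference, a sketch of how vmw_cmdbuf_man_process() reads after the
two hunks above are applied; the IRQ waiter enable/disable bodies between
the hunks are untouched by this patch and are elided here:

	static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
	{
		int notempty;
		struct vmw_cmdbuf_context *ctx;
		int i;

	retry:
		notempty = 0;
		for_each_cmdbuf_ctx(man, i, ctx)
			vmw_cmdbuf_ctx_process(man, ctx, &notempty);

		if (man->irq_on && !notempty) {
			/* ... turn IRQ handling off (elided, unchanged) ... */
		} else if (!man->irq_on && notempty) {
			/* ... turn IRQ handling on (elided, unchanged) ... */
			man->irq_on = true;

			/* Rerun in case we just missed an irq. */
			goto retry;
		}
	}

The loop is self-limiting: the retry is only taken on the off-to-on IRQ
transition, and irq_on is set before the goto, so the next pass cannot
take it again until IRQs have been turned off in between.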
@@ -468,8 +466,7 @@ static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
 	header->cb_context = cb_context;
 	list_add_tail(&header->list, &man->ctx[cb_context].submitted);
 
-	if (vmw_cmdbuf_man_process(man) == -EAGAIN)
-		vmw_cmdbuf_man_process(man);
+	vmw_cmdbuf_man_process(man);
 }
 
 /**
@@ -488,8 +485,7 @@ static void vmw_cmdbuf_man_tasklet(unsigned long data)
 	struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
 
 	spin_lock(&man->lock);
-	if (vmw_cmdbuf_man_process(man) == -EAGAIN)
-		(void) vmw_cmdbuf_man_process(man);
+	vmw_cmdbuf_man_process(man);
 	spin_unlock(&man->lock);
 }
 
@@ -507,6 +503,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
 	struct vmw_cmdbuf_man *man =
 		container_of(work, struct vmw_cmdbuf_man, work);
 	struct vmw_cmdbuf_header *entry, *next;
+	uint32_t dummy;
 	bool restart = false;
 
 	spin_lock_bh(&man->lock);
@@ -523,6 +520,8 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
 	if (restart && vmw_cmdbuf_startstop(man, true))
 		DRM_ERROR("Failed restarting command buffer context 0.\n");
 
+	/* Send a new fence in case one was removed */
+	vmw_fifo_send_fence(man->dev_priv, &dummy);
 }
 
 /**
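Fix 3) covers the case where an errored command buffer carried a fence
command: that fence would never signal, leaving waiters to block until
timeout. After the error worker has freed the bad buffers and possibly
restarted the context, it now emits a fresh fence. A sketch of the
resulting worker, with the unchanged error-list walk (including the lock
drop) elided:

	static void vmw_cmdbuf_work_func(struct work_struct *work)
	{
		struct vmw_cmdbuf_man *man =
			container_of(work, struct vmw_cmdbuf_man, work);
		struct vmw_cmdbuf_header *entry, *next;
		uint32_t dummy;	/* receives the new fence seqno; unused */
		bool restart = false;

		spin_lock_bh(&man->lock);
		/* ... walk the error list, log and free errored headers,
		 * set restart, drop the lock (elided, unchanged) ... */

		if (restart && vmw_cmdbuf_startstop(man, true))
			DRM_ERROR("Failed restarting command buffer context 0.\n");

		/* Send a new fence in case one was removed */
		vmw_fifo_send_fence(man->dev_priv, &dummy);
	}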
@@ -682,7 +681,7 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
 					 DRM_MM_SEARCH_DEFAULT,
 					 DRM_MM_CREATE_DEFAULT);
 	if (ret) {
-		(void) vmw_cmdbuf_man_process(man);
+		vmw_cmdbuf_man_process(man);
 		ret = drm_mm_insert_node_generic(&man->mm, info->node,
 						 info->page_size, 0, 0,
 						 DRM_MM_SEARCH_DEFAULT,
@@ -1168,7 +1167,14 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
 	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
 
 	man->has_pool = true;
-	man->default_size = default_size;
+
+	/*
+	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
+	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
+	 * needs to wait for space and we block on further command
+	 * submissions to be able to free up space.
+	 */
+	man->default_size = VMW_CMDBUF_INLINE_SIZE;
 	DRM_INFO("Using command buffers with %s pool.\n",
 		 (man->using_mob) ? "MOB" : "DMA");
 
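The default_size change in the last hunk is fix 2). Small command buffers
can live in storage allocated inline with their header, while larger ones
carve space out of the shared pool and may sleep in vmw_cmdbuf_space_pool()
until space frees up. If the path that frees pool space is itself stalled
behind the waiting submission, that sleep can deadlock. Capping the default
size at VMW_CMDBUF_INLINE_SIZE keeps default allocations on the inline
path. A sketch of the size-based dispatch this relies on; the pool-side
name comes from the new comment in the hunk above, while the inline-side
helper name is an assumption:

	/* Hypothetical condensation of the allocation decision: */
	if (size <= VMW_CMDBUF_INLINE_SIZE)
		ret = vmw_cmdbuf_space_inline(man, header, size); /* no waiting */
	else
		ret = vmw_cmdbuf_space_pool(man, header, size);   /* may sleep */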