diff options
author | Thomas Hellstrom <thellstrom@vmware.com> | 2018-03-22 05:15:23 -0400 |
---|---|---|
committer | Thomas Hellstrom <thellstrom@vmware.com> | 2018-03-22 07:08:23 -0400 |
commit | dc366364c4ef809dccd063919314301f8ba01ac2 (patch) | |
tree | 08090face26346c75e35b37d53d175d7c95ff9b4 /drivers | |
parent | ef86cfee7d74baf2e3b883871087a684acecb595 (diff) |
drm/vmwgfx: Fix multiple command buffer context use
The start / stop and preempt commands don't honor the context argument
but rather act on all available contexts.
Also add detection for context 1 availability.
Note that currently there's no driver interface for submitting buffers
using the high-priority command queue (context 1).
Testing done:
Change the default context for command submission to 1 instead of 0,
verify basic desktop functionality including faulty command injection and
recovery.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/gpu/drm/vmwgfx/device_include/svga_reg.h | 12 | ||||
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | 57 | ||||
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 2 |
3 files changed, 38 insertions, 33 deletions
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h index 6e0ccb70a700..88e72bf9a534 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h | |||
@@ -372,6 +372,14 @@ SVGAGuestPtr; | |||
372 | * PA, not biased by the offset. When the command buffer is finished | 372 | * PA, not biased by the offset. When the command buffer is finished |
373 | * the guest should not read the offset field as there is no guarantee | 373 | * the guest should not read the offset field as there is no guarantee |
374 | * what it will set to. | 374 | * what it will set to. |
375 | * | ||
376 | * When the SVGA_CAP_HP_CMD_QUEUE cap bit is set a new command queue | ||
377 | * SVGA_CB_CONTEXT_1 is available. Commands submitted to this queue | ||
378 | * will be executed as quickly as possible by the SVGA device | ||
379 | * potentially before already queued commands on SVGA_CB_CONTEXT_0. | ||
380 | * The SVGA device guarantees that any command buffers submitted to | ||
381 | * SVGA_CB_CONTEXT_0 will be executed after any _already_ submitted | ||
382 | * command buffers to SVGA_CB_CONTEXT_1. | ||
375 | */ | 383 | */ |
376 | 384 | ||
377 | #define SVGA_CB_MAX_SIZE (512 * 1024) /* 512 KB */ | 385 | #define SVGA_CB_MAX_SIZE (512 * 1024) /* 512 KB */ |
@@ -382,7 +390,8 @@ SVGAGuestPtr; | |||
382 | typedef enum { | 390 | typedef enum { |
383 | SVGA_CB_CONTEXT_DEVICE = 0x3f, | 391 | SVGA_CB_CONTEXT_DEVICE = 0x3f, |
384 | SVGA_CB_CONTEXT_0 = 0x0, | 392 | SVGA_CB_CONTEXT_0 = 0x0, |
385 | SVGA_CB_CONTEXT_MAX = 0x1, | 393 | SVGA_CB_CONTEXT_1 = 0x1, /* Supported with SVGA_CAP_HP_CMD_QUEUE */ |
394 | SVGA_CB_CONTEXT_MAX = 0x2, | ||
386 | } SVGACBContext; | 395 | } SVGACBContext; |
387 | 396 | ||
388 | 397 | ||
@@ -689,6 +698,7 @@ SVGASignedPoint; | |||
689 | #define SVGA_CAP_CMD_BUFFERS_2 0x04000000 | 698 | #define SVGA_CAP_CMD_BUFFERS_2 0x04000000 |
690 | #define SVGA_CAP_GBOBJECTS 0x08000000 | 699 | #define SVGA_CAP_GBOBJECTS 0x08000000 |
691 | #define SVGA_CAP_DX 0x10000000 | 700 | #define SVGA_CAP_DX 0x10000000 |
701 | #define SVGA_CAP_HP_CMD_QUEUE 0x20000000 | ||
692 | 702 | ||
693 | #define SVGA_CAP_CMD_RESERVED 0x80000000 | 703 | #define SVGA_CAP_CMD_RESERVED 0x80000000 |
694 | 704 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c index f283324ce598..9f45d5004cae 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | |||
@@ -101,6 +101,7 @@ struct vmw_cmdbuf_context { | |||
101 | * @handle: DMA address handle for the command buffer space if @using_mob is | 101 | * @handle: DMA address handle for the command buffer space if @using_mob is |
102 | * false. Immutable. | 102 | * false. Immutable. |
103 | * @size: The size of the command buffer space. Immutable. | 103 | * @size: The size of the command buffer space. Immutable. |
104 | * @num_contexts: Number of contexts actually enabled. | ||
104 | */ | 105 | */ |
105 | struct vmw_cmdbuf_man { | 106 | struct vmw_cmdbuf_man { |
106 | struct mutex cur_mutex; | 107 | struct mutex cur_mutex; |
@@ -128,6 +129,7 @@ struct vmw_cmdbuf_man { | |||
128 | bool has_pool; | 129 | bool has_pool; |
129 | dma_addr_t handle; | 130 | dma_addr_t handle; |
130 | size_t size; | 131 | size_t size; |
132 | u32 num_contexts; | ||
131 | }; | 133 | }; |
132 | 134 | ||
133 | /** | 135 | /** |
@@ -185,7 +187,7 @@ struct vmw_cmdbuf_alloc_info { | |||
185 | 187 | ||
186 | /* Loop over each context in the command buffer manager. */ | 188 | /* Loop over each context in the command buffer manager. */ |
187 | #define for_each_cmdbuf_ctx(_man, _i, _ctx) \ | 189 | #define for_each_cmdbuf_ctx(_man, _i, _ctx) \ |
188 | for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \ | 190 | for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \ |
189 | ++(_i), ++(_ctx)) | 191 | ++(_i), ++(_ctx)) |
190 | 192 | ||
191 | static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context, | 193 | static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context, |
@@ -514,6 +516,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work) | |||
514 | struct list_head restart_head[SVGA_CB_CONTEXT_MAX]; | 516 | struct list_head restart_head[SVGA_CB_CONTEXT_MAX]; |
515 | int i; | 517 | int i; |
516 | struct vmw_cmdbuf_context *ctx; | 518 | struct vmw_cmdbuf_context *ctx; |
519 | bool global_block = false; | ||
517 | 520 | ||
518 | for_each_cmdbuf_ctx(man, i, ctx) { | 521 | for_each_cmdbuf_ctx(man, i, ctx) { |
519 | INIT_LIST_HEAD(&restart_head[i]); | 522 | INIT_LIST_HEAD(&restart_head[i]); |
@@ -531,6 +534,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work) | |||
531 | 534 | ||
532 | list_del_init(&entry->list); | 535 | list_del_init(&entry->list); |
533 | restart[entry->cb_context] = true; | 536 | restart[entry->cb_context] = true; |
537 | global_block = true; | ||
534 | 538 | ||
535 | if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) { | 539 | if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) { |
536 | DRM_ERROR("Unknown command causing device error.\n"); | 540 | DRM_ERROR("Unknown command causing device error.\n"); |
@@ -564,23 +568,21 @@ static void vmw_cmdbuf_work_func(struct work_struct *work) | |||
564 | cb_hdr->length -= new_start_offset; | 568 | cb_hdr->length -= new_start_offset; |
565 | cb_hdr->errorOffset = 0; | 569 | cb_hdr->errorOffset = 0; |
566 | cb_hdr->offset = 0; | 570 | cb_hdr->offset = 0; |
571 | |||
567 | list_add_tail(&entry->list, &restart_head[entry->cb_context]); | 572 | list_add_tail(&entry->list, &restart_head[entry->cb_context]); |
568 | man->ctx[entry->cb_context].block_submission = true; | ||
569 | } | 573 | } |
574 | |||
575 | for_each_cmdbuf_ctx(man, i, ctx) | ||
576 | man->ctx[i].block_submission = true; | ||
577 | |||
570 | spin_unlock(&man->lock); | 578 | spin_unlock(&man->lock); |
571 | 579 | ||
572 | /* Preempt all contexts with errors */ | 580 | /* Preempt all contexts */ |
573 | for_each_cmdbuf_ctx(man, i, ctx) { | 581 | if (global_block && vmw_cmdbuf_preempt(man, 0)) |
574 | if (ctx->block_submission && vmw_cmdbuf_preempt(man, i)) | 582 | DRM_ERROR("Failed preempting command buffer contexts\n"); |
575 | DRM_ERROR("Failed preempting command buffer " | ||
576 | "context %u.\n", i); | ||
577 | } | ||
578 | 583 | ||
579 | spin_lock(&man->lock); | 584 | spin_lock(&man->lock); |
580 | for_each_cmdbuf_ctx(man, i, ctx) { | 585 | for_each_cmdbuf_ctx(man, i, ctx) { |
581 | if (!ctx->block_submission) | ||
582 | continue; | ||
583 | |||
584 | /* Move preempted command buffers to the preempted queue. */ | 586 | /* Move preempted command buffers to the preempted queue. */ |
585 | vmw_cmdbuf_ctx_process(man, ctx, &dummy); | 587 | vmw_cmdbuf_ctx_process(man, ctx, &dummy); |
586 | 588 | ||
@@ -594,19 +596,16 @@ static void vmw_cmdbuf_work_func(struct work_struct *work) | |||
594 | * Finally add all command buffers first in the submitted | 596 | * Finally add all command buffers first in the submitted |
595 | * queue, to rerun them. | 597 | * queue, to rerun them. |
596 | */ | 598 | */ |
597 | list_splice_init(&restart_head[i], &ctx->submitted); | ||
598 | 599 | ||
599 | ctx->block_submission = false; | 600 | ctx->block_submission = false; |
601 | list_splice_init(&restart_head[i], &ctx->submitted); | ||
600 | } | 602 | } |
601 | 603 | ||
602 | vmw_cmdbuf_man_process(man); | 604 | vmw_cmdbuf_man_process(man); |
603 | spin_unlock(&man->lock); | 605 | spin_unlock(&man->lock); |
604 | 606 | ||
605 | for_each_cmdbuf_ctx(man, i, ctx) { | 607 | if (global_block && vmw_cmdbuf_startstop(man, 0, true)) |
606 | if (restart[i] && vmw_cmdbuf_startstop(man, i, true)) | 608 | DRM_ERROR("Failed restarting command buffer contexts\n"); |
607 | DRM_ERROR("Failed restarting command buffer " | ||
608 | "context %u.\n", i); | ||
609 | } | ||
610 | 609 | ||
611 | /* Send a new fence in case one was removed */ | 610 | /* Send a new fence in case one was removed */ |
612 | if (send_fence) { | 611 | if (send_fence) { |
@@ -1307,6 +1306,8 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv) | |||
1307 | if (!man) | 1306 | if (!man) |
1308 | return ERR_PTR(-ENOMEM); | 1307 | return ERR_PTR(-ENOMEM); |
1309 | 1308 | ||
1309 | man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ? | ||
1310 | 2 : 1; | ||
1310 | man->headers = dma_pool_create("vmwgfx cmdbuf", | 1311 | man->headers = dma_pool_create("vmwgfx cmdbuf", |
1311 | &dev_priv->dev->pdev->dev, | 1312 | &dev_priv->dev->pdev->dev, |
1312 | sizeof(SVGACBHeader), | 1313 | sizeof(SVGACBHeader), |
@@ -1341,14 +1342,11 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv) | |||
1341 | INIT_WORK(&man->work, &vmw_cmdbuf_work_func); | 1342 | INIT_WORK(&man->work, &vmw_cmdbuf_work_func); |
1342 | vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR, | 1343 | vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR, |
1343 | &dev_priv->error_waiters); | 1344 | &dev_priv->error_waiters); |
1344 | for_each_cmdbuf_ctx(man, i, ctx) { | 1345 | ret = vmw_cmdbuf_startstop(man, 0, true); |
1345 | ret = vmw_cmdbuf_startstop(man, i, true); | 1346 | if (ret) { |
1346 | if (ret) { | 1347 | DRM_ERROR("Failed starting command buffer contexts\n"); |
1347 | DRM_ERROR("Failed starting command buffer " | 1348 | vmw_cmdbuf_man_destroy(man); |
1348 | "context %u.\n", i); | 1349 | return ERR_PTR(ret); |
1349 | vmw_cmdbuf_man_destroy(man); | ||
1350 | return ERR_PTR(ret); | ||
1351 | } | ||
1352 | } | 1350 | } |
1353 | 1351 | ||
1354 | return man; | 1352 | return man; |
@@ -1398,16 +1396,11 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man) | |||
1398 | */ | 1396 | */ |
1399 | void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man) | 1397 | void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man) |
1400 | { | 1398 | { |
1401 | struct vmw_cmdbuf_context *ctx; | ||
1402 | unsigned int i; | ||
1403 | |||
1404 | WARN_ON_ONCE(man->has_pool); | 1399 | WARN_ON_ONCE(man->has_pool); |
1405 | (void) vmw_cmdbuf_idle(man, false, 10*HZ); | 1400 | (void) vmw_cmdbuf_idle(man, false, 10*HZ); |
1406 | 1401 | ||
1407 | for_each_cmdbuf_ctx(man, i, ctx) | 1402 | if (vmw_cmdbuf_startstop(man, 0, false)) |
1408 | if (vmw_cmdbuf_startstop(man, i, false)) | 1403 | DRM_ERROR("Failed stopping command buffer contexts.\n"); |
1409 | DRM_ERROR("Failed stopping command buffer " | ||
1410 | "context %u.\n", i); | ||
1411 | 1404 | ||
1412 | vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR, | 1405 | vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR, |
1413 | &man->dev_priv->error_waiters); | 1406 | &man->dev_priv->error_waiters); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 184340d486c3..5055e5f68c4f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -301,6 +301,8 @@ static void vmw_print_capabilities(uint32_t capabilities) | |||
301 | DRM_INFO(" Guest Backed Resources.\n"); | 301 | DRM_INFO(" Guest Backed Resources.\n"); |
302 | if (capabilities & SVGA_CAP_DX) | 302 | if (capabilities & SVGA_CAP_DX) |
303 | DRM_INFO(" DX Features.\n"); | 303 | DRM_INFO(" DX Features.\n"); |
304 | if (capabilities & SVGA_CAP_HP_CMD_QUEUE) | ||
305 | DRM_INFO(" HP Command Queue.\n"); | ||
304 | } | 306 | } |
305 | 307 | ||
306 | /** | 308 | /** |