author      Dave Airlie <airlied@redhat.com>    2017-08-28 20:38:14 -0400
committer   Dave Airlie <airlied@redhat.com>    2017-08-28 20:38:14 -0400
commit      7846b12fe0b5feab5446d892f41b5140c1419109 (patch)
tree        d507942842bbc7b6aa6be3cc77b2c2c05f004a6a
parent      7ebdb0dd52404907b8eac2bab476b43a8b8aa9f1 (diff)
parent      d78acfe934e3b9f533f72ee3dde0982935fc2b32 (diff)
Merge branch 'drm-vmwgfx-next' of git://people.freedesktop.org/~syeh/repos_linux into drm-next
vmwgfx add fence fd support.
* 'drm-vmwgfx-next' of git://people.freedesktop.org/~syeh/repos_linux:
drm/vmwgfx: Bump the version for fence FD support
drm/vmwgfx: Add export fence to file descriptor support
drm/vmwgfx: Add support for imported Fence File Descriptor
drm/vmwgfx: Prepare to support fence fd
drm/vmwgfx: Fix incorrect command header offset at restart
drm/vmwgfx: Support the NOP_ERROR command
drm/vmwgfx: Restart command buffers after errors
drm/vmwgfx: Move irq bottom half processing to threads
drm/vmwgfx: Don't use drm_irq_[un]install
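
As a rough illustration of how the new execbuf flags are meant to be consumed from user space, here is a minimal sketch built around the pieces visible in the diff below (DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD, DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD, drm_vmw_execbuf_arg.imported_fence_fd and drm_vmw_fence_rep.fd). The `commands` and `version` fields, the DRM_VMW_EXECBUF_VERSION constant and the libdrm drmCommandWrite() wrapper used here are assumptions for illustration only and are not taken from this series.

/*
 * Hypothetical user-space sketch: export a sync_file FD from one
 * submission and make a later submission wait on it.  Only the two
 * flags, imported_fence_fd and fence_rep.fd come from this series;
 * the remaining field names and the drmCommandWrite() wrapper are
 * assumptions for illustration.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

static int vmw_submit_export_fd(int drm_fd, const void *cmds, __u32 size,
				struct drm_vmw_fence_rep *rep)
{
	struct drm_vmw_execbuf_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.commands = (uintptr_t)cmds;		/* assumed field name */
	arg.command_size = size;
	arg.fence_rep = (uintptr_t)rep;
	arg.version = DRM_VMW_EXECBUF_VERSION;	/* assumed constant */
	arg.flags = DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD;

	if (drmCommandWrite(drm_fd, DRM_VMW_EXECBUF, &arg, sizeof(arg)))
		return -1;

	return rep->fd;		/* sync_file FD installed by the kernel */
}

static int vmw_submit_after_fd(int drm_fd, const void *cmds, __u32 size,
			       int fence_fd)
{
	struct drm_vmw_execbuf_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.commands = (uintptr_t)cmds;		/* assumed field name */
	arg.command_size = size;
	arg.version = DRM_VMW_EXECBUF_VERSION;	/* assumed constant */
	arg.flags = DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD;
	arg.imported_fence_fd = fence_fd;	/* kernel waits on this first */

	return drmCommandWrite(drm_fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
}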
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c   | 242
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c      |  11
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h      |  39
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  | 148
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c    | 104
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.h    |   4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c      | 111
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c      |   2
-rw-r--r--  include/uapi/drm/vmwgfx_drm.h            |  11
9 files changed, 511 insertions(+), 161 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 86178796de6c..c706ad30411b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -51,6 +51,7 @@ struct vmw_cmdbuf_context { | |||
51 | struct list_head hw_submitted; | 51 | struct list_head hw_submitted; |
52 | struct list_head preempted; | 52 | struct list_head preempted; |
53 | unsigned num_hw_submitted; | 53 | unsigned num_hw_submitted; |
54 | bool block_submission; | ||
54 | }; | 55 | }; |
55 | 56 | ||
56 | /** | 57 | /** |
@@ -60,6 +61,9 @@ struct vmw_cmdbuf_context { | |||
60 | * kernel command submissions, @cur. | 61 | * kernel command submissions, @cur. |
61 | * @space_mutex: Mutex to protect against starvation when we allocate | 62 | * @space_mutex: Mutex to protect against starvation when we allocate |
62 | * main pool buffer space. | 63 | * main pool buffer space. |
64 | * @error_mutex: Mutex to serialize the work queue error handling. | ||
65 | * Note this is not needed if the same workqueue handler | ||
66 | * can't race with itself... | ||
63 | * @work: A struct work_struct implementing command buffer error handling. | 67 | * @work: A struct work_struct implementing command buffer error handling. |
64 | * Immutable. | 68 | * Immutable. |
65 | * @dev_priv: Pointer to the device private struct. Immutable. | 69 | * @dev_priv: Pointer to the device private struct. Immutable. |
@@ -85,7 +89,6 @@ struct vmw_cmdbuf_context { | |||
85 | * Internal protection. | 89 | * Internal protection. |
86 | * @dheaders: Pool of DMA memory for device command buffer headers with trailing | 90 | * @dheaders: Pool of DMA memory for device command buffer headers with trailing |
87 | * space for inline data. Internal protection. | 91 | * space for inline data. Internal protection. |
88 | * @tasklet: Tasklet struct for irq processing. Immutable. | ||
89 | * @alloc_queue: Wait queue for processes waiting to allocate command buffer | 92 | * @alloc_queue: Wait queue for processes waiting to allocate command buffer |
90 | * space. | 93 | * space. |
91 | * @idle_queue: Wait queue for processes waiting for command buffer idle. | 94 | * @idle_queue: Wait queue for processes waiting for command buffer idle. |
@@ -102,6 +105,7 @@ struct vmw_cmdbuf_context { | |||
102 | struct vmw_cmdbuf_man { | 105 | struct vmw_cmdbuf_man { |
103 | struct mutex cur_mutex; | 106 | struct mutex cur_mutex; |
104 | struct mutex space_mutex; | 107 | struct mutex space_mutex; |
108 | struct mutex error_mutex; | ||
105 | struct work_struct work; | 109 | struct work_struct work; |
106 | struct vmw_private *dev_priv; | 110 | struct vmw_private *dev_priv; |
107 | struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX]; | 111 | struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX]; |
@@ -117,7 +121,6 @@ struct vmw_cmdbuf_man { | |||
117 | spinlock_t lock; | 121 | spinlock_t lock; |
118 | struct dma_pool *headers; | 122 | struct dma_pool *headers; |
119 | struct dma_pool *dheaders; | 123 | struct dma_pool *dheaders; |
120 | struct tasklet_struct tasklet; | ||
121 | wait_queue_head_t alloc_queue; | 124 | wait_queue_head_t alloc_queue; |
122 | wait_queue_head_t idle_queue; | 125 | wait_queue_head_t idle_queue; |
123 | bool irq_on; | 126 | bool irq_on; |
@@ -181,12 +184,13 @@ struct vmw_cmdbuf_alloc_info { | |||
181 | }; | 184 | }; |
182 | 185 | ||
183 | /* Loop over each context in the command buffer manager. */ | 186 | /* Loop over each context in the command buffer manager. */ |
184 | #define for_each_cmdbuf_ctx(_man, _i, _ctx) \ | 187 | #define for_each_cmdbuf_ctx(_man, _i, _ctx) \ |
185 | for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \ | 188 | for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \ |
186 | ++(_i), ++(_ctx)) | 189 | ++(_i), ++(_ctx)) |
187 | 190 | ||
188 | static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable); | 191 | static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context, |
189 | 192 | bool enable); | |
193 | static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context); | ||
190 | 194 | ||
191 | /** | 195 | /** |
192 | * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex. | 196 | * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex. |
@@ -278,9 +282,9 @@ void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header) | |||
278 | vmw_cmdbuf_header_inline_free(header); | 282 | vmw_cmdbuf_header_inline_free(header); |
279 | return; | 283 | return; |
280 | } | 284 | } |
281 | spin_lock_bh(&man->lock); | 285 | spin_lock(&man->lock); |
282 | __vmw_cmdbuf_header_free(header); | 286 | __vmw_cmdbuf_header_free(header); |
283 | spin_unlock_bh(&man->lock); | 287 | spin_unlock(&man->lock); |
284 | } | 288 | } |
285 | 289 | ||
286 | 290 | ||
@@ -331,7 +335,8 @@ static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man, | |||
331 | struct vmw_cmdbuf_context *ctx) | 335 | struct vmw_cmdbuf_context *ctx) |
332 | { | 336 | { |
333 | while (ctx->num_hw_submitted < man->max_hw_submitted && | 337 | while (ctx->num_hw_submitted < man->max_hw_submitted && |
334 | !list_empty(&ctx->submitted)) { | 338 | !list_empty(&ctx->submitted) && |
339 | !ctx->block_submission) { | ||
335 | struct vmw_cmdbuf_header *entry; | 340 | struct vmw_cmdbuf_header *entry; |
336 | SVGACBStatus status; | 341 | SVGACBStatus status; |
337 | 342 | ||
@@ -386,12 +391,17 @@ static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man, | |||
386 | __vmw_cmdbuf_header_free(entry); | 391 | __vmw_cmdbuf_header_free(entry); |
387 | break; | 392 | break; |
388 | case SVGA_CB_STATUS_COMMAND_ERROR: | 393 | case SVGA_CB_STATUS_COMMAND_ERROR: |
389 | case SVGA_CB_STATUS_CB_HEADER_ERROR: | 394 | entry->cb_header->status = SVGA_CB_STATUS_NONE; |
390 | list_add_tail(&entry->list, &man->error); | 395 | list_add_tail(&entry->list, &man->error); |
391 | schedule_work(&man->work); | 396 | schedule_work(&man->work); |
392 | break; | 397 | break; |
393 | case SVGA_CB_STATUS_PREEMPTED: | 398 | case SVGA_CB_STATUS_PREEMPTED: |
394 | list_add(&entry->list, &ctx->preempted); | 399 | entry->cb_header->status = SVGA_CB_STATUS_NONE; |
400 | list_add_tail(&entry->list, &ctx->preempted); | ||
401 | break; | ||
402 | case SVGA_CB_STATUS_CB_HEADER_ERROR: | ||
403 | WARN_ONCE(true, "Command buffer header error.\n"); | ||
404 | __vmw_cmdbuf_header_free(entry); | ||
395 | break; | 405 | break; |
396 | default: | 406 | default: |
397 | WARN_ONCE(true, "Undefined command buffer status.\n"); | 407 | WARN_ONCE(true, "Undefined command buffer status.\n"); |
@@ -468,20 +478,17 @@ static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man, | |||
468 | } | 478 | } |
469 | 479 | ||
470 | /** | 480 | /** |
471 | * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt | 481 | * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt |
472 | * handler implemented as a tasklet. | 482 | * handler implemented as a threaded irq task. |
473 | * | 483 | * |
474 | * @data: Tasklet closure. A pointer to the command buffer manager cast to | 484 | * @man: Pointer to the command buffer manager. |
475 | * an unsigned long. | ||
476 | * | 485 | * |
477 | * The bottom half (tasklet) of the interrupt handler simply calls into the | 486 | * The bottom half of the interrupt handler simply calls into the |
478 | * command buffer processor to free finished buffers and submit any | 487 | * command buffer processor to free finished buffers and submit any |
479 | * queued buffers to hardware. | 488 | * queued buffers to hardware. |
480 | */ | 489 | */ |
481 | static void vmw_cmdbuf_man_tasklet(unsigned long data) | 490 | void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man) |
482 | { | 491 | { |
483 | struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data; | ||
484 | |||
485 | spin_lock(&man->lock); | 492 | spin_lock(&man->lock); |
486 | vmw_cmdbuf_man_process(man); | 493 | vmw_cmdbuf_man_process(man); |
487 | spin_unlock(&man->lock); | 494 | spin_unlock(&man->lock); |
@@ -502,24 +509,112 @@ static void vmw_cmdbuf_work_func(struct work_struct *work) | |||
502 | container_of(work, struct vmw_cmdbuf_man, work); | 509 | container_of(work, struct vmw_cmdbuf_man, work); |
503 | struct vmw_cmdbuf_header *entry, *next; | 510 | struct vmw_cmdbuf_header *entry, *next; |
504 | uint32_t dummy; | 511 | uint32_t dummy; |
505 | bool restart = false; | 512 | bool restart[SVGA_CB_CONTEXT_MAX]; |
513 | bool send_fence = false; | ||
514 | struct list_head restart_head[SVGA_CB_CONTEXT_MAX]; | ||
515 | int i; | ||
516 | struct vmw_cmdbuf_context *ctx; | ||
506 | 517 | ||
507 | spin_lock_bh(&man->lock); | 518 | for_each_cmdbuf_ctx(man, i, ctx) { |
519 | INIT_LIST_HEAD(&restart_head[i]); | ||
520 | restart[i] = false; | ||
521 | } | ||
522 | |||
523 | mutex_lock(&man->error_mutex); | ||
524 | spin_lock(&man->lock); | ||
508 | list_for_each_entry_safe(entry, next, &man->error, list) { | 525 | list_for_each_entry_safe(entry, next, &man->error, list) { |
509 | restart = true; | 526 | SVGACBHeader *cb_hdr = entry->cb_header; |
510 | DRM_ERROR("Command buffer error.\n"); | 527 | SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) |
528 | (entry->cmd + cb_hdr->errorOffset); | ||
529 | u32 error_cmd_size, new_start_offset; | ||
530 | const char *cmd_name; | ||
531 | |||
532 | list_del_init(&entry->list); | ||
533 | restart[entry->cb_context] = true; | ||
534 | |||
535 | if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) { | ||
536 | DRM_ERROR("Unknown command causing device error.\n"); | ||
537 | DRM_ERROR("Command buffer offset is %lu\n", | ||
538 | (unsigned long) cb_hdr->errorOffset); | ||
539 | __vmw_cmdbuf_header_free(entry); | ||
540 | send_fence = true; | ||
541 | continue; | ||
542 | } | ||
511 | 543 | ||
512 | list_del(&entry->list); | 544 | DRM_ERROR("Command \"%s\" causing device error.\n", cmd_name); |
513 | __vmw_cmdbuf_header_free(entry); | 545 | DRM_ERROR("Command buffer offset is %lu\n", |
514 | wake_up_all(&man->idle_queue); | 546 | (unsigned long) cb_hdr->errorOffset); |
547 | DRM_ERROR("Command size is %lu\n", | ||
548 | (unsigned long) error_cmd_size); | ||
549 | |||
550 | new_start_offset = cb_hdr->errorOffset + error_cmd_size; | ||
551 | |||
552 | if (new_start_offset >= cb_hdr->length) { | ||
553 | __vmw_cmdbuf_header_free(entry); | ||
554 | send_fence = true; | ||
555 | continue; | ||
556 | } | ||
557 | |||
558 | if (man->using_mob) | ||
559 | cb_hdr->ptr.mob.mobOffset += new_start_offset; | ||
560 | else | ||
561 | cb_hdr->ptr.pa += (u64) new_start_offset; | ||
562 | |||
563 | entry->cmd += new_start_offset; | ||
564 | cb_hdr->length -= new_start_offset; | ||
565 | cb_hdr->errorOffset = 0; | ||
566 | cb_hdr->offset = 0; | ||
567 | list_add_tail(&entry->list, &restart_head[entry->cb_context]); | ||
568 | man->ctx[entry->cb_context].block_submission = true; | ||
569 | } | ||
570 | spin_unlock(&man->lock); | ||
571 | |||
572 | /* Preempt all contexts with errors */ | ||
573 | for_each_cmdbuf_ctx(man, i, ctx) { | ||
574 | if (ctx->block_submission && vmw_cmdbuf_preempt(man, i)) | ||
575 | DRM_ERROR("Failed preempting command buffer " | ||
576 | "context %u.\n", i); | ||
577 | } | ||
578 | |||
579 | spin_lock(&man->lock); | ||
580 | for_each_cmdbuf_ctx(man, i, ctx) { | ||
581 | if (!ctx->block_submission) | ||
582 | continue; | ||
583 | |||
584 | /* Move preempted command buffers to the preempted queue. */ | ||
585 | vmw_cmdbuf_ctx_process(man, ctx, &dummy); | ||
586 | |||
587 | /* | ||
588 | * Add the preempted queue after the command buffer | ||
589 | * that caused an error. | ||
590 | */ | ||
591 | list_splice_init(&ctx->preempted, restart_head[i].prev); | ||
592 | |||
593 | /* | ||
594 | * Finally add all command buffers first in the submitted | ||
595 | * queue, to rerun them. | ||
596 | */ | ||
597 | list_splice_init(&restart_head[i], &ctx->submitted); | ||
598 | |||
599 | ctx->block_submission = false; | ||
515 | } | 600 | } |
516 | spin_unlock_bh(&man->lock); | ||
517 | 601 | ||
518 | if (restart && vmw_cmdbuf_startstop(man, true)) | 602 | vmw_cmdbuf_man_process(man); |
519 | DRM_ERROR("Failed restarting command buffer context 0.\n"); | 603 | spin_unlock(&man->lock); |
604 | |||
605 | for_each_cmdbuf_ctx(man, i, ctx) { | ||
606 | if (restart[i] && vmw_cmdbuf_startstop(man, i, true)) | ||
607 | DRM_ERROR("Failed restarting command buffer " | ||
608 | "context %u.\n", i); | ||
609 | } | ||
520 | 610 | ||
521 | /* Send a new fence in case one was removed */ | 611 | /* Send a new fence in case one was removed */ |
522 | vmw_fifo_send_fence(man->dev_priv, &dummy); | 612 | if (send_fence) { |
613 | vmw_fifo_send_fence(man->dev_priv, &dummy); | ||
614 | wake_up_all(&man->idle_queue); | ||
615 | } | ||
616 | |||
617 | mutex_unlock(&man->error_mutex); | ||
523 | } | 618 | } |
524 | 619 | ||
525 | /** | 620 | /** |
@@ -536,7 +631,7 @@ static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man, | |||
536 | bool idle = false; | 631 | bool idle = false; |
537 | int i; | 632 | int i; |
538 | 633 | ||
539 | spin_lock_bh(&man->lock); | 634 | spin_lock(&man->lock); |
540 | vmw_cmdbuf_man_process(man); | 635 | vmw_cmdbuf_man_process(man); |
541 | for_each_cmdbuf_ctx(man, i, ctx) { | 636 | for_each_cmdbuf_ctx(man, i, ctx) { |
542 | if (!list_empty(&ctx->submitted) || | 637 | if (!list_empty(&ctx->submitted) || |
@@ -548,7 +643,7 @@ static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man, | |||
548 | idle = list_empty(&man->error); | 643 | idle = list_empty(&man->error); |
549 | 644 | ||
550 | out_unlock: | 645 | out_unlock: |
551 | spin_unlock_bh(&man->lock); | 646 | spin_unlock(&man->lock); |
552 | 647 | ||
553 | return idle; | 648 | return idle; |
554 | } | 649 | } |
@@ -571,7 +666,7 @@ static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man) | |||
571 | if (!cur) | 666 | if (!cur) |
572 | return; | 667 | return; |
573 | 668 | ||
574 | spin_lock_bh(&man->lock); | 669 | spin_lock(&man->lock); |
575 | if (man->cur_pos == 0) { | 670 | if (man->cur_pos == 0) { |
576 | __vmw_cmdbuf_header_free(cur); | 671 | __vmw_cmdbuf_header_free(cur); |
577 | goto out_unlock; | 672 | goto out_unlock; |
@@ -580,7 +675,7 @@ static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man) | |||
580 | man->cur->cb_header->length = man->cur_pos; | 675 | man->cur->cb_header->length = man->cur_pos; |
581 | vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0); | 676 | vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0); |
582 | out_unlock: | 677 | out_unlock: |
583 | spin_unlock_bh(&man->lock); | 678 | spin_unlock(&man->lock); |
584 | man->cur = NULL; | 679 | man->cur = NULL; |
585 | man->cur_pos = 0; | 680 | man->cur_pos = 0; |
586 | } | 681 | } |
@@ -673,14 +768,14 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man, | |||
673 | return true; | 768 | return true; |
674 | 769 | ||
675 | memset(info->node, 0, sizeof(*info->node)); | 770 | memset(info->node, 0, sizeof(*info->node)); |
676 | spin_lock_bh(&man->lock); | 771 | spin_lock(&man->lock); |
677 | ret = drm_mm_insert_node(&man->mm, info->node, info->page_size); | 772 | ret = drm_mm_insert_node(&man->mm, info->node, info->page_size); |
678 | if (ret) { | 773 | if (ret) { |
679 | vmw_cmdbuf_man_process(man); | 774 | vmw_cmdbuf_man_process(man); |
680 | ret = drm_mm_insert_node(&man->mm, info->node, info->page_size); | 775 | ret = drm_mm_insert_node(&man->mm, info->node, info->page_size); |
681 | } | 776 | } |
682 | 777 | ||
683 | spin_unlock_bh(&man->lock); | 778 | spin_unlock(&man->lock); |
684 | info->done = !ret; | 779 | info->done = !ret; |
685 | 780 | ||
686 | return info->done; | 781 | return info->done; |
@@ -801,9 +896,9 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man, | |||
801 | return 0; | 896 | return 0; |
802 | 897 | ||
803 | out_no_cb_header: | 898 | out_no_cb_header: |
804 | spin_lock_bh(&man->lock); | 899 | spin_lock(&man->lock); |
805 | drm_mm_remove_node(&header->node); | 900 | drm_mm_remove_node(&header->node); |
806 | spin_unlock_bh(&man->lock); | 901 | spin_unlock(&man->lock); |
807 | 902 | ||
808 | return ret; | 903 | return ret; |
809 | } | 904 | } |
@@ -1023,18 +1118,6 @@ void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size, | |||
1023 | vmw_cmdbuf_cur_unlock(man); | 1118 | vmw_cmdbuf_cur_unlock(man); |
1024 | } | 1119 | } |
1025 | 1120 | ||
1026 | /** | ||
1027 | * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half. | ||
1028 | * | ||
1029 | * @man: The command buffer manager. | ||
1030 | */ | ||
1031 | void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man) | ||
1032 | { | ||
1033 | if (!man) | ||
1034 | return; | ||
1035 | |||
1036 | tasklet_schedule(&man->tasklet); | ||
1037 | } | ||
1038 | 1121 | ||
1039 | /** | 1122 | /** |
1040 | * vmw_cmdbuf_send_device_command - Send a command through the device context. | 1123 | * vmw_cmdbuf_send_device_command - Send a command through the device context. |
@@ -1059,9 +1142,9 @@ static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man, | |||
1059 | memcpy(cmd, command, size); | 1142 | memcpy(cmd, command, size); |
1060 | header->cb_header->length = size; | 1143 | header->cb_header->length = size; |
1061 | header->cb_context = SVGA_CB_CONTEXT_DEVICE; | 1144 | header->cb_context = SVGA_CB_CONTEXT_DEVICE; |
1062 | spin_lock_bh(&man->lock); | 1145 | spin_lock(&man->lock); |
1063 | status = vmw_cmdbuf_header_submit(header); | 1146 | status = vmw_cmdbuf_header_submit(header); |
1064 | spin_unlock_bh(&man->lock); | 1147 | spin_unlock(&man->lock); |
1065 | vmw_cmdbuf_header_free(header); | 1148 | vmw_cmdbuf_header_free(header); |
1066 | 1149 | ||
1067 | if (status != SVGA_CB_STATUS_COMPLETED) { | 1150 | if (status != SVGA_CB_STATUS_COMPLETED) { |
@@ -1074,6 +1157,29 @@ static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man, | |||
1074 | } | 1157 | } |
1075 | 1158 | ||
1076 | /** | 1159 | /** |
1160 | * vmw_cmdbuf_preempt - Send a preempt command through the device | ||
1161 | * context. | ||
1162 | * | ||
1163 | * @man: The command buffer manager. | ||
1164 | * | ||
1165 | * Synchronously sends a preempt command. | ||
1166 | */ | ||
1167 | static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context) | ||
1168 | { | ||
1169 | struct { | ||
1170 | uint32 id; | ||
1171 | SVGADCCmdPreempt body; | ||
1172 | } __packed cmd; | ||
1173 | |||
1174 | cmd.id = SVGA_DC_CMD_PREEMPT; | ||
1175 | cmd.body.context = SVGA_CB_CONTEXT_0 + context; | ||
1176 | cmd.body.ignoreIDZero = 0; | ||
1177 | |||
1178 | return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd)); | ||
1179 | } | ||
1180 | |||
1181 | |||
1182 | /** | ||
1077 | * vmw_cmdbuf_startstop - Send a start / stop command through the device | 1183 | * vmw_cmdbuf_startstop - Send a start / stop command through the device |
1078 | * context. | 1184 | * context. |
1079 | * | 1185 | * |
@@ -1082,7 +1188,7 @@ static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man, | |||
1082 | * | 1188 | * |
1083 | * Synchronously sends a device start / stop context command. | 1189 | * Synchronously sends a device start / stop context command. |
1084 | */ | 1190 | */ |
1085 | static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, | 1191 | static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context, |
1086 | bool enable) | 1192 | bool enable) |
1087 | { | 1193 | { |
1088 | struct { | 1194 | struct { |
@@ -1092,7 +1198,7 @@ static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, | |||
1092 | 1198 | ||
1093 | cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT; | 1199 | cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT; |
1094 | cmd.body.enable = (enable) ? 1 : 0; | 1200 | cmd.body.enable = (enable) ? 1 : 0; |
1095 | cmd.body.context = SVGA_CB_CONTEXT_0; | 1201 | cmd.body.context = SVGA_CB_CONTEXT_0 + context; |
1096 | 1202 | ||
1097 | return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd)); | 1203 | return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd)); |
1098 | } | 1204 | } |
@@ -1191,7 +1297,7 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv) | |||
1191 | { | 1297 | { |
1192 | struct vmw_cmdbuf_man *man; | 1298 | struct vmw_cmdbuf_man *man; |
1193 | struct vmw_cmdbuf_context *ctx; | 1299 | struct vmw_cmdbuf_context *ctx; |
1194 | int i; | 1300 | unsigned int i; |
1195 | int ret; | 1301 | int ret; |
1196 | 1302 | ||
1197 | if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS)) | 1303 | if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS)) |
@@ -1226,8 +1332,7 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv) | |||
1226 | spin_lock_init(&man->lock); | 1332 | spin_lock_init(&man->lock); |
1227 | mutex_init(&man->cur_mutex); | 1333 | mutex_init(&man->cur_mutex); |
1228 | mutex_init(&man->space_mutex); | 1334 | mutex_init(&man->space_mutex); |
1229 | tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet, | 1335 | mutex_init(&man->error_mutex); |
1230 | (unsigned long) man); | ||
1231 | man->default_size = VMW_CMDBUF_INLINE_SIZE; | 1336 | man->default_size = VMW_CMDBUF_INLINE_SIZE; |
1232 | init_waitqueue_head(&man->alloc_queue); | 1337 | init_waitqueue_head(&man->alloc_queue); |
1233 | init_waitqueue_head(&man->idle_queue); | 1338 | init_waitqueue_head(&man->idle_queue); |
@@ -1236,11 +1341,14 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv) | |||
1236 | INIT_WORK(&man->work, &vmw_cmdbuf_work_func); | 1341 | INIT_WORK(&man->work, &vmw_cmdbuf_work_func); |
1237 | vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR, | 1342 | vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR, |
1238 | &dev_priv->error_waiters); | 1343 | &dev_priv->error_waiters); |
1239 | ret = vmw_cmdbuf_startstop(man, true); | 1344 | for_each_cmdbuf_ctx(man, i, ctx) { |
1240 | if (ret) { | 1345 | ret = vmw_cmdbuf_startstop(man, i, true); |
1241 | DRM_ERROR("Failed starting command buffer context 0.\n"); | 1346 | if (ret) { |
1242 | vmw_cmdbuf_man_destroy(man); | 1347 | DRM_ERROR("Failed starting command buffer " |
1243 | return ERR_PTR(ret); | 1348 | "context %u.\n", i); |
1349 | vmw_cmdbuf_man_destroy(man); | ||
1350 | return ERR_PTR(ret); | ||
1351 | } | ||
1244 | } | 1352 | } |
1245 | 1353 | ||
1246 | return man; | 1354 | return man; |
@@ -1290,18 +1398,24 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man) | |||
1290 | */ | 1398 | */ |
1291 | void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man) | 1399 | void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man) |
1292 | { | 1400 | { |
1401 | struct vmw_cmdbuf_context *ctx; | ||
1402 | unsigned int i; | ||
1403 | |||
1293 | WARN_ON_ONCE(man->has_pool); | 1404 | WARN_ON_ONCE(man->has_pool); |
1294 | (void) vmw_cmdbuf_idle(man, false, 10*HZ); | 1405 | (void) vmw_cmdbuf_idle(man, false, 10*HZ); |
1295 | if (vmw_cmdbuf_startstop(man, false)) | 1406 | |
1296 | DRM_ERROR("Failed stopping command buffer context 0.\n"); | 1407 | for_each_cmdbuf_ctx(man, i, ctx) |
1408 | if (vmw_cmdbuf_startstop(man, i, false)) | ||
1409 | DRM_ERROR("Failed stopping command buffer " | ||
1410 | "context %u.\n", i); | ||
1297 | 1411 | ||
1298 | vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR, | 1412 | vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR, |
1299 | &man->dev_priv->error_waiters); | 1413 | &man->dev_priv->error_waiters); |
1300 | tasklet_kill(&man->tasklet); | ||
1301 | (void) cancel_work_sync(&man->work); | 1414 | (void) cancel_work_sync(&man->work); |
1302 | dma_pool_destroy(man->dheaders); | 1415 | dma_pool_destroy(man->dheaders); |
1303 | dma_pool_destroy(man->headers); | 1416 | dma_pool_destroy(man->headers); |
1304 | mutex_destroy(&man->cur_mutex); | 1417 | mutex_destroy(&man->cur_mutex); |
1305 | mutex_destroy(&man->space_mutex); | 1418 | mutex_destroy(&man->space_mutex); |
1419 | mutex_destroy(&man->error_mutex); | ||
1306 | kfree(man); | 1420 | kfree(man); |
1307 | } | 1421 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 8be26509a9aa..e84fee3ec4f3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -36,7 +36,6 @@ | |||
36 | #include <drm/ttm/ttm_module.h> | 36 | #include <drm/ttm/ttm_module.h> |
37 | #include <linux/dma_remapping.h> | 37 | #include <linux/dma_remapping.h> |
38 | 38 | ||
39 | #define VMWGFX_DRIVER_NAME "vmwgfx" | ||
40 | #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" | 39 | #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" |
41 | #define VMWGFX_CHIP_SVGAII 0 | 40 | #define VMWGFX_CHIP_SVGAII 0 |
42 | #define VMW_FB_RESERVATION 0 | 41 | #define VMW_FB_RESERVATION 0 |
@@ -825,7 +824,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
825 | } | 824 | } |
826 | 825 | ||
827 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { | 826 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { |
828 | ret = drm_irq_install(dev, dev->pdev->irq); | 827 | ret = vmw_irq_install(dev, dev->pdev->irq); |
829 | if (ret != 0) { | 828 | if (ret != 0) { |
830 | DRM_ERROR("Failed installing irq: %d\n", ret); | 829 | DRM_ERROR("Failed installing irq: %d\n", ret); |
831 | goto out_no_irq; | 830 | goto out_no_irq; |
@@ -937,7 +936,7 @@ out_no_bdev: | |||
937 | vmw_fence_manager_takedown(dev_priv->fman); | 936 | vmw_fence_manager_takedown(dev_priv->fman); |
938 | out_no_fman: | 937 | out_no_fman: |
939 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | 938 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) |
940 | drm_irq_uninstall(dev_priv->dev); | 939 | vmw_irq_uninstall(dev_priv->dev); |
941 | out_no_irq: | 940 | out_no_irq: |
942 | if (dev_priv->stealth) | 941 | if (dev_priv->stealth) |
943 | pci_release_region(dev->pdev, 2); | 942 | pci_release_region(dev->pdev, 2); |
@@ -990,7 +989,7 @@ static void vmw_driver_unload(struct drm_device *dev) | |||
990 | vmw_release_device_late(dev_priv); | 989 | vmw_release_device_late(dev_priv); |
991 | vmw_fence_manager_takedown(dev_priv->fman); | 990 | vmw_fence_manager_takedown(dev_priv->fman); |
992 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | 991 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) |
993 | drm_irq_uninstall(dev_priv->dev); | 992 | vmw_irq_uninstall(dev_priv->dev); |
994 | if (dev_priv->stealth) | 993 | if (dev_priv->stealth) |
995 | pci_release_region(dev->pdev, 2); | 994 | pci_release_region(dev->pdev, 2); |
996 | else | 995 | else |
@@ -1516,10 +1515,6 @@ static struct drm_driver driver = { | |||
1516 | .load = vmw_driver_load, | 1515 | .load = vmw_driver_load, |
1517 | .unload = vmw_driver_unload, | 1516 | .unload = vmw_driver_unload, |
1518 | .lastclose = vmw_lastclose, | 1517 | .lastclose = vmw_lastclose, |
1519 | .irq_preinstall = vmw_irq_preinstall, | ||
1520 | .irq_postinstall = vmw_irq_postinstall, | ||
1521 | .irq_uninstall = vmw_irq_uninstall, | ||
1522 | .irq_handler = vmw_irq_handler, | ||
1523 | .get_vblank_counter = vmw_get_vblank_counter, | 1518 | .get_vblank_counter = vmw_get_vblank_counter, |
1524 | .enable_vblank = vmw_enable_vblank, | 1519 | .enable_vblank = vmw_enable_vblank, |
1525 | .disable_vblank = vmw_disable_vblank, | 1520 | .disable_vblank = vmw_disable_vblank, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 4b948fba9eec..7e5f30e234b1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,10 +40,12 @@ | |||
40 | #include <drm/ttm/ttm_execbuf_util.h> | 40 | #include <drm/ttm/ttm_execbuf_util.h> |
41 | #include <drm/ttm/ttm_module.h> | 41 | #include <drm/ttm/ttm_module.h> |
42 | #include "vmwgfx_fence.h" | 42 | #include "vmwgfx_fence.h" |
43 | #include <linux/sync_file.h> | ||
43 | 44 | ||
44 | #define VMWGFX_DRIVER_DATE "20170607" | 45 | #define VMWGFX_DRIVER_NAME "vmwgfx" |
46 | #define VMWGFX_DRIVER_DATE "20170612" | ||
45 | #define VMWGFX_DRIVER_MAJOR 2 | 47 | #define VMWGFX_DRIVER_MAJOR 2 |
46 | #define VMWGFX_DRIVER_MINOR 13 | 48 | #define VMWGFX_DRIVER_MINOR 14 |
47 | #define VMWGFX_DRIVER_PATCHLEVEL 0 | 49 | #define VMWGFX_DRIVER_PATCHLEVEL 0 |
48 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 | 50 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 |
49 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) | 51 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) |
@@ -351,6 +353,12 @@ struct vmw_otable_batch { | |||
351 | struct ttm_buffer_object *otable_bo; | 353 | struct ttm_buffer_object *otable_bo; |
352 | }; | 354 | }; |
353 | 355 | ||
356 | enum { | ||
357 | VMW_IRQTHREAD_FENCE, | ||
358 | VMW_IRQTHREAD_CMDBUF, | ||
359 | VMW_IRQTHREAD_MAX | ||
360 | }; | ||
361 | |||
354 | struct vmw_private { | 362 | struct vmw_private { |
355 | struct ttm_bo_device bdev; | 363 | struct ttm_bo_device bdev; |
356 | struct ttm_bo_global_ref bo_global_ref; | 364 | struct ttm_bo_global_ref bo_global_ref; |
@@ -529,6 +537,7 @@ struct vmw_private { | |||
529 | struct vmw_otable_batch otable_batch; | 537 | struct vmw_otable_batch otable_batch; |
530 | 538 | ||
531 | struct vmw_cmdbuf_man *cman; | 539 | struct vmw_cmdbuf_man *cman; |
540 | DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX); | ||
532 | }; | 541 | }; |
533 | 542 | ||
534 | static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) | 543 | static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) |
@@ -561,24 +570,21 @@ static inline struct vmw_master *vmw_master(struct drm_master *master) | |||
561 | static inline void vmw_write(struct vmw_private *dev_priv, | 570 | static inline void vmw_write(struct vmw_private *dev_priv, |
562 | unsigned int offset, uint32_t value) | 571 | unsigned int offset, uint32_t value) |
563 | { | 572 | { |
564 | unsigned long irq_flags; | 573 | spin_lock(&dev_priv->hw_lock); |
565 | |||
566 | spin_lock_irqsave(&dev_priv->hw_lock, irq_flags); | ||
567 | outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); | 574 | outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); |
568 | outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT); | 575 | outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT); |
569 | spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags); | 576 | spin_unlock(&dev_priv->hw_lock); |
570 | } | 577 | } |
571 | 578 | ||
572 | static inline uint32_t vmw_read(struct vmw_private *dev_priv, | 579 | static inline uint32_t vmw_read(struct vmw_private *dev_priv, |
573 | unsigned int offset) | 580 | unsigned int offset) |
574 | { | 581 | { |
575 | unsigned long irq_flags; | ||
576 | u32 val; | 582 | u32 val; |
577 | 583 | ||
578 | spin_lock_irqsave(&dev_priv->hw_lock, irq_flags); | 584 | spin_lock(&dev_priv->hw_lock); |
579 | outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); | 585 | outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); |
580 | val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT); | 586 | val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT); |
581 | spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags); | 587 | spin_unlock(&dev_priv->hw_lock); |
582 | 588 | ||
583 | return val; | 589 | return val; |
584 | } | 590 | } |
@@ -821,7 +827,8 @@ extern int vmw_execbuf_process(struct drm_file *file_priv, | |||
821 | uint32_t dx_context_handle, | 827 | uint32_t dx_context_handle, |
822 | struct drm_vmw_fence_rep __user | 828 | struct drm_vmw_fence_rep __user |
823 | *user_fence_rep, | 829 | *user_fence_rep, |
824 | struct vmw_fence_obj **out_fence); | 830 | struct vmw_fence_obj **out_fence, |
831 | uint32_t flags); | ||
825 | extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, | 832 | extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, |
826 | struct vmw_fence_obj *fence); | 833 | struct vmw_fence_obj *fence); |
827 | extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv); | 834 | extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv); |
@@ -836,23 +843,23 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, | |||
836 | struct drm_vmw_fence_rep __user | 843 | struct drm_vmw_fence_rep __user |
837 | *user_fence_rep, | 844 | *user_fence_rep, |
838 | struct vmw_fence_obj *fence, | 845 | struct vmw_fence_obj *fence, |
839 | uint32_t fence_handle); | 846 | uint32_t fence_handle, |
847 | int32_t out_fence_fd, | ||
848 | struct sync_file *sync_file); | ||
840 | extern int vmw_validate_single_buffer(struct vmw_private *dev_priv, | 849 | extern int vmw_validate_single_buffer(struct vmw_private *dev_priv, |
841 | struct ttm_buffer_object *bo, | 850 | struct ttm_buffer_object *bo, |
842 | bool interruptible, | 851 | bool interruptible, |
843 | bool validate_as_mob); | 852 | bool validate_as_mob); |
844 | 853 | bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd); | |
845 | 854 | ||
846 | /** | 855 | /** |
847 | * IRQs and waiting - vmwgfx_irq.c | 856 | * IRQs and waiting - vmwgfx_irq.c |
848 | */ | 857 | */ |
849 | 858 | ||
850 | extern irqreturn_t vmw_irq_handler(int irq, void *arg); | ||
851 | extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy, | 859 | extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy, |
852 | uint32_t seqno, bool interruptible, | 860 | uint32_t seqno, bool interruptible, |
853 | unsigned long timeout); | 861 | unsigned long timeout); |
854 | extern void vmw_irq_preinstall(struct drm_device *dev); | 862 | extern int vmw_irq_install(struct drm_device *dev, int irq); |
855 | extern int vmw_irq_postinstall(struct drm_device *dev); | ||
856 | extern void vmw_irq_uninstall(struct drm_device *dev); | 863 | extern void vmw_irq_uninstall(struct drm_device *dev); |
857 | extern bool vmw_seqno_passed(struct vmw_private *dev_priv, | 864 | extern bool vmw_seqno_passed(struct vmw_private *dev_priv, |
858 | uint32_t seqno); | 865 | uint32_t seqno); |
@@ -1150,13 +1157,13 @@ extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size, | |||
1150 | extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size, | 1157 | extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size, |
1151 | struct vmw_cmdbuf_header *header, | 1158 | struct vmw_cmdbuf_header *header, |
1152 | bool flush); | 1159 | bool flush); |
1153 | extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man); | ||
1154 | extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man, | 1160 | extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man, |
1155 | size_t size, bool interruptible, | 1161 | size_t size, bool interruptible, |
1156 | struct vmw_cmdbuf_header **p_header); | 1162 | struct vmw_cmdbuf_header **p_header); |
1157 | extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header); | 1163 | extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header); |
1158 | extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man, | 1164 | extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man, |
1159 | bool interruptible); | 1165 | bool interruptible); |
1166 | extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man); | ||
1160 | 1167 | ||
1161 | 1168 | ||
1162 | /** | 1169 | /** |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 2cfb3c93f42a..21c62a34e558 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -24,6 +24,7 @@ | |||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
25 | * | 25 | * |
26 | **************************************************************************/ | 26 | **************************************************************************/ |
27 | #include <linux/sync_file.h> | ||
27 | 28 | ||
28 | #include "vmwgfx_drv.h" | 29 | #include "vmwgfx_drv.h" |
29 | #include "vmwgfx_reg.h" | 30 | #include "vmwgfx_reg.h" |
@@ -112,11 +113,12 @@ struct vmw_cmd_entry { | |||
112 | bool user_allow; | 113 | bool user_allow; |
113 | bool gb_disable; | 114 | bool gb_disable; |
114 | bool gb_enable; | 115 | bool gb_enable; |
116 | const char *cmd_name; | ||
115 | }; | 117 | }; |
116 | 118 | ||
117 | #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \ | 119 | #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \ |
118 | [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\ | 120 | [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\ |
119 | (_gb_disable), (_gb_enable)} | 121 | (_gb_disable), (_gb_enable), #_cmd} |
120 | 122 | ||
121 | static int vmw_resource_context_res_add(struct vmw_private *dev_priv, | 123 | static int vmw_resource_context_res_add(struct vmw_private *dev_priv, |
122 | struct vmw_sw_context *sw_context, | 124 | struct vmw_sw_context *sw_context, |
@@ -3302,6 +3304,8 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { | |||
3302 | true, false, true), | 3304 | true, false, true), |
3303 | VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok, | 3305 | VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok, |
3304 | true, false, true), | 3306 | true, false, true), |
3307 | VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok, | ||
3308 | true, false, true), | ||
3305 | VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid, | 3309 | VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid, |
3306 | false, false, true), | 3310 | false, false, true), |
3307 | VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid, | 3311 | VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid, |
@@ -3469,6 +3473,51 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { | |||
3469 | true, false, true), | 3473 | true, false, true), |
3470 | }; | 3474 | }; |
3471 | 3475 | ||
3476 | bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd) | ||
3477 | { | ||
3478 | u32 cmd_id = ((u32 *) buf)[0]; | ||
3479 | |||
3480 | if (cmd_id >= SVGA_CMD_MAX) { | ||
3481 | SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; | ||
3482 | const struct vmw_cmd_entry *entry; | ||
3483 | |||
3484 | *size = header->size + sizeof(SVGA3dCmdHeader); | ||
3485 | cmd_id = header->id; | ||
3486 | if (cmd_id >= SVGA_3D_CMD_MAX) | ||
3487 | return false; | ||
3488 | |||
3489 | cmd_id -= SVGA_3D_CMD_BASE; | ||
3490 | entry = &vmw_cmd_entries[cmd_id]; | ||
3491 | *cmd = entry->cmd_name; | ||
3492 | return true; | ||
3493 | } | ||
3494 | |||
3495 | switch (cmd_id) { | ||
3496 | case SVGA_CMD_UPDATE: | ||
3497 | *cmd = "SVGA_CMD_UPDATE"; | ||
3498 | *size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate); | ||
3499 | break; | ||
3500 | case SVGA_CMD_DEFINE_GMRFB: | ||
3501 | *cmd = "SVGA_CMD_DEFINE_GMRFB"; | ||
3502 | *size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB); | ||
3503 | break; | ||
3504 | case SVGA_CMD_BLIT_GMRFB_TO_SCREEN: | ||
3505 | *cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN"; | ||
3506 | *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen); | ||
3507 | break; | ||
3508 | case SVGA_CMD_BLIT_SCREEN_TO_GMRFB: | ||
3509 | *cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB"; | ||
3510 | *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen); | ||
3511 | break; | ||
3512 | default: | ||
3513 | *cmd = "UNKNOWN"; | ||
3514 | *size = 0; | ||
3515 | return false; | ||
3516 | } | ||
3517 | |||
3518 | return true; | ||
3519 | } | ||
3520 | |||
3472 | static int vmw_cmd_check(struct vmw_private *dev_priv, | 3521 | static int vmw_cmd_check(struct vmw_private *dev_priv, |
3473 | struct vmw_sw_context *sw_context, | 3522 | struct vmw_sw_context *sw_context, |
3474 | void *buf, uint32_t *size) | 3523 | void *buf, uint32_t *size) |
@@ -3781,6 +3830,8 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv, | |||
3781 | * which the information should be copied. | 3830 | * which the information should be copied. |
3782 | * @fence: Pointer to the fence object. | 3831 | * @fence: Pointer to the fence object. |
3783 | * @fence_handle: User-space fence handle. | 3832 | * @fence_handle: User-space fence handle. |
3833 | * @out_fence_fd: exported file descriptor for the fence. -1 if not used | ||
3834 | * @sync_file: Only used to clean up in case of an error in this function. | ||
3784 | * | 3835 | * |
3785 | * This function copies fence information to user-space. If copying fails, | 3836 | * This function copies fence information to user-space. If copying fails, |
3786 | * The user-space struct drm_vmw_fence_rep::error member is hopefully | 3837 | * The user-space struct drm_vmw_fence_rep::error member is hopefully |
@@ -3796,7 +3847,9 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, | |||
3796 | int ret, | 3847 | int ret, |
3797 | struct drm_vmw_fence_rep __user *user_fence_rep, | 3848 | struct drm_vmw_fence_rep __user *user_fence_rep, |
3798 | struct vmw_fence_obj *fence, | 3849 | struct vmw_fence_obj *fence, |
3799 | uint32_t fence_handle) | 3850 | uint32_t fence_handle, |
3851 | int32_t out_fence_fd, | ||
3852 | struct sync_file *sync_file) | ||
3800 | { | 3853 | { |
3801 | struct drm_vmw_fence_rep fence_rep; | 3854 | struct drm_vmw_fence_rep fence_rep; |
3802 | 3855 | ||
@@ -3806,6 +3859,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, | |||
3806 | memset(&fence_rep, 0, sizeof(fence_rep)); | 3859 | memset(&fence_rep, 0, sizeof(fence_rep)); |
3807 | 3860 | ||
3808 | fence_rep.error = ret; | 3861 | fence_rep.error = ret; |
3862 | fence_rep.fd = out_fence_fd; | ||
3809 | if (ret == 0) { | 3863 | if (ret == 0) { |
3810 | BUG_ON(fence == NULL); | 3864 | BUG_ON(fence == NULL); |
3811 | 3865 | ||
@@ -3828,6 +3882,14 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, | |||
3828 | * and unreference the handle. | 3882 | * and unreference the handle. |
3829 | */ | 3883 | */ |
3830 | if (unlikely(ret != 0) && (fence_rep.error == 0)) { | 3884 | if (unlikely(ret != 0) && (fence_rep.error == 0)) { |
3885 | if (sync_file) | ||
3886 | fput(sync_file->file); | ||
3887 | |||
3888 | if (fence_rep.fd != -1) { | ||
3889 | put_unused_fd(fence_rep.fd); | ||
3890 | fence_rep.fd = -1; | ||
3891 | } | ||
3892 | |||
3831 | ttm_ref_object_base_unref(vmw_fp->tfile, | 3893 | ttm_ref_object_base_unref(vmw_fp->tfile, |
3832 | fence_handle, TTM_REF_USAGE); | 3894 | fence_handle, TTM_REF_USAGE); |
3833 | DRM_ERROR("Fence copy error. Syncing.\n"); | 3895 | DRM_ERROR("Fence copy error. Syncing.\n"); |
@@ -4003,7 +4065,8 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
4003 | uint64_t throttle_us, | 4065 | uint64_t throttle_us, |
4004 | uint32_t dx_context_handle, | 4066 | uint32_t dx_context_handle, |
4005 | struct drm_vmw_fence_rep __user *user_fence_rep, | 4067 | struct drm_vmw_fence_rep __user *user_fence_rep, |
4006 | struct vmw_fence_obj **out_fence) | 4068 | struct vmw_fence_obj **out_fence, |
4069 | uint32_t flags) | ||
4007 | { | 4070 | { |
4008 | struct vmw_sw_context *sw_context = &dev_priv->ctx; | 4071 | struct vmw_sw_context *sw_context = &dev_priv->ctx; |
4009 | struct vmw_fence_obj *fence = NULL; | 4072 | struct vmw_fence_obj *fence = NULL; |
@@ -4013,20 +4076,33 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
4013 | struct ww_acquire_ctx ticket; | 4076 | struct ww_acquire_ctx ticket; |
4014 | uint32_t handle; | 4077 | uint32_t handle; |
4015 | int ret; | 4078 | int ret; |
4079 | int32_t out_fence_fd = -1; | ||
4080 | struct sync_file *sync_file = NULL; | ||
4081 | |||
4082 | |||
4083 | if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) { | ||
4084 | out_fence_fd = get_unused_fd_flags(O_CLOEXEC); | ||
4085 | if (out_fence_fd < 0) { | ||
4086 | DRM_ERROR("Failed to get a fence file descriptor.\n"); | ||
4087 | return out_fence_fd; | ||
4088 | } | ||
4089 | } | ||
4016 | 4090 | ||
4017 | if (throttle_us) { | 4091 | if (throttle_us) { |
4018 | ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue, | 4092 | ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue, |
4019 | throttle_us); | 4093 | throttle_us); |
4020 | 4094 | ||
4021 | if (ret) | 4095 | if (ret) |
4022 | return ret; | 4096 | goto out_free_fence_fd; |
4023 | } | 4097 | } |
4024 | 4098 | ||
4025 | kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands, | 4099 | kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands, |
4026 | kernel_commands, command_size, | 4100 | kernel_commands, command_size, |
4027 | &header); | 4101 | &header); |
4028 | if (IS_ERR(kernel_commands)) | 4102 | if (IS_ERR(kernel_commands)) { |
4029 | return PTR_ERR(kernel_commands); | 4103 | ret = PTR_ERR(kernel_commands); |
4104 | goto out_free_fence_fd; | ||
4105 | } | ||
4030 | 4106 | ||
4031 | ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); | 4107 | ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); |
4032 | if (ret) { | 4108 | if (ret) { |
@@ -4162,8 +4238,32 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
4162 | __vmw_execbuf_release_pinned_bo(dev_priv, fence); | 4238 | __vmw_execbuf_release_pinned_bo(dev_priv, fence); |
4163 | 4239 | ||
4164 | vmw_clear_validations(sw_context); | 4240 | vmw_clear_validations(sw_context); |
4241 | |||
4242 | /* | ||
4243 | * If anything fails here, give up trying to export the fence | ||
4244 | * and do a sync since the user mode will not be able to sync | ||
4245 | * the fence itself. This ensures we are still functionally | ||
4246 | * correct. | ||
4247 | */ | ||
4248 | if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) { | ||
4249 | |||
4250 | sync_file = sync_file_create(&fence->base); | ||
4251 | if (!sync_file) { | ||
4252 | DRM_ERROR("Unable to create sync file for fence\n"); | ||
4253 | put_unused_fd(out_fence_fd); | ||
4254 | out_fence_fd = -1; | ||
4255 | |||
4256 | (void) vmw_fence_obj_wait(fence, false, false, | ||
4257 | VMW_FENCE_WAIT_TIMEOUT); | ||
4258 | } else { | ||
4259 | /* Link the fence with the FD created earlier */ | ||
4260 | fd_install(out_fence_fd, sync_file->file); | ||
4261 | } | ||
4262 | } | ||
4263 | |||
4165 | vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, | 4264 | vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, |
4166 | user_fence_rep, fence, handle); | 4265 | user_fence_rep, fence, handle, |
4266 | out_fence_fd, sync_file); | ||
4167 | 4267 | ||
4168 | /* Don't unreference when handing fence out */ | 4268 | /* Don't unreference when handing fence out */ |
4169 | if (unlikely(out_fence != NULL)) { | 4269 | if (unlikely(out_fence != NULL)) { |
@@ -4214,6 +4314,9 @@ out_unlock: | |||
4214 | out_free_header: | 4314 | out_free_header: |
4215 | if (header) | 4315 | if (header) |
4216 | vmw_cmdbuf_header_free(header); | 4316 | vmw_cmdbuf_header_free(header); |
4317 | out_free_fence_fd: | ||
4318 | if (out_fence_fd >= 0) | ||
4319 | put_unused_fd(out_fence_fd); | ||
4217 | 4320 | ||
4218 | return ret; | 4321 | return ret; |
4219 | } | 4322 | } |
@@ -4366,6 +4469,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data, | |||
4366 | static const size_t copy_offset[] = { | 4469 | static const size_t copy_offset[] = { |
4367 | offsetof(struct drm_vmw_execbuf_arg, context_handle), | 4470 | offsetof(struct drm_vmw_execbuf_arg, context_handle), |
4368 | sizeof(struct drm_vmw_execbuf_arg)}; | 4471 | sizeof(struct drm_vmw_execbuf_arg)}; |
4472 | struct dma_fence *in_fence = NULL; | ||
4369 | 4473 | ||
4370 | if (unlikely(size < copy_offset[0])) { | 4474 | if (unlikely(size < copy_offset[0])) { |
4371 | DRM_ERROR("Invalid command size, ioctl %d\n", | 4475 | DRM_ERROR("Invalid command size, ioctl %d\n", |
@@ -4401,15 +4505,25 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data, | |||
4401 | arg.context_handle = (uint32_t) -1; | 4505 | arg.context_handle = (uint32_t) -1; |
4402 | break; | 4506 | break; |
4403 | case 2: | 4507 | case 2: |
4404 | if (arg.pad64 != 0) { | ||
4405 | DRM_ERROR("Unused IOCTL data not set to zero.\n"); | ||
4406 | return -EINVAL; | ||
4407 | } | ||
4408 | break; | ||
4409 | default: | 4508 | default: |
4410 | break; | 4509 | break; |
4411 | } | 4510 | } |
4412 | 4511 | ||
4512 | |||
4513 | /* If imported a fence FD from elsewhere, then wait on it */ | ||
4514 | if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) { | ||
4515 | in_fence = sync_file_get_fence(arg.imported_fence_fd); | ||
4516 | |||
4517 | if (!in_fence) { | ||
4518 | DRM_ERROR("Cannot get imported fence\n"); | ||
4519 | return -EINVAL; | ||
4520 | } | ||
4521 | |||
4522 | ret = vmw_wait_dma_fence(dev_priv->fman, in_fence); | ||
4523 | if (ret) | ||
4524 | goto out; | ||
4525 | } | ||
4526 | |||
4413 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); | 4527 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); |
4414 | if (unlikely(ret != 0)) | 4528 | if (unlikely(ret != 0)) |
4415 | return ret; | 4529 | return ret; |
@@ -4419,12 +4533,16 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data, | |||
4419 | NULL, arg.command_size, arg.throttle_us, | 4533 | NULL, arg.command_size, arg.throttle_us, |
4420 | arg.context_handle, | 4534 | arg.context_handle, |
4421 | (void __user *)(unsigned long)arg.fence_rep, | 4535 | (void __user *)(unsigned long)arg.fence_rep, |
4422 | NULL); | 4536 | NULL, |
4537 | arg.flags); | ||
4423 | ttm_read_unlock(&dev_priv->reservation_sem); | 4538 | ttm_read_unlock(&dev_priv->reservation_sem); |
4424 | if (unlikely(ret != 0)) | 4539 | if (unlikely(ret != 0)) |
4425 | return ret; | 4540 | goto out; |
4426 | 4541 | ||
4427 | vmw_kms_cursor_post_execbuf(dev_priv); | 4542 | vmw_kms_cursor_post_execbuf(dev_priv); |
4428 | 4543 | ||
4429 | return 0; | 4544 | out: |
4545 | if (in_fence) | ||
4546 | dma_fence_put(in_fence); | ||
4547 | return ret; | ||
4430 | } | 4548 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index b8bc5bc7de7e..3bbad22b3748 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -114,12 +114,11 @@ static void vmw_fence_obj_destroy(struct dma_fence *f) | |||
114 | container_of(f, struct vmw_fence_obj, base); | 114 | container_of(f, struct vmw_fence_obj, base); |
115 | 115 | ||
116 | struct vmw_fence_manager *fman = fman_from_fence(fence); | 116 | struct vmw_fence_manager *fman = fman_from_fence(fence); |
117 | unsigned long irq_flags; | ||
118 | 117 | ||
119 | spin_lock_irqsave(&fman->lock, irq_flags); | 118 | spin_lock(&fman->lock); |
120 | list_del_init(&fence->head); | 119 | list_del_init(&fence->head); |
121 | --fman->num_fence_objects; | 120 | --fman->num_fence_objects; |
122 | spin_unlock_irqrestore(&fman->lock, irq_flags); | 121 | spin_unlock(&fman->lock); |
123 | fence->destroy(fence); | 122 | fence->destroy(fence); |
124 | } | 123 | } |
125 | 124 | ||
@@ -252,10 +251,10 @@ static void vmw_fence_work_func(struct work_struct *work) | |||
252 | INIT_LIST_HEAD(&list); | 251 | INIT_LIST_HEAD(&list); |
253 | mutex_lock(&fman->goal_irq_mutex); | 252 | mutex_lock(&fman->goal_irq_mutex); |
254 | 253 | ||
255 | spin_lock_irq(&fman->lock); | 254 | spin_lock(&fman->lock); |
256 | list_splice_init(&fman->cleanup_list, &list); | 255 | list_splice_init(&fman->cleanup_list, &list); |
257 | seqno_valid = fman->seqno_valid; | 256 | seqno_valid = fman->seqno_valid; |
258 | spin_unlock_irq(&fman->lock); | 257 | spin_unlock(&fman->lock); |
259 | 258 | ||
260 | if (!seqno_valid && fman->goal_irq_on) { | 259 | if (!seqno_valid && fman->goal_irq_on) { |
261 | fman->goal_irq_on = false; | 260 | fman->goal_irq_on = false; |
@@ -305,15 +304,14 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv) | |||
305 | 304 | ||
306 | void vmw_fence_manager_takedown(struct vmw_fence_manager *fman) | 305 | void vmw_fence_manager_takedown(struct vmw_fence_manager *fman) |
307 | { | 306 | { |
308 | unsigned long irq_flags; | ||
309 | bool lists_empty; | 307 | bool lists_empty; |
310 | 308 | ||
311 | (void) cancel_work_sync(&fman->work); | 309 | (void) cancel_work_sync(&fman->work); |
312 | 310 | ||
313 | spin_lock_irqsave(&fman->lock, irq_flags); | 311 | spin_lock(&fman->lock); |
314 | lists_empty = list_empty(&fman->fence_list) && | 312 | lists_empty = list_empty(&fman->fence_list) && |
315 | list_empty(&fman->cleanup_list); | 313 | list_empty(&fman->cleanup_list); |
316 | spin_unlock_irqrestore(&fman->lock, irq_flags); | 314 | spin_unlock(&fman->lock); |
317 | 315 | ||
318 | BUG_ON(!lists_empty); | 316 | BUG_ON(!lists_empty); |
319 | kfree(fman); | 317 | kfree(fman); |
@@ -323,7 +321,6 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman, | |||
323 | struct vmw_fence_obj *fence, u32 seqno, | 321 | struct vmw_fence_obj *fence, u32 seqno, |
324 | void (*destroy) (struct vmw_fence_obj *fence)) | 322 | void (*destroy) (struct vmw_fence_obj *fence)) |
325 | { | 323 | { |
326 | unsigned long irq_flags; | ||
327 | int ret = 0; | 324 | int ret = 0; |
328 | 325 | ||
329 | dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock, | 326 | dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock, |
@@ -331,7 +328,7 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman, | |||
331 | INIT_LIST_HEAD(&fence->seq_passed_actions); | 328 | INIT_LIST_HEAD(&fence->seq_passed_actions); |
332 | fence->destroy = destroy; | 329 | fence->destroy = destroy; |
333 | 330 | ||
334 | spin_lock_irqsave(&fman->lock, irq_flags); | 331 | spin_lock(&fman->lock); |
335 | if (unlikely(fman->fifo_down)) { | 332 | if (unlikely(fman->fifo_down)) { |
336 | ret = -EBUSY; | 333 | ret = -EBUSY; |
337 | goto out_unlock; | 334 | goto out_unlock; |
@@ -340,7 +337,7 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman, | |||
340 | ++fman->num_fence_objects; | 337 | ++fman->num_fence_objects; |
341 | 338 | ||
342 | out_unlock: | 339 | out_unlock: |
343 | spin_unlock_irqrestore(&fman->lock, irq_flags); | 340 | spin_unlock(&fman->lock); |
344 | return ret; | 341 | return ret; |
345 | 342 | ||
346 | } | 343 | } |
@@ -489,11 +486,9 @@ rerun: | |||
489 | 486 | ||
490 | void vmw_fences_update(struct vmw_fence_manager *fman) | 487 | void vmw_fences_update(struct vmw_fence_manager *fman) |
491 | { | 488 | { |
492 | unsigned long irq_flags; | 489 | spin_lock(&fman->lock); |
493 | |||
494 | spin_lock_irqsave(&fman->lock, irq_flags); | ||
495 | __vmw_fences_update(fman); | 490 | __vmw_fences_update(fman); |
496 | spin_unlock_irqrestore(&fman->lock, irq_flags); | 491 | spin_unlock(&fman->lock); |
497 | } | 492 | } |
498 | 493 | ||
499 | bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence) | 494 | bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence) |
@@ -650,6 +645,51 @@ out_no_object: | |||
650 | 645 | ||
651 | 646 | ||
652 | /** | 647 | /** |
648 | * vmw_wait_dma_fence - Wait for a dma fence | ||
649 | * | ||
650 | * @fman: pointer to a fence manager | ||
651 | * @fence: DMA fence to wait on | ||
652 | * | ||
653 | * This function handles the case when the fence is actually a fence | ||
654 | * array. If that's the case, it'll wait on each of the child fence | ||
655 | */ | ||
656 | int vmw_wait_dma_fence(struct vmw_fence_manager *fman, | ||
657 | struct dma_fence *fence) | ||
658 | { | ||
659 | struct dma_fence_array *fence_array; | ||
660 | int ret = 0; | ||
661 | int i; | ||
662 | |||
663 | |||
664 | if (dma_fence_is_signaled(fence)) | ||
665 | return 0; | ||
666 | |||
667 | if (!dma_fence_is_array(fence)) | ||
668 | return dma_fence_wait(fence, true); | ||
669 | |||
670 | /* From i915: Note that if the fence-array was created in | ||
671 | * signal-on-any mode, we should *not* decompose it into its individual | ||
672 | * fences. However, we don't currently store which mode the fence-array | ||
673 | * is operating in. Fortunately, the only user of signal-on-any is | ||
674 | * private to amdgpu and we should not see any incoming fence-array | ||
675 | * from sync-file being in signal-on-any mode. | ||
676 | */ | ||
677 | |||
678 | fence_array = to_dma_fence_array(fence); | ||
679 | for (i = 0; i < fence_array->num_fences; i++) { | ||
680 | struct dma_fence *child = fence_array->fences[i]; | ||
681 | |||
682 | ret = dma_fence_wait(child, true); | ||
683 | |||
684 | if (ret < 0) | ||
685 | return ret; | ||
686 | } | ||
687 | |||
688 | return 0; | ||
689 | } | ||
690 | |||
691 | |||
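A fence imported through a sync-file fd can be a dma_fence_array when user space has merged several sync files, which is why the new helper decomposes arrays instead of issuing a single wait. A rough sketch of how a caller might feed it a fence taken from an imported fd; the wrapper below is hypothetical and only illustrates the reference handling (the real call site sits in the execbuf path, outside this file's hunks):

#include <linux/sync_file.h>
#include <linux/dma-fence.h>

/* Hypothetical helper, for illustration only. */
static int demo_wait_on_imported_fd(struct vmw_fence_manager *fman, int fd)
{
	struct dma_fence *in_fence;
	int ret;

	in_fence = sync_file_get_fence(fd);	/* grabs a reference, NULL on a bad fd */
	if (!in_fence)
		return -EINVAL;

	/* Handles both plain fences and fence arrays from merged sync files. */
	ret = vmw_wait_dma_fence(fman, in_fence);

	dma_fence_put(in_fence);
	return ret;
}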
692 | /** | ||
653 | * vmw_fence_fifo_down - signal all unsignaled fence objects. | 693 | * vmw_fence_fifo_down - signal all unsignaled fence objects. |
654 | */ | 694 | */ |
655 | 695 | ||
@@ -663,14 +703,14 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman) | |||
663 | * restart when we've released the fman->lock. | 703 | * restart when we've released the fman->lock. |
664 | */ | 704 | */ |
665 | 705 | ||
666 | spin_lock_irq(&fman->lock); | 706 | spin_lock(&fman->lock); |
667 | fman->fifo_down = true; | 707 | fman->fifo_down = true; |
668 | while (!list_empty(&fman->fence_list)) { | 708 | while (!list_empty(&fman->fence_list)) { |
669 | struct vmw_fence_obj *fence = | 709 | struct vmw_fence_obj *fence = |
670 | list_entry(fman->fence_list.prev, struct vmw_fence_obj, | 710 | list_entry(fman->fence_list.prev, struct vmw_fence_obj, |
671 | head); | 711 | head); |
672 | dma_fence_get(&fence->base); | 712 | dma_fence_get(&fence->base); |
673 | spin_unlock_irq(&fman->lock); | 713 | spin_unlock(&fman->lock); |
674 | 714 | ||
675 | ret = vmw_fence_obj_wait(fence, false, false, | 715 | ret = vmw_fence_obj_wait(fence, false, false, |
676 | VMW_FENCE_WAIT_TIMEOUT); | 716 | VMW_FENCE_WAIT_TIMEOUT); |
@@ -686,18 +726,16 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman) | |||
686 | 726 | ||
687 | BUG_ON(!list_empty(&fence->head)); | 727 | BUG_ON(!list_empty(&fence->head)); |
688 | dma_fence_put(&fence->base); | 728 | dma_fence_put(&fence->base); |
689 | spin_lock_irq(&fman->lock); | 729 | spin_lock(&fman->lock); |
690 | } | 730 | } |
691 | spin_unlock_irq(&fman->lock); | 731 | spin_unlock(&fman->lock); |
692 | } | 732 | } |
693 | 733 | ||
694 | void vmw_fence_fifo_up(struct vmw_fence_manager *fman) | 734 | void vmw_fence_fifo_up(struct vmw_fence_manager *fman) |
695 | { | 735 | { |
696 | unsigned long irq_flags; | 736 | spin_lock(&fman->lock); |
697 | |||
698 | spin_lock_irqsave(&fman->lock, irq_flags); | ||
699 | fman->fifo_down = false; | 737 | fman->fifo_down = false; |
700 | spin_unlock_irqrestore(&fman->lock, irq_flags); | 738 | spin_unlock(&fman->lock); |
701 | } | 739 | } |
702 | 740 | ||
703 | 741 | ||
@@ -812,9 +850,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data, | |||
812 | arg->signaled = vmw_fence_obj_signaled(fence); | 850 | arg->signaled = vmw_fence_obj_signaled(fence); |
813 | 851 | ||
814 | arg->signaled_flags = arg->flags; | 852 | arg->signaled_flags = arg->flags; |
815 | spin_lock_irq(&fman->lock); | 853 | spin_lock(&fman->lock); |
816 | arg->passed_seqno = dev_priv->last_read_seqno; | 854 | arg->passed_seqno = dev_priv->last_read_seqno; |
817 | spin_unlock_irq(&fman->lock); | 855 | spin_unlock(&fman->lock); |
818 | 856 | ||
819 | ttm_base_object_unref(&base); | 857 | ttm_base_object_unref(&base); |
820 | 858 | ||
@@ -841,8 +879,7 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data, | |||
841 | * | 879 | * |
842 | * This function is called when the seqno of the fence where @action is | 880 | * This function is called when the seqno of the fence where @action is |
843 | * attached has passed. It queues the event on the submitter's event list. | 881 | * attached has passed. It queues the event on the submitter's event list. |
844 | * This function is always called from atomic context, and may be called | 882 | * This function is always called from atomic context. |
845 | * from irq context. | ||
846 | */ | 883 | */ |
847 | static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action) | 884 | static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action) |
848 | { | 885 | { |
@@ -851,13 +888,13 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action) | |||
851 | struct drm_device *dev = eaction->dev; | 888 | struct drm_device *dev = eaction->dev; |
852 | struct drm_pending_event *event = eaction->event; | 889 | struct drm_pending_event *event = eaction->event; |
853 | struct drm_file *file_priv; | 890 | struct drm_file *file_priv; |
854 | unsigned long irq_flags; | 891 | |
855 | 892 | ||
856 | if (unlikely(event == NULL)) | 893 | if (unlikely(event == NULL)) |
857 | return; | 894 | return; |
858 | 895 | ||
859 | file_priv = event->file_priv; | 896 | file_priv = event->file_priv; |
860 | spin_lock_irqsave(&dev->event_lock, irq_flags); | 897 | spin_lock_irq(&dev->event_lock); |
861 | 898 | ||
862 | if (likely(eaction->tv_sec != NULL)) { | 899 | if (likely(eaction->tv_sec != NULL)) { |
863 | struct timeval tv; | 900 | struct timeval tv; |
@@ -869,7 +906,7 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action) | |||
869 | 906 | ||
870 | drm_send_event_locked(dev, eaction->event); | 907 | drm_send_event_locked(dev, eaction->event); |
871 | eaction->event = NULL; | 908 | eaction->event = NULL; |
872 | spin_unlock_irqrestore(&dev->event_lock, irq_flags); | 909 | spin_unlock_irq(&dev->event_lock); |
873 | } | 910 | } |
874 | 911 | ||
875 | /** | 912 | /** |
@@ -904,11 +941,10 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence, | |||
904 | struct vmw_fence_action *action) | 941 | struct vmw_fence_action *action) |
905 | { | 942 | { |
906 | struct vmw_fence_manager *fman = fman_from_fence(fence); | 943 | struct vmw_fence_manager *fman = fman_from_fence(fence); |
907 | unsigned long irq_flags; | ||
908 | bool run_update = false; | 944 | bool run_update = false; |
909 | 945 | ||
910 | mutex_lock(&fman->goal_irq_mutex); | 946 | mutex_lock(&fman->goal_irq_mutex); |
911 | spin_lock_irqsave(&fman->lock, irq_flags); | 947 | spin_lock(&fman->lock); |
912 | 948 | ||
913 | fman->pending_actions[action->type]++; | 949 | fman->pending_actions[action->type]++; |
914 | if (dma_fence_is_signaled_locked(&fence->base)) { | 950 | if (dma_fence_is_signaled_locked(&fence->base)) { |
@@ -927,7 +963,7 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence, | |||
927 | run_update = vmw_fence_goal_check_locked(fence); | 963 | run_update = vmw_fence_goal_check_locked(fence); |
928 | } | 964 | } |
929 | 965 | ||
930 | spin_unlock_irqrestore(&fman->lock, irq_flags); | 966 | spin_unlock(&fman->lock); |
931 | 967 | ||
932 | if (run_update) { | 968 | if (run_update) { |
933 | if (!fman->goal_irq_on) { | 969 | if (!fman->goal_irq_on) { |
@@ -1114,7 +1150,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, | |||
1114 | } | 1150 | } |
1115 | 1151 | ||
1116 | vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence, | 1152 | vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence, |
1117 | handle); | 1153 | handle, -1, NULL); |
1118 | vmw_fence_obj_unreference(&fence); | 1154 | vmw_fence_obj_unreference(&fence); |
1119 | return 0; | 1155 | return 0; |
1120 | out_no_create: | 1156 | out_no_create: |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h index d9d85aa6ed20..20224dba9d8e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h | |||
@@ -28,6 +28,7 @@ | |||
28 | #ifndef _VMWGFX_FENCE_H_ | 28 | #ifndef _VMWGFX_FENCE_H_ |
29 | 29 | ||
30 | #include <linux/dma-fence.h> | 30 | #include <linux/dma-fence.h> |
31 | #include <linux/dma-fence-array.h> | ||
31 | 32 | ||
32 | #define VMW_FENCE_WAIT_TIMEOUT (5*HZ) | 33 | #define VMW_FENCE_WAIT_TIMEOUT (5*HZ) |
33 | 34 | ||
@@ -102,6 +103,9 @@ extern int vmw_user_fence_create(struct drm_file *file_priv, | |||
102 | struct vmw_fence_obj **p_fence, | 103 | struct vmw_fence_obj **p_fence, |
103 | uint32_t *p_handle); | 104 | uint32_t *p_handle); |
104 | 105 | ||
106 | extern int vmw_wait_dma_fence(struct vmw_fence_manager *fman, | ||
107 | struct dma_fence *fence); | ||
108 | |||
105 | extern void vmw_fence_fifo_up(struct vmw_fence_manager *fman); | 109 | extern void vmw_fence_fifo_up(struct vmw_fence_manager *fman); |
106 | 110 | ||
107 | extern void vmw_fence_fifo_down(struct vmw_fence_manager *fman); | 111 | extern void vmw_fence_fifo_down(struct vmw_fence_manager *fman); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index 0c7e1723292c..b9239ba067c4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | |||
@@ -30,11 +30,56 @@ | |||
30 | 30 | ||
31 | #define VMW_FENCE_WRAP (1 << 24) | 31 | #define VMW_FENCE_WRAP (1 << 24) |
32 | 32 | ||
33 | irqreturn_t vmw_irq_handler(int irq, void *arg) | 33 | /** |
34 | * vmw_thread_fn - Deferred (process context) irq handler | ||
35 | * | ||
36 | * @irq: irq number | ||
37 | * @arg: Closure argument. Pointer to a struct drm_device cast to void * | ||
38 | * | ||
39 | * This function implements the deferred part of irq processing. | ||
40 | * The function is guaranteed to run at least once after the | ||
41 | * vmw_irq_handler has returned with IRQ_WAKE_THREAD. | ||
42 | * | ||
43 | */ | ||
44 | static irqreturn_t vmw_thread_fn(int irq, void *arg) | ||
45 | { | ||
46 | struct drm_device *dev = (struct drm_device *)arg; | ||
47 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
48 | irqreturn_t ret = IRQ_NONE; | ||
49 | |||
50 | if (test_and_clear_bit(VMW_IRQTHREAD_FENCE, | ||
51 | dev_priv->irqthread_pending)) { | ||
52 | vmw_fences_update(dev_priv->fman); | ||
53 | wake_up_all(&dev_priv->fence_queue); | ||
54 | ret = IRQ_HANDLED; | ||
55 | } | ||
56 | |||
57 | if (test_and_clear_bit(VMW_IRQTHREAD_CMDBUF, | ||
58 | dev_priv->irqthread_pending)) { | ||
59 | vmw_cmdbuf_irqthread(dev_priv->cman); | ||
60 | ret = IRQ_HANDLED; | ||
61 | } | ||
62 | |||
63 | return ret; | ||
64 | } | ||
65 | |||
66 | /** | ||
67 | * vmw_irq_handler - irq handler | ||
68 | * | ||
69 | * @irq: irq number | ||
70 | * @arg: Closure argument. Pointer to a struct drm_device cast to void * | ||
71 | * | ||
72 | * This function implements the quick part of irq processing. | ||
73 | * The function performs fast actions like clearing the device interrupt | ||
74 | * flags and also reasonably quick actions like waking processes waiting for | ||
75 | * FIFO space. Other IRQ actions are deferred to the IRQ thread. | ||
76 | */ | ||
77 | static irqreturn_t vmw_irq_handler(int irq, void *arg) | ||
34 | { | 78 | { |
35 | struct drm_device *dev = (struct drm_device *)arg; | 79 | struct drm_device *dev = (struct drm_device *)arg; |
36 | struct vmw_private *dev_priv = vmw_priv(dev); | 80 | struct vmw_private *dev_priv = vmw_priv(dev); |
37 | uint32_t status, masked_status; | 81 | uint32_t status, masked_status; |
82 | irqreturn_t ret = IRQ_HANDLED; | ||
38 | 83 | ||
39 | status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | 84 | status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); |
40 | masked_status = status & READ_ONCE(dev_priv->irq_mask); | 85 | masked_status = status & READ_ONCE(dev_priv->irq_mask); |
@@ -45,20 +90,21 @@ irqreturn_t vmw_irq_handler(int irq, void *arg) | |||
45 | if (!status) | 90 | if (!status) |
46 | return IRQ_NONE; | 91 | return IRQ_NONE; |
47 | 92 | ||
48 | if (masked_status & (SVGA_IRQFLAG_ANY_FENCE | | ||
49 | SVGA_IRQFLAG_FENCE_GOAL)) { | ||
50 | vmw_fences_update(dev_priv->fman); | ||
51 | wake_up_all(&dev_priv->fence_queue); | ||
52 | } | ||
53 | |||
54 | if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS) | 93 | if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS) |
55 | wake_up_all(&dev_priv->fifo_queue); | 94 | wake_up_all(&dev_priv->fifo_queue); |
56 | 95 | ||
57 | if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER | | 96 | if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE | |
58 | SVGA_IRQFLAG_ERROR)) | 97 | SVGA_IRQFLAG_FENCE_GOAL)) && |
59 | vmw_cmdbuf_tasklet_schedule(dev_priv->cman); | 98 | !test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending)) |
99 | ret = IRQ_WAKE_THREAD; | ||
60 | 100 | ||
61 | return IRQ_HANDLED; | 101 | if ((masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER | |
102 | SVGA_IRQFLAG_ERROR)) && | ||
103 | !test_and_set_bit(VMW_IRQTHREAD_CMDBUF, | ||
104 | dev_priv->irqthread_pending)) | ||
105 | ret = IRQ_WAKE_THREAD; | ||
106 | |||
107 | return ret; | ||
62 | } | 108 | } |
63 | 109 | ||
64 | static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) | 110 | static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) |
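The rewritten vmw_irq_handler() is the top half of a standard threaded-IRQ split: it acknowledges the device, performs only the cheap wakeups, and records which deferred work is pending; the per-kind test_and_set_bit() keeps a burst of interrupts from queueing redundant thread wakeups, and vmw_thread_fn() clears each bit again before doing the sleepable work. A condensed, self-contained version of the same pattern (demo_dev and both handlers are illustrative, not driver code):

#include <linux/interrupt.h>
#include <linux/bitops.h>

struct demo_dev {
	unsigned long pending;	/* bit 0: deferred work requested */
};

static irqreturn_t demo_hard_handler(int irq, void *arg)
{
	struct demo_dev *dd = arg;

	/*
	 * Fast path only: ack the hardware here (omitted), then defer the
	 * heavy lifting. The thread is woken at most once per burst.
	 */
	if (!test_and_set_bit(0, &dd->pending))
		return IRQ_WAKE_THREAD;

	return IRQ_HANDLED;
}

static irqreturn_t demo_thread_fn(int irq, void *arg)
{
	struct demo_dev *dd = arg;

	if (!test_and_clear_bit(0, &dd->pending))
		return IRQ_NONE;

	/* ...slow, sleepable processing goes here... */
	return IRQ_HANDLED;
}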
@@ -281,23 +327,15 @@ int vmw_wait_seqno(struct vmw_private *dev_priv, | |||
281 | return ret; | 327 | return ret; |
282 | } | 328 | } |
283 | 329 | ||
284 | void vmw_irq_preinstall(struct drm_device *dev) | 330 | static void vmw_irq_preinstall(struct drm_device *dev) |
285 | { | 331 | { |
286 | struct vmw_private *dev_priv = vmw_priv(dev); | 332 | struct vmw_private *dev_priv = vmw_priv(dev); |
287 | uint32_t status; | 333 | uint32_t status; |
288 | 334 | ||
289 | if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) | ||
290 | return; | ||
291 | |||
292 | status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | 335 | status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); |
293 | outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | 336 | outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); |
294 | } | 337 | } |
295 | 338 | ||
296 | int vmw_irq_postinstall(struct drm_device *dev) | ||
297 | { | ||
298 | return 0; | ||
299 | } | ||
300 | |||
301 | void vmw_irq_uninstall(struct drm_device *dev) | 339 | void vmw_irq_uninstall(struct drm_device *dev) |
302 | { | 340 | { |
303 | struct vmw_private *dev_priv = vmw_priv(dev); | 341 | struct vmw_private *dev_priv = vmw_priv(dev); |
@@ -306,8 +344,41 @@ void vmw_irq_uninstall(struct drm_device *dev) | |||
306 | if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) | 344 | if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) |
307 | return; | 345 | return; |
308 | 346 | ||
347 | if (!dev->irq_enabled) | ||
348 | return; | ||
349 | |||
309 | vmw_write(dev_priv, SVGA_REG_IRQMASK, 0); | 350 | vmw_write(dev_priv, SVGA_REG_IRQMASK, 0); |
310 | 351 | ||
311 | status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | 352 | status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); |
312 | outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | 353 | outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); |
354 | |||
355 | dev->irq_enabled = false; | ||
356 | free_irq(dev->irq, dev); | ||
357 | } | ||
358 | |||
359 | /** | ||
360 | * vmw_irq_install - Install the irq handlers | ||
361 | * | ||
362 | * @dev: Pointer to the drm device. | ||
363 | * @irq: The irq number. | ||
364 | * Return: Zero if successful. Negative number otherwise. | ||
365 | */ | ||
366 | int vmw_irq_install(struct drm_device *dev, int irq) | ||
367 | { | ||
368 | int ret; | ||
369 | |||
370 | if (dev->irq_enabled) | ||
371 | return -EBUSY; | ||
372 | |||
373 | vmw_irq_preinstall(dev); | ||
374 | |||
375 | ret = request_threaded_irq(irq, vmw_irq_handler, vmw_thread_fn, | ||
376 | IRQF_SHARED, VMWGFX_DRIVER_NAME, dev); | ||
377 | if (ret < 0) | ||
378 | return ret; | ||
379 | |||
380 | dev->irq_enabled = true; | ||
381 | dev->irq = irq; | ||
382 | |||
383 | return ret; | ||
313 | } | 384 | } |
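With drm_irq_install()/drm_irq_uninstall() no longer used, interrupt setup becomes the driver's own job via request_threaded_irq(), and the matching free_irq() happens in vmw_irq_uninstall() above. The call sites live in vmwgfx_drv.c and are not part of these hunks; the excerpt below is only a hypothetical sketch of how a load path might use the new helpers (demo_enable_irq and demo_disable_irq are illustrative names):

static int demo_enable_irq(struct drm_device *dev, struct pci_dev *pdev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret = 0;

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = vmw_irq_install(dev, pdev->irq);
		if (ret)
			DRM_ERROR("Failed installing irq: %d\n", ret);
	}
	return ret;
}

static void demo_disable_irq(struct drm_device *dev)
{
	/* Safe to call unconditionally; it returns early when no irq was installed. */
	vmw_irq_uninstall(dev);
}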
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 36dd7930bf5f..5d50e45ae274 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -2494,7 +2494,7 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, | |||
2494 | if (file_priv) | 2494 | if (file_priv) |
2495 | vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), | 2495 | vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), |
2496 | ret, user_fence_rep, fence, | 2496 | ret, user_fence_rep, fence, |
2497 | handle); | 2497 | handle, -1, NULL); |
2498 | if (out_fence) | 2498 | if (out_fence) |
2499 | *out_fence = fence; | 2499 | *out_fence = fence; |
2500 | else | 2500 | else |
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index d9dfde9aa757..0bc784f5e0db 100644 --- a/include/uapi/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h | |||
@@ -297,13 +297,17 @@ union drm_vmw_surface_reference_arg { | |||
297 | * @version: Allows expanding the execbuf ioctl parameters without breaking | 297 | * @version: Allows expanding the execbuf ioctl parameters without breaking |
298 | * backwards compatibility, since user-space will always tell the kernel | 298 | * backwards compatibility, since user-space will always tell the kernel |
299 | * which version it uses. | 299 | * which version it uses. |
300 | * @flags: Execbuf flags. None currently. | 300 | * @flags: Execbuf flags. |
301 | * @imported_fence_fd: FD for a fence imported from another device | ||
301 | * | 302 | * |
302 | * Argument to the DRM_VMW_EXECBUF Ioctl. | 303 | * Argument to the DRM_VMW_EXECBUF Ioctl. |
303 | */ | 304 | */ |
304 | 305 | ||
305 | #define DRM_VMW_EXECBUF_VERSION 2 | 306 | #define DRM_VMW_EXECBUF_VERSION 2 |
306 | 307 | ||
308 | #define DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD (1 << 0) | ||
309 | #define DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD (1 << 1) | ||
310 | |||
307 | struct drm_vmw_execbuf_arg { | 311 | struct drm_vmw_execbuf_arg { |
308 | __u64 commands; | 312 | __u64 commands; |
309 | __u32 command_size; | 313 | __u32 command_size; |
@@ -312,7 +316,7 @@ struct drm_vmw_execbuf_arg { | |||
312 | __u32 version; | 316 | __u32 version; |
313 | __u32 flags; | 317 | __u32 flags; |
314 | __u32 context_handle; | 318 | __u32 context_handle; |
315 | __u32 pad64; | 319 | __s32 imported_fence_fd; |
316 | }; | 320 | }; |
317 | 321 | ||
318 | /** | 322 | /** |
@@ -328,6 +332,7 @@ struct drm_vmw_execbuf_arg { | |||
328 | * @passed_seqno: The highest seqno number processed by the hardware | 332 | * @passed_seqno: The highest seqno number processed by the hardware |
329 | * so far. This can be used to mark user-space fence objects as signaled, and | 333 | * so far. This can be used to mark user-space fence objects as signaled, and |
330 | * to determine whether a fence seqno might be stale. | 334 | * to determine whether a fence seqno might be stale. |
335 | * @fd: FD associated with the fence, -1 if not exported | ||
331 | * @error: This member should've been set to -EFAULT on submission. | 336 | * @error: This member should've been set to -EFAULT on submission. |
332 | * The following actions should be take on completion: | 337 | * The following actions should be take on completion: |
333 | * error == -EFAULT: Fence communication failed. The host is synchronized. | 338 | * error == -EFAULT: Fence communication failed. The host is synchronized. |
@@ -345,7 +350,7 @@ struct drm_vmw_fence_rep { | |||
345 | __u32 mask; | 350 | __u32 mask; |
346 | __u32 seqno; | 351 | __u32 seqno; |
347 | __u32 passed_seqno; | 352 | __u32 passed_seqno; |
348 | __u32 pad64; | 353 | __s32 fd; |
349 | __s32 error; | 354 | __s32 error; |
350 | }; | 355 | }; |
351 | 356 | ||
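Taken together, the new flags and fields let user space thread sync-file fds through the execbuf ioctl: DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD tells the kernel to synchronize against imported_fence_fd before processing the command stream, and DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD asks it to hand back a new fd in drm_vmw_fence_rep.fd (-1 if nothing was exported). A rough user-space sketch, assuming libdrm's drmCommandWrite() and the execbuf argument's fence_rep pointer field (not shown in these hunks); demo_submit is a hypothetical helper with error handling and the command buffer itself omitted:

#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

static int demo_submit(int drm_fd, void *cmds, uint32_t cmd_size, int in_fd)
{
	struct drm_vmw_fence_rep rep;
	struct drm_vmw_execbuf_arg arg;
	int ret;

	memset(&arg, 0, sizeof(arg));
	memset(&rep, 0, sizeof(rep));
	rep.error = -EFAULT;	/* per the doc comment above: detect a failed copy-back */

	arg.commands = (uintptr_t)cmds;
	arg.command_size = cmd_size;
	arg.version = DRM_VMW_EXECBUF_VERSION;
	arg.fence_rep = (uintptr_t)&rep;
	arg.flags = DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD;

	if (in_fd >= 0) {
		arg.flags |= DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD;
		arg.imported_fence_fd = in_fd;
	}

	ret = drmCommandWrite(drm_fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
	if (ret)
		return ret;

	/* rep.fd now holds the exported sync-file fd, or -1 if none was created. */
	return rep.fd;
}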