-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c      |   2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h      |  14
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  | 166
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c |   5
4 files changed, 139 insertions, 48 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 96949b93d920..62d54b940474 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -467,6 +467,8 @@ static int vmw_driver_unload(struct drm_device *dev)
 
 	unregister_pm_notifier(&dev_priv->pm_nb);
 
+	if (dev_priv->ctx.cmd_bounce)
+		vfree(dev_priv->ctx.cmd_bounce);
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
 		drm_irq_uninstall(dev_priv->dev);
 	if (dev_priv->enable_fb) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index fc33f3f9ebc4..ec09a3fa2ac2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -46,8 +46,9 @@
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
 #define VMWGFX_MAX_RELOCATIONS 2048
-#define VMWGFX_MAX_GMRS 2048
+#define VMWGFX_MAX_VALIDATIONS 2048
 #define VMWGFX_MAX_DISPLAYS 16
+#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
 
 #define VMW_PL_GMR TTM_PL_PRIV0
 #define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
@@ -74,7 +75,7 @@ struct vmw_resource {
 	bool avail;
 	void (*hw_destroy) (struct vmw_resource *res);
 	void (*res_free) (struct vmw_resource *res);
-
+	bool on_validate_list;
 	/* TODO is a generic snooper needed? */
 #if 0
 	void (*snoop)(struct vmw_resource *res,
@@ -143,8 +144,12 @@ struct vmw_sw_context{
 	struct list_head validate_nodes;
 	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
 	uint32_t cur_reloc;
-	struct ttm_validate_buffer val_bufs[VMWGFX_MAX_GMRS];
+	struct ttm_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
 	uint32_t cur_val_buf;
+	uint32_t *cmd_bounce;
+	uint32_t cmd_bounce_size;
+	struct vmw_resource *resources[VMWGFX_MAX_VALIDATIONS];
+	uint32_t num_ref_resources;
 };
 
 struct vmw_legacy_display;
@@ -340,7 +345,8 @@ extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv);
 extern int vmw_context_check(struct vmw_private *dev_priv,
 			     struct ttm_object_file *tfile,
-			     int id);
+			     int id,
+			     struct vmw_resource **p_res);
 extern void vmw_surface_res_free(struct vmw_resource *res);
 extern int vmw_surface_init(struct vmw_private *dev_priv,
 			    struct vmw_surface *srf,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 8ca3ddb2ebc3..c6ff0e40f201 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -44,10 +44,36 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
 	return 0;
 }
 
+
+static int vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
+					 struct vmw_resource **p_res)
+{
+	int ret = 0;
+	struct vmw_resource *res = *p_res;
+
+	if (!res->on_validate_list) {
+		if (sw_context->num_ref_resources >= VMWGFX_MAX_VALIDATIONS) {
+			DRM_ERROR("Too many resources referenced in "
+				  "command stream.\n");
+			ret = -ENOMEM;
+			goto out;
+		}
+		sw_context->resources[sw_context->num_ref_resources++] = res;
+		res->on_validate_list = true;
+		return 0;
+	}
+
+out:
+	vmw_resource_unreference(p_res);
+	return ret;
+}
+
 static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
 			     struct vmw_sw_context *sw_context,
 			     SVGA3dCmdHeader *header)
 {
+	struct vmw_resource *ctx;
+
 	struct vmw_cid_cmd {
 		SVGA3dCmdHeader header;
 		__le32 cid;
@@ -58,7 +84,8 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
 	if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
 		return 0;
 
-	ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
+	ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
+				&ctx);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use context %u\n",
 			  (unsigned) cmd->cid);
@@ -67,39 +94,43 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
 
 	sw_context->last_cid = cmd->cid;
 	sw_context->cid_valid = true;
-
-	return 0;
+	return vmw_resource_to_validate_list(sw_context, &ctx);
 }
 
 static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
 			     struct vmw_sw_context *sw_context,
 			     uint32_t *sid)
 {
+	struct vmw_surface *srf;
+	int ret;
+	struct vmw_resource *res;
+
 	if (*sid == SVGA3D_INVALID_ID)
 		return 0;
 
-	if (unlikely((!sw_context->sid_valid ||
-		      *sid != sw_context->last_sid))) {
-		int real_id;
-		int ret = vmw_surface_check(dev_priv, sw_context->tfile,
-					    *sid, &real_id);
-
-		if (unlikely(ret != 0)) {
-			DRM_ERROR("Could ot find or use surface 0x%08x "
-				  "address 0x%08lx\n",
-				  (unsigned int) *sid,
-				  (unsigned long) sid);
-			return ret;
-		}
-
-		sw_context->last_sid = *sid;
-		sw_context->sid_valid = true;
-		*sid = real_id;
-		sw_context->sid_translation = real_id;
-	} else
+	if (likely((sw_context->sid_valid &&
+		    *sid == sw_context->last_sid))) {
 		*sid = sw_context->sid_translation;
+		return 0;
+	}
 
-	return 0;
+	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
+					     *sid, &srf);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Could ot find or use surface 0x%08x "
+			  "address 0x%08lx\n",
+			  (unsigned int) *sid,
+			  (unsigned long) sid);
+		return ret;
+	}
+
+	sw_context->last_sid = *sid;
+	sw_context->sid_valid = true;
+	sw_context->sid_translation = srf->res.id;
+	*sid = sw_context->sid_translation;
+
+	res = &srf->res;
+	return vmw_resource_to_validate_list(sw_context, &res);
 }
 
 
@@ -213,7 +244,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	reloc->location = ptr;
 
 	cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
-	if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
+	if (unlikely(cur_validate_node >= VMWGFX_MAX_VALIDATIONS)) {
 		DRM_ERROR("Max number of DMA buffers per submission"
 			  " exceeded.\n");
 		ret = -EINVAL;
@@ -303,6 +334,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 		SVGA3dCmdSurfaceDMA dma;
 	} *cmd;
 	int ret;
+	struct vmw_resource *res;
 
 	cmd = container_of(header, struct vmw_dma_cmd, header);
 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
@@ -319,17 +351,16 @@
 		goto out_no_reloc;
 	}
 
-	/**
+	/*
 	 * Patch command stream with device SID.
 	 */
-
 	cmd->dma.host.sid = srf->res.id;
 	vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
-	/**
-	 * FIXME: May deadlock here when called from the
-	 * command parsing code.
-	 */
-	vmw_surface_unreference(&srf);
+
+	vmw_dmabuf_unreference(&vmw_bo);
+
+	res = &srf->res;
+	return vmw_resource_to_validate_list(sw_context, &res);
 
 out_no_reloc:
 	vmw_dmabuf_unreference(&vmw_bo);
@@ -501,8 +532,9 @@ out_err:
 
 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
 			     struct vmw_sw_context *sw_context,
-			     void *buf, uint32_t size)
+			     uint32_t size)
 {
+	void *buf = sw_context->cmd_bounce;
 	int32_t cur_size = size;
 	int ret;
 
@@ -551,7 +583,11 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
 {
 	struct ttm_validate_buffer *entry, *next;
+	uint32_t i = sw_context->num_ref_resources;
 
+	/*
+	 * Drop references to DMA buffers held during command submission.
+	 */
 	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
 				 head) {
 		list_del(&entry->head);
@@ -560,6 +596,14 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context)
 		sw_context->cur_val_buf--;
 	}
 	BUG_ON(sw_context->cur_val_buf != 0);
+
+	/*
+	 * Drop references to resources held during command submission.
+	 */
+	while (i-- > 0) {
+		sw_context->resources[i]->on_validate_list = false;
+		vmw_resource_unreference(&sw_context->resources[i]);
+	}
 }
 
 static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
@@ -603,6 +647,35 @@ static int vmw_validate_buffers(struct vmw_private *dev_priv,
 	return 0;
 }
 
+static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
+				 uint32_t size)
+{
+	if (likely(sw_context->cmd_bounce_size >= size))
+		return 0;
+
+	if (sw_context->cmd_bounce_size == 0)
+		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
+
+	while (sw_context->cmd_bounce_size < size) {
+		sw_context->cmd_bounce_size =
+			PAGE_ALIGN(sw_context->cmd_bounce_size +
+				   (sw_context->cmd_bounce_size >> 1));
+	}
+
+	if (sw_context->cmd_bounce != NULL)
+		vfree(sw_context->cmd_bounce);
+
+	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
+
+	if (sw_context->cmd_bounce == NULL) {
+		DRM_ERROR("Failed to allocate command bounce buffer.\n");
+		sw_context->cmd_bounce_size = 0;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
 int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv)
 {
@@ -627,20 +700,18 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 		goto out_no_cmd_mutex;
 	}
 
-	cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving fifo space for commands.\n");
-		ret = -ENOMEM;
+	ret = vmw_resize_cmd_bounce(sw_context, arg->command_size);
+	if (unlikely(ret != 0))
 		goto out_unlock;
-	}
 
 	user_cmd = (void __user *)(unsigned long)arg->commands;
-	ret = copy_from_user(cmd, user_cmd, arg->command_size);
+	ret = copy_from_user(sw_context->cmd_bounce,
+			     user_cmd, arg->command_size);
 
 	if (unlikely(ret != 0)) {
 		ret = -EFAULT;
 		DRM_ERROR("Failed copying commands.\n");
-		goto out_commit;
+		goto out_unlock;
 	}
 
 	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
@@ -648,12 +719,14 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 	sw_context->sid_valid = false;
 	sw_context->cur_reloc = 0;
 	sw_context->cur_val_buf = 0;
+	sw_context->num_ref_resources = 0;
 
 	INIT_LIST_HEAD(&sw_context->validate_nodes);
 
-	ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
+	ret = vmw_cmd_check_all(dev_priv, sw_context, arg->command_size);
 	if (unlikely(ret != 0))
 		goto out_err;
+
 	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
 	if (unlikely(ret != 0))
 		goto out_err;
@@ -669,9 +742,17 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 				      arg->throttle_us);
 
 		if (unlikely(ret != 0))
-			goto out_err;
+			goto out_throttle;
+	}
+
+	cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving fifo space for commands.\n");
+		ret = -ENOMEM;
+		goto out_err;
 	}
 
+	memcpy(cmd, sw_context->cmd_bounce, arg->command_size);
 	vmw_fifo_commit(dev_priv, arg->command_size);
 
 	ret = vmw_fifo_send_fence(dev_priv, &sequence);
@@ -708,10 +789,9 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 	return 0;
 out_err:
 	vmw_free_relocations(sw_context);
+out_throttle:
 	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
 	vmw_clear_validations(sw_context);
-out_commit:
-	vmw_fifo_commit(dev_priv, 0);
 out_unlock:
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 out_no_cmd_mutex:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index bfe1bcce7f8a..dc8904a1c1e1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -364,7 +364,8 @@ out_err:
 
 int vmw_context_check(struct vmw_private *dev_priv,
 		      struct ttm_object_file *tfile,
-		      int id)
+		      int id,
+		      struct vmw_resource **p_res)
 {
 	struct vmw_resource *res;
 	int ret = 0;
@@ -376,6 +377,8 @@ int vmw_context_check(struct vmw_private *dev_priv,
 			container_of(res, struct vmw_user_context, res);
 		if (ctx->base.tfile != tfile && !ctx->base.shareable)
 			ret = -EPERM;
+		if (p_res)
+			*p_res = vmw_resource_reference(res);
 	} else
 		ret = -EINVAL;
 	read_unlock(&dev_priv->resource_lock);