diff options
author | Dave Airlie <airlied@redhat.com> | 2015-09-24 04:36:04 -0400 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2015-09-24 04:36:04 -0400 |
commit | 0a3579e39dd0412b3ff932e32ae7a22a604200f0 (patch) | |
tree | 7ba26a4085e47ce8c3a5dbacc5bbf96f6e8b8f9e | |
parent | e4b35f952be9f5706b22e38c1925b7ac49080d72 (diff) | |
parent | 30c64664f110f76064e364cb5dd385edc3751ba5 (diff) |
Merge tag 'vmwgfx-fixes-4.3-150924' of git://people.freedesktop.org/~thomash/linux into drm-fixes
Pull request of 2015-09-24
Vmwgfx fixes for 4.3:
- A couple of uninitialized variable fixes by Christian Engelmayer
- A TTM fix for a bug that causes problems with the new vmwgfx device init
- A vmwgfx refcounting fix
- A vmwgfx iomem caching fix
- A DRM change to allow control clients to also read the drm driver version.
* tag 'vmwgfx-fixes-4.3-150924' of git://people.freedesktop.org/~thomash/linux:
drm: Allow also control clients to check the drm version
drm/vmwgfx: Fix uninitialized return in vmw_kms_helper_dirty()
drm/vmwgfx: Fix uninitialized return in vmw_cotable_unbind()
drm/vmwgfx: Only build on X86
drm/ttm: Fix memory space allocation v2
drm/vmwgfx: Map the fifo as cached
drm/vmwgfx: Fix up user_dmabuf refcounting
-rw-r--r-- | drivers/gpu/drm/drm_ioctl.c | 3 | ||||
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_bo.c | 28 | ||||
-rw-r--r-- | drivers/gpu/drm/vmwgfx/Kconfig | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 3 | ||||
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 10 | ||||
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 7 | ||||
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 6 | ||||
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 3 | ||||
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 29 | ||||
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 11 |
12 files changed, 62 insertions, 44 deletions
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 9a860ca1e9d7..d93e7378c077 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c | |||
@@ -520,7 +520,8 @@ EXPORT_SYMBOL(drm_ioctl_permit); | |||
520 | 520 | ||
521 | /** Ioctl table */ | 521 | /** Ioctl table */ |
522 | static const struct drm_ioctl_desc drm_ioctls[] = { | 522 | static const struct drm_ioctl_desc drm_ioctls[] = { |
523 | DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW), | 523 | DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, |
524 | DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW), | ||
524 | DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), | 525 | DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), |
525 | DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), | 526 | DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), |
526 | DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), | 527 | DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 8d9b7de25613..745e996d2dbc 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -882,6 +882,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
882 | if (ret) | 882 | if (ret) |
883 | return ret; | 883 | return ret; |
884 | man = &bdev->man[mem_type]; | 884 | man = &bdev->man[mem_type]; |
885 | if (!man->has_type || !man->use_type) | ||
886 | continue; | ||
885 | 887 | ||
886 | type_ok = ttm_bo_mt_compatible(man, mem_type, place, | 888 | type_ok = ttm_bo_mt_compatible(man, mem_type, place, |
887 | &cur_flags); | 889 | &cur_flags); |
@@ -889,6 +891,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
889 | if (!type_ok) | 891 | if (!type_ok) |
890 | continue; | 892 | continue; |
891 | 893 | ||
894 | type_found = true; | ||
892 | cur_flags = ttm_bo_select_caching(man, bo->mem.placement, | 895 | cur_flags = ttm_bo_select_caching(man, bo->mem.placement, |
893 | cur_flags); | 896 | cur_flags); |
894 | /* | 897 | /* |
@@ -901,12 +904,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
901 | if (mem_type == TTM_PL_SYSTEM) | 904 | if (mem_type == TTM_PL_SYSTEM) |
902 | break; | 905 | break; |
903 | 906 | ||
904 | if (man->has_type && man->use_type) { | 907 | ret = (*man->func->get_node)(man, bo, place, mem); |
905 | type_found = true; | 908 | if (unlikely(ret)) |
906 | ret = (*man->func->get_node)(man, bo, place, mem); | 909 | return ret; |
907 | if (unlikely(ret)) | 910 | |
908 | return ret; | ||
909 | } | ||
910 | if (mem->mm_node) | 911 | if (mem->mm_node) |
911 | break; | 912 | break; |
912 | } | 913 | } |
@@ -917,9 +918,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
917 | return 0; | 918 | return 0; |
918 | } | 919 | } |
919 | 920 | ||
920 | if (!type_found) | ||
921 | return -EINVAL; | ||
922 | |||
923 | for (i = 0; i < placement->num_busy_placement; ++i) { | 921 | for (i = 0; i < placement->num_busy_placement; ++i) { |
924 | const struct ttm_place *place = &placement->busy_placement[i]; | 922 | const struct ttm_place *place = &placement->busy_placement[i]; |
925 | 923 | ||
@@ -927,11 +925,12 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
927 | if (ret) | 925 | if (ret) |
928 | return ret; | 926 | return ret; |
929 | man = &bdev->man[mem_type]; | 927 | man = &bdev->man[mem_type]; |
930 | if (!man->has_type) | 928 | if (!man->has_type || !man->use_type) |
931 | continue; | 929 | continue; |
932 | if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags)) | 930 | if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags)) |
933 | continue; | 931 | continue; |
934 | 932 | ||
933 | type_found = true; | ||
935 | cur_flags = ttm_bo_select_caching(man, bo->mem.placement, | 934 | cur_flags = ttm_bo_select_caching(man, bo->mem.placement, |
936 | cur_flags); | 935 | cur_flags); |
937 | /* | 936 | /* |
@@ -957,8 +956,13 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
957 | if (ret == -ERESTARTSYS) | 956 | if (ret == -ERESTARTSYS) |
958 | has_erestartsys = true; | 957 | has_erestartsys = true; |
959 | } | 958 | } |
960 | ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM; | 959 | |
961 | return ret; | 960 | if (!type_found) { |
961 | printk(KERN_ERR TTM_PFX "No compatible memory type found.\n"); | ||
962 | return -EINVAL; | ||
963 | } | ||
964 | |||
965 | return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM; | ||
962 | } | 966 | } |
963 | EXPORT_SYMBOL(ttm_bo_mem_space); | 967 | EXPORT_SYMBOL(ttm_bo_mem_space); |
964 | 968 | ||
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig index 67720f70fe29..b49445df8a7e 100644 --- a/drivers/gpu/drm/vmwgfx/Kconfig +++ b/drivers/gpu/drm/vmwgfx/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config DRM_VMWGFX | 1 | config DRM_VMWGFX |
2 | tristate "DRM driver for VMware Virtual GPU" | 2 | tristate "DRM driver for VMware Virtual GPU" |
3 | depends on DRM && PCI | 3 | depends on DRM && PCI && X86 |
4 | select FB_DEFERRED_IO | 4 | select FB_DEFERRED_IO |
5 | select FB_CFB_FILLRECT | 5 | select FB_CFB_FILLRECT |
6 | select FB_CFB_COPYAREA | 6 | select FB_CFB_COPYAREA |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index ce659a125f2b..092ea81eeff7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | |||
@@ -311,7 +311,6 @@ static int vmw_cotable_unbind(struct vmw_resource *res, | |||
311 | struct vmw_private *dev_priv = res->dev_priv; | 311 | struct vmw_private *dev_priv = res->dev_priv; |
312 | struct ttm_buffer_object *bo = val_buf->bo; | 312 | struct ttm_buffer_object *bo = val_buf->bo; |
313 | struct vmw_fence_obj *fence; | 313 | struct vmw_fence_obj *fence; |
314 | int ret; | ||
315 | 314 | ||
316 | if (list_empty(&res->mob_head)) | 315 | if (list_empty(&res->mob_head)) |
317 | return 0; | 316 | return 0; |
@@ -328,7 +327,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res, | |||
328 | if (likely(fence != NULL)) | 327 | if (likely(fence != NULL)) |
329 | vmw_fence_obj_unreference(&fence); | 328 | vmw_fence_obj_unreference(&fence); |
330 | 329 | ||
331 | return ret; | 330 | return 0; |
332 | } | 331 | } |
333 | 332 | ||
334 | /** | 333 | /** |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index e13b20bd9908..2c7a25c71af2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -752,12 +752,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
752 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); | 752 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); |
753 | dev_priv->active_master = &dev_priv->fbdev_master; | 753 | dev_priv->active_master = &dev_priv->fbdev_master; |
754 | 754 | ||
755 | 755 | dev_priv->mmio_virt = ioremap_cache(dev_priv->mmio_start, | |
756 | dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, | 756 | dev_priv->mmio_size); |
757 | dev_priv->mmio_size); | ||
758 | |||
759 | dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start, | ||
760 | dev_priv->mmio_size); | ||
761 | 757 | ||
762 | if (unlikely(dev_priv->mmio_virt == NULL)) { | 758 | if (unlikely(dev_priv->mmio_virt == NULL)) { |
763 | ret = -ENOMEM; | 759 | ret = -ENOMEM; |
@@ -913,7 +909,6 @@ out_no_device: | |||
913 | out_err4: | 909 | out_err4: |
914 | iounmap(dev_priv->mmio_virt); | 910 | iounmap(dev_priv->mmio_virt); |
915 | out_err3: | 911 | out_err3: |
916 | arch_phys_wc_del(dev_priv->mmio_mtrr); | ||
917 | vmw_ttm_global_release(dev_priv); | 912 | vmw_ttm_global_release(dev_priv); |
918 | out_err0: | 913 | out_err0: |
919 | for (i = vmw_res_context; i < vmw_res_max; ++i) | 914 | for (i = vmw_res_context; i < vmw_res_max; ++i) |
@@ -964,7 +959,6 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
964 | 959 | ||
965 | ttm_object_device_release(&dev_priv->tdev); | 960 | ttm_object_device_release(&dev_priv->tdev); |
966 | iounmap(dev_priv->mmio_virt); | 961 | iounmap(dev_priv->mmio_virt); |
967 | arch_phys_wc_del(dev_priv->mmio_mtrr); | ||
968 | if (dev_priv->ctx.staged_bindings) | 962 | if (dev_priv->ctx.staged_bindings) |
969 | vmw_binding_state_free(dev_priv->ctx.staged_bindings); | 963 | vmw_binding_state_free(dev_priv->ctx.staged_bindings); |
970 | vmw_ttm_global_release(dev_priv); | 964 | vmw_ttm_global_release(dev_priv); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 6d02de6dc36c..f19fd39b43e1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -376,7 +376,6 @@ struct vmw_private { | |||
376 | uint32_t initial_width; | 376 | uint32_t initial_width; |
377 | uint32_t initial_height; | 377 | uint32_t initial_height; |
378 | u32 __iomem *mmio_virt; | 378 | u32 __iomem *mmio_virt; |
379 | int mmio_mtrr; | ||
380 | uint32_t capabilities; | 379 | uint32_t capabilities; |
381 | uint32_t max_gmr_ids; | 380 | uint32_t max_gmr_ids; |
382 | uint32_t max_gmr_pages; | 381 | uint32_t max_gmr_pages; |
@@ -631,7 +630,8 @@ extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, | |||
631 | uint32_t size, | 630 | uint32_t size, |
632 | bool shareable, | 631 | bool shareable, |
633 | uint32_t *handle, | 632 | uint32_t *handle, |
634 | struct vmw_dma_buffer **p_dma_buf); | 633 | struct vmw_dma_buffer **p_dma_buf, |
634 | struct ttm_base_object **p_base); | ||
635 | extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, | 635 | extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, |
636 | struct vmw_dma_buffer *dma_buf, | 636 | struct vmw_dma_buffer *dma_buf, |
637 | uint32_t *handle); | 637 | uint32_t *handle); |
@@ -645,7 +645,8 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, | |||
645 | uint32_t cur_validate_node); | 645 | uint32_t cur_validate_node); |
646 | extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); | 646 | extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); |
647 | extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, | 647 | extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, |
648 | uint32_t id, struct vmw_dma_buffer **out); | 648 | uint32_t id, struct vmw_dma_buffer **out, |
649 | struct ttm_base_object **base); | ||
649 | extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, | 650 | extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, |
650 | struct drm_file *file_priv); | 651 | struct drm_file *file_priv); |
651 | extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, | 652 | extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index b56565457c96..5da5de0cb522 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -1236,7 +1236,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, | |||
1236 | struct vmw_relocation *reloc; | 1236 | struct vmw_relocation *reloc; |
1237 | int ret; | 1237 | int ret; |
1238 | 1238 | ||
1239 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); | 1239 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo, |
1240 | NULL); | ||
1240 | if (unlikely(ret != 0)) { | 1241 | if (unlikely(ret != 0)) { |
1241 | DRM_ERROR("Could not find or use MOB buffer.\n"); | 1242 | DRM_ERROR("Could not find or use MOB buffer.\n"); |
1242 | ret = -EINVAL; | 1243 | ret = -EINVAL; |
@@ -1296,7 +1297,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, | |||
1296 | struct vmw_relocation *reloc; | 1297 | struct vmw_relocation *reloc; |
1297 | int ret; | 1298 | int ret; |
1298 | 1299 | ||
1299 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); | 1300 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo, |
1301 | NULL); | ||
1300 | if (unlikely(ret != 0)) { | 1302 | if (unlikely(ret != 0)) { |
1301 | DRM_ERROR("Could not find or use GMR region.\n"); | 1303 | DRM_ERROR("Could not find or use GMR region.\n"); |
1302 | ret = -EINVAL; | 1304 | ret = -EINVAL; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 61fb7f3de311..15a6c01cd016 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -1685,7 +1685,6 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv, | |||
1685 | struct drm_crtc *crtc; | 1685 | struct drm_crtc *crtc; |
1686 | u32 num_units = 0; | 1686 | u32 num_units = 0; |
1687 | u32 i, k; | 1687 | u32 i, k; |
1688 | int ret; | ||
1689 | 1688 | ||
1690 | dirty->dev_priv = dev_priv; | 1689 | dirty->dev_priv = dev_priv; |
1691 | 1690 | ||
@@ -1711,7 +1710,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv, | |||
1711 | if (!dirty->cmd) { | 1710 | if (!dirty->cmd) { |
1712 | DRM_ERROR("Couldn't reserve fifo space " | 1711 | DRM_ERROR("Couldn't reserve fifo space " |
1713 | "for dirty blits.\n"); | 1712 | "for dirty blits.\n"); |
1714 | return ret; | 1713 | return -ENOMEM; |
1715 | } | 1714 | } |
1716 | memset(dirty->cmd, 0, dirty->fifo_reserve_size); | 1715 | memset(dirty->cmd, 0, dirty->fifo_reserve_size); |
1717 | } | 1716 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index 76069f093ccf..222c9c2123a1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | |||
@@ -484,7 +484,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data, | |||
484 | goto out_unlock; | 484 | goto out_unlock; |
485 | } | 485 | } |
486 | 486 | ||
487 | ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf); | 487 | ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL); |
488 | if (ret) | 488 | if (ret) |
489 | goto out_unlock; | 489 | goto out_unlock; |
490 | 490 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index c1912f852b42..e57667ca7557 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -354,7 +354,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv, | |||
354 | } | 354 | } |
355 | 355 | ||
356 | *out_surf = NULL; | 356 | *out_surf = NULL; |
357 | ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf); | 357 | ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL); |
358 | return ret; | 358 | return ret; |
359 | } | 359 | } |
360 | 360 | ||
@@ -481,7 +481,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, | |||
481 | uint32_t size, | 481 | uint32_t size, |
482 | bool shareable, | 482 | bool shareable, |
483 | uint32_t *handle, | 483 | uint32_t *handle, |
484 | struct vmw_dma_buffer **p_dma_buf) | 484 | struct vmw_dma_buffer **p_dma_buf, |
485 | struct ttm_base_object **p_base) | ||
485 | { | 486 | { |
486 | struct vmw_user_dma_buffer *user_bo; | 487 | struct vmw_user_dma_buffer *user_bo; |
487 | struct ttm_buffer_object *tmp; | 488 | struct ttm_buffer_object *tmp; |
@@ -515,6 +516,10 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, | |||
515 | } | 516 | } |
516 | 517 | ||
517 | *p_dma_buf = &user_bo->dma; | 518 | *p_dma_buf = &user_bo->dma; |
519 | if (p_base) { | ||
520 | *p_base = &user_bo->prime.base; | ||
521 | kref_get(&(*p_base)->refcount); | ||
522 | } | ||
518 | *handle = user_bo->prime.base.hash.key; | 523 | *handle = user_bo->prime.base.hash.key; |
519 | 524 | ||
520 | out_no_base_object: | 525 | out_no_base_object: |
@@ -631,6 +636,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, | |||
631 | struct vmw_dma_buffer *dma_buf; | 636 | struct vmw_dma_buffer *dma_buf; |
632 | struct vmw_user_dma_buffer *user_bo; | 637 | struct vmw_user_dma_buffer *user_bo; |
633 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | 638 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
639 | struct ttm_base_object *buffer_base; | ||
634 | int ret; | 640 | int ret; |
635 | 641 | ||
636 | if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0 | 642 | if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0 |
@@ -643,7 +649,8 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, | |||
643 | 649 | ||
644 | switch (arg->op) { | 650 | switch (arg->op) { |
645 | case drm_vmw_synccpu_grab: | 651 | case drm_vmw_synccpu_grab: |
646 | ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf); | 652 | ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf, |
653 | &buffer_base); | ||
647 | if (unlikely(ret != 0)) | 654 | if (unlikely(ret != 0)) |
648 | return ret; | 655 | return ret; |
649 | 656 | ||
@@ -651,6 +658,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, | |||
651 | dma); | 658 | dma); |
652 | ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags); | 659 | ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags); |
653 | vmw_dmabuf_unreference(&dma_buf); | 660 | vmw_dmabuf_unreference(&dma_buf); |
661 | ttm_base_object_unref(&buffer_base); | ||
654 | if (unlikely(ret != 0 && ret != -ERESTARTSYS && | 662 | if (unlikely(ret != 0 && ret != -ERESTARTSYS && |
655 | ret != -EBUSY)) { | 663 | ret != -EBUSY)) { |
656 | DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n", | 664 | DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n", |
@@ -692,7 +700,8 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, | |||
692 | return ret; | 700 | return ret; |
693 | 701 | ||
694 | ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, | 702 | ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, |
695 | req->size, false, &handle, &dma_buf); | 703 | req->size, false, &handle, &dma_buf, |
704 | NULL); | ||
696 | if (unlikely(ret != 0)) | 705 | if (unlikely(ret != 0)) |
697 | goto out_no_dmabuf; | 706 | goto out_no_dmabuf; |
698 | 707 | ||
@@ -721,7 +730,8 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, | |||
721 | } | 730 | } |
722 | 731 | ||
723 | int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, | 732 | int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, |
724 | uint32_t handle, struct vmw_dma_buffer **out) | 733 | uint32_t handle, struct vmw_dma_buffer **out, |
734 | struct ttm_base_object **p_base) | ||
725 | { | 735 | { |
726 | struct vmw_user_dma_buffer *vmw_user_bo; | 736 | struct vmw_user_dma_buffer *vmw_user_bo; |
727 | struct ttm_base_object *base; | 737 | struct ttm_base_object *base; |
@@ -743,7 +753,10 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, | |||
743 | vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, | 753 | vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, |
744 | prime.base); | 754 | prime.base); |
745 | (void)ttm_bo_reference(&vmw_user_bo->dma.base); | 755 | (void)ttm_bo_reference(&vmw_user_bo->dma.base); |
746 | ttm_base_object_unref(&base); | 756 | if (p_base) |
757 | *p_base = base; | ||
758 | else | ||
759 | ttm_base_object_unref(&base); | ||
747 | *out = &vmw_user_bo->dma; | 760 | *out = &vmw_user_bo->dma; |
748 | 761 | ||
749 | return 0; | 762 | return 0; |
@@ -1004,7 +1017,7 @@ int vmw_dumb_create(struct drm_file *file_priv, | |||
1004 | 1017 | ||
1005 | ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, | 1018 | ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, |
1006 | args->size, false, &args->handle, | 1019 | args->size, false, &args->handle, |
1007 | &dma_buf); | 1020 | &dma_buf, NULL); |
1008 | if (unlikely(ret != 0)) | 1021 | if (unlikely(ret != 0)) |
1009 | goto out_no_dmabuf; | 1022 | goto out_no_dmabuf; |
1010 | 1023 | ||
@@ -1032,7 +1045,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv, | |||
1032 | struct vmw_dma_buffer *out_buf; | 1045 | struct vmw_dma_buffer *out_buf; |
1033 | int ret; | 1046 | int ret; |
1034 | 1047 | ||
1035 | ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf); | 1048 | ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL); |
1036 | if (ret != 0) | 1049 | if (ret != 0) |
1037 | return -EINVAL; | 1050 | return -EINVAL; |
1038 | 1051 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index bba1ee395478..fd47547b0234 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | |||
@@ -855,7 +855,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv, | |||
855 | 855 | ||
856 | if (buffer_handle != SVGA3D_INVALID_ID) { | 856 | if (buffer_handle != SVGA3D_INVALID_ID) { |
857 | ret = vmw_user_dmabuf_lookup(tfile, buffer_handle, | 857 | ret = vmw_user_dmabuf_lookup(tfile, buffer_handle, |
858 | &buffer); | 858 | &buffer, NULL); |
859 | if (unlikely(ret != 0)) { | 859 | if (unlikely(ret != 0)) { |
860 | DRM_ERROR("Could not find buffer for shader " | 860 | DRM_ERROR("Could not find buffer for shader " |
861 | "creation.\n"); | 861 | "creation.\n"); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 3361769842f4..64b50409fa07 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | |||
@@ -46,6 +46,7 @@ struct vmw_user_surface { | |||
46 | struct vmw_surface srf; | 46 | struct vmw_surface srf; |
47 | uint32_t size; | 47 | uint32_t size; |
48 | struct drm_master *master; | 48 | struct drm_master *master; |
49 | struct ttm_base_object *backup_base; | ||
49 | }; | 50 | }; |
50 | 51 | ||
51 | /** | 52 | /** |
@@ -656,6 +657,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base) | |||
656 | struct vmw_resource *res = &user_srf->srf.res; | 657 | struct vmw_resource *res = &user_srf->srf.res; |
657 | 658 | ||
658 | *p_base = NULL; | 659 | *p_base = NULL; |
660 | ttm_base_object_unref(&user_srf->backup_base); | ||
659 | vmw_resource_unreference(&res); | 661 | vmw_resource_unreference(&res); |
660 | } | 662 | } |
661 | 663 | ||
@@ -851,7 +853,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
851 | res->backup_size, | 853 | res->backup_size, |
852 | true, | 854 | true, |
853 | &backup_handle, | 855 | &backup_handle, |
854 | &res->backup); | 856 | &res->backup, |
857 | &user_srf->backup_base); | ||
855 | if (unlikely(ret != 0)) { | 858 | if (unlikely(ret != 0)) { |
856 | vmw_resource_unreference(&res); | 859 | vmw_resource_unreference(&res); |
857 | goto out_unlock; | 860 | goto out_unlock; |
@@ -1321,7 +1324,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, | |||
1321 | 1324 | ||
1322 | if (req->buffer_handle != SVGA3D_INVALID_ID) { | 1325 | if (req->buffer_handle != SVGA3D_INVALID_ID) { |
1323 | ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, | 1326 | ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, |
1324 | &res->backup); | 1327 | &res->backup, |
1328 | &user_srf->backup_base); | ||
1325 | if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE < | 1329 | if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE < |
1326 | res->backup_size) { | 1330 | res->backup_size) { |
1327 | DRM_ERROR("Surface backup buffer is too small.\n"); | 1331 | DRM_ERROR("Surface backup buffer is too small.\n"); |
@@ -1335,7 +1339,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, | |||
1335 | req->drm_surface_flags & | 1339 | req->drm_surface_flags & |
1336 | drm_vmw_surface_flag_shareable, | 1340 | drm_vmw_surface_flag_shareable, |
1337 | &backup_handle, | 1341 | &backup_handle, |
1338 | &res->backup); | 1342 | &res->backup, |
1343 | &user_srf->backup_base); | ||
1339 | 1344 | ||
1340 | if (unlikely(ret != 0)) { | 1345 | if (unlikely(ret != 0)) { |
1341 | vmw_resource_unreference(&res); | 1346 | vmw_resource_unreference(&res); |