-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c     |  6
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c   | 29
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c     |  2
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_audio.c     |  7
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h  |  3
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c             |  6
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c             |  3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c          | 14
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c     | 79
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c     |  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c  |  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c   | 31
-rw-r--r--  include/drm/ttm/ttm_object.h              |  5
13 files changed, 112 insertions, 81 deletions
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 4414cf73735d..36602ac7e248 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -534,7 +534,7 @@ static void a5xx_destroy(struct msm_gpu *gpu)
 	}
 
 	if (a5xx_gpu->gpmu_bo) {
-		if (a5xx_gpu->gpmu_bo)
+		if (a5xx_gpu->gpmu_iova)
 			msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
 		drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
 	}
@@ -860,7 +860,9 @@ static const struct adreno_gpu_funcs funcs = {
 		.idle = a5xx_idle,
 		.irq = a5xx_irq,
 		.destroy = a5xx_destroy,
+#ifdef CONFIG_DEBUG_FS
 		.show = a5xx_show,
+#endif
 	},
 	.get_timestamp = a5xx_get_timestamp,
 };
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index c9bd1e6225f4..5ae65426b4e5 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -418,18 +418,27 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	return 0;
 }
 
-void adreno_gpu_cleanup(struct adreno_gpu *gpu)
+void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
 {
-	if (gpu->memptrs_bo) {
-		if (gpu->memptrs)
-			msm_gem_put_vaddr(gpu->memptrs_bo);
+	struct msm_gpu *gpu = &adreno_gpu->base;
+
+	if (adreno_gpu->memptrs_bo) {
+		if (adreno_gpu->memptrs)
+			msm_gem_put_vaddr(adreno_gpu->memptrs_bo);
+
+		if (adreno_gpu->memptrs_iova)
+			msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->id);
+
+		drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
+	}
+	release_firmware(adreno_gpu->pm4);
+	release_firmware(adreno_gpu->pfp);
 
-		if (gpu->memptrs_iova)
-			msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
+	msm_gpu_cleanup(gpu);
 
-		drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+	if (gpu->aspace) {
+		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
+				iommu_ports, ARRAY_SIZE(iommu_ports));
+		msm_gem_address_space_destroy(gpu->aspace);
 	}
-	release_firmware(gpu->pm4);
-	release_firmware(gpu->pfp);
-	msm_gpu_cleanup(&gpu->base);
 }
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 921270ea6059..a879ffa534b4 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -171,7 +171,7 @@ dsi_mgr_phy_enable(int id,
 			}
 		}
 	} else {
-		msm_dsi_host_reset_phy(mdsi->host);
+		msm_dsi_host_reset_phy(msm_dsi->host);
 		ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]);
 		if (ret)
 			return ret;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index a54d3bb5baad..8177e8511afd 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -18,13 +18,6 @@
 #include <linux/hdmi.h>
 #include "hdmi.h"
 
-
-/* Supported HDMI Audio channels */
-#define MSM_HDMI_AUDIO_CHANNEL_2	0
-#define MSM_HDMI_AUDIO_CHANNEL_4	1
-#define MSM_HDMI_AUDIO_CHANNEL_6	2
-#define MSM_HDMI_AUDIO_CHANNEL_8	3
-
 /* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */
 static int nchannels[] = { 2, 4, 6, 8 };
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
index 611da7a660c9..238901987e00 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
@@ -18,7 +18,8 @@
 #ifndef __MDP5_PIPE_H__
 #define __MDP5_PIPE_H__
 
-#define SSPP_MAX	(SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
+/* TODO: Add SSPP_MAX in mdp5.xml.h */
+#define SSPP_MAX	(SSPP_CURSOR1 + 1)
 
 /* represents a hw pipe, which is dynamically assigned to a plane */
 struct mdp5_hw_pipe {
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 59811f29607d..68e509b3b9e4 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -812,6 +812,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 
 	size = PAGE_ALIGN(size);
 
+	/* Disallow zero sized objects as they make the underlying
+	 * infrastructure grumpy
+	 */
+	if (size == 0)
+		return ERR_PTR(-EINVAL);
+
 	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
 	if (ret)
 		goto fail;
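With the check above, msm_gem_new() reports a zero-sized request as an error pointer instead of handing a zero-length object to the lower layers. A minimal, hypothetical caller-side sketch (the variable names and the MSM_BO_WC flag choice are illustrative, not taken from this series):

	struct drm_gem_object *obj;

	obj = msm_gem_new(dev, size, MSM_BO_WC);
	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* size == 0 now yields -EINVAL */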
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 99e05aacbee1..af5b6ba4095b 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -706,9 +706,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
 		msm_ringbuffer_destroy(gpu->rb);
 	}
 
-	if (gpu->aspace)
-		msm_gem_address_space_destroy(gpu->aspace);
-
 	if (gpu->fctx)
 		msm_fence_context_free(gpu->fctx);
 }
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index fdb451e3ec01..26a7ad0f4789 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -179,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
 	if (unlikely(ret != 0))
 		goto out_err0;
 
-	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
 	if (unlikely(ret != 0))
 		goto out_err1;
 
@@ -318,7 +318,8 @@ EXPORT_SYMBOL(ttm_ref_object_exists);
 
 int ttm_ref_object_add(struct ttm_object_file *tfile,
 		       struct ttm_base_object *base,
-		       enum ttm_ref_type ref_type, bool *existed)
+		       enum ttm_ref_type ref_type, bool *existed,
+		       bool require_existed)
 {
 	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
 	struct ttm_ref_object *ref;
@@ -345,6 +346,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
 		}
 
 		rcu_read_unlock();
+		if (require_existed)
+			return -EPERM;
+
 		ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
 					   false, false);
 		if (unlikely(ret != 0))
@@ -449,10 +453,10 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
 		ttm_ref_object_release(&ref->kref);
 	}
 
+	spin_unlock(&tfile->lock);
 	for (i = 0; i < TTM_REF_NUM; ++i)
 		drm_ht_remove(&tfile->ref_hash[i]);
 
-	spin_unlock(&tfile->lock);
 	ttm_object_file_unref(&tfile);
 }
 EXPORT_SYMBOL(ttm_object_file_release);
@@ -529,9 +533,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
 
 	*p_tdev = NULL;
 
-	spin_lock(&tdev->object_lock);
 	drm_ht_remove(&tdev->object_hash);
-	spin_unlock(&tdev->object_lock);
 
 	kfree(tdev);
 }
@@ -635,7 +637,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
 	prime = (struct ttm_prime_object *) dma_buf->priv;
 	base = &prime->base;
 	*handle = base->hash.key;
-	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
 
 	dma_buf_put(dma_buf);
 
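For readers following the API change: ttm_ref_object_add() gains a require_existed argument. Passing false keeps the old behaviour (a missing ref object is allocated and added), while passing true turns a missing ref object into a hard failure. A hedged sketch, with illustrative variable names only:

	int ret;

	/* Old behaviour: create the ref object if none exists yet. */
	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);

	/* New option: the caller must already hold a reference of this
	 * type, otherwise -EPERM is returned and nothing is added.
	 */
	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, true);
	if (ret == -EPERM)
		return ret;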
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 6541dd8b82dc..6b2708b4eafe 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -538,7 +538,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
 		      struct vmw_fence_obj **p_fence)
 {
 	struct vmw_fence_obj *fence;
- 	int ret;
+	int ret;
 
 	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
 	if (unlikely(fence == NULL))
@@ -701,6 +701,41 @@ void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
 }
 
 
+/**
+ * vmw_fence_obj_lookup - Look up a user-space fence object
+ *
+ * @tfile: A struct ttm_object_file identifying the caller.
+ * @handle: A handle identifying the fence object.
+ * @return: A struct vmw_user_fence base ttm object on success or
+ * an error pointer on failure.
+ *
+ * The fence object is looked up and type-checked. The caller needs
+ * to have opened the fence object first, but since that happens on
+ * creation and fence objects aren't shareable, that's not an
+ * issue currently.
+ */
+static struct ttm_base_object *
+vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
+{
+	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
+
+	if (!base) {
+		pr_err("Invalid fence object handle 0x%08lx.\n",
+		       (unsigned long)handle);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (base->refcount_release != vmw_user_fence_base_release) {
+		pr_err("Invalid fence object handle 0x%08lx.\n",
+		       (unsigned long)handle);
+		ttm_base_object_unref(&base);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return base;
+}
+
+
 int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv)
 {
@@ -726,13 +761,9 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
 		arg->kernel_cookie = jiffies + wait_timeout;
 	}
 
-	base = ttm_base_object_lookup(tfile, arg->handle);
-	if (unlikely(base == NULL)) {
-		printk(KERN_ERR "Wait invalid fence object handle "
-		       "0x%08lx.\n",
-		       (unsigned long)arg->handle);
-		return -EINVAL;
-	}
+	base = vmw_fence_obj_lookup(tfile, arg->handle);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
 
 	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
 
@@ -771,13 +802,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	struct vmw_private *dev_priv = vmw_priv(dev);
 
-	base = ttm_base_object_lookup(tfile, arg->handle);
-	if (unlikely(base == NULL)) {
-		printk(KERN_ERR "Fence signaled invalid fence object handle "
-		       "0x%08lx.\n",
-		       (unsigned long)arg->handle);
-		return -EINVAL;
-	}
+	base = vmw_fence_obj_lookup(tfile, arg->handle);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
 
 	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
 	fman = fman_from_fence(fence);
@@ -1024,6 +1051,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 		(struct drm_vmw_fence_event_arg *) data;
 	struct vmw_fence_obj *fence = NULL;
 	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+	struct ttm_object_file *tfile = vmw_fp->tfile;
 	struct drm_vmw_fence_rep __user *user_fence_rep =
 		(struct drm_vmw_fence_rep __user *)(unsigned long)
 		arg->fence_rep;
@@ -1037,24 +1065,18 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 	 */
 	if (arg->handle) {
 		struct ttm_base_object *base =
-			ttm_base_object_lookup_for_ref(dev_priv->tdev,
-						       arg->handle);
-
-		if (unlikely(base == NULL)) {
-			DRM_ERROR("Fence event invalid fence object handle "
-				  "0x%08lx.\n",
-				  (unsigned long)arg->handle);
-			return -EINVAL;
-		}
+			vmw_fence_obj_lookup(tfile, arg->handle);
+
+		if (IS_ERR(base))
+			return PTR_ERR(base);
+
 		fence = &(container_of(base, struct vmw_user_fence,
 				       base)->fence);
 		(void) vmw_fence_obj_reference(fence);
 
 		if (user_fence_rep != NULL) {
-			bool existed;
-
 			ret = ttm_ref_object_add(vmw_fp->tfile, base,
-						 TTM_REF_USAGE, &existed);
+						 TTM_REF_USAGE, NULL, false);
 			if (unlikely(ret != 0)) {
 				DRM_ERROR("Failed to reference a fence "
 					  "object.\n");
@@ -1097,8 +1119,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 	return 0;
 out_no_create:
 	if (user_fence_rep != NULL)
-		ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
-					  handle, TTM_REF_USAGE);
+		ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
 out_no_ref_obj:
 	vmw_fence_obj_unreference(&fence);
 	return ret;
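The wait, signaled and event ioctls above all switch to the same type-checked lookup pattern. As a hedged sketch of that pattern (local names are illustrative; error handling beyond the lookup is elided):

	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;

	base = vmw_fence_obj_lookup(tfile, handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	/* ... operate on the fence ... */
	ttm_base_object_unref(&base);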
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index b8c6a03c8c54..5ec24fd801cd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -114,8 +114,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		param->value = dev_priv->has_dx;
 		break;
 	default:
-		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
-			  param->param);
 		return -EINVAL;
 	}
 
@@ -186,7 +184,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 	bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
 	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 
-	if (unlikely(arg->pad64 != 0)) {
+	if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
 		DRM_ERROR("Illegal GET_3D_CAP argument.\n");
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 65b3f0369636..bf23153d4f55 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -589,7 +589,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
 		return ret;
 
 	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
-				 TTM_REF_SYNCCPU_WRITE, &existed);
+				 TTM_REF_SYNCCPU_WRITE, &existed, false);
 	if (ret != 0 || existed)
 		ttm_bo_synccpu_write_release(&user_bo->dma.base);
 
@@ -773,7 +773,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
 
 	*handle = user_bo->prime.base.hash.key;
 	return ttm_ref_object_add(tfile, &user_bo->prime.base,
-				  TTM_REF_USAGE, NULL);
+				  TTM_REF_USAGE, NULL, false);
 }
 
 /*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index b445ce9b9757..05fa092c942b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -713,11 +713,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 		128;
 
 	num_sizes = 0;
-	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+		if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
+			return -EINVAL;
 		num_sizes += req->mip_levels[i];
+	}
 
-	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
-	    DRM_VMW_MAX_MIP_LEVELS)
+	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
+	    num_sizes == 0)
 		return -EINVAL;
 
 	size = vmw_user_surface_size + 128 +
@@ -891,17 +894,16 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
 	uint32_t handle;
 	struct ttm_base_object *base;
 	int ret;
+	bool require_exist = false;
 
 	if (handle_type == DRM_VMW_HANDLE_PRIME) {
 		ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
 		if (unlikely(ret != 0))
 			return ret;
 	} else {
-		if (unlikely(drm_is_render_client(file_priv))) {
-			DRM_ERROR("Render client refused legacy "
-				  "surface reference.\n");
-			return -EACCES;
-		}
+		if (unlikely(drm_is_render_client(file_priv)))
+			require_exist = true;
+
 		if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
 			DRM_ERROR("Locked master refused legacy "
 				  "surface reference.\n");
@@ -929,17 +931,14 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
 
 		/*
 		 * Make sure the surface creator has the same
-		 * authenticating master.
+		 * authenticating master, or is already registered with us.
 		 */
 		if (drm_is_primary_client(file_priv) &&
-		    user_srf->master != file_priv->master) {
-			DRM_ERROR("Trying to reference surface outside of"
-				  " master domain.\n");
-			ret = -EACCES;
-			goto out_bad_resource;
-		}
+		    user_srf->master != file_priv->master)
+			require_exist = true;
 
-		ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+		ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
+					 require_exist);
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Could not add a reference to a surface.\n");
 			goto out_bad_resource;
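The per-face bound in the first hunk matters because num_sizes accumulates user-controlled 32-bit mip level counts: such a sum can wrap around and then satisfy a check on the total alone. A hypothetical illustration of that wraparound (values chosen for the example, not taken from the patch):

	u32 sum = 0;
	u32 mip_levels[2] = { 0x80000000, 0x80000001 };
	int i;

	for (i = 0; i < 2; i++)
		sum += mip_levels[i];	/* wraps around to 1 */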
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
index ed953f98f0e1..1487011fe057 100644
--- a/include/drm/ttm/ttm_object.h
+++ b/include/drm/ttm/ttm_object.h
@@ -229,6 +229,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
  * @ref_type: The type of reference.
  * @existed: Upon completion, indicates that an identical reference object
  * already existed, and the refcount was upped on that object instead.
+ * @require_existed: Fail with -EPERM if an identical ref object didn't
+ * already exist.
  *
  * Checks that the base object is shareable and adds a ref object to it.
  *
@@ -243,7 +245,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
  */
 extern int ttm_ref_object_add(struct ttm_object_file *tfile,
 			      struct ttm_base_object *base,
-			      enum ttm_ref_type ref_type, bool *existed);
+			      enum ttm_ref_type ref_type, bool *existed,
+			      bool require_existed);
 
 extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
 				  struct ttm_base_object *base);
