author     Chunming Zhou <david1.zhou@amd.com>         2015-07-03 02:08:18 -0400
committer  Alex Deucher <alexander.deucher@amd.com>    2015-08-17 16:50:45 -0400
commit     7b5ec431770ac581aa23d460f670cfb97c14280d (patch)
tree       ee525c374748fff638d9591090520dc1b2f1accd /drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
parent     0011fdaa4dab19bf545a28c0d4d164bba4745d29 (diff)
drm/amdgpu: use scheduler for UVD ib test
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c')
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c   61
1 file changed, 42 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index f64353579c1e..c1be7db36a69 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -809,6 +809,14 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 	return 0;
 }
 
+static int amdgpu_uvd_free_job(
+	struct amdgpu_cs_parser *sched_job)
+{
+	amdgpu_ib_free(sched_job->adev, sched_job->ibs);
+	kfree(sched_job->ibs);
+	return 0;
+}
+
 static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
 			       struct amdgpu_bo *bo,
 			       struct amdgpu_fence **fence)
@@ -816,7 +824,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
 	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
 	struct list_head head;
-	struct amdgpu_ib ib;
+	struct amdgpu_ib *ib = NULL;
+	struct amdgpu_device *adev = ring->adev;
 	uint64_t addr;
 	int i, r;
 
@@ -838,34 +847,48 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
 	if (r)
 		goto err;
-
-	r = amdgpu_ib_get(ring, NULL, 64, &ib);
-	if (r)
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib) {
+		r = -ENOMEM;
 		goto err;
+	}
+	r = amdgpu_ib_get(ring, NULL, 64, ib);
+	if (r)
+		goto err1;
 
 	addr = amdgpu_bo_gpu_offset(bo);
-	ib.ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
-	ib.ptr[1] = addr;
-	ib.ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
-	ib.ptr[3] = addr >> 32;
-	ib.ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
-	ib.ptr[5] = 0;
+	ib->ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
+	ib->ptr[1] = addr;
+	ib->ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
+	ib->ptr[3] = addr >> 32;
+	ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
+	ib->ptr[5] = 0;
 	for (i = 6; i < 16; ++i)
-		ib.ptr[i] = PACKET2(0);
-	ib.length_dw = 16;
+		ib->ptr[i] = PACKET2(0);
+	ib->length_dw = 16;
 
-	r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+						 &amdgpu_uvd_free_job,
+						 AMDGPU_FENCE_OWNER_UNDEFINED);
 	if (r)
-		goto err;
-	ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base);
+		goto err2;
 
-	if (fence)
-		*fence = amdgpu_fence_ref(ib.fence);
+	ttm_eu_fence_buffer_objects(&ticket, &head, &ib->fence->base);
 
-	amdgpu_ib_free(ring->adev, &ib);
+	if (fence)
+		*fence = amdgpu_fence_ref(ib->fence);
 	amdgpu_bo_unref(&bo);
-	return 0;
 
+	if (amdgpu_enable_scheduler)
+		return 0;
+
+	amdgpu_ib_free(ring->adev, ib);
+	kfree(ib);
+	return 0;
+err2:
+	amdgpu_ib_free(ring->adev, ib);
+err1:
+	kfree(ib);
 err:
 	ttm_eu_backoff_reservation(&ticket, &head);
 	return r;
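For readers skimming the change: the patch replaces the direct amdgpu_ib_schedule() call with amdgpu_sched_ib_submit_kernel_helper(), so the indirect buffer must now be heap-allocated and outlive amdgpu_uvd_send_msg(); with the scheduler enabled it is released later by the new amdgpu_uvd_free_job() callback, otherwise it is freed synchronously on the spot. The standalone sketch below only illustrates that ownership hand-off; every type and helper in it (fake_ib, fake_job, fake_submit) is a made-up stand-in for illustration, not the amdgpu API.

/*
 * Minimal, self-contained sketch of the ownership pattern the patch
 * introduces: allocate the IB on the heap, hand it to a submit helper
 * together with a free-job callback, and let the callback release it
 * when the job retires.  All names here are hypothetical stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_ib { unsigned int length_dw; };

struct fake_job {
	struct fake_ib *ibs;
	int (*free_job)(struct fake_job *job);
};

/* Plays the role of amdgpu_uvd_free_job(): free the IB once the job is done. */
static int fake_free_job(struct fake_job *job)
{
	free(job->ibs);
	job->ibs = NULL;
	return 0;
}

/* Plays the role of amdgpu_sched_ib_submit_kernel_helper(): take ownership of the IB. */
static int fake_submit(struct fake_job *job, struct fake_ib *ib,
		       int (*free_job)(struct fake_job *job))
{
	job->ibs = ib;
	job->free_job = free_job;
	printf("submitted IB with %u dwords\n", ib->length_dw);
	return 0;
}

int main(void)
{
	struct fake_job job = {0};
	struct fake_ib *ib = calloc(1, sizeof(*ib));
	int r;

	if (!ib)
		return 1;	/* mirrors the new -ENOMEM path */

	ib->length_dw = 16;
	r = fake_submit(&job, ib, fake_free_job);
	if (r) {
		free(ib);	/* mirrors the err2/err1 unwind in the patch */
		return r;
	}

	/* Stand-in for the scheduler retiring the job and running the callback. */
	job.free_job(&job);
	return 0;
}

The same ordering shows up in the patch's new error labels: err2 frees the IB contents, err1 then frees the containing allocation, and err backs off the reservation, so each label undoes exactly the steps completed before the failure.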