about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
diff options
context:
space:
mode:
authorLeo Liu <leo.liu@amd.com>2018-05-25 10:53:39 -0400
committerAlex Deucher <alexander.deucher@amd.com>2018-05-29 14:18:20 -0400
commit4c6530fd66399182d0332c5ed821ea473bdcd7c3 (patch)
tree3d1940a899c55f0de85665c6d64329b4f884bfa3 /drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
parent84b74608442d00fbdcd233e3230b3068b0ab9b18 (diff)
drm/amdgpu: remove unnecessary scheduler entity for VCN
It should be stateless, and there is no need for the scheduler to take care of it specially.

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c')
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 51
1 file changed, 10 insertions(+), 41 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 8851bcdfc260..6fd606f90cb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -49,8 +49,6 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
49 49
50int amdgpu_vcn_sw_init(struct amdgpu_device *adev) 50int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
51{ 51{
52 struct amdgpu_ring *ring;
53 struct drm_sched_rq *rq;
54 unsigned long bo_size; 52 unsigned long bo_size;
55 const char *fw_name; 53 const char *fw_name;
56 const struct common_firmware_header *hdr; 54 const struct common_firmware_header *hdr;
@@ -102,24 +100,6 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
102 return r; 100 return r;
103 } 101 }
104 102
105 ring = &adev->vcn.ring_dec;
106 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
107 r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
108 rq, NULL);
109 if (r != 0) {
110 DRM_ERROR("Failed setting up VCN dec run queue.\n");
111 return r;
112 }
113
114 ring = &adev->vcn.ring_enc[0];
115 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
116 r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
117 rq, NULL);
118 if (r != 0) {
119 DRM_ERROR("Failed setting up VCN enc run queue.\n");
120 return r;
121 }
122
123 return 0; 103 return 0;
124} 104}
125 105
@@ -129,10 +109,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
129 109
130 kfree(adev->vcn.saved_bo); 110 kfree(adev->vcn.saved_bo);
131 111
132 drm_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);
133
134 drm_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);
135
136 amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo, 112 amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
137 &adev->vcn.gpu_addr, 113 &adev->vcn.gpu_addr,
138 (void **)&adev->vcn.cpu_addr); 114 (void **)&adev->vcn.cpu_addr);
@@ -278,7 +254,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
278} 254}
279 255
280static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, 256static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
281 struct amdgpu_bo *bo, bool direct, 257 struct amdgpu_bo *bo,
282 struct dma_fence **fence) 258 struct dma_fence **fence)
283{ 259{
284 struct amdgpu_device *adev = ring->adev; 260 struct amdgpu_device *adev = ring->adev;
@@ -306,19 +282,12 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
306 } 282 }
307 ib->length_dw = 16; 283 ib->length_dw = 16;
308 284
309 if (direct) { 285 r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
310 r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); 286 job->fence = dma_fence_get(f);
311 job->fence = dma_fence_get(f); 287 if (r)
312 if (r) 288 goto err_free;
313 goto err_free;
314 289
315 amdgpu_job_free(job); 290 amdgpu_job_free(job);
316 } else {
317 r = amdgpu_job_submit(job, ring, &adev->vcn.entity_dec,
318 AMDGPU_FENCE_OWNER_UNDEFINED, &f);
319 if (r)
320 goto err_free;
321 }
322 291
323 amdgpu_bo_fence(bo, f, false); 292 amdgpu_bo_fence(bo, f, false);
324 amdgpu_bo_unreserve(bo); 293 amdgpu_bo_unreserve(bo);
@@ -370,11 +339,11 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
370 for (i = 14; i < 1024; ++i) 339 for (i = 14; i < 1024; ++i)
371 msg[i] = cpu_to_le32(0x0); 340 msg[i] = cpu_to_le32(0x0);
372 341
373 return amdgpu_vcn_dec_send_msg(ring, bo, true, fence); 342 return amdgpu_vcn_dec_send_msg(ring, bo, fence);
374} 343}
375 344
376static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, 345static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
377 bool direct, struct dma_fence **fence) 346 struct dma_fence **fence)
378{ 347{
379 struct amdgpu_device *adev = ring->adev; 348 struct amdgpu_device *adev = ring->adev;
380 struct amdgpu_bo *bo = NULL; 349 struct amdgpu_bo *bo = NULL;
@@ -396,7 +365,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
396 for (i = 6; i < 1024; ++i) 365 for (i = 6; i < 1024; ++i)
397 msg[i] = cpu_to_le32(0x0); 366 msg[i] = cpu_to_le32(0x0);
398 367
399 return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence); 368 return amdgpu_vcn_dec_send_msg(ring, bo, fence);
400} 369}
401 370
402int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) 371int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
@@ -410,7 +379,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
410 goto error; 379 goto error;
411 } 380 }
412 381
413 r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, true, &fence); 382 r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
414 if (r) { 383 if (r) {
415 DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); 384 DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
416 goto error; 385 goto error;