about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c  52
1 file changed, 11 insertions(+), 41 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 8851bcdfc260..127e87b470ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -49,8 +49,6 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
49 49
50int amdgpu_vcn_sw_init(struct amdgpu_device *adev) 50int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
51{ 51{
52 struct amdgpu_ring *ring;
53 struct drm_sched_rq *rq;
54 unsigned long bo_size; 52 unsigned long bo_size;
55 const char *fw_name; 53 const char *fw_name;
56 const struct common_firmware_header *hdr; 54 const struct common_firmware_header *hdr;
@@ -84,6 +82,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
84 } 82 }
85 83
86 hdr = (const struct common_firmware_header *)adev->vcn.fw->data; 84 hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
85 adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
87 family_id = le32_to_cpu(hdr->ucode_version) & 0xff; 86 family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
88 version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff; 87 version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
89 version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; 88 version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
@@ -102,24 +101,6 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
102 return r; 101 return r;
103 } 102 }
104 103
105 ring = &adev->vcn.ring_dec;
106 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
107 r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
108 rq, NULL);
109 if (r != 0) {
110 DRM_ERROR("Failed setting up VCN dec run queue.\n");
111 return r;
112 }
113
114 ring = &adev->vcn.ring_enc[0];
115 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
116 r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
117 rq, NULL);
118 if (r != 0) {
119 DRM_ERROR("Failed setting up VCN enc run queue.\n");
120 return r;
121 }
122
123 return 0; 104 return 0;
124} 105}
125 106
@@ -129,10 +110,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
129 110
130 kfree(adev->vcn.saved_bo); 111 kfree(adev->vcn.saved_bo);
131 112
132 drm_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);
133
134 drm_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);
135
136 amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo, 113 amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
137 &adev->vcn.gpu_addr, 114 &adev->vcn.gpu_addr,
138 (void **)&adev->vcn.cpu_addr); 115 (void **)&adev->vcn.cpu_addr);
@@ -278,7 +255,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
278} 255}
279 256
280static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, 257static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
281 struct amdgpu_bo *bo, bool direct, 258 struct amdgpu_bo *bo,
282 struct dma_fence **fence) 259 struct dma_fence **fence)
283{ 260{
284 struct amdgpu_device *adev = ring->adev; 261 struct amdgpu_device *adev = ring->adev;
@@ -306,19 +283,12 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
306 } 283 }
307 ib->length_dw = 16; 284 ib->length_dw = 16;
308 285
309 if (direct) { 286 r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
310 r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); 287 job->fence = dma_fence_get(f);
311 job->fence = dma_fence_get(f); 288 if (r)
312 if (r) 289 goto err_free;
313 goto err_free;
314 290
315 amdgpu_job_free(job); 291 amdgpu_job_free(job);
316 } else {
317 r = amdgpu_job_submit(job, ring, &adev->vcn.entity_dec,
318 AMDGPU_FENCE_OWNER_UNDEFINED, &f);
319 if (r)
320 goto err_free;
321 }
322 292
323 amdgpu_bo_fence(bo, f, false); 293 amdgpu_bo_fence(bo, f, false);
324 amdgpu_bo_unreserve(bo); 294 amdgpu_bo_unreserve(bo);
@@ -370,11 +340,11 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
370 for (i = 14; i < 1024; ++i) 340 for (i = 14; i < 1024; ++i)
371 msg[i] = cpu_to_le32(0x0); 341 msg[i] = cpu_to_le32(0x0);
372 342
373 return amdgpu_vcn_dec_send_msg(ring, bo, true, fence); 343 return amdgpu_vcn_dec_send_msg(ring, bo, fence);
374} 344}
375 345
376static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, 346static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
377 bool direct, struct dma_fence **fence) 347 struct dma_fence **fence)
378{ 348{
379 struct amdgpu_device *adev = ring->adev; 349 struct amdgpu_device *adev = ring->adev;
380 struct amdgpu_bo *bo = NULL; 350 struct amdgpu_bo *bo = NULL;
@@ -396,7 +366,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
396 for (i = 6; i < 1024; ++i) 366 for (i = 6; i < 1024; ++i)
397 msg[i] = cpu_to_le32(0x0); 367 msg[i] = cpu_to_le32(0x0);
398 368
399 return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence); 369 return amdgpu_vcn_dec_send_msg(ring, bo, fence);
400} 370}
401 371
402int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) 372int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
@@ -410,7 +380,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
410 goto error; 380 goto error;
411 } 381 }
412 382
413 r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, true, &fence); 383 r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
414 if (r) { 384 if (r) {
415 DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); 385 DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
416 goto error; 386 goto error;