-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c	140
1 file changed, 70 insertions(+), 70 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 9bb59cc33ace..5decef78cce7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -196,6 +196,42 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
 	return 0;
 }
 
+static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
+{
+	struct amdgpu_device *adev =
+		container_of(work, struct amdgpu_device, vcn.idle_work.work);
+	unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
+
+	if (fences == 0) {
+		if (adev->pm.dpm_enabled) {
+			amdgpu_dpm_enable_uvd(adev, false);
+		} else {
+			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+		}
+	} else {
+		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
+	}
+}
+
+void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+	if (set_clocks) {
+		if (adev->pm.dpm_enabled) {
+			amdgpu_dpm_enable_uvd(adev, true);
+		} else {
+			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
+		}
+	}
+}
+
+void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
+{
+	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
+}
+
 static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 				   bool direct, struct dma_fence **fence)
 {
@@ -365,42 +401,6 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 	return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);
 }
 
-static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
-{
-	struct amdgpu_device *adev =
-		container_of(work, struct amdgpu_device, vcn.idle_work.work);
-	unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
-
-	if (fences == 0) {
-		if (adev->pm.dpm_enabled) {
-			amdgpu_dpm_enable_uvd(adev, false);
-		} else {
-			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
-		}
-	} else {
-		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
-	}
-}
-
-void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
-
-	if (set_clocks) {
-		if (adev->pm.dpm_enabled) {
-			amdgpu_dpm_enable_uvd(adev, true);
-		} else {
-			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
-		}
-	}
-}
-
-void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
-{
-	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
-}
-
 int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct dma_fence *fence;
@@ -435,6 +435,40 @@ error:
 	return r;
 }
 
+int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	uint32_t rptr = amdgpu_ring_get_rptr(ring);
+	unsigned i;
+	int r;
+
+	r = amdgpu_ring_alloc(ring, 16);
+	if (r) {
+		DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
+			  ring->idx, r);
+		return r;
+	}
+	amdgpu_ring_write(ring, VCE_CMD_END);
+	amdgpu_ring_commit(ring);
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (amdgpu_ring_get_rptr(ring) != rptr)
+			break;
+		DRM_UDELAY(1);
+	}
+
+	if (i < adev->usec_timeout) {
+		DRM_INFO("ring test on %d succeeded in %d usecs\n",
+			 ring->idx, i);
+	} else {
+		DRM_ERROR("amdgpu: ring %d test failed\n",
+			  ring->idx);
+		r = -ETIMEDOUT;
+	}
+
+	return r;
+}
+
 static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 					 struct dma_fence **fence)
 {
@@ -561,40 +595,6 @@ err:
 	return r;
 }
 
-int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-	uint32_t rptr = amdgpu_ring_get_rptr(ring);
-	unsigned i;
-	int r;
-
-	r = amdgpu_ring_alloc(ring, 16);
-	if (r) {
-		DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
-			  ring->idx, r);
-		return r;
-	}
-	amdgpu_ring_write(ring, VCE_CMD_END);
-	amdgpu_ring_commit(ring);
-
-	for (i = 0; i < adev->usec_timeout; i++) {
-		if (amdgpu_ring_get_rptr(ring) != rptr)
-			break;
-		DRM_UDELAY(1);
-	}
-
-	if (i < adev->usec_timeout) {
-		DRM_INFO("ring test on %d succeeded in %d usecs\n",
-			 ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed\n",
-			  ring->idx);
-		r = -ETIMEDOUT;
-	}
-
-	return r;
-}
-
 int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct dma_fence *fence = NULL;
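
The begin_use/end_use pair being moved implements a common power-gating idiom: end_use arms a delayed idle work item, the handler powers the block down once no fences remain outstanding, and begin_use cancels the pending work, raising the clocks only when the cancel found nothing queued (meaning the handler may already have run). Below is a minimal userspace sketch of that idiom, assuming illustrative stand-in names throughout; the delayed work becomes an "armed" flag and the DPM/clock calls become a "block_powered" flag. This is a model of the pattern, not the kernel or amdgpu API.

/*
 * Userspace model of the VCN idle-work idiom.  All names are
 * hypothetical stand-ins, not kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

static bool idle_timer_armed;		/* models the vcn.idle_work item */
static bool block_powered = true;	/* models the UVD/VCN clock state */
static unsigned pending_fences;		/* models amdgpu_fence_count_emitted() */

/* Models cancel_delayed_work_sync(): true if work was still pending. */
static bool cancel_idle_timer(void)
{
	bool was_armed = idle_timer_armed;

	idle_timer_armed = false;
	return was_armed;
}

static void ring_begin_use(void)
{
	/*
	 * Nothing pending means the idle handler may already have run
	 * and powered the block down, so bring the clocks back up.
	 */
	if (!cancel_idle_timer()) {
		block_powered = true;
		printf("clocks up\n");
	}
}

static void ring_end_use(void)
{
	idle_timer_armed = true;	/* models schedule_delayed_work() */
}

/* Models the idle work handler firing after VCN_IDLE_TIMEOUT. */
static void idle_timer_fires(void)
{
	idle_timer_armed = false;	/* the queued work has now run */
	if (pending_fences == 0) {
		block_powered = false;
		printf("clocks down\n");
	} else {
		idle_timer_armed = true;	/* still busy: re-arm */
	}
}

int main(void)
{
	ring_end_use();		/* job finished: arm the idle timer */
	idle_timer_fires();	/* timeout, no fences left: clocks down */
	ring_begin_use();	/* new job, nothing pending: clocks up */
	return 0;
}

The subtlety the sketch preserves is that a successful cancel in begin_use proves the handler never ran, so the clocks are known to still be up and no re-enable is needed; only a failed cancel forces the power-up path.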