diff options
author | Andrey Grodzovsky <andrey.grodzovsky@amd.com> | 2018-10-19 16:22:48 -0400 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2018-11-05 14:21:23 -0500 |
commit | c66ed765a0a97b8900f37d4a71f1d75f52f56eeb (patch) | |
tree | cfed66df4fc6088c761e1d5cce16a300af57c0df /drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |
parent | faf6e1a87e07423a729e04fb2e8188742e89ea4c (diff) |
drm/amdgpu: Retire amdgpu_ring.ready flag v4
Start using drm_gpu_scheduler.ready instead.
v3:
Add helper function to run ring test and set
sched.ready flag status accordingly, clean explicit
sched.ready sets from the IP specific files.
v4: Add kerneldoc and rebase.
Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 29 |
1 file changed, 11 insertions, 18 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index e0fe0c6115a8..02f8ca56386f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
@@ -1629,7 +1629,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) | |||
1629 | return 0; | 1629 | return 0; |
1630 | 1630 | ||
1631 | /* bail if the compute ring is not ready */ | 1631 | /* bail if the compute ring is not ready */ |
1632 | if (!ring->ready) | 1632 | if (!ring->sched.ready) |
1633 | return 0; | 1633 | return 0; |
1634 | 1634 | ||
1635 | tmp = RREG32(mmGB_EDC_MODE); | 1635 | tmp = RREG32(mmGB_EDC_MODE); |
@@ -4197,7 +4197,7 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) | |||
4197 | tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1); | 4197 | tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1); |
4198 | tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1); | 4198 | tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1); |
4199 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) | 4199 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) |
4200 | adev->gfx.gfx_ring[i].ready = false; | 4200 | adev->gfx.gfx_ring[i].sched.ready = false; |
4201 | } | 4201 | } |
4202 | WREG32(mmCP_ME_CNTL, tmp); | 4202 | WREG32(mmCP_ME_CNTL, tmp); |
4203 | udelay(50); | 4203 | udelay(50); |
@@ -4379,10 +4379,8 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev) | |||
4379 | /* start the ring */ | 4379 | /* start the ring */ |
4380 | amdgpu_ring_clear_ring(ring); | 4380 | amdgpu_ring_clear_ring(ring); |
4381 | gfx_v8_0_cp_gfx_start(adev); | 4381 | gfx_v8_0_cp_gfx_start(adev); |
4382 | ring->ready = true; | 4382 | ring->sched.ready = true; |
4383 | r = amdgpu_ring_test_ring(ring); | 4383 | r = amdgpu_ring_test_helper(ring); |
4384 | if (r) | ||
4385 | ring->ready = false; | ||
4386 | 4384 | ||
4387 | return r; | 4385 | return r; |
4388 | } | 4386 | } |
@@ -4396,8 +4394,8 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) | |||
4396 | } else { | 4394 | } else { |
4397 | WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); | 4395 | WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); |
4398 | for (i = 0; i < adev->gfx.num_compute_rings; i++) | 4396 | for (i = 0; i < adev->gfx.num_compute_rings; i++) |
4399 | adev->gfx.compute_ring[i].ready = false; | 4397 | adev->gfx.compute_ring[i].sched.ready = false; |
4400 | adev->gfx.kiq.ring.ready = false; | 4398 | adev->gfx.kiq.ring.sched.ready = false; |
4401 | } | 4399 | } |
4402 | udelay(50); | 4400 | udelay(50); |
4403 | } | 4401 | } |
@@ -4473,11 +4471,9 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev) | |||
4473 | amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); | 4471 | amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); |
4474 | } | 4472 | } |
4475 | 4473 | ||
4476 | r = amdgpu_ring_test_ring(kiq_ring); | 4474 | r = amdgpu_ring_test_helper(kiq_ring); |
4477 | if (r) { | 4475 | if (r) |
4478 | DRM_ERROR("KCQ enable failed\n"); | 4476 | DRM_ERROR("KCQ enable failed\n"); |
4479 | kiq_ring->ready = false; | ||
4480 | } | ||
4481 | return r; | 4477 | return r; |
4482 | } | 4478 | } |
4483 | 4479 | ||
@@ -4781,7 +4777,7 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev) | |||
4781 | amdgpu_bo_kunmap(ring->mqd_obj); | 4777 | amdgpu_bo_kunmap(ring->mqd_obj); |
4782 | ring->mqd_ptr = NULL; | 4778 | ring->mqd_ptr = NULL; |
4783 | amdgpu_bo_unreserve(ring->mqd_obj); | 4779 | amdgpu_bo_unreserve(ring->mqd_obj); |
4784 | ring->ready = true; | 4780 | ring->sched.ready = true; |
4785 | return 0; | 4781 | return 0; |
4786 | } | 4782 | } |
4787 | 4783 | ||
@@ -4820,10 +4816,7 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev) | |||
4820 | */ | 4816 | */ |
4821 | for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) { | 4817 | for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) { |
4822 | ring = &adev->gfx.compute_ring[i]; | 4818 | ring = &adev->gfx.compute_ring[i]; |
4823 | ring->ready = true; | 4819 | r = amdgpu_ring_test_helper(ring); |
4824 | r = amdgpu_ring_test_ring(ring); | ||
4825 | if (r) | ||
4826 | ring->ready = false; | ||
4827 | } | 4820 | } |
4828 | 4821 | ||
4829 | done: | 4822 | done: |
@@ -4899,7 +4892,7 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev) | |||
4899 | amdgpu_ring_write(kiq_ring, 0); | 4892 | amdgpu_ring_write(kiq_ring, 0); |
4900 | amdgpu_ring_write(kiq_ring, 0); | 4893 | amdgpu_ring_write(kiq_ring, 0); |
4901 | } | 4894 | } |
4902 | r = amdgpu_ring_test_ring(kiq_ring); | 4895 | r = amdgpu_ring_test_helper(kiq_ring); |
4903 | if (r) | 4896 | if (r) |
4904 | DRM_ERROR("KCQ disable failed\n"); | 4897 | DRM_ERROR("KCQ disable failed\n"); |
4905 | 4898 | ||