author     Rex Zhu <Rex.Zhu@amd.com>                  2018-08-22 05:58:31 -0400
committer  Alex Deucher <alexander.deucher@amd.com>   2018-08-27 16:11:56 -0400
commit     36859cd5354b9cb418c28930936a8a6fce18a1d7 (patch)
tree       a765e7d0bd739139b5790a15ac57c9c3c12dbbff /drivers/gpu/drm/amd/amdgpu
parent     ffabea84c55b1c6446b2245b87cdf6827b22e366 (diff)
drm/amdgpu: Change kiq initialize/reset sequence on gfx8
1. Initialize the KIQ before initializing the gfx ring.
2. Mark the KIQ ring ready immediately after the KIQ initializes successfully.
3. Split gfx_v8_0_kiq_resume into two functions:
   gfx_v8_0_kiq_resume handles KIQ initialization;
   gfx_v8_0_kcq_resume handles KCQ initialization.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
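For orientation, here is a condensed sketch of the resume ordering in gfx_v8_0_cp_resume() as it reads after this patch. The early part of the function is omitted and the error handling is unchanged; see the third hunk below for the exact code. The comment reflects that the compute queues are mapped through the KIQ ring in gfx_v8_0_kiq_kcq_enable(), as the first hunk suggests.

static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
{
	int r;

	/* ... early part of the function omitted ... */

	/* Bring up the KIQ first: the compute queues (KCQs) are later
	 * mapped through the KIQ ring in gfx_v8_0_kiq_kcq_enable(),
	 * so the KIQ must be ready before the other rings resume.
	 */
	r = gfx_v8_0_kiq_resume(adev);
	if (r)
		return r;

	r = gfx_v8_0_cp_gfx_resume(adev);
	if (r)
		return r;

	r = gfx_v8_0_kcq_resume(adev);
	if (r)
		return r;

	gfx_v8_0_enable_gui_idle_interrupt(adev, true);

	return 0;
}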
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 49 ++++++++++++++++++++++++++++++-------------------
 1 file changed, 30 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index d5470d449f6d..3882689b2d8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4622,7 +4622,6 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
 		queue_mask |= (1ull << i);
 	}
 
-	kiq_ring->ready = true;
 	r = amdgpu_ring_alloc(kiq_ring, (8 * adev->gfx.num_compute_rings) + 8);
 	if (r) {
 		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
@@ -4949,26 +4948,33 @@ static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev)
 
 static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *ring = NULL;
-	int r = 0, i;
-
-	gfx_v8_0_cp_compute_enable(adev, true);
+	struct amdgpu_ring *ring;
+	int r;
 
 	ring = &adev->gfx.kiq.ring;
 
 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
 	if (unlikely(r != 0))
-		goto done;
+		return r;
 
 	r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
-	if (!r) {
-		r = gfx_v8_0_kiq_init_queue(ring);
-		amdgpu_bo_kunmap(ring->mqd_obj);
-		ring->mqd_ptr = NULL;
-	}
+	if (unlikely(r != 0))
+		return r;
+
+	gfx_v8_0_kiq_init_queue(ring);
+	amdgpu_bo_kunmap(ring->mqd_obj);
+	ring->mqd_ptr = NULL;
 	amdgpu_bo_unreserve(ring->mqd_obj);
-	if (r)
-		goto done;
+	ring->ready = true;
+	return 0;
+}
+
+static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring = NULL;
+	int r = 0, i;
+
+	gfx_v8_0_cp_compute_enable(adev, true);
 
 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
 		ring = &adev->gfx.compute_ring[i];
@@ -5024,14 +5030,17 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
 			return r;
 	}
 
-	r = gfx_v8_0_cp_gfx_resume(adev);
+	r = gfx_v8_0_kiq_resume(adev);
 	if (r)
 		return r;
 
-	r = gfx_v8_0_kiq_resume(adev);
+	r = gfx_v8_0_cp_gfx_resume(adev);
 	if (r)
 		return r;
 
+	r = gfx_v8_0_kcq_resume(adev);
+	if (r)
+		return r;
 	gfx_v8_0_enable_gui_idle_interrupt(adev, true);
 
 	return 0;
@@ -5334,10 +5343,6 @@ static int gfx_v8_0_post_soft_reset(void *handle)
 	srbm_soft_reset = adev->gfx.srbm_soft_reset;
 
 	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
-	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
-		gfx_v8_0_cp_gfx_resume(adev);
-
-	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
 	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
 	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
 	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
@@ -5353,7 +5358,13 @@ static int gfx_v8_0_post_soft_reset(void *handle)
 			mutex_unlock(&adev->srbm_mutex);
 		}
 		gfx_v8_0_kiq_resume(adev);
+		gfx_v8_0_kcq_resume(adev);
 	}
+
+	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
+		gfx_v8_0_cp_gfx_resume(adev);
+
 	gfx_v8_0_rlc_start(adev);
 
 	return 0;
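For quick reference, a condensed paraphrase of the tail of gfx_v8_0_post_soft_reset() after the last two hunks. The condition names below are placeholders standing in for the REG_GET_FIELD checks shown above, not real identifiers, and the per-ring MQD restore is elided.

	if (compute_engines_were_reset) {	/* placeholder for the CP/CPF/CPC/CPG checks */
		/* ... per-ring compute MQD restore under srbm_mutex ... */
		gfx_v8_0_kiq_resume(adev);
		gfx_v8_0_kcq_resume(adev);
	}

	/* The gfx ring is now resumed after the KIQ/KCQs rather than before. */
	if (gfx_engine_was_reset)		/* placeholder for the CP/GFX checks */
		gfx_v8_0_cp_gfx_resume(adev);

	gfx_v8_0_rlc_start(adev);

	return 0;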