author    | Andres Rodriguez <andresx7@gmail.com>    | 2017-04-12 17:19:54 -0400
committer | Alex Deucher <alexander.deucher@amd.com> | 2017-05-31 16:48:46 -0400
commit    | 34130fb1493c91d50b04daaeb25e82eecc4483c6 (patch)
tree      | 4e812b3923272a80312ab061c4eab446048abde2 /drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
parent    | 268cb4c7dff0aac96b1c5d596b321aa197d31360 (diff)
drm/amdgpu: refactor MQD/HQD initialization v3
The MQD programming sequence currently exists in 3 different places.
Refactor it to absorb all the duplicates.
The success path remains mostly identical except for a slightly
different order in the non-kiq case. This shouldn't matter if the HQD
is disabled.
The error handling paths have been updated to deal with the new code
structure.
v2: the non-kiq path for gfxv8 was dropped in the rebase
v3: split MEC_HPD_SIZE rename, dropped doorbell changes
Reviewed-by: Edward O'Callaghan <funfunctor@folklore1984.net>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Andres Rodriguez <andresx7@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 78
1 file changed, 34 insertions, 44 deletions
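
For orientation before reading the diff: the patch pulls the dequeue-and-poll sequence into gfx_v8_0_deactivate_hqd() and the HQD register programming into gfx_v8_0_mqd_commit(), and both queue-init paths then call them in that order. The sketch below is an editor's illustration of that flow, not kernel code; the mock register read, the poll counter, and main() are invented stand-ins for the real RREG32/WREG32 accessors and amdgpu structures.

```c
/* Self-contained sketch of the pattern this patch factors out:
 * request a dequeue, busy-poll the ACTIVE bit with a bounded timeout,
 * return -ETIMEDOUT on failure, then commit the MQD to the HQD.
 * All names and the mock "hardware" below are illustrative only. */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define ACTIVE_MASK 0x1u

static uint32_t hqd_active = ACTIVE_MASK; /* pretend the queue starts active */
static int polls;

static uint32_t read_hqd_active(void)
{
	/* model the hardware draining the queue after a few polls */
	if (hqd_active && ++polls > 3)
		hqd_active = 0;
	return hqd_active;
}

static int deactivate_hqd_sketch(int usec_timeout)
{
	int i, r = 0;

	if (read_hqd_active() & ACTIVE_MASK) {
		/* real code: WREG32_FIELD(CP_HQD_DEQUEUE_REQUEST, DEQUEUE_REQ, req); */
		for (i = 0; i < usec_timeout; i++) {
			if (!(read_hqd_active() & ACTIVE_MASK))
				break;
			/* real code: udelay(1); */
		}
		if (i == usec_timeout)
			r = -ETIMEDOUT;
	}
	/* real code also zeroes CP_HQD_DEQUEUE_REQUEST, PQ_RPTR and PQ_WPTR here */
	return r;
}

static void mqd_commit_sketch(void)
{
	/* real code: program the HQD registers from the MQD fields */
	hqd_active = ACTIVE_MASK;
	printf("HQD programmed from MQD\n");
}

int main(void)
{
	/* after the refactor: deactivate first, then commit (both init paths) */
	if (deactivate_hqd_sketch(100))
		fprintf(stderr, "HQD failed to drain: timeout\n");
	mqd_commit_sketch();
	return 0;
}
```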
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 1a15c5e16b2f..c549d538932b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4772,6 +4772,27 @@ static int gfx_v8_0_kiq_kcq_disable(struct amdgpu_device *adev)
         return r;
 }
 
+static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
+{
+        int i, r = 0;
+
+        if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
+                WREG32_FIELD(CP_HQD_DEQUEUE_REQUEST, DEQUEUE_REQ, req);
+                for (i = 0; i < adev->usec_timeout; i++) {
+                        if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
+                                break;
+                        udelay(1);
+                }
+                if (i == adev->usec_timeout)
+                        r = -ETIMEDOUT;
+        }
+        WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
+        WREG32(mmCP_HQD_PQ_RPTR, 0);
+        WREG32(mmCP_HQD_PQ_WPTR, 0);
+
+        return r;
+}
+
 static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
 {
         struct amdgpu_device *adev = ring->adev;
@@ -4779,6 +4800,9 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
         uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
         uint32_t tmp;
 
+        /* init the mqd struct */
+        memset(mqd, 0, sizeof(struct vi_mqd));
+
         mqd->header = 0xC0310800;
         mqd->compute_pipelinestat_enable = 0x00000001;
         mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
@@ -4806,11 +4830,6 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
 
         mqd->cp_hqd_pq_doorbell_control = tmp;
 
-        /* disable the queue if it's active */
-        mqd->cp_hqd_dequeue_request = 0;
-        mqd->cp_hqd_pq_rptr = 0;
-        mqd->cp_hqd_pq_wptr = 0;
-
         /* set the pointer to the MQD */
         mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
         mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
@@ -4900,11 +4919,10 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
         return 0;
 }
 
-static int gfx_v8_0_kiq_init_register(struct amdgpu_ring *ring)
+static int gfx_v8_0_mqd_commit(struct amdgpu_ring *ring)
 {
         struct amdgpu_device *adev = ring->adev;
         struct vi_mqd *mqd = ring->mqd_ptr;
-        int j;
 
         /* disable wptr polling */
         WREG32_FIELD(CP_PQ_WPTR_POLL_CNTL, EN, 0);
@@ -4918,18 +4936,10 @@ static int gfx_v8_0_kiq_init_register(struct amdgpu_ring *ring)
         /* enable doorbell? */
         WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, mqd->cp_hqd_pq_doorbell_control);
 
-        /* disable the queue if it's active */
-        if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
-                WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
-                for (j = 0; j < adev->usec_timeout; j++) {
-                        if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
-                                break;
-                        udelay(1);
-                }
-                WREG32(mmCP_HQD_DEQUEUE_REQUEST, mqd->cp_hqd_dequeue_request);
-                WREG32(mmCP_HQD_PQ_RPTR, mqd->cp_hqd_pq_rptr);
-                WREG32(mmCP_HQD_PQ_WPTR, mqd->cp_hqd_pq_wptr);
-        }
+        /* set pq read/write pointers */
+        WREG32(mmCP_HQD_DEQUEUE_REQUEST, mqd->cp_hqd_dequeue_request);
+        WREG32(mmCP_HQD_PQ_RPTR, mqd->cp_hqd_pq_rptr);
+        WREG32(mmCP_HQD_PQ_WPTR, mqd->cp_hqd_pq_wptr);
 
         /* set the pointer to the MQD */
         WREG32(mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo);
@@ -4955,6 +4965,7 @@ static int gfx_v8_0_kiq_init_register(struct amdgpu_ring *ring)
         WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->cp_hqd_pq_wptr_poll_addr_lo);
         WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, mqd->cp_hqd_pq_wptr_poll_addr_hi);
 
+        /* enable the doorbell if requested */
         WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, mqd->cp_hqd_pq_doorbell_control);
 
         /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
@@ -4989,15 +5000,16 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
                 amdgpu_ring_clear_ring(ring);
                 mutex_lock(&adev->srbm_mutex);
                 vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
-                gfx_v8_0_kiq_init_register(ring);
+                gfx_v8_0_deactivate_hqd(adev, 1);
+                gfx_v8_0_mqd_commit(ring);
                 vi_srbm_select(adev, 0, 0, 0, 0);
                 mutex_unlock(&adev->srbm_mutex);
         } else {
-                memset((void *)mqd, 0, sizeof(*mqd));
                 mutex_lock(&adev->srbm_mutex);
                 vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
                 gfx_v8_0_mqd_init(ring);
-                gfx_v8_0_kiq_init_register(ring);
+                gfx_v8_0_deactivate_hqd(adev, 1);
+                gfx_v8_0_mqd_commit(ring);
                 vi_srbm_select(adev, 0, 0, 0, 0);
                 mutex_unlock(&adev->srbm_mutex);
 
@@ -5015,7 +5027,6 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
         int mqd_idx = ring - &adev->gfx.compute_ring[0];
 
         if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
-                memset((void *)mqd, 0, sizeof(*mqd));
                 mutex_lock(&adev->srbm_mutex);
                 vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
                 gfx_v8_0_mqd_init(ring);
@@ -5320,27 +5331,6 @@ static bool gfx_v8_0_check_soft_reset(void *handle)
         }
 }
 
-static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
-{
-        int i, r = 0;
-
-        if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
-                WREG32_FIELD(CP_HQD_DEQUEUE_REQUEST, DEQUEUE_REQ, req);
-                for (i = 0; i < adev->usec_timeout; i++) {
-                        if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
-                                break;
-                        udelay(1);
-                }
-                if (i == adev->usec_timeout)
-                        r = -ETIMEDOUT;
-        }
-        WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
-        WREG32(mmCP_HQD_PQ_RPTR, 0);
-        WREG32(mmCP_HQD_PQ_WPTR, 0);
-
-        return r;
-}
-
 static int gfx_v8_0_pre_soft_reset(void *handle)
 {
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;