| author    | Dave Airlie <airlied@redhat.com> | 2018-09-27 19:48:35 -0400 |
| committer | Dave Airlie <airlied@redhat.com> | 2018-09-27 19:48:40 -0400 |
| commit    | 87c2ee740c07f1edae9eec8bc45cb9b32a68f323 (patch) | |
| tree      | 1515f53eacb86689f2f96279e51cf0053ae8a308 /drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |
| parent    | 2de0b0a158bf423208c3898522c8fa1c1078df48 (diff) | |
| parent    | 6a96243056217662843694a4cbc83158d0e84403 (diff) | |
Merge branch 'drm-next-4.20' of git://people.freedesktop.org/~agd5f/linux into drm-next
More new features and fixes for 4.20:
- Add dynamic powergating support for VCN on picasso
- Scheduler cleanup
- Vega20 support for KFD
- DC cleanups and bug fixes
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180927184348.2696-1-alexander.deucher@amd.com
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 122 |
1 file changed, 67 insertions, 55 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 470dc80f4fe7..2aeef2bb93a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -2049,35 +2049,35 @@ static int gfx_v8_0_sw_init(void *handle)
         adev->gfx.mec.num_queue_per_pipe = 8;
 
         /* KIQ event */
-        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_INT_IB2, &adev->gfx.kiq.irq);
+        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_INT_IB2, &adev->gfx.kiq.irq);
         if (r)
                 return r;
 
         /* EOP Event */
-        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
+        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
         if (r)
                 return r;
 
         /* Privileged reg */
-        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT,
+        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT,
                               &adev->gfx.priv_reg_irq);
         if (r)
                 return r;
 
         /* Privileged inst */
-        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT,
+        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT,
                               &adev->gfx.priv_inst_irq);
         if (r)
                 return r;
 
         /* Add CP EDC/ECC irq */
-        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR,
+        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR,
                               &adev->gfx.cp_ecc_error_irq);
         if (r)
                 return r;
 
         /* SQ interrupts. */
-        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG,
+        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG,
                               &adev->gfx.sq_irq);
         if (r) {
                 DRM_ERROR("amdgpu_irq_add() for SQ failed: %d\n", r);
@@ -3835,7 +3835,7 @@ static void gfx_v8_0_config_init(struct amdgpu_device *adev)
         }
 }
 
-static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
+static void gfx_v8_0_constants_init(struct amdgpu_device *adev)
 {
         u32 tmp, sh_static_mem_cfg;
         int i;
@@ -4208,31 +4208,11 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
 static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
 {
         int r;
-        u32 tmp;
 
         gfx_v8_0_rlc_stop(adev);
-
-        /* disable CG */
-        tmp = RREG32(mmRLC_CGCG_CGLS_CTRL);
-        tmp &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
-                 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
-        WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
-        if (adev->asic_type == CHIP_POLARIS11 ||
-            adev->asic_type == CHIP_POLARIS10 ||
-            adev->asic_type == CHIP_POLARIS12 ||
-            adev->asic_type == CHIP_VEGAM) {
-                tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
-                tmp &= ~0x3;
-                WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
-        }
-
-        /* disable PG */
-        WREG32(mmRLC_PG_CNTL, 0);
-
         gfx_v8_0_rlc_reset(adev);
         gfx_v8_0_init_pg(adev);
 
-
         if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
                 /* legacy rlc firmware loading */
                 r = gfx_v8_0_rlc_load_microcode(adev);
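The net effect of the hunk above is easier to see in one piece. Below is a sketch, assembled from the hunk's context lines, of how gfx_v8_0_rlc_resume() reads once the clockgating/powergating disable sequence is gone; everything past the firmware-loading branch lies outside the hunk and is only paraphrased in comments.

/* Reader aid, not part of the patch: gfx_v8_0_rlc_resume() after this hunk. */
static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
{
        int r;

        gfx_v8_0_rlc_stop(adev);
        gfx_v8_0_rlc_reset(adev);
        gfx_v8_0_init_pg(adev);

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
                /* legacy rlc firmware loading */
                r = gfx_v8_0_rlc_load_microcode(adev);
                /* ... error handling is outside this hunk and unchanged ... */
        }
        /* ... the rest of the function is outside this hunk and unchanged ... */
}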
@@ -5039,7 +5019,7 @@ static int gfx_v8_0_hw_init(void *handle)
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
         gfx_v8_0_init_golden_registers(adev);
-        gfx_v8_0_gpu_init(adev);
+        gfx_v8_0_constants_init(adev);
 
         r = gfx_v8_0_rlc_resume(adev);
         if (r)
@@ -5080,6 +5060,55 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
         return r;
 }
 
+static bool gfx_v8_0_is_idle(void *handle)
+{
+        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+        if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE)
+                || RREG32(mmGRBM_STATUS2) != 0x8)
+                return false;
+        else
+                return true;
+}
+
+static bool gfx_v8_0_rlc_is_idle(void *handle)
+{
+        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+        if (RREG32(mmGRBM_STATUS2) != 0x8)
+                return false;
+        else
+                return true;
+}
+
+static int gfx_v8_0_wait_for_rlc_idle(void *handle)
+{
+        unsigned int i;
+        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+        for (i = 0; i < adev->usec_timeout; i++) {
+                if (gfx_v8_0_rlc_is_idle(handle))
+                        return 0;
+
+                udelay(1);
+        }
+        return -ETIMEDOUT;
+}
+
+static int gfx_v8_0_wait_for_idle(void *handle)
+{
+        unsigned int i;
+        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+        for (i = 0; i < adev->usec_timeout; i++) {
+                if (gfx_v8_0_is_idle(handle))
+                        return 0;
+
+                udelay(1);
+        }
+        return -ETIMEDOUT;
+}
+
 static int gfx_v8_0_hw_fini(void *handle)
 {
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -5098,9 +5127,16 @@ static int gfx_v8_0_hw_fini(void *handle)
                 pr_debug("For SRIOV client, shouldn't do anything.\n");
                 return 0;
         }
-        gfx_v8_0_cp_enable(adev, false);
-        gfx_v8_0_rlc_stop(adev);
-
+        adev->gfx.rlc.funcs->enter_safe_mode(adev);
+        if (!gfx_v8_0_wait_for_idle(adev))
+                gfx_v8_0_cp_enable(adev, false);
+        else
+                pr_err("cp is busy, skip halt cp\n");
+        if (!gfx_v8_0_wait_for_rlc_idle(adev))
+                gfx_v8_0_rlc_stop(adev);
+        else
+                pr_err("rlc is busy, skip halt rlc\n");
+        adev->gfx.rlc.funcs->exit_safe_mode(adev);
         return 0;
 }
 
@@ -5121,30 +5157,6 @@ static int gfx_v8_0_resume(void *handle)
         return r;
 }
 
-static bool gfx_v8_0_is_idle(void *handle)
-{
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-        if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE))
-                return false;
-        else
-                return true;
-}
-
-static int gfx_v8_0_wait_for_idle(void *handle)
-{
-        unsigned i;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-        for (i = 0; i < adev->usec_timeout; i++) {
-                if (gfx_v8_0_is_idle(handle))
-                        return 0;
-
-                udelay(1);
-        }
-        return -ETIMEDOUT;
-}
-
 static bool gfx_v8_0_check_soft_reset(void *handle)
 {
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
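Taken together, the last three hunks move the idle checks ahead of gfx_v8_0_hw_fini() and replace the unconditional CP/RLC halt with halts gated on the new idle checks, bracketed by the RLC safe-mode callbacks. As a reader aid (not part of the patch), here is a sketch of the resulting teardown sequence, assembled from the hunks above; the unchanged earlier part of the function, which these hunks do not show in full, is paraphrased in a comment.

/* Reader aid, not part of the patch: the hw_fini teardown path after this merge. */
static int gfx_v8_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* ... earlier teardown steps and the SRIOV early return shown in the
         * hunks above are unchanged ... */

        adev->gfx.rlc.funcs->enter_safe_mode(adev);
        if (!gfx_v8_0_wait_for_idle(adev))
                gfx_v8_0_cp_enable(adev, false);
        else
                pr_err("cp is busy, skip halt cp\n");
        if (!gfx_v8_0_wait_for_rlc_idle(adev))
                gfx_v8_0_rlc_stop(adev);
        else
                pr_err("rlc is busy, skip halt rlc\n");
        adev->gfx.rlc.funcs->exit_safe_mode(adev);
        return 0;
}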