Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/ci_dpm.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 122
1 file changed, 107 insertions, 15 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 1d8c375a3561..e9b1964d4e61 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -887,9 +887,6 @@ static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
 {
 	struct ci_power_info *pi = ci_get_pi(adev);
 
-	if (pi->uvd_power_gated == gate)
-		return;
-
 	pi->uvd_power_gated = gate;
 
 	ci_update_uvd_dpm(adev, gate);
@@ -960,6 +957,12 @@ static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
 		sclk = ps->performance_levels[0].sclk;
 	}
 
+	if (adev->pm.pm_display_cfg.min_core_set_clock > sclk)
+		sclk = adev->pm.pm_display_cfg.min_core_set_clock;
+
+	if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk)
+		mclk = adev->pm.pm_display_cfg.min_mem_set_clock;
+
 	if (rps->vce_active) {
 		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
 			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
@@ -2201,6 +2204,11 @@ static int ci_upload_firmware(struct amdgpu_device *adev)
 	struct ci_power_info *pi = ci_get_pi(adev);
 	int i, ret;
 
+	if (amdgpu_ci_is_smc_running(adev)) {
+		DRM_INFO("smc is running, no need to load smc firmware\n");
+		return 0;
+	}
+
 	for (i = 0; i < adev->usec_timeout; i++) {
 		if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
 			break;
@@ -4190,8 +4198,15 @@ static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
 {
 	struct ci_power_info *pi = ci_get_pi(adev);
 	u32 tmp;
+	int ret = 0;
 
 	if (!gate) {
+		/* turn the clocks on when decoding */
+		ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+						   AMD_CG_STATE_UNGATE);
+		if (ret)
+			return ret;
+
 		if (pi->caps_uvd_dpm ||
 		    (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
 			pi->smc_state_table.UvdBootLevel = 0;
@@ -4203,9 +4218,17 @@ static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
 		tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
 		tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
 		WREG32_SMC(ixDPM_TABLE_475, tmp);
+		ret = ci_enable_uvd_dpm(adev, true);
+	} else {
+		ret = ci_enable_uvd_dpm(adev, false);
+		if (ret)
+			return ret;
+
+		ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+						   AMD_CG_STATE_GATE);
 	}
 
-	return ci_enable_uvd_dpm(adev, !gate);
+	return ret;
 }
 
 static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
@@ -4247,13 +4270,12 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev,
 
 		ret = ci_enable_vce_dpm(adev, true);
 	} else {
+		ret = ci_enable_vce_dpm(adev, false);
+		if (ret)
+			return ret;
 		/* turn the clocks off when not encoding */
 		ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
 						   AMD_CG_STATE_GATE);
-		if (ret)
-			return ret;
-
-		ret = ci_enable_vce_dpm(adev, false);
 		}
 	}
 	return ret;
@@ -5219,6 +5241,7 @@ static void ci_update_current_ps(struct amdgpu_device *adev,
 	pi->current_rps = *rps;
 	pi->current_ps = *new_ps;
 	pi->current_rps.ps_priv = &pi->current_ps;
+	adev->pm.dpm.current_ps = &pi->current_rps;
 }
 
 static void ci_update_requested_ps(struct amdgpu_device *adev,
@@ -5230,6 +5253,7 @@ static void ci_update_requested_ps(struct amdgpu_device *adev,
 	pi->requested_rps = *rps;
 	pi->requested_ps = *new_ps;
 	pi->requested_rps.ps_priv = &pi->requested_ps;
+	adev->pm.dpm.requested_ps = &pi->requested_rps;
 }
 
 static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
@@ -5267,8 +5291,6 @@ static int ci_dpm_enable(struct amdgpu_device *adev)
 	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
 	int ret;
 
-	if (amdgpu_ci_is_smc_running(adev))
-		return -EINVAL;
 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
 		ci_enable_voltage_control(adev);
 		ret = ci_construct_voltage_tables(adev);
@@ -5689,7 +5711,7 @@ static int ci_parse_power_table(struct amdgpu_device *adev)
 	adev->pm.dpm.num_ps = state_array->ucNumEntries;
 
 	/* fill in the vce power states */
-	for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+	for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
 		u32 sclk, mclk;
 		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
 		clock_info = (union pplib_clock_info *)
@@ -6094,6 +6116,56 @@ static void ci_dpm_print_power_state(struct amdgpu_device *adev,
 	amdgpu_dpm_print_ps_status(adev, rps);
 }
 
+static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
+					     const struct ci_pl *ci_cpl2)
+{
+	return ((ci_cpl1->mclk == ci_cpl2->mclk) &&
+		(ci_cpl1->sclk == ci_cpl2->sclk) &&
+		(ci_cpl1->pcie_gen == ci_cpl2->pcie_gen) &&
+		(ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
+}
+
+static int ci_check_state_equal(struct amdgpu_device *adev,
+				struct amdgpu_ps *cps,
+				struct amdgpu_ps *rps,
+				bool *equal)
+{
+	struct ci_ps *ci_cps;
+	struct ci_ps *ci_rps;
+	int i;
+
+	if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
+		return -EINVAL;
+
+	ci_cps = ci_get_ps(cps);
+	ci_rps = ci_get_ps(rps);
+
+	if (ci_cps == NULL) {
+		*equal = false;
+		return 0;
+	}
+
+	if (ci_cps->performance_level_count != ci_rps->performance_level_count) {
+
+		*equal = false;
+		return 0;
+	}
+
+	for (i = 0; i < ci_cps->performance_level_count; i++) {
+		if (!ci_are_power_levels_equal(&(ci_cps->performance_levels[i]),
+					&(ci_rps->performance_levels[i]))) {
+			*equal = false;
+			return 0;
+		}
+	}
+
+	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
+	*equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
+	*equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
+
+	return 0;
+}
+
 static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
 {
 	struct ci_power_info *pi = ci_get_pi(adev);
@@ -6287,12 +6359,19 @@ static int ci_dpm_suspend(void *handle)
 
 	if (adev->pm.dpm_enabled) {
 		mutex_lock(&adev->pm.mutex);
-		/* disable dpm */
-		ci_dpm_disable(adev);
-		/* reset the power state */
-		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+		amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+			       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
+		amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+			       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
+		adev->pm.dpm.last_user_state = adev->pm.dpm.user_state;
+		adev->pm.dpm.last_state = adev->pm.dpm.state;
+		adev->pm.dpm.user_state = POWER_STATE_TYPE_INTERNAL_BOOT;
+		adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_BOOT;
 		mutex_unlock(&adev->pm.mutex);
+		amdgpu_pm_compute_clocks(adev);
+
 	}
+
 	return 0;
 }
 
@@ -6310,6 +6389,8 @@ static int ci_dpm_resume(void *handle)
 			adev->pm.dpm_enabled = false;
 		else
 			adev->pm.dpm_enabled = true;
+		adev->pm.dpm.user_state = adev->pm.dpm.last_user_state;
+		adev->pm.dpm.state = adev->pm.dpm.last_state;
 		mutex_unlock(&adev->pm.mutex);
 		if (adev->pm.dpm_enabled)
 			amdgpu_pm_compute_clocks(adev);
@@ -6644,6 +6725,8 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
 	.set_sclk_od = ci_dpm_set_sclk_od,
 	.get_mclk_od = ci_dpm_get_mclk_od,
 	.set_mclk_od = ci_dpm_set_mclk_od,
+	.check_state_equal = ci_check_state_equal,
+	.get_vce_clock_state = amdgpu_get_vce_clock_state,
 };
 
 static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
@@ -6662,3 +6745,12 @@ static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
 	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
 	adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
 }
+
+const struct amdgpu_ip_block_version ci_dpm_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SMC,
+	.major = 7,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &ci_dpm_ip_funcs,
+};
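
For context, the new .check_state_equal callback gives the DPM core a way to tell whether the requested power state actually differs from the current one before reprogramming anything. The following is a minimal, self-contained userspace sketch of that comparison logic only; the struct layouts (pl, ps), field widths, and helper names (levels_equal, states_equal) are simplified stand-ins for illustration, not the driver's real ci_pl/ci_ps definitions, and the real callback reports its result through the *equal out-parameter rather than a return value.

/* Standalone sketch of the state-equality check added in this patch.
 * All types and names here are simplified stand-ins, not kernel code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pl {
	uint32_t sclk;      /* engine clock */
	uint32_t mclk;      /* memory clock */
	uint16_t pcie_gen;
	uint16_t pcie_lane;
};

struct ps {
	int performance_level_count;
	struct pl levels[8];
	uint32_t vclk, dclk;   /* UVD clocks */
	uint32_t evclk, ecclk; /* VCE clocks */
};

static bool levels_equal(const struct pl *a, const struct pl *b)
{
	return a->mclk == b->mclk && a->sclk == b->sclk &&
	       a->pcie_gen == b->pcie_gen && a->pcie_lane == b->pcie_lane;
}

/* Mirrors the comparison order of ci_check_state_equal(): every
 * performance level must match, then the UVD/VCE clocks break the tie. */
static bool states_equal(const struct ps *cur, const struct ps *req)
{
	int i;

	if (cur->performance_level_count != req->performance_level_count)
		return false;

	for (i = 0; i < cur->performance_level_count; i++)
		if (!levels_equal(&cur->levels[i], &req->levels[i]))
			return false;

	return cur->vclk == req->vclk && cur->dclk == req->dclk &&
	       cur->evclk == req->evclk && cur->ecclk == req->ecclk;
}

int main(void)
{
	struct ps cur = { .performance_level_count = 1,
			  .levels = { { .sclk = 30000, .mclk = 15000,
					.pcie_gen = 1, .pcie_lane = 16 } } };
	struct ps req = cur;

	req.vclk = 40000; /* same levels, different UVD clock -> not equal */
	printf("equal: %s\n", states_equal(&cur, &req) ? "yes" : "no");
	return 0;
}

A state that compares equal can be left in place instead of being re-sent to the SMC, which is why the patch also exports the current and requested states through adev->pm.dpm.current_ps / requested_ps where the common DPM code can see them.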