diff options
author | Rex Zhu <Rex.Zhu@amd.com> | 2018-03-14 05:29:54 -0400 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2018-03-15 10:58:47 -0400 |
commit | 47ce4a9f84d783ae0243a449de64a20c7fafe6a4 (patch) | |
tree | d1234aa7a7930e61a6745265f3733384d609d879 | |
parent | ce1ace35b7b9e64fab7b0a261bee488e6d69c343 (diff) |
drm/amd/pp: Replace function/struct name cz_* with smu8_*
The smu8 hardware IP was used on CZ/ST,
so use smu8 as the prefix for the function and struct names in powerplay.
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Evan Quan <evan.quan@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r-- | drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 890 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h | 71 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 8 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c | 562 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h | 82 |
5 files changed, 806 insertions, 807 deletions
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 055a4ecaa6e0..f0f3900eae1a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | |||
@@ -46,26 +46,26 @@ | |||
46 | #define CURRENT_GFX_VID_MASK 0xff000000 | 46 | #define CURRENT_GFX_VID_MASK 0xff000000 |
47 | #define CURRENT_GFX_VID__SHIFT 24 | 47 | #define CURRENT_GFX_VID__SHIFT 24 |
48 | 48 | ||
49 | static const unsigned long PhwCz_Magic = (unsigned long) PHM_Cz_Magic; | 49 | static const unsigned long smu8_magic = (unsigned long) PHM_Cz_Magic; |
50 | 50 | ||
51 | static struct cz_power_state *cast_PhwCzPowerState(struct pp_hw_power_state *hw_ps) | 51 | static struct smu8_power_state *cast_smu8_power_state(struct pp_hw_power_state *hw_ps) |
52 | { | 52 | { |
53 | if (PhwCz_Magic != hw_ps->magic) | 53 | if (smu8_magic != hw_ps->magic) |
54 | return NULL; | 54 | return NULL; |
55 | 55 | ||
56 | return (struct cz_power_state *)hw_ps; | 56 | return (struct smu8_power_state *)hw_ps; |
57 | } | 57 | } |
58 | 58 | ||
59 | static const struct cz_power_state *cast_const_PhwCzPowerState( | 59 | static const struct smu8_power_state *cast_const_smu8_power_state( |
60 | const struct pp_hw_power_state *hw_ps) | 60 | const struct pp_hw_power_state *hw_ps) |
61 | { | 61 | { |
62 | if (PhwCz_Magic != hw_ps->magic) | 62 | if (smu8_magic != hw_ps->magic) |
63 | return NULL; | 63 | return NULL; |
64 | 64 | ||
65 | return (struct cz_power_state *)hw_ps; | 65 | return (struct smu8_power_state *)hw_ps; |
66 | } | 66 | } |
67 | 67 | ||
68 | static uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr, | 68 | static uint32_t smu8_get_eclk_level(struct pp_hwmgr *hwmgr, |
69 | uint32_t clock, uint32_t msg) | 69 | uint32_t clock, uint32_t msg) |
70 | { | 70 | { |
71 | int i = 0; | 71 | int i = 0; |
@@ -96,7 +96,7 @@ static uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr, | |||
96 | return i; | 96 | return i; |
97 | } | 97 | } |
98 | 98 | ||
99 | static uint32_t cz_get_sclk_level(struct pp_hwmgr *hwmgr, | 99 | static uint32_t smu8_get_sclk_level(struct pp_hwmgr *hwmgr, |
100 | uint32_t clock, uint32_t msg) | 100 | uint32_t clock, uint32_t msg) |
101 | { | 101 | { |
102 | int i = 0; | 102 | int i = 0; |
@@ -126,7 +126,7 @@ static uint32_t cz_get_sclk_level(struct pp_hwmgr *hwmgr, | |||
126 | return i; | 126 | return i; |
127 | } | 127 | } |
128 | 128 | ||
129 | static uint32_t cz_get_uvd_level(struct pp_hwmgr *hwmgr, | 129 | static uint32_t smu8_get_uvd_level(struct pp_hwmgr *hwmgr, |
130 | uint32_t clock, uint32_t msg) | 130 | uint32_t clock, uint32_t msg) |
131 | { | 131 | { |
132 | int i = 0; | 132 | int i = 0; |
@@ -157,42 +157,42 @@ static uint32_t cz_get_uvd_level(struct pp_hwmgr *hwmgr, | |||
157 | return i; | 157 | return i; |
158 | } | 158 | } |
159 | 159 | ||
160 | static uint32_t cz_get_max_sclk_level(struct pp_hwmgr *hwmgr) | 160 | static uint32_t smu8_get_max_sclk_level(struct pp_hwmgr *hwmgr) |
161 | { | 161 | { |
162 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 162 | struct smu8_hwmgr *data = hwmgr->backend; |
163 | 163 | ||
164 | if (cz_hwmgr->max_sclk_level == 0) { | 164 | if (data->max_sclk_level == 0) { |
165 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel); | 165 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel); |
166 | cz_hwmgr->max_sclk_level = smum_get_argument(hwmgr) + 1; | 166 | data->max_sclk_level = smum_get_argument(hwmgr) + 1; |
167 | } | 167 | } |
168 | 168 | ||
169 | return cz_hwmgr->max_sclk_level; | 169 | return data->max_sclk_level; |
170 | } | 170 | } |
171 | 171 | ||
172 | static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) | 172 | static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) |
173 | { | 173 | { |
174 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 174 | struct smu8_hwmgr *data = hwmgr->backend; |
175 | struct amdgpu_device *adev = hwmgr->adev; | 175 | struct amdgpu_device *adev = hwmgr->adev; |
176 | 176 | ||
177 | cz_hwmgr->gfx_ramp_step = 256*25/100; | 177 | data->gfx_ramp_step = 256*25/100; |
178 | cz_hwmgr->gfx_ramp_delay = 1; /* by default, we delay 1us */ | 178 | data->gfx_ramp_delay = 1; /* by default, we delay 1us */ |
179 | 179 | ||
180 | cz_hwmgr->mgcg_cgtt_local0 = 0x00000000; | 180 | data->mgcg_cgtt_local0 = 0x00000000; |
181 | cz_hwmgr->mgcg_cgtt_local1 = 0x00000000; | 181 | data->mgcg_cgtt_local1 = 0x00000000; |
182 | cz_hwmgr->clock_slow_down_freq = 25000; | 182 | data->clock_slow_down_freq = 25000; |
183 | cz_hwmgr->skip_clock_slow_down = 1; | 183 | data->skip_clock_slow_down = 1; |
184 | cz_hwmgr->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */ | 184 | data->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */ |
185 | cz_hwmgr->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */ | 185 | data->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */ |
186 | cz_hwmgr->voting_rights_clients = 0x00C00033; | 186 | data->voting_rights_clients = 0x00C00033; |
187 | cz_hwmgr->static_screen_threshold = 8; | 187 | data->static_screen_threshold = 8; |
188 | cz_hwmgr->ddi_power_gating_disabled = 0; | 188 | data->ddi_power_gating_disabled = 0; |
189 | cz_hwmgr->bapm_enabled = 1; | 189 | data->bapm_enabled = 1; |
190 | cz_hwmgr->voltage_drop_threshold = 0; | 190 | data->voltage_drop_threshold = 0; |
191 | cz_hwmgr->gfx_power_gating_threshold = 500; | 191 | data->gfx_power_gating_threshold = 500; |
192 | cz_hwmgr->vce_slow_sclk_threshold = 20000; | 192 | data->vce_slow_sclk_threshold = 20000; |
193 | cz_hwmgr->dce_slow_sclk_threshold = 30000; | 193 | data->dce_slow_sclk_threshold = 30000; |
194 | cz_hwmgr->disable_driver_thermal_policy = 1; | 194 | data->disable_driver_thermal_policy = 1; |
195 | cz_hwmgr->disable_nb_ps3_in_battery = 0; | 195 | data->disable_nb_ps3_in_battery = 0; |
196 | 196 | ||
197 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, | 197 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
198 | PHM_PlatformCaps_ABM); | 198 | PHM_PlatformCaps_ABM); |
@@ -203,14 +203,14 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) | |||
203 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, | 203 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
204 | PHM_PlatformCaps_DynamicM3Arbiter); | 204 | PHM_PlatformCaps_DynamicM3Arbiter); |
205 | 205 | ||
206 | cz_hwmgr->override_dynamic_mgpg = 1; | 206 | data->override_dynamic_mgpg = 1; |
207 | 207 | ||
208 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | 208 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
209 | PHM_PlatformCaps_DynamicPatchPowerState); | 209 | PHM_PlatformCaps_DynamicPatchPowerState); |
210 | 210 | ||
211 | cz_hwmgr->thermal_auto_throttling_treshold = 0; | 211 | data->thermal_auto_throttling_treshold = 0; |
212 | cz_hwmgr->tdr_clock = 0; | 212 | data->tdr_clock = 0; |
213 | cz_hwmgr->disable_gfx_power_gating_in_uvd = 0; | 213 | data->disable_gfx_power_gating_in_uvd = 0; |
214 | 214 | ||
215 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | 215 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
216 | PHM_PlatformCaps_DynamicUVDState); | 216 | PHM_PlatformCaps_DynamicUVDState); |
@@ -220,10 +220,10 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) | |||
220 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | 220 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
221 | PHM_PlatformCaps_VCEDPM); | 221 | PHM_PlatformCaps_VCEDPM); |
222 | 222 | ||
223 | cz_hwmgr->cc6_settings.cpu_cc6_disable = false; | 223 | data->cc6_settings.cpu_cc6_disable = false; |
224 | cz_hwmgr->cc6_settings.cpu_pstate_disable = false; | 224 | data->cc6_settings.cpu_pstate_disable = false; |
225 | cz_hwmgr->cc6_settings.nb_pstate_switch_disable = false; | 225 | data->cc6_settings.nb_pstate_switch_disable = false; |
226 | cz_hwmgr->cc6_settings.cpu_pstate_separation_time = 0; | 226 | data->cc6_settings.cpu_pstate_separation_time = 0; |
227 | 227 | ||
228 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | 228 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
229 | PHM_PlatformCaps_DisableVoltageIsland); | 229 | PHM_PlatformCaps_DisableVoltageIsland); |
@@ -244,30 +244,30 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) | |||
244 | return 0; | 244 | return 0; |
245 | } | 245 | } |
246 | 246 | ||
247 | static uint32_t cz_convert_8Bit_index_to_voltage( | 247 | static uint32_t smu8_convert_8Bit_index_to_voltage( |
248 | struct pp_hwmgr *hwmgr, uint16_t voltage) | 248 | struct pp_hwmgr *hwmgr, uint16_t voltage) |
249 | { | 249 | { |
250 | return 6200 - (voltage * 25); | 250 | return 6200 - (voltage * 25); |
251 | } | 251 | } |
252 | 252 | ||
253 | static int cz_construct_max_power_limits_table(struct pp_hwmgr *hwmgr, | 253 | static int smu8_construct_max_power_limits_table(struct pp_hwmgr *hwmgr, |
254 | struct phm_clock_and_voltage_limits *table) | 254 | struct phm_clock_and_voltage_limits *table) |
255 | { | 255 | { |
256 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 256 | struct smu8_hwmgr *data = hwmgr->backend; |
257 | struct cz_sys_info *sys_info = &cz_hwmgr->sys_info; | 257 | struct smu8_sys_info *sys_info = &data->sys_info; |
258 | struct phm_clock_voltage_dependency_table *dep_table = | 258 | struct phm_clock_voltage_dependency_table *dep_table = |
259 | hwmgr->dyn_state.vddc_dependency_on_sclk; | 259 | hwmgr->dyn_state.vddc_dependency_on_sclk; |
260 | 260 | ||
261 | if (dep_table->count > 0) { | 261 | if (dep_table->count > 0) { |
262 | table->sclk = dep_table->entries[dep_table->count-1].clk; | 262 | table->sclk = dep_table->entries[dep_table->count-1].clk; |
263 | table->vddc = cz_convert_8Bit_index_to_voltage(hwmgr, | 263 | table->vddc = smu8_convert_8Bit_index_to_voltage(hwmgr, |
264 | (uint16_t)dep_table->entries[dep_table->count-1].v); | 264 | (uint16_t)dep_table->entries[dep_table->count-1].v); |
265 | } | 265 | } |
266 | table->mclk = sys_info->nbp_memory_clock[0]; | 266 | table->mclk = sys_info->nbp_memory_clock[0]; |
267 | return 0; | 267 | return 0; |
268 | } | 268 | } |
269 | 269 | ||
270 | static int cz_init_dynamic_state_adjustment_rule_settings( | 270 | static int smu8_init_dynamic_state_adjustment_rule_settings( |
271 | struct pp_hwmgr *hwmgr, | 271 | struct pp_hwmgr *hwmgr, |
272 | ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table) | 272 | ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table) |
273 | { | 273 | { |
@@ -305,9 +305,9 @@ static int cz_init_dynamic_state_adjustment_rule_settings( | |||
305 | return 0; | 305 | return 0; |
306 | } | 306 | } |
307 | 307 | ||
308 | static int cz_get_system_info_data(struct pp_hwmgr *hwmgr) | 308 | static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr) |
309 | { | 309 | { |
310 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 310 | struct smu8_hwmgr *data = hwmgr->backend; |
311 | ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info = NULL; | 311 | ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info = NULL; |
312 | uint32_t i; | 312 | uint32_t i; |
313 | int result = 0; | 313 | int result = 0; |
@@ -329,67 +329,67 @@ static int cz_get_system_info_data(struct pp_hwmgr *hwmgr) | |||
329 | return -EINVAL; | 329 | return -EINVAL; |
330 | } | 330 | } |
331 | 331 | ||
332 | cz_hwmgr->sys_info.bootup_uma_clock = | 332 | data->sys_info.bootup_uma_clock = |
333 | le32_to_cpu(info->ulBootUpUMAClock); | 333 | le32_to_cpu(info->ulBootUpUMAClock); |
334 | 334 | ||
335 | cz_hwmgr->sys_info.bootup_engine_clock = | 335 | data->sys_info.bootup_engine_clock = |
336 | le32_to_cpu(info->ulBootUpEngineClock); | 336 | le32_to_cpu(info->ulBootUpEngineClock); |
337 | 337 | ||
338 | cz_hwmgr->sys_info.dentist_vco_freq = | 338 | data->sys_info.dentist_vco_freq = |
339 | le32_to_cpu(info->ulDentistVCOFreq); | 339 | le32_to_cpu(info->ulDentistVCOFreq); |
340 | 340 | ||
341 | cz_hwmgr->sys_info.system_config = | 341 | data->sys_info.system_config = |
342 | le32_to_cpu(info->ulSystemConfig); | 342 | le32_to_cpu(info->ulSystemConfig); |
343 | 343 | ||
344 | cz_hwmgr->sys_info.bootup_nb_voltage_index = | 344 | data->sys_info.bootup_nb_voltage_index = |
345 | le16_to_cpu(info->usBootUpNBVoltage); | 345 | le16_to_cpu(info->usBootUpNBVoltage); |
346 | 346 | ||
347 | cz_hwmgr->sys_info.htc_hyst_lmt = | 347 | data->sys_info.htc_hyst_lmt = |
348 | (info->ucHtcHystLmt == 0) ? 5 : info->ucHtcHystLmt; | 348 | (info->ucHtcHystLmt == 0) ? 5 : info->ucHtcHystLmt; |
349 | 349 | ||
350 | cz_hwmgr->sys_info.htc_tmp_lmt = | 350 | data->sys_info.htc_tmp_lmt = |
351 | (info->ucHtcTmpLmt == 0) ? 203 : info->ucHtcTmpLmt; | 351 | (info->ucHtcTmpLmt == 0) ? 203 : info->ucHtcTmpLmt; |
352 | 352 | ||
353 | if (cz_hwmgr->sys_info.htc_tmp_lmt <= | 353 | if (data->sys_info.htc_tmp_lmt <= |
354 | cz_hwmgr->sys_info.htc_hyst_lmt) { | 354 | data->sys_info.htc_hyst_lmt) { |
355 | pr_err("The htcTmpLmt should be larger than htcHystLmt.\n"); | 355 | pr_err("The htcTmpLmt should be larger than htcHystLmt.\n"); |
356 | return -EINVAL; | 356 | return -EINVAL; |
357 | } | 357 | } |
358 | 358 | ||
359 | cz_hwmgr->sys_info.nb_dpm_enable = | 359 | data->sys_info.nb_dpm_enable = |
360 | cz_hwmgr->enable_nb_ps_policy && | 360 | data->enable_nb_ps_policy && |
361 | (le32_to_cpu(info->ulSystemConfig) >> 3 & 0x1); | 361 | (le32_to_cpu(info->ulSystemConfig) >> 3 & 0x1); |
362 | 362 | ||
363 | for (i = 0; i < CZ_NUM_NBPSTATES; i++) { | 363 | for (i = 0; i < SMU8_NUM_NBPSTATES; i++) { |
364 | if (i < CZ_NUM_NBPMEMORYCLOCK) { | 364 | if (i < SMU8_NUM_NBPMEMORYCLOCK) { |
365 | cz_hwmgr->sys_info.nbp_memory_clock[i] = | 365 | data->sys_info.nbp_memory_clock[i] = |
366 | le32_to_cpu(info->ulNbpStateMemclkFreq[i]); | 366 | le32_to_cpu(info->ulNbpStateMemclkFreq[i]); |
367 | } | 367 | } |
368 | cz_hwmgr->sys_info.nbp_n_clock[i] = | 368 | data->sys_info.nbp_n_clock[i] = |
369 | le32_to_cpu(info->ulNbpStateNClkFreq[i]); | 369 | le32_to_cpu(info->ulNbpStateNClkFreq[i]); |
370 | } | 370 | } |
371 | 371 | ||
372 | for (i = 0; i < MAX_DISPLAY_CLOCK_LEVEL; i++) { | 372 | for (i = 0; i < MAX_DISPLAY_CLOCK_LEVEL; i++) { |
373 | cz_hwmgr->sys_info.display_clock[i] = | 373 | data->sys_info.display_clock[i] = |
374 | le32_to_cpu(info->sDispClkVoltageMapping[i].ulMaximumSupportedCLK); | 374 | le32_to_cpu(info->sDispClkVoltageMapping[i].ulMaximumSupportedCLK); |
375 | } | 375 | } |
376 | 376 | ||
377 | /* Here use 4 levels, make sure not exceed */ | 377 | /* Here use 4 levels, make sure not exceed */ |
378 | for (i = 0; i < CZ_NUM_NBPSTATES; i++) { | 378 | for (i = 0; i < SMU8_NUM_NBPSTATES; i++) { |
379 | cz_hwmgr->sys_info.nbp_voltage_index[i] = | 379 | data->sys_info.nbp_voltage_index[i] = |
380 | le16_to_cpu(info->usNBPStateVoltage[i]); | 380 | le16_to_cpu(info->usNBPStateVoltage[i]); |
381 | } | 381 | } |
382 | 382 | ||
383 | if (!cz_hwmgr->sys_info.nb_dpm_enable) { | 383 | if (!data->sys_info.nb_dpm_enable) { |
384 | for (i = 1; i < CZ_NUM_NBPSTATES; i++) { | 384 | for (i = 1; i < SMU8_NUM_NBPSTATES; i++) { |
385 | if (i < CZ_NUM_NBPMEMORYCLOCK) { | 385 | if (i < SMU8_NUM_NBPMEMORYCLOCK) { |
386 | cz_hwmgr->sys_info.nbp_memory_clock[i] = | 386 | data->sys_info.nbp_memory_clock[i] = |
387 | cz_hwmgr->sys_info.nbp_memory_clock[0]; | 387 | data->sys_info.nbp_memory_clock[0]; |
388 | } | 388 | } |
389 | cz_hwmgr->sys_info.nbp_n_clock[i] = | 389 | data->sys_info.nbp_n_clock[i] = |
390 | cz_hwmgr->sys_info.nbp_n_clock[0]; | 390 | data->sys_info.nbp_n_clock[0]; |
391 | cz_hwmgr->sys_info.nbp_voltage_index[i] = | 391 | data->sys_info.nbp_voltage_index[i] = |
392 | cz_hwmgr->sys_info.nbp_voltage_index[0]; | 392 | data->sys_info.nbp_voltage_index[0]; |
393 | } | 393 | } |
394 | } | 394 | } |
395 | 395 | ||
@@ -399,40 +399,40 @@ static int cz_get_system_info_data(struct pp_hwmgr *hwmgr) | |||
399 | PHM_PlatformCaps_EnableDFSBypass); | 399 | PHM_PlatformCaps_EnableDFSBypass); |
400 | } | 400 | } |
401 | 401 | ||
402 | cz_hwmgr->sys_info.uma_channel_number = info->ucUMAChannelNumber; | 402 | data->sys_info.uma_channel_number = info->ucUMAChannelNumber; |
403 | 403 | ||
404 | cz_construct_max_power_limits_table (hwmgr, | 404 | smu8_construct_max_power_limits_table (hwmgr, |
405 | &hwmgr->dyn_state.max_clock_voltage_on_ac); | 405 | &hwmgr->dyn_state.max_clock_voltage_on_ac); |
406 | 406 | ||
407 | cz_init_dynamic_state_adjustment_rule_settings(hwmgr, | 407 | smu8_init_dynamic_state_adjustment_rule_settings(hwmgr, |
408 | &info->sDISPCLK_Voltage[0]); | 408 | &info->sDISPCLK_Voltage[0]); |
409 | 409 | ||
410 | return result; | 410 | return result; |
411 | } | 411 | } |
412 | 412 | ||
413 | static int cz_construct_boot_state(struct pp_hwmgr *hwmgr) | 413 | static int smu8_construct_boot_state(struct pp_hwmgr *hwmgr) |
414 | { | 414 | { |
415 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 415 | struct smu8_hwmgr *data = hwmgr->backend; |
416 | 416 | ||
417 | cz_hwmgr->boot_power_level.engineClock = | 417 | data->boot_power_level.engineClock = |
418 | cz_hwmgr->sys_info.bootup_engine_clock; | 418 | data->sys_info.bootup_engine_clock; |
419 | 419 | ||
420 | cz_hwmgr->boot_power_level.vddcIndex = | 420 | data->boot_power_level.vddcIndex = |
421 | (uint8_t)cz_hwmgr->sys_info.bootup_nb_voltage_index; | 421 | (uint8_t)data->sys_info.bootup_nb_voltage_index; |
422 | 422 | ||
423 | cz_hwmgr->boot_power_level.dsDividerIndex = 0; | 423 | data->boot_power_level.dsDividerIndex = 0; |
424 | cz_hwmgr->boot_power_level.ssDividerIndex = 0; | 424 | data->boot_power_level.ssDividerIndex = 0; |
425 | cz_hwmgr->boot_power_level.allowGnbSlow = 1; | 425 | data->boot_power_level.allowGnbSlow = 1; |
426 | cz_hwmgr->boot_power_level.forceNBPstate = 0; | 426 | data->boot_power_level.forceNBPstate = 0; |
427 | cz_hwmgr->boot_power_level.hysteresis_up = 0; | 427 | data->boot_power_level.hysteresis_up = 0; |
428 | cz_hwmgr->boot_power_level.numSIMDToPowerDown = 0; | 428 | data->boot_power_level.numSIMDToPowerDown = 0; |
429 | cz_hwmgr->boot_power_level.display_wm = 0; | 429 | data->boot_power_level.display_wm = 0; |
430 | cz_hwmgr->boot_power_level.vce_wm = 0; | 430 | data->boot_power_level.vce_wm = 0; |
431 | 431 | ||
432 | return 0; | 432 | return 0; |
433 | } | 433 | } |
434 | 434 | ||
435 | static int cz_upload_pptable_to_smu(struct pp_hwmgr *hwmgr) | 435 | static int smu8_upload_pptable_to_smu(struct pp_hwmgr *hwmgr) |
436 | { | 436 | { |
437 | struct SMU8_Fusion_ClkTable *clock_table; | 437 | struct SMU8_Fusion_ClkTable *clock_table; |
438 | int ret; | 438 | int ret; |
@@ -462,18 +462,18 @@ static int cz_upload_pptable_to_smu(struct pp_hwmgr *hwmgr) | |||
462 | clock_table = (struct SMU8_Fusion_ClkTable *)table; | 462 | clock_table = (struct SMU8_Fusion_ClkTable *)table; |
463 | 463 | ||
464 | /* patch clock table */ | 464 | /* patch clock table */ |
465 | PP_ASSERT_WITH_CODE((vddc_table->count <= CZ_MAX_HARDWARE_POWERLEVELS), | 465 | PP_ASSERT_WITH_CODE((vddc_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS), |
466 | "Dependency table entry exceeds max limit!", return -EINVAL;); | 466 | "Dependency table entry exceeds max limit!", return -EINVAL;); |
467 | PP_ASSERT_WITH_CODE((vdd_gfx_table->count <= CZ_MAX_HARDWARE_POWERLEVELS), | 467 | PP_ASSERT_WITH_CODE((vdd_gfx_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS), |
468 | "Dependency table entry exceeds max limit!", return -EINVAL;); | 468 | "Dependency table entry exceeds max limit!", return -EINVAL;); |
469 | PP_ASSERT_WITH_CODE((acp_table->count <= CZ_MAX_HARDWARE_POWERLEVELS), | 469 | PP_ASSERT_WITH_CODE((acp_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS), |
470 | "Dependency table entry exceeds max limit!", return -EINVAL;); | 470 | "Dependency table entry exceeds max limit!", return -EINVAL;); |
471 | PP_ASSERT_WITH_CODE((uvd_table->count <= CZ_MAX_HARDWARE_POWERLEVELS), | 471 | PP_ASSERT_WITH_CODE((uvd_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS), |
472 | "Dependency table entry exceeds max limit!", return -EINVAL;); | 472 | "Dependency table entry exceeds max limit!", return -EINVAL;); |
473 | PP_ASSERT_WITH_CODE((vce_table->count <= CZ_MAX_HARDWARE_POWERLEVELS), | 473 | PP_ASSERT_WITH_CODE((vce_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS), |
474 | "Dependency table entry exceeds max limit!", return -EINVAL;); | 474 | "Dependency table entry exceeds max limit!", return -EINVAL;); |
475 | 475 | ||
476 | for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) { | 476 | for (i = 0; i < SMU8_MAX_HARDWARE_POWERLEVELS; i++) { |
477 | 477 | ||
478 | /* vddc_sclk */ | 478 | /* vddc_sclk */ |
479 | clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid = | 479 | clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid = |
@@ -551,9 +551,9 @@ static int cz_upload_pptable_to_smu(struct pp_hwmgr *hwmgr) | |||
551 | return ret; | 551 | return ret; |
552 | } | 552 | } |
553 | 553 | ||
554 | static int cz_init_sclk_limit(struct pp_hwmgr *hwmgr) | 554 | static int smu8_init_sclk_limit(struct pp_hwmgr *hwmgr) |
555 | { | 555 | { |
556 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 556 | struct smu8_hwmgr *data = hwmgr->backend; |
557 | struct phm_clock_voltage_dependency_table *table = | 557 | struct phm_clock_voltage_dependency_table *table = |
558 | hwmgr->dyn_state.vddc_dependency_on_sclk; | 558 | hwmgr->dyn_state.vddc_dependency_on_sclk; |
559 | unsigned long clock = 0, level; | 559 | unsigned long clock = 0, level; |
@@ -561,25 +561,25 @@ static int cz_init_sclk_limit(struct pp_hwmgr *hwmgr) | |||
561 | if (NULL == table || table->count <= 0) | 561 | if (NULL == table || table->count <= 0) |
562 | return -EINVAL; | 562 | return -EINVAL; |
563 | 563 | ||
564 | cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk; | 564 | data->sclk_dpm.soft_min_clk = table->entries[0].clk; |
565 | cz_hwmgr->sclk_dpm.hard_min_clk = table->entries[0].clk; | 565 | data->sclk_dpm.hard_min_clk = table->entries[0].clk; |
566 | 566 | ||
567 | level = cz_get_max_sclk_level(hwmgr) - 1; | 567 | level = smu8_get_max_sclk_level(hwmgr) - 1; |
568 | 568 | ||
569 | if (level < table->count) | 569 | if (level < table->count) |
570 | clock = table->entries[level].clk; | 570 | clock = table->entries[level].clk; |
571 | else | 571 | else |
572 | clock = table->entries[table->count - 1].clk; | 572 | clock = table->entries[table->count - 1].clk; |
573 | 573 | ||
574 | cz_hwmgr->sclk_dpm.soft_max_clk = clock; | 574 | data->sclk_dpm.soft_max_clk = clock; |
575 | cz_hwmgr->sclk_dpm.hard_max_clk = clock; | 575 | data->sclk_dpm.hard_max_clk = clock; |
576 | 576 | ||
577 | return 0; | 577 | return 0; |
578 | } | 578 | } |
579 | 579 | ||
580 | static int cz_init_uvd_limit(struct pp_hwmgr *hwmgr) | 580 | static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr) |
581 | { | 581 | { |
582 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 582 | struct smu8_hwmgr *data = hwmgr->backend; |
583 | struct phm_uvd_clock_voltage_dependency_table *table = | 583 | struct phm_uvd_clock_voltage_dependency_table *table = |
584 | hwmgr->dyn_state.uvd_clock_voltage_dependency_table; | 584 | hwmgr->dyn_state.uvd_clock_voltage_dependency_table; |
585 | unsigned long clock = 0, level; | 585 | unsigned long clock = 0, level; |
@@ -587,8 +587,8 @@ static int cz_init_uvd_limit(struct pp_hwmgr *hwmgr) | |||
587 | if (NULL == table || table->count <= 0) | 587 | if (NULL == table || table->count <= 0) |
588 | return -EINVAL; | 588 | return -EINVAL; |
589 | 589 | ||
590 | cz_hwmgr->uvd_dpm.soft_min_clk = 0; | 590 | data->uvd_dpm.soft_min_clk = 0; |
591 | cz_hwmgr->uvd_dpm.hard_min_clk = 0; | 591 | data->uvd_dpm.hard_min_clk = 0; |
592 | 592 | ||
593 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel); | 593 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel); |
594 | level = smum_get_argument(hwmgr); | 594 | level = smum_get_argument(hwmgr); |
@@ -598,15 +598,15 @@ static int cz_init_uvd_limit(struct pp_hwmgr *hwmgr) | |||
598 | else | 598 | else |
599 | clock = table->entries[table->count - 1].vclk; | 599 | clock = table->entries[table->count - 1].vclk; |
600 | 600 | ||
601 | cz_hwmgr->uvd_dpm.soft_max_clk = clock; | 601 | data->uvd_dpm.soft_max_clk = clock; |
602 | cz_hwmgr->uvd_dpm.hard_max_clk = clock; | 602 | data->uvd_dpm.hard_max_clk = clock; |
603 | 603 | ||
604 | return 0; | 604 | return 0; |
605 | } | 605 | } |
606 | 606 | ||
607 | static int cz_init_vce_limit(struct pp_hwmgr *hwmgr) | 607 | static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr) |
608 | { | 608 | { |
609 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 609 | struct smu8_hwmgr *data = hwmgr->backend; |
610 | struct phm_vce_clock_voltage_dependency_table *table = | 610 | struct phm_vce_clock_voltage_dependency_table *table = |
611 | hwmgr->dyn_state.vce_clock_voltage_dependency_table; | 611 | hwmgr->dyn_state.vce_clock_voltage_dependency_table; |
612 | unsigned long clock = 0, level; | 612 | unsigned long clock = 0, level; |
@@ -614,8 +614,8 @@ static int cz_init_vce_limit(struct pp_hwmgr *hwmgr) | |||
614 | if (NULL == table || table->count <= 0) | 614 | if (NULL == table || table->count <= 0) |
615 | return -EINVAL; | 615 | return -EINVAL; |
616 | 616 | ||
617 | cz_hwmgr->vce_dpm.soft_min_clk = 0; | 617 | data->vce_dpm.soft_min_clk = 0; |
618 | cz_hwmgr->vce_dpm.hard_min_clk = 0; | 618 | data->vce_dpm.hard_min_clk = 0; |
619 | 619 | ||
620 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel); | 620 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel); |
621 | level = smum_get_argument(hwmgr); | 621 | level = smum_get_argument(hwmgr); |
@@ -625,15 +625,15 @@ static int cz_init_vce_limit(struct pp_hwmgr *hwmgr) | |||
625 | else | 625 | else |
626 | clock = table->entries[table->count - 1].ecclk; | 626 | clock = table->entries[table->count - 1].ecclk; |
627 | 627 | ||
628 | cz_hwmgr->vce_dpm.soft_max_clk = clock; | 628 | data->vce_dpm.soft_max_clk = clock; |
629 | cz_hwmgr->vce_dpm.hard_max_clk = clock; | 629 | data->vce_dpm.hard_max_clk = clock; |
630 | 630 | ||
631 | return 0; | 631 | return 0; |
632 | } | 632 | } |
633 | 633 | ||
634 | static int cz_init_acp_limit(struct pp_hwmgr *hwmgr) | 634 | static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr) |
635 | { | 635 | { |
636 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 636 | struct smu8_hwmgr *data = hwmgr->backend; |
637 | struct phm_acp_clock_voltage_dependency_table *table = | 637 | struct phm_acp_clock_voltage_dependency_table *table = |
638 | hwmgr->dyn_state.acp_clock_voltage_dependency_table; | 638 | hwmgr->dyn_state.acp_clock_voltage_dependency_table; |
639 | unsigned long clock = 0, level; | 639 | unsigned long clock = 0, level; |
@@ -641,8 +641,8 @@ static int cz_init_acp_limit(struct pp_hwmgr *hwmgr) | |||
641 | if (NULL == table || table->count <= 0) | 641 | if (NULL == table || table->count <= 0) |
642 | return -EINVAL; | 642 | return -EINVAL; |
643 | 643 | ||
644 | cz_hwmgr->acp_dpm.soft_min_clk = 0; | 644 | data->acp_dpm.soft_min_clk = 0; |
645 | cz_hwmgr->acp_dpm.hard_min_clk = 0; | 645 | data->acp_dpm.hard_min_clk = 0; |
646 | 646 | ||
647 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel); | 647 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel); |
648 | level = smum_get_argument(hwmgr); | 648 | level = smum_get_argument(hwmgr); |
@@ -652,32 +652,32 @@ static int cz_init_acp_limit(struct pp_hwmgr *hwmgr) | |||
652 | else | 652 | else |
653 | clock = table->entries[table->count - 1].acpclk; | 653 | clock = table->entries[table->count - 1].acpclk; |
654 | 654 | ||
655 | cz_hwmgr->acp_dpm.soft_max_clk = clock; | 655 | data->acp_dpm.soft_max_clk = clock; |
656 | cz_hwmgr->acp_dpm.hard_max_clk = clock; | 656 | data->acp_dpm.hard_max_clk = clock; |
657 | return 0; | 657 | return 0; |
658 | } | 658 | } |
659 | 659 | ||
660 | static void cz_init_power_gate_state(struct pp_hwmgr *hwmgr) | 660 | static void smu8_init_power_gate_state(struct pp_hwmgr *hwmgr) |
661 | { | 661 | { |
662 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 662 | struct smu8_hwmgr *data = hwmgr->backend; |
663 | 663 | ||
664 | cz_hwmgr->uvd_power_gated = false; | 664 | data->uvd_power_gated = false; |
665 | cz_hwmgr->vce_power_gated = false; | 665 | data->vce_power_gated = false; |
666 | cz_hwmgr->samu_power_gated = false; | 666 | data->samu_power_gated = false; |
667 | cz_hwmgr->acp_power_gated = false; | 667 | data->acp_power_gated = false; |
668 | cz_hwmgr->pgacpinit = true; | 668 | data->pgacpinit = true; |
669 | } | 669 | } |
670 | 670 | ||
671 | static void cz_init_sclk_threshold(struct pp_hwmgr *hwmgr) | 671 | static void smu8_init_sclk_threshold(struct pp_hwmgr *hwmgr) |
672 | { | 672 | { |
673 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 673 | struct smu8_hwmgr *data = hwmgr->backend; |
674 | 674 | ||
675 | cz_hwmgr->low_sclk_interrupt_threshold = 0; | 675 | data->low_sclk_interrupt_threshold = 0; |
676 | } | 676 | } |
677 | 677 | ||
678 | static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr) | 678 | static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr) |
679 | { | 679 | { |
680 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 680 | struct smu8_hwmgr *data = hwmgr->backend; |
681 | struct phm_clock_voltage_dependency_table *table = | 681 | struct phm_clock_voltage_dependency_table *table = |
682 | hwmgr->dyn_state.vddc_dependency_on_sclk; | 682 | hwmgr->dyn_state.vddc_dependency_on_sclk; |
683 | 683 | ||
@@ -686,29 +686,29 @@ static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr) | |||
686 | unsigned long stable_pstate_sclk; | 686 | unsigned long stable_pstate_sclk; |
687 | unsigned long percentage; | 687 | unsigned long percentage; |
688 | 688 | ||
689 | cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk; | 689 | data->sclk_dpm.soft_min_clk = table->entries[0].clk; |
690 | level = cz_get_max_sclk_level(hwmgr) - 1; | 690 | level = smu8_get_max_sclk_level(hwmgr) - 1; |
691 | 691 | ||
692 | if (level < table->count) | 692 | if (level < table->count) |
693 | cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[level].clk; | 693 | data->sclk_dpm.soft_max_clk = table->entries[level].clk; |
694 | else | 694 | else |
695 | cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk; | 695 | data->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk; |
696 | 696 | ||
697 | clock = hwmgr->display_config.min_core_set_clock; | 697 | clock = hwmgr->display_config.min_core_set_clock; |
698 | if (clock == 0) | 698 | if (clock == 0) |
699 | pr_debug("min_core_set_clock not set\n"); | 699 | pr_debug("min_core_set_clock not set\n"); |
700 | 700 | ||
701 | if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) { | 701 | if (data->sclk_dpm.hard_min_clk != clock) { |
702 | cz_hwmgr->sclk_dpm.hard_min_clk = clock; | 702 | data->sclk_dpm.hard_min_clk = clock; |
703 | 703 | ||
704 | smum_send_msg_to_smc_with_parameter(hwmgr, | 704 | smum_send_msg_to_smc_with_parameter(hwmgr, |
705 | PPSMC_MSG_SetSclkHardMin, | 705 | PPSMC_MSG_SetSclkHardMin, |
706 | cz_get_sclk_level(hwmgr, | 706 | smu8_get_sclk_level(hwmgr, |
707 | cz_hwmgr->sclk_dpm.hard_min_clk, | 707 | data->sclk_dpm.hard_min_clk, |
708 | PPSMC_MSG_SetSclkHardMin)); | 708 | PPSMC_MSG_SetSclkHardMin)); |
709 | } | 709 | } |
710 | 710 | ||
711 | clock = cz_hwmgr->sclk_dpm.soft_min_clk; | 711 | clock = data->sclk_dpm.soft_min_clk; |
712 | 712 | ||
713 | /* update minimum clocks for Stable P-State feature */ | 713 | /* update minimum clocks for Stable P-State feature */ |
714 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 714 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
@@ -722,36 +722,36 @@ static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr) | |||
722 | clock = stable_pstate_sclk; | 722 | clock = stable_pstate_sclk; |
723 | } | 723 | } |
724 | 724 | ||
725 | if (cz_hwmgr->sclk_dpm.soft_min_clk != clock) { | 725 | if (data->sclk_dpm.soft_min_clk != clock) { |
726 | cz_hwmgr->sclk_dpm.soft_min_clk = clock; | 726 | data->sclk_dpm.soft_min_clk = clock; |
727 | smum_send_msg_to_smc_with_parameter(hwmgr, | 727 | smum_send_msg_to_smc_with_parameter(hwmgr, |
728 | PPSMC_MSG_SetSclkSoftMin, | 728 | PPSMC_MSG_SetSclkSoftMin, |
729 | cz_get_sclk_level(hwmgr, | 729 | smu8_get_sclk_level(hwmgr, |
730 | cz_hwmgr->sclk_dpm.soft_min_clk, | 730 | data->sclk_dpm.soft_min_clk, |
731 | PPSMC_MSG_SetSclkSoftMin)); | 731 | PPSMC_MSG_SetSclkSoftMin)); |
732 | } | 732 | } |
733 | 733 | ||
734 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 734 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
735 | PHM_PlatformCaps_StablePState) && | 735 | PHM_PlatformCaps_StablePState) && |
736 | cz_hwmgr->sclk_dpm.soft_max_clk != clock) { | 736 | data->sclk_dpm.soft_max_clk != clock) { |
737 | cz_hwmgr->sclk_dpm.soft_max_clk = clock; | 737 | data->sclk_dpm.soft_max_clk = clock; |
738 | smum_send_msg_to_smc_with_parameter(hwmgr, | 738 | smum_send_msg_to_smc_with_parameter(hwmgr, |
739 | PPSMC_MSG_SetSclkSoftMax, | 739 | PPSMC_MSG_SetSclkSoftMax, |
740 | cz_get_sclk_level(hwmgr, | 740 | smu8_get_sclk_level(hwmgr, |
741 | cz_hwmgr->sclk_dpm.soft_max_clk, | 741 | data->sclk_dpm.soft_max_clk, |
742 | PPSMC_MSG_SetSclkSoftMax)); | 742 | PPSMC_MSG_SetSclkSoftMax)); |
743 | } | 743 | } |
744 | 744 | ||
745 | return 0; | 745 | return 0; |
746 | } | 746 | } |
747 | 747 | ||
748 | static int cz_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr) | 748 | static int smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr) |
749 | { | 749 | { |
750 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 750 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
751 | PHM_PlatformCaps_SclkDeepSleep)) { | 751 | PHM_PlatformCaps_SclkDeepSleep)) { |
752 | uint32_t clks = hwmgr->display_config.min_core_set_clock_in_sr; | 752 | uint32_t clks = hwmgr->display_config.min_core_set_clock_in_sr; |
753 | if (clks == 0) | 753 | if (clks == 0) |
754 | clks = CZ_MIN_DEEP_SLEEP_SCLK; | 754 | clks = SMU8_MIN_DEEP_SLEEP_SCLK; |
755 | 755 | ||
756 | PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks); | 756 | PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks); |
757 | 757 | ||
@@ -763,21 +763,21 @@ static int cz_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr) | |||
763 | return 0; | 763 | return 0; |
764 | } | 764 | } |
765 | 765 | ||
766 | static int cz_set_watermark_threshold(struct pp_hwmgr *hwmgr) | 766 | static int smu8_set_watermark_threshold(struct pp_hwmgr *hwmgr) |
767 | { | 767 | { |
768 | struct cz_hwmgr *cz_hwmgr = | 768 | struct smu8_hwmgr *data = |
769 | hwmgr->backend; | 769 | hwmgr->backend; |
770 | 770 | ||
771 | smum_send_msg_to_smc_with_parameter(hwmgr, | 771 | smum_send_msg_to_smc_with_parameter(hwmgr, |
772 | PPSMC_MSG_SetWatermarkFrequency, | 772 | PPSMC_MSG_SetWatermarkFrequency, |
773 | cz_hwmgr->sclk_dpm.soft_max_clk); | 773 | data->sclk_dpm.soft_max_clk); |
774 | 774 | ||
775 | return 0; | 775 | return 0; |
776 | } | 776 | } |
777 | 777 | ||
778 | static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock) | 778 | static int smu8_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock) |
779 | { | 779 | { |
780 | struct cz_hwmgr *hw_data = hwmgr->backend; | 780 | struct smu8_hwmgr *hw_data = hwmgr->backend; |
781 | 781 | ||
782 | if (hw_data->is_nb_dpm_enabled) { | 782 | if (hw_data->is_nb_dpm_enabled) { |
783 | if (enable) { | 783 | if (enable) { |
@@ -798,35 +798,35 @@ static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, b | |||
798 | return 0; | 798 | return 0; |
799 | } | 799 | } |
800 | 800 | ||
801 | static int cz_disable_nb_dpm(struct pp_hwmgr *hwmgr) | 801 | static int smu8_disable_nb_dpm(struct pp_hwmgr *hwmgr) |
802 | { | 802 | { |
803 | int ret = 0; | 803 | int ret = 0; |
804 | 804 | ||
805 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 805 | struct smu8_hwmgr *data = hwmgr->backend; |
806 | unsigned long dpm_features = 0; | 806 | unsigned long dpm_features = 0; |
807 | 807 | ||
808 | if (cz_hwmgr->is_nb_dpm_enabled) { | 808 | if (data->is_nb_dpm_enabled) { |
809 | cz_nbdpm_pstate_enable_disable(hwmgr, true, true); | 809 | smu8_nbdpm_pstate_enable_disable(hwmgr, true, true); |
810 | dpm_features |= NB_DPM_MASK; | 810 | dpm_features |= NB_DPM_MASK; |
811 | ret = smum_send_msg_to_smc_with_parameter( | 811 | ret = smum_send_msg_to_smc_with_parameter( |
812 | hwmgr, | 812 | hwmgr, |
813 | PPSMC_MSG_DisableAllSmuFeatures, | 813 | PPSMC_MSG_DisableAllSmuFeatures, |
814 | dpm_features); | 814 | dpm_features); |
815 | if (ret == 0) | 815 | if (ret == 0) |
816 | cz_hwmgr->is_nb_dpm_enabled = false; | 816 | data->is_nb_dpm_enabled = false; |
817 | } | 817 | } |
818 | 818 | ||
819 | return ret; | 819 | return ret; |
820 | } | 820 | } |
821 | 821 | ||
822 | static int cz_enable_nb_dpm(struct pp_hwmgr *hwmgr) | 822 | static int smu8_enable_nb_dpm(struct pp_hwmgr *hwmgr) |
823 | { | 823 | { |
824 | int ret = 0; | 824 | int ret = 0; |
825 | 825 | ||
826 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 826 | struct smu8_hwmgr *data = hwmgr->backend; |
827 | unsigned long dpm_features = 0; | 827 | unsigned long dpm_features = 0; |
828 | 828 | ||
829 | if (!cz_hwmgr->is_nb_dpm_enabled) { | 829 | if (!data->is_nb_dpm_enabled) { |
830 | PP_DBG_LOG("enabling ALL SMU features.\n"); | 830 | PP_DBG_LOG("enabling ALL SMU features.\n"); |
831 | dpm_features |= NB_DPM_MASK; | 831 | dpm_features |= NB_DPM_MASK; |
832 | ret = smum_send_msg_to_smc_with_parameter( | 832 | ret = smum_send_msg_to_smc_with_parameter( |
@@ -834,94 +834,94 @@ static int cz_enable_nb_dpm(struct pp_hwmgr *hwmgr) | |||
834 | PPSMC_MSG_EnableAllSmuFeatures, | 834 | PPSMC_MSG_EnableAllSmuFeatures, |
835 | dpm_features); | 835 | dpm_features); |
836 | if (ret == 0) | 836 | if (ret == 0) |
837 | cz_hwmgr->is_nb_dpm_enabled = true; | 837 | data->is_nb_dpm_enabled = true; |
838 | } | 838 | } |
839 | 839 | ||
840 | return ret; | 840 | return ret; |
841 | } | 841 | } |
842 | 842 | ||
843 | static int cz_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input) | 843 | static int smu8_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input) |
844 | { | 844 | { |
845 | bool disable_switch; | 845 | bool disable_switch; |
846 | bool enable_low_mem_state; | 846 | bool enable_low_mem_state; |
847 | struct cz_hwmgr *hw_data = hwmgr->backend; | 847 | struct smu8_hwmgr *hw_data = hwmgr->backend; |
848 | const struct phm_set_power_state_input *states = (struct phm_set_power_state_input *)input; | 848 | const struct phm_set_power_state_input *states = (struct phm_set_power_state_input *)input; |
849 | const struct cz_power_state *pnew_state = cast_const_PhwCzPowerState(states->pnew_state); | 849 | const struct smu8_power_state *pnew_state = cast_const_smu8_power_state(states->pnew_state); |
850 | 850 | ||
851 | if (hw_data->sys_info.nb_dpm_enable) { | 851 | if (hw_data->sys_info.nb_dpm_enable) { |
852 | disable_switch = hw_data->cc6_settings.nb_pstate_switch_disable ? true : false; | 852 | disable_switch = hw_data->cc6_settings.nb_pstate_switch_disable ? true : false; |
853 | enable_low_mem_state = hw_data->cc6_settings.nb_pstate_switch_disable ? false : true; | 853 | enable_low_mem_state = hw_data->cc6_settings.nb_pstate_switch_disable ? false : true; |
854 | 854 | ||
855 | if (pnew_state->action == FORCE_HIGH) | 855 | if (pnew_state->action == FORCE_HIGH) |
856 | cz_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch); | 856 | smu8_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch); |
857 | else if (pnew_state->action == CANCEL_FORCE_HIGH) | 857 | else if (pnew_state->action == CANCEL_FORCE_HIGH) |
858 | cz_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch); | 858 | smu8_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch); |
859 | else | 859 | else |
860 | cz_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch); | 860 | smu8_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch); |
861 | } | 861 | } |
862 | return 0; | 862 | return 0; |
863 | } | 863 | } |
864 | 864 | ||
865 | static int cz_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) | 865 | static int smu8_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) |
866 | { | 866 | { |
867 | int ret = 0; | 867 | int ret = 0; |
868 | 868 | ||
869 | cz_update_sclk_limit(hwmgr); | 869 | smu8_update_sclk_limit(hwmgr); |
870 | cz_set_deep_sleep_sclk_threshold(hwmgr); | 870 | smu8_set_deep_sleep_sclk_threshold(hwmgr); |
871 | cz_set_watermark_threshold(hwmgr); | 871 | smu8_set_watermark_threshold(hwmgr); |
872 | ret = cz_enable_nb_dpm(hwmgr); | 872 | ret = smu8_enable_nb_dpm(hwmgr); |
873 | if (ret) | 873 | if (ret) |
874 | return ret; | 874 | return ret; |
875 | cz_update_low_mem_pstate(hwmgr, input); | 875 | smu8_update_low_mem_pstate(hwmgr, input); |
876 | 876 | ||
877 | return 0; | 877 | return 0; |
878 | }; | 878 | }; |
879 | 879 | ||
880 | 880 | ||
881 | static int cz_setup_asic_task(struct pp_hwmgr *hwmgr) | 881 | static int smu8_setup_asic_task(struct pp_hwmgr *hwmgr) |
882 | { | 882 | { |
883 | int ret; | 883 | int ret; |
884 | 884 | ||
885 | ret = cz_upload_pptable_to_smu(hwmgr); | 885 | ret = smu8_upload_pptable_to_smu(hwmgr); |
886 | if (ret) | 886 | if (ret) |
887 | return ret; | 887 | return ret; |
888 | ret = cz_init_sclk_limit(hwmgr); | 888 | ret = smu8_init_sclk_limit(hwmgr); |
889 | if (ret) | 889 | if (ret) |
890 | return ret; | 890 | return ret; |
891 | ret = cz_init_uvd_limit(hwmgr); | 891 | ret = smu8_init_uvd_limit(hwmgr); |
892 | if (ret) | 892 | if (ret) |
893 | return ret; | 893 | return ret; |
894 | ret = cz_init_vce_limit(hwmgr); | 894 | ret = smu8_init_vce_limit(hwmgr); |
895 | if (ret) | 895 | if (ret) |
896 | return ret; | 896 | return ret; |
897 | ret = cz_init_acp_limit(hwmgr); | 897 | ret = smu8_init_acp_limit(hwmgr); |
898 | if (ret) | 898 | if (ret) |
899 | return ret; | 899 | return ret; |
900 | 900 | ||
901 | cz_init_power_gate_state(hwmgr); | 901 | smu8_init_power_gate_state(hwmgr); |
902 | cz_init_sclk_threshold(hwmgr); | 902 | smu8_init_sclk_threshold(hwmgr); |
903 | 903 | ||
904 | return 0; | 904 | return 0; |
905 | } | 905 | } |
906 | 906 | ||
907 | static void cz_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr) | 907 | static void smu8_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr) |
908 | { | 908 | { |
909 | struct cz_hwmgr *hw_data = hwmgr->backend; | 909 | struct smu8_hwmgr *hw_data = hwmgr->backend; |
910 | 910 | ||
911 | hw_data->disp_clk_bypass_pending = false; | 911 | hw_data->disp_clk_bypass_pending = false; |
912 | hw_data->disp_clk_bypass = false; | 912 | hw_data->disp_clk_bypass = false; |
913 | } | 913 | } |
914 | 914 | ||
915 | static void cz_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr) | 915 | static void smu8_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr) |
916 | { | 916 | { |
917 | struct cz_hwmgr *hw_data = hwmgr->backend; | 917 | struct smu8_hwmgr *hw_data = hwmgr->backend; |
918 | 918 | ||
919 | hw_data->is_nb_dpm_enabled = false; | 919 | hw_data->is_nb_dpm_enabled = false; |
920 | } | 920 | } |
921 | 921 | ||
922 | static void cz_reset_cc6_data(struct pp_hwmgr *hwmgr) | 922 | static void smu8_reset_cc6_data(struct pp_hwmgr *hwmgr) |
923 | { | 923 | { |
924 | struct cz_hwmgr *hw_data = hwmgr->backend; | 924 | struct smu8_hwmgr *hw_data = hwmgr->backend; |
925 | 925 | ||
926 | hw_data->cc6_settings.cc6_setting_changed = false; | 926 | hw_data->cc6_settings.cc6_setting_changed = false; |
927 | hw_data->cc6_settings.cpu_pstate_separation_time = 0; | 927 | hw_data->cc6_settings.cpu_pstate_separation_time = 0; |
@@ -929,45 +929,47 @@ static void cz_reset_cc6_data(struct pp_hwmgr *hwmgr) | |||
929 | hw_data->cc6_settings.cpu_pstate_disable = false; | 929 | hw_data->cc6_settings.cpu_pstate_disable = false; |
930 | } | 930 | } |
931 | 931 | ||
932 | static int cz_power_off_asic(struct pp_hwmgr *hwmgr) | 932 | static int smu8_power_off_asic(struct pp_hwmgr *hwmgr) |
933 | { | 933 | { |
934 | cz_power_up_display_clock_sys_pll(hwmgr); | 934 | smu8_power_up_display_clock_sys_pll(hwmgr); |
935 | cz_clear_nb_dpm_flag(hwmgr); | 935 | smu8_clear_nb_dpm_flag(hwmgr); |
936 | cz_reset_cc6_data(hwmgr); | 936 | smu8_reset_cc6_data(hwmgr); |
937 | return 0; | 937 | return 0; |
938 | }; | 938 | }; |
939 | 939 | ||
940 | static void cz_program_voting_clients(struct pp_hwmgr *hwmgr) | 940 | static void smu8_program_voting_clients(struct pp_hwmgr *hwmgr) |
941 | { | 941 | { |
942 | PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0, | 942 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
943 | PPCZ_VOTINGRIGHTSCLIENTS_DFLT0); | 943 | ixCG_FREQ_TRAN_VOTING_0, |
944 | SMU8_VOTINGRIGHTSCLIENTS_DFLT0); | ||
944 | } | 945 | } |
945 | 946 | ||
946 | static void cz_clear_voting_clients(struct pp_hwmgr *hwmgr) | 947 | static void smu8_clear_voting_clients(struct pp_hwmgr *hwmgr) |
947 | { | 948 | { |
948 | PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0, 0); | 949 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
950 | ixCG_FREQ_TRAN_VOTING_0, 0); | ||
949 | } | 951 | } |
950 | 952 | ||
951 | static int cz_start_dpm(struct pp_hwmgr *hwmgr) | 953 | static int smu8_start_dpm(struct pp_hwmgr *hwmgr) |
952 | { | 954 | { |
953 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 955 | struct smu8_hwmgr *data = hwmgr->backend; |
954 | 956 | ||
955 | cz_hwmgr->dpm_flags |= DPMFlags_SCLK_Enabled; | 957 | data->dpm_flags |= DPMFlags_SCLK_Enabled; |
956 | 958 | ||
957 | return smum_send_msg_to_smc_with_parameter(hwmgr, | 959 | return smum_send_msg_to_smc_with_parameter(hwmgr, |
958 | PPSMC_MSG_EnableAllSmuFeatures, | 960 | PPSMC_MSG_EnableAllSmuFeatures, |
959 | SCLK_DPM_MASK); | 961 | SCLK_DPM_MASK); |
960 | } | 962 | } |
961 | 963 | ||
962 | static int cz_stop_dpm(struct pp_hwmgr *hwmgr) | 964 | static int smu8_stop_dpm(struct pp_hwmgr *hwmgr) |
963 | { | 965 | { |
964 | int ret = 0; | 966 | int ret = 0; |
965 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 967 | struct smu8_hwmgr *data = hwmgr->backend; |
966 | unsigned long dpm_features = 0; | 968 | unsigned long dpm_features = 0; |
967 | 969 | ||
968 | if (cz_hwmgr->dpm_flags & DPMFlags_SCLK_Enabled) { | 970 | if (data->dpm_flags & DPMFlags_SCLK_Enabled) { |
969 | dpm_features |= SCLK_DPM_MASK; | 971 | dpm_features |= SCLK_DPM_MASK; |
970 | cz_hwmgr->dpm_flags &= ~DPMFlags_SCLK_Enabled; | 972 | data->dpm_flags &= ~DPMFlags_SCLK_Enabled; |
971 | ret = smum_send_msg_to_smc_with_parameter(hwmgr, | 973 | ret = smum_send_msg_to_smc_with_parameter(hwmgr, |
972 | PPSMC_MSG_DisableAllSmuFeatures, | 974 | PPSMC_MSG_DisableAllSmuFeatures, |
973 | dpm_features); | 975 | dpm_features); |
@@ -975,80 +977,80 @@ static int cz_stop_dpm(struct pp_hwmgr *hwmgr) | |||
975 | return ret; | 977 | return ret; |
976 | } | 978 | } |
977 | 979 | ||
978 | static int cz_program_bootup_state(struct pp_hwmgr *hwmgr) | 980 | static int smu8_program_bootup_state(struct pp_hwmgr *hwmgr) |
979 | { | 981 | { |
980 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 982 | struct smu8_hwmgr *data = hwmgr->backend; |
981 | 983 | ||
982 | cz_hwmgr->sclk_dpm.soft_min_clk = cz_hwmgr->sys_info.bootup_engine_clock; | 984 | data->sclk_dpm.soft_min_clk = data->sys_info.bootup_engine_clock; |
983 | cz_hwmgr->sclk_dpm.soft_max_clk = cz_hwmgr->sys_info.bootup_engine_clock; | 985 | data->sclk_dpm.soft_max_clk = data->sys_info.bootup_engine_clock; |
984 | 986 | ||
985 | smum_send_msg_to_smc_with_parameter(hwmgr, | 987 | smum_send_msg_to_smc_with_parameter(hwmgr, |
986 | PPSMC_MSG_SetSclkSoftMin, | 988 | PPSMC_MSG_SetSclkSoftMin, |
987 | cz_get_sclk_level(hwmgr, | 989 | smu8_get_sclk_level(hwmgr, |
988 | cz_hwmgr->sclk_dpm.soft_min_clk, | 990 | data->sclk_dpm.soft_min_clk, |
989 | PPSMC_MSG_SetSclkSoftMin)); | 991 | PPSMC_MSG_SetSclkSoftMin)); |
990 | 992 | ||
991 | smum_send_msg_to_smc_with_parameter(hwmgr, | 993 | smum_send_msg_to_smc_with_parameter(hwmgr, |
992 | PPSMC_MSG_SetSclkSoftMax, | 994 | PPSMC_MSG_SetSclkSoftMax, |
993 | cz_get_sclk_level(hwmgr, | 995 | smu8_get_sclk_level(hwmgr, |
994 | cz_hwmgr->sclk_dpm.soft_max_clk, | 996 | data->sclk_dpm.soft_max_clk, |
995 | PPSMC_MSG_SetSclkSoftMax)); | 997 | PPSMC_MSG_SetSclkSoftMax)); |
996 | 998 | ||
997 | return 0; | 999 | return 0; |
998 | } | 1000 | } |
999 | 1001 | ||
1000 | static void cz_reset_acp_boot_level(struct pp_hwmgr *hwmgr) | 1002 | static void smu8_reset_acp_boot_level(struct pp_hwmgr *hwmgr) |
1001 | { | 1003 | { |
1002 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 1004 | struct smu8_hwmgr *data = hwmgr->backend; |
1003 | 1005 | ||
1004 | cz_hwmgr->acp_boot_level = 0xff; | 1006 | data->acp_boot_level = 0xff; |
1005 | } | 1007 | } |
1006 | 1008 | ||
1007 | static int cz_disable_dpm_tasks(struct pp_hwmgr *hwmgr) | 1009 | static int smu8_disable_dpm_tasks(struct pp_hwmgr *hwmgr) |
1008 | { | 1010 | { |
1009 | cz_disable_nb_dpm(hwmgr); | 1011 | smu8_disable_nb_dpm(hwmgr); |
1010 | 1012 | ||
1011 | cz_clear_voting_clients(hwmgr); | 1013 | smu8_clear_voting_clients(hwmgr); |
1012 | if (cz_stop_dpm(hwmgr)) | 1014 | if (smu8_stop_dpm(hwmgr)) |
1013 | return -EINVAL; | 1015 | return -EINVAL; |
1014 | 1016 | ||
1015 | return 0; | 1017 | return 0; |
1016 | }; | 1018 | }; |
1017 | 1019 | ||
1018 | static int cz_enable_dpm_tasks(struct pp_hwmgr *hwmgr) | 1020 | static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr) |
1019 | { | 1021 | { |
1020 | cz_program_voting_clients(hwmgr); | 1022 | smu8_program_voting_clients(hwmgr); |
1021 | if (cz_start_dpm(hwmgr)) | 1023 | if (smu8_start_dpm(hwmgr)) |
1022 | return -EINVAL; | 1024 | return -EINVAL; |
1023 | cz_program_bootup_state(hwmgr); | 1025 | smu8_program_bootup_state(hwmgr); |
1024 | cz_reset_acp_boot_level(hwmgr); | 1026 | smu8_reset_acp_boot_level(hwmgr); |
1025 | 1027 | ||
1026 | return 0; | 1028 | return 0; |
1027 | }; | 1029 | }; |
1028 | 1030 | ||
1029 | static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, | 1031 | static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, |
1030 | struct pp_power_state *prequest_ps, | 1032 | struct pp_power_state *prequest_ps, |
1031 | const struct pp_power_state *pcurrent_ps) | 1033 | const struct pp_power_state *pcurrent_ps) |
1032 | { | 1034 | { |
1033 | struct cz_power_state *cz_ps = | 1035 | struct smu8_power_state *smu8_ps = |
1034 | cast_PhwCzPowerState(&prequest_ps->hardware); | 1036 | cast_smu8_power_state(&prequest_ps->hardware); |
1035 | 1037 | ||
1036 | const struct cz_power_state *cz_current_ps = | 1038 | const struct smu8_power_state *smu8_current_ps = |
1037 | cast_const_PhwCzPowerState(&pcurrent_ps->hardware); | 1039 | cast_const_smu8_power_state(&pcurrent_ps->hardware); |
1038 | 1040 | ||
1039 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 1041 | struct smu8_hwmgr *data = hwmgr->backend; |
1040 | struct PP_Clocks clocks = {0, 0, 0, 0}; | 1042 | struct PP_Clocks clocks = {0, 0, 0, 0}; |
1041 | bool force_high; | 1043 | bool force_high; |
1042 | uint32_t num_of_active_displays = 0; | 1044 | uint32_t num_of_active_displays = 0; |
1043 | struct cgs_display_info info = {0}; | 1045 | struct cgs_display_info info = {0}; |
1044 | 1046 | ||
1045 | cz_ps->need_dfs_bypass = true; | 1047 | smu8_ps->need_dfs_bypass = true; |
1046 | 1048 | ||
1047 | cz_hwmgr->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label); | 1049 | data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label); |
1048 | 1050 | ||
1049 | clocks.memoryClock = hwmgr->display_config.min_mem_set_clock != 0 ? | 1051 | clocks.memoryClock = hwmgr->display_config.min_mem_set_clock != 0 ? |
1050 | hwmgr->display_config.min_mem_set_clock : | 1052 | hwmgr->display_config.min_mem_set_clock : |
1051 | cz_hwmgr->sys_info.nbp_memory_clock[1]; | 1053 | data->sys_info.nbp_memory_clock[1]; |
1052 | 1054 | ||
1053 | cgs_get_active_displays_info(hwmgr->device, &info); | 1055 | cgs_get_active_displays_info(hwmgr->device, &info); |
1054 | num_of_active_displays = info.display_count; | 1056 | num_of_active_displays = info.display_count; |
@@ -1056,56 +1058,56 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, | |||
1056 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) | 1058 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) |
1057 | clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk; | 1059 | clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk; |
1058 | 1060 | ||
1059 | force_high = (clocks.memoryClock > cz_hwmgr->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1]) | 1061 | force_high = (clocks.memoryClock > data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1]) |
1060 | || (num_of_active_displays >= 3); | 1062 | || (num_of_active_displays >= 3); |
1061 | 1063 | ||
1062 | cz_ps->action = cz_current_ps->action; | 1064 | smu8_ps->action = smu8_current_ps->action; |
1063 | 1065 | ||
1064 | if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) | 1066 | if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) |
1065 | cz_nbdpm_pstate_enable_disable(hwmgr, false, false); | 1067 | smu8_nbdpm_pstate_enable_disable(hwmgr, false, false); |
1066 | else if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) | 1068 | else if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) |
1067 | cz_nbdpm_pstate_enable_disable(hwmgr, false, true); | 1069 | smu8_nbdpm_pstate_enable_disable(hwmgr, false, true); |
1068 | else if (!force_high && (cz_ps->action == FORCE_HIGH)) | 1070 | else if (!force_high && (smu8_ps->action == FORCE_HIGH)) |
1069 | cz_ps->action = CANCEL_FORCE_HIGH; | 1071 | smu8_ps->action = CANCEL_FORCE_HIGH; |
1070 | else if (force_high && (cz_ps->action != FORCE_HIGH)) | 1072 | else if (force_high && (smu8_ps->action != FORCE_HIGH)) |
1071 | cz_ps->action = FORCE_HIGH; | 1073 | smu8_ps->action = FORCE_HIGH; |
1072 | else | 1074 | else |
1073 | cz_ps->action = DO_NOTHING; | 1075 | smu8_ps->action = DO_NOTHING; |
1074 | 1076 | ||
1075 | return 0; | 1077 | return 0; |
1076 | } | 1078 | } |
1077 | 1079 | ||
1078 | static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr) | 1080 | static int smu8_hwmgr_backend_init(struct pp_hwmgr *hwmgr) |
1079 | { | 1081 | { |
1080 | int result = 0; | 1082 | int result = 0; |
1081 | struct cz_hwmgr *data; | 1083 | struct smu8_hwmgr *data; |
1082 | 1084 | ||
1083 | data = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL); | 1085 | data = kzalloc(sizeof(struct smu8_hwmgr), GFP_KERNEL); |
1084 | if (data == NULL) | 1086 | if (data == NULL) |
1085 | return -ENOMEM; | 1087 | return -ENOMEM; |
1086 | 1088 | ||
1087 | hwmgr->backend = data; | 1089 | hwmgr->backend = data; |
1088 | 1090 | ||
1089 | result = cz_initialize_dpm_defaults(hwmgr); | 1091 | result = smu8_initialize_dpm_defaults(hwmgr); |
1090 | if (result != 0) { | 1092 | if (result != 0) { |
1091 | pr_err("cz_initialize_dpm_defaults failed\n"); | 1093 | pr_err("smu8_initialize_dpm_defaults failed\n"); |
1092 | return result; | 1094 | return result; |
1093 | } | 1095 | } |
1094 | 1096 | ||
1095 | result = cz_get_system_info_data(hwmgr); | 1097 | result = smu8_get_system_info_data(hwmgr); |
1096 | if (result != 0) { | 1098 | if (result != 0) { |
1097 | pr_err("cz_get_system_info_data failed\n"); | 1099 | pr_err("smu8_get_system_info_data failed\n"); |
1098 | return result; | 1100 | return result; |
1099 | } | 1101 | } |
1100 | 1102 | ||
1101 | cz_construct_boot_state(hwmgr); | 1103 | smu8_construct_boot_state(hwmgr); |
1102 | 1104 | ||
1103 | hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = CZ_MAX_HARDWARE_POWERLEVELS; | 1105 | hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = SMU8_MAX_HARDWARE_POWERLEVELS; |
1104 | 1106 | ||
1105 | return result; | 1107 | return result; |
1106 | } | 1108 | } |
1107 | 1109 | ||
1108 | static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) | 1110 | static int smu8_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) |
1109 | { | 1111 | { |
1110 | if (hwmgr != NULL) { | 1112 | if (hwmgr != NULL) { |
1111 | kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); | 1113 | kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); |
@@ -1117,28 +1119,28 @@ static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) | |||
1117 | return 0; | 1119 | return 0; |
1118 | } | 1120 | } |
1119 | 1121 | ||
1120 | static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr) | 1122 | static int smu8_phm_force_dpm_highest(struct pp_hwmgr *hwmgr) |
1121 | { | 1123 | { |
1122 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 1124 | struct smu8_hwmgr *data = hwmgr->backend; |
1123 | 1125 | ||
1124 | smum_send_msg_to_smc_with_parameter(hwmgr, | 1126 | smum_send_msg_to_smc_with_parameter(hwmgr, |
1125 | PPSMC_MSG_SetSclkSoftMin, | 1127 | PPSMC_MSG_SetSclkSoftMin, |
1126 | cz_get_sclk_level(hwmgr, | 1128 | smu8_get_sclk_level(hwmgr, |
1127 | cz_hwmgr->sclk_dpm.soft_max_clk, | 1129 | data->sclk_dpm.soft_max_clk, |
1128 | PPSMC_MSG_SetSclkSoftMin)); | 1130 | PPSMC_MSG_SetSclkSoftMin)); |
1129 | 1131 | ||
1130 | smum_send_msg_to_smc_with_parameter(hwmgr, | 1132 | smum_send_msg_to_smc_with_parameter(hwmgr, |
1131 | PPSMC_MSG_SetSclkSoftMax, | 1133 | PPSMC_MSG_SetSclkSoftMax, |
1132 | cz_get_sclk_level(hwmgr, | 1134 | smu8_get_sclk_level(hwmgr, |
1133 | cz_hwmgr->sclk_dpm.soft_max_clk, | 1135 | data->sclk_dpm.soft_max_clk, |
1134 | PPSMC_MSG_SetSclkSoftMax)); | 1136 | PPSMC_MSG_SetSclkSoftMax)); |
1135 | 1137 | ||
1136 | return 0; | 1138 | return 0; |
1137 | } | 1139 | } |
1138 | 1140 | ||
1139 | static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr) | 1141 | static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr) |
1140 | { | 1142 | { |
1141 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 1143 | struct smu8_hwmgr *data = hwmgr->backend; |
1142 | struct phm_clock_voltage_dependency_table *table = | 1144 | struct phm_clock_voltage_dependency_table *table = |
1143 | hwmgr->dyn_state.vddc_dependency_on_sclk; | 1145 | hwmgr->dyn_state.vddc_dependency_on_sclk; |
1144 | unsigned long clock = 0, level; | 1146 | unsigned long clock = 0, level; |
@@ -1146,56 +1148,56 @@ static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr) | |||
1146 | if (NULL == table || table->count <= 0) | 1148 | if (NULL == table || table->count <= 0) |
1147 | return -EINVAL; | 1149 | return -EINVAL; |
1148 | 1150 | ||
1149 | cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk; | 1151 | data->sclk_dpm.soft_min_clk = table->entries[0].clk; |
1150 | cz_hwmgr->sclk_dpm.hard_min_clk = table->entries[0].clk; | 1152 | data->sclk_dpm.hard_min_clk = table->entries[0].clk; |
1151 | hwmgr->pstate_sclk = table->entries[0].clk; | 1153 | hwmgr->pstate_sclk = table->entries[0].clk; |
1152 | hwmgr->pstate_mclk = 0; | 1154 | hwmgr->pstate_mclk = 0; |
1153 | 1155 | ||
1154 | level = cz_get_max_sclk_level(hwmgr) - 1; | 1156 | level = smu8_get_max_sclk_level(hwmgr) - 1; |
1155 | 1157 | ||
1156 | if (level < table->count) | 1158 | if (level < table->count) |
1157 | clock = table->entries[level].clk; | 1159 | clock = table->entries[level].clk; |
1158 | else | 1160 | else |
1159 | clock = table->entries[table->count - 1].clk; | 1161 | clock = table->entries[table->count - 1].clk; |
1160 | 1162 | ||
1161 | cz_hwmgr->sclk_dpm.soft_max_clk = clock; | 1163 | data->sclk_dpm.soft_max_clk = clock; |
1162 | cz_hwmgr->sclk_dpm.hard_max_clk = clock; | 1164 | data->sclk_dpm.hard_max_clk = clock; |
1163 | 1165 | ||
1164 | smum_send_msg_to_smc_with_parameter(hwmgr, | 1166 | smum_send_msg_to_smc_with_parameter(hwmgr, |
1165 | PPSMC_MSG_SetSclkSoftMin, | 1167 | PPSMC_MSG_SetSclkSoftMin, |
1166 | cz_get_sclk_level(hwmgr, | 1168 | smu8_get_sclk_level(hwmgr, |
1167 | cz_hwmgr->sclk_dpm.soft_min_clk, | 1169 | data->sclk_dpm.soft_min_clk, |
1168 | PPSMC_MSG_SetSclkSoftMin)); | 1170 | PPSMC_MSG_SetSclkSoftMin)); |
1169 | 1171 | ||
1170 | smum_send_msg_to_smc_with_parameter(hwmgr, | 1172 | smum_send_msg_to_smc_with_parameter(hwmgr, |
1171 | PPSMC_MSG_SetSclkSoftMax, | 1173 | PPSMC_MSG_SetSclkSoftMax, |
1172 | cz_get_sclk_level(hwmgr, | 1174 | smu8_get_sclk_level(hwmgr, |
1173 | cz_hwmgr->sclk_dpm.soft_max_clk, | 1175 | data->sclk_dpm.soft_max_clk, |
1174 | PPSMC_MSG_SetSclkSoftMax)); | 1176 | PPSMC_MSG_SetSclkSoftMax)); |
1175 | 1177 | ||
1176 | return 0; | 1178 | return 0; |
1177 | } | 1179 | } |
1178 | 1180 | ||
1179 | static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr) | 1181 | static int smu8_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr) |
1180 | { | 1182 | { |
1181 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 1183 | struct smu8_hwmgr *data = hwmgr->backend; |
1182 | 1184 | ||
1183 | smum_send_msg_to_smc_with_parameter(hwmgr, | 1185 | smum_send_msg_to_smc_with_parameter(hwmgr, |
1184 | PPSMC_MSG_SetSclkSoftMax, | 1186 | PPSMC_MSG_SetSclkSoftMax, |
1185 | cz_get_sclk_level(hwmgr, | 1187 | smu8_get_sclk_level(hwmgr, |
1186 | cz_hwmgr->sclk_dpm.soft_min_clk, | 1188 | data->sclk_dpm.soft_min_clk, |
1187 | PPSMC_MSG_SetSclkSoftMax)); | 1189 | PPSMC_MSG_SetSclkSoftMax)); |
1188 | 1190 | ||
1189 | smum_send_msg_to_smc_with_parameter(hwmgr, | 1191 | smum_send_msg_to_smc_with_parameter(hwmgr, |
1190 | PPSMC_MSG_SetSclkSoftMin, | 1192 | PPSMC_MSG_SetSclkSoftMin, |
1191 | cz_get_sclk_level(hwmgr, | 1193 | smu8_get_sclk_level(hwmgr, |
1192 | cz_hwmgr->sclk_dpm.soft_min_clk, | 1194 | data->sclk_dpm.soft_min_clk, |
1193 | PPSMC_MSG_SetSclkSoftMin)); | 1195 | PPSMC_MSG_SetSclkSoftMin)); |
1194 | 1196 | ||
1195 | return 0; | 1197 | return 0; |
1196 | } | 1198 | } |
1197 | 1199 | ||
1198 | static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, | 1200 | static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, |
1199 | enum amd_dpm_forced_level level) | 1201 | enum amd_dpm_forced_level level) |
1200 | { | 1202 | { |
1201 | int ret = 0; | 1203 | int ret = 0; |
@@ -1203,15 +1205,15 @@ static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, | |||
1203 | switch (level) { | 1205 | switch (level) { |
1204 | case AMD_DPM_FORCED_LEVEL_HIGH: | 1206 | case AMD_DPM_FORCED_LEVEL_HIGH: |
1205 | case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: | 1207 | case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: |
1206 | ret = cz_phm_force_dpm_highest(hwmgr); | 1208 | ret = smu8_phm_force_dpm_highest(hwmgr); |
1207 | break; | 1209 | break; |
1208 | case AMD_DPM_FORCED_LEVEL_LOW: | 1210 | case AMD_DPM_FORCED_LEVEL_LOW: |
1209 | case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: | 1211 | case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: |
1210 | case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: | 1212 | case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: |
1211 | ret = cz_phm_force_dpm_lowest(hwmgr); | 1213 | ret = smu8_phm_force_dpm_lowest(hwmgr); |
1212 | break; | 1214 | break; |
1213 | case AMD_DPM_FORCED_LEVEL_AUTO: | 1215 | case AMD_DPM_FORCED_LEVEL_AUTO: |
1214 | ret = cz_phm_unforce_dpm_levels(hwmgr); | 1216 | ret = smu8_phm_unforce_dpm_levels(hwmgr); |
1215 | break; | 1217 | break; |
1216 | case AMD_DPM_FORCED_LEVEL_MANUAL: | 1218 | case AMD_DPM_FORCED_LEVEL_MANUAL: |
1217 | case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: | 1219 | case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: |
@@ -1222,14 +1224,14 @@ static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, | |||
1222 | return ret; | 1224 | return ret; |
1223 | } | 1225 | } |
1224 | 1226 | ||
1225 | static int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr) | 1227 | static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr) |
1226 | { | 1228 | { |
1227 | if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) | 1229 | if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) |
1228 | return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF); | 1230 | return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF); |
1229 | return 0; | 1231 | return 0; |
1230 | } | 1232 | } |
1231 | 1233 | ||
1232 | static int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr) | 1234 | static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr) |
1233 | { | 1235 | { |
1234 | if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) { | 1236 | if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) { |
1235 | return smum_send_msg_to_smc_with_parameter( | 1237 | return smum_send_msg_to_smc_with_parameter( |
@@ -1241,22 +1243,22 @@ static int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr) | |||
1241 | return 0; | 1243 | return 0; |
1242 | } | 1244 | } |
1243 | 1245 | ||
1244 | static int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr) | 1246 | static int smu8_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr) |
1245 | { | 1247 | { |
1246 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 1248 | struct smu8_hwmgr *data = hwmgr->backend; |
1247 | struct phm_vce_clock_voltage_dependency_table *ptable = | 1249 | struct phm_vce_clock_voltage_dependency_table *ptable = |
1248 | hwmgr->dyn_state.vce_clock_voltage_dependency_table; | 1250 | hwmgr->dyn_state.vce_clock_voltage_dependency_table; |
1249 | 1251 | ||
1250 | /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */ | 1252 | /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */ |
1251 | if (PP_CAP(PHM_PlatformCaps_StablePState) || | 1253 | if (PP_CAP(PHM_PlatformCaps_StablePState) || |
1252 | hwmgr->en_umd_pstate) { | 1254 | hwmgr->en_umd_pstate) { |
1253 | cz_hwmgr->vce_dpm.hard_min_clk = | 1255 | data->vce_dpm.hard_min_clk = |
1254 | ptable->entries[ptable->count - 1].ecclk; | 1256 | ptable->entries[ptable->count - 1].ecclk; |
1255 | 1257 | ||
1256 | smum_send_msg_to_smc_with_parameter(hwmgr, | 1258 | smum_send_msg_to_smc_with_parameter(hwmgr, |
1257 | PPSMC_MSG_SetEclkHardMin, | 1259 | PPSMC_MSG_SetEclkHardMin, |
1258 | cz_get_eclk_level(hwmgr, | 1260 | smu8_get_eclk_level(hwmgr, |
1259 | cz_hwmgr->vce_dpm.hard_min_clk, | 1261 | data->vce_dpm.hard_min_clk, |
1260 | PPSMC_MSG_SetEclkHardMin)); | 1262 | PPSMC_MSG_SetEclkHardMin)); |
1261 | } else { | 1263 | } else { |
1262 | 1264 | ||
@@ -1270,7 +1272,7 @@ static int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr) | |||
1270 | return 0; | 1272 | return 0; |
1271 | } | 1273 | } |
1272 | 1274 | ||
1273 | static int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr) | 1275 | static int smu8_dpm_powerdown_vce(struct pp_hwmgr *hwmgr) |
1274 | { | 1276 | { |
1275 | if (PP_CAP(PHM_PlatformCaps_VCEPowerGating)) | 1277 | if (PP_CAP(PHM_PlatformCaps_VCEPowerGating)) |
1276 | return smum_send_msg_to_smc(hwmgr, | 1278 | return smum_send_msg_to_smc(hwmgr, |
@@ -1278,7 +1280,7 @@ static int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr) | |||
1278 | return 0; | 1280 | return 0; |
1279 | } | 1281 | } |
1280 | 1282 | ||
1281 | static int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr) | 1283 | static int smu8_dpm_powerup_vce(struct pp_hwmgr *hwmgr) |
1282 | { | 1284 | { |
1283 | if (PP_CAP(PHM_PlatformCaps_VCEPowerGating)) | 1285 | if (PP_CAP(PHM_PlatformCaps_VCEPowerGating)) |
1284 | return smum_send_msg_to_smc(hwmgr, | 1286 | return smum_send_msg_to_smc(hwmgr, |
@@ -1286,17 +1288,17 @@ static int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr) | |||
1286 | return 0; | 1288 | return 0; |
1287 | } | 1289 | } |
1288 | 1290 | ||
1289 | static uint32_t cz_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) | 1291 | static uint32_t smu8_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) |
1290 | { | 1292 | { |
1291 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 1293 | struct smu8_hwmgr *data = hwmgr->backend; |
1292 | 1294 | ||
1293 | return cz_hwmgr->sys_info.bootup_uma_clock; | 1295 | return data->sys_info.bootup_uma_clock; |
1294 | } | 1296 | } |
1295 | 1297 | ||
1296 | static uint32_t cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) | 1298 | static uint32_t smu8_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) |
1297 | { | 1299 | { |
1298 | struct pp_power_state *ps; | 1300 | struct pp_power_state *ps; |
1299 | struct cz_power_state *cz_ps; | 1301 | struct smu8_power_state *smu8_ps; |
1300 | 1302 | ||
1301 | if (hwmgr == NULL) | 1303 | if (hwmgr == NULL) |
1302 | return -EINVAL; | 1304 | return -EINVAL; |
@@ -1306,59 +1308,59 @@ static uint32_t cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) | |||
1306 | if (ps == NULL) | 1308 | if (ps == NULL) |
1307 | return -EINVAL; | 1309 | return -EINVAL; |
1308 | 1310 | ||
1309 | cz_ps = cast_PhwCzPowerState(&ps->hardware); | 1311 | smu8_ps = cast_smu8_power_state(&ps->hardware); |
1310 | 1312 | ||
1311 | if (low) | 1313 | if (low) |
1312 | return cz_ps->levels[0].engineClock; | 1314 | return smu8_ps->levels[0].engineClock; |
1313 | else | 1315 | else |
1314 | return cz_ps->levels[cz_ps->level-1].engineClock; | 1316 | return smu8_ps->levels[smu8_ps->level-1].engineClock; |
1315 | } | 1317 | } |
1316 | 1318 | ||
1317 | static int cz_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, | 1319 | static int smu8_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, |
1318 | struct pp_hw_power_state *hw_ps) | 1320 | struct pp_hw_power_state *hw_ps) |
1319 | { | 1321 | { |
1320 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 1322 | struct smu8_hwmgr *data = hwmgr->backend; |
1321 | struct cz_power_state *cz_ps = cast_PhwCzPowerState(hw_ps); | 1323 | struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps); |
1322 | 1324 | ||
1323 | cz_ps->level = 1; | 1325 | smu8_ps->level = 1; |
1324 | cz_ps->nbps_flags = 0; | 1326 | smu8_ps->nbps_flags = 0; |
1325 | cz_ps->bapm_flags = 0; | 1327 | smu8_ps->bapm_flags = 0; |
1326 | cz_ps->levels[0] = cz_hwmgr->boot_power_level; | 1328 | smu8_ps->levels[0] = data->boot_power_level; |
1327 | 1329 | ||
1328 | return 0; | 1330 | return 0; |
1329 | } | 1331 | } |
1330 | 1332 | ||
1331 | static int cz_dpm_get_pp_table_entry_callback( | 1333 | static int smu8_dpm_get_pp_table_entry_callback( |
1332 | struct pp_hwmgr *hwmgr, | 1334 | struct pp_hwmgr *hwmgr, |
1333 | struct pp_hw_power_state *hw_ps, | 1335 | struct pp_hw_power_state *hw_ps, |
1334 | unsigned int index, | 1336 | unsigned int index, |
1335 | const void *clock_info) | 1337 | const void *clock_info) |
1336 | { | 1338 | { |
1337 | struct cz_power_state *cz_ps = cast_PhwCzPowerState(hw_ps); | 1339 | struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps); |
1338 | 1340 | ||
1339 | const ATOM_PPLIB_CZ_CLOCK_INFO *cz_clock_info = clock_info; | 1341 | const ATOM_PPLIB_CZ_CLOCK_INFO *smu8_clock_info = clock_info; |
1340 | 1342 | ||
1341 | struct phm_clock_voltage_dependency_table *table = | 1343 | struct phm_clock_voltage_dependency_table *table = |
1342 | hwmgr->dyn_state.vddc_dependency_on_sclk; | 1344 | hwmgr->dyn_state.vddc_dependency_on_sclk; |
1343 | uint8_t clock_info_index = cz_clock_info->index; | 1345 | uint8_t clock_info_index = smu8_clock_info->index; |
1344 | 1346 | ||
1345 | if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1)) | 1347 | if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1)) |
1346 | clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1); | 1348 | clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1); |
1347 | 1349 | ||
1348 | cz_ps->levels[index].engineClock = table->entries[clock_info_index].clk; | 1350 | smu8_ps->levels[index].engineClock = table->entries[clock_info_index].clk; |
1349 | cz_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v; | 1351 | smu8_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v; |
1350 | 1352 | ||
1351 | cz_ps->level = index + 1; | 1353 | smu8_ps->level = index + 1; |
1352 | 1354 | ||
1353 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { | 1355 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { |
1354 | cz_ps->levels[index].dsDividerIndex = 5; | 1356 | smu8_ps->levels[index].dsDividerIndex = 5; |
1355 | cz_ps->levels[index].ssDividerIndex = 5; | 1357 | smu8_ps->levels[index].ssDividerIndex = 5; |
1356 | } | 1358 | } |
1357 | 1359 | ||
1358 | return 0; | 1360 | return 0; |
1359 | } | 1361 | } |
1360 | 1362 | ||
1361 | static int cz_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr) | 1363 | static int smu8_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr) |
1362 | { | 1364 | { |
1363 | int result; | 1365 | int result; |
1364 | unsigned long ret = 0; | 1366 | unsigned long ret = 0; |
@@ -1368,31 +1370,31 @@ static int cz_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr) | |||
1368 | return result ? 0 : ret; | 1370 | return result ? 0 : ret; |
1369 | } | 1371 | } |
1370 | 1372 | ||
1371 | static int cz_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr, | 1373 | static int smu8_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr, |
1372 | unsigned long entry, struct pp_power_state *ps) | 1374 | unsigned long entry, struct pp_power_state *ps) |
1373 | { | 1375 | { |
1374 | int result; | 1376 | int result; |
1375 | struct cz_power_state *cz_ps; | 1377 | struct smu8_power_state *smu8_ps; |
1376 | 1378 | ||
1377 | ps->hardware.magic = PhwCz_Magic; | 1379 | ps->hardware.magic = smu8_magic; |
1378 | 1380 | ||
1379 | cz_ps = cast_PhwCzPowerState(&(ps->hardware)); | 1381 | smu8_ps = cast_smu8_power_state(&(ps->hardware)); |
1380 | 1382 | ||
1381 | result = pp_tables_get_entry(hwmgr, entry, ps, | 1383 | result = pp_tables_get_entry(hwmgr, entry, ps, |
1382 | cz_dpm_get_pp_table_entry_callback); | 1384 | smu8_dpm_get_pp_table_entry_callback); |
1383 | 1385 | ||
1384 | cz_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK; | 1386 | smu8_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK; |
1385 | cz_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK; | 1387 | smu8_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK; |
1386 | 1388 | ||
1387 | return result; | 1389 | return result; |
1388 | } | 1390 | } |
1389 | 1391 | ||
1390 | static int cz_get_power_state_size(struct pp_hwmgr *hwmgr) | 1392 | static int smu8_get_power_state_size(struct pp_hwmgr *hwmgr) |
1391 | { | 1393 | { |
1392 | return sizeof(struct cz_power_state); | 1394 | return sizeof(struct smu8_power_state); |
1393 | } | 1395 | } |
1394 | 1396 | ||
1395 | static void cz_hw_print_display_cfg( | 1397 | static void smu8_hw_print_display_cfg( |
1396 | const struct cc6_settings *cc6_settings) | 1398 | const struct cc6_settings *cc6_settings) |
1397 | { | 1399 | { |
1398 | PP_DBG_LOG("New Display Configuration:\n"); | 1400 | PP_DBG_LOG("New Display Configuration:\n"); |
@@ -1407,16 +1409,16 @@ static void cz_hw_print_display_cfg( | |||
1407 | cc6_settings->cpu_pstate_separation_time); | 1409 | cc6_settings->cpu_pstate_separation_time); |
1408 | } | 1410 | } |
1409 | 1411 | ||
1410 | static int cz_set_cpu_power_state(struct pp_hwmgr *hwmgr) | 1412 | static int smu8_set_cpu_power_state(struct pp_hwmgr *hwmgr) |
1411 | { | 1413 | { |
1412 | struct cz_hwmgr *hw_data = hwmgr->backend; | 1414 | struct smu8_hwmgr *hw_data = hwmgr->backend; |
1413 | uint32_t data = 0; | 1415 | uint32_t data = 0; |
1414 | 1416 | ||
1415 | if (hw_data->cc6_settings.cc6_setting_changed) { | 1417 | if (hw_data->cc6_settings.cc6_setting_changed) { |
1416 | 1418 | ||
1417 | hw_data->cc6_settings.cc6_setting_changed = false; | 1419 | hw_data->cc6_settings.cc6_setting_changed = false; |
1418 | 1420 | ||
1419 | cz_hw_print_display_cfg(&hw_data->cc6_settings); | 1421 | smu8_hw_print_display_cfg(&hw_data->cc6_settings); |
1420 | 1422 | ||
1421 | data |= (hw_data->cc6_settings.cpu_pstate_separation_time | 1423 | data |= (hw_data->cc6_settings.cpu_pstate_separation_time |
1422 | & PWRMGT_SEPARATION_TIME_MASK) | 1424 | & PWRMGT_SEPARATION_TIME_MASK) |
@@ -1440,10 +1442,10 @@ static void cz_hw_print_display_cfg( | |||
1440 | } | 1442 | } |
1441 | 1443 | ||
1442 | 1444 | ||
1443 | static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time, | 1445 | static int smu8_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time, |
1444 | bool cc6_disable, bool pstate_disable, bool pstate_switch_disable) | 1446 | bool cc6_disable, bool pstate_disable, bool pstate_switch_disable) |
1445 | { | 1447 | { |
1446 | struct cz_hwmgr *hw_data = hwmgr->backend; | 1448 | struct smu8_hwmgr *hw_data = hwmgr->backend; |
1447 | 1449 | ||
1448 | if (separation_time != | 1450 | if (separation_time != |
1449 | hw_data->cc6_settings.cpu_pstate_separation_time || | 1451 | hw_data->cc6_settings.cpu_pstate_separation_time || |
@@ -1467,7 +1469,7 @@ static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time, | |||
1467 | return 0; | 1469 | return 0; |
1468 | } | 1470 | } |
1469 | 1471 | ||
1470 | static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr, | 1472 | static int smu8_get_dal_power_level(struct pp_hwmgr *hwmgr, |
1471 | struct amd_pp_simple_clock_info *info) | 1473 | struct amd_pp_simple_clock_info *info) |
1472 | { | 1474 | { |
1473 | uint32_t i; | 1475 | uint32_t i; |
@@ -1488,7 +1490,7 @@ static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr, | |||
1488 | return -EINVAL; | 1490 | return -EINVAL; |
1489 | } | 1491 | } |
1490 | 1492 | ||
1491 | static int cz_force_clock_level(struct pp_hwmgr *hwmgr, | 1493 | static int smu8_force_clock_level(struct pp_hwmgr *hwmgr, |
1492 | enum pp_clock_type type, uint32_t mask) | 1494 | enum pp_clock_type type, uint32_t mask) |
1493 | { | 1495 | { |
1494 | switch (type) { | 1496 | switch (type) { |
@@ -1507,10 +1509,10 @@ static int cz_force_clock_level(struct pp_hwmgr *hwmgr, | |||
1507 | return 0; | 1509 | return 0; |
1508 | } | 1510 | } |
1509 | 1511 | ||
1510 | static int cz_print_clock_levels(struct pp_hwmgr *hwmgr, | 1512 | static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr, |
1511 | enum pp_clock_type type, char *buf) | 1513 | enum pp_clock_type type, char *buf) |
1512 | { | 1514 | { |
1513 | struct cz_hwmgr *data = hwmgr->backend; | 1515 | struct smu8_hwmgr *data = hwmgr->backend; |
1514 | struct phm_clock_voltage_dependency_table *sclk_table = | 1516 | struct phm_clock_voltage_dependency_table *sclk_table = |
1515 | hwmgr->dyn_state.vddc_dependency_on_sclk; | 1517 | hwmgr->dyn_state.vddc_dependency_on_sclk; |
1516 | int i, now, size = 0; | 1518 | int i, now, size = 0; |
@@ -1535,10 +1537,10 @@ static int cz_print_clock_levels(struct pp_hwmgr *hwmgr, | |||
1535 | TARGET_AND_CURRENT_PROFILE_INDEX, | 1537 | TARGET_AND_CURRENT_PROFILE_INDEX, |
1536 | CURR_MCLK_INDEX); | 1538 | CURR_MCLK_INDEX); |
1537 | 1539 | ||
1538 | for (i = CZ_NUM_NBPMEMORYCLOCK; i > 0; i--) | 1540 | for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--) |
1539 | size += sprintf(buf + size, "%d: %uMhz %s\n", | 1541 | size += sprintf(buf + size, "%d: %uMhz %s\n", |
1540 | CZ_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100, | 1542 | SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100, |
1541 | (CZ_NUM_NBPMEMORYCLOCK-i == now) ? "*" : ""); | 1543 | (SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : ""); |
1542 | break; | 1544 | break; |
1543 | default: | 1545 | default: |
1544 | break; | 1546 | break; |
@@ -1546,12 +1548,12 @@ static int cz_print_clock_levels(struct pp_hwmgr *hwmgr, | |||
1546 | return size; | 1548 | return size; |
1547 | } | 1549 | } |
1548 | 1550 | ||
1549 | static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, | 1551 | static int smu8_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, |
1550 | PHM_PerformanceLevelDesignation designation, uint32_t index, | 1552 | PHM_PerformanceLevelDesignation designation, uint32_t index, |
1551 | PHM_PerformanceLevel *level) | 1553 | PHM_PerformanceLevel *level) |
1552 | { | 1554 | { |
1553 | const struct cz_power_state *ps; | 1555 | const struct smu8_power_state *ps; |
1554 | struct cz_hwmgr *data; | 1556 | struct smu8_hwmgr *data; |
1555 | uint32_t level_index; | 1557 | uint32_t level_index; |
1556 | uint32_t i; | 1558 | uint32_t i; |
1557 | 1559 | ||
@@ -1559,7 +1561,7 @@ static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_p | |||
1559 | return -EINVAL; | 1561 | return -EINVAL; |
1560 | 1562 | ||
1561 | data = hwmgr->backend; | 1563 | data = hwmgr->backend; |
1562 | ps = cast_const_PhwCzPowerState(state); | 1564 | ps = cast_const_smu8_power_state(state); |
1563 | 1565 | ||
1564 | level_index = index > ps->level - 1 ? ps->level - 1 : index; | 1566 | level_index = index > ps->level - 1 ? ps->level - 1 : index; |
1565 | level->coreClock = ps->levels[level_index].engineClock; | 1567 | level->coreClock = ps->levels[level_index].engineClock; |
@@ -1574,21 +1576,21 @@ static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_p | |||
1574 | } | 1576 | } |
1575 | 1577 | ||
1576 | if (level_index == 0) | 1578 | if (level_index == 0) |
1577 | level->memory_clock = data->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1]; | 1579 | level->memory_clock = data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1]; |
1578 | else | 1580 | else |
1579 | level->memory_clock = data->sys_info.nbp_memory_clock[0]; | 1581 | level->memory_clock = data->sys_info.nbp_memory_clock[0]; |
1580 | 1582 | ||
1581 | level->vddc = (cz_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4; | 1583 | level->vddc = (smu8_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4; |
1582 | level->nonLocalMemoryFreq = 0; | 1584 | level->nonLocalMemoryFreq = 0; |
1583 | level->nonLocalMemoryWidth = 0; | 1585 | level->nonLocalMemoryWidth = 0; |
1584 | 1586 | ||
1585 | return 0; | 1587 | return 0; |
1586 | } | 1588 | } |
1587 | 1589 | ||
1588 | static int cz_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, | 1590 | static int smu8_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, |
1589 | const struct pp_hw_power_state *state, struct pp_clock_info *clock_info) | 1591 | const struct pp_hw_power_state *state, struct pp_clock_info *clock_info) |
1590 | { | 1592 | { |
1591 | const struct cz_power_state *ps = cast_const_PhwCzPowerState(state); | 1593 | const struct smu8_power_state *ps = cast_const_smu8_power_state(state); |
1592 | 1594 | ||
1593 | clock_info->min_eng_clk = ps->levels[0].engineClock / (1 << (ps->levels[0].ssDividerIndex)); | 1595 | clock_info->min_eng_clk = ps->levels[0].engineClock / (1 << (ps->levels[0].ssDividerIndex)); |
1594 | clock_info->max_eng_clk = ps->levels[ps->level - 1].engineClock / (1 << (ps->levels[ps->level - 1].ssDividerIndex)); | 1596 | clock_info->max_eng_clk = ps->levels[ps->level - 1].engineClock / (1 << (ps->levels[ps->level - 1].ssDividerIndex)); |
@@ -1596,14 +1598,14 @@ static int cz_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, | |||
1596 | return 0; | 1598 | return 0; |
1597 | } | 1599 | } |
1598 | 1600 | ||
1599 | static int cz_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, | 1601 | static int smu8_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, |
1600 | struct amd_pp_clocks *clocks) | 1602 | struct amd_pp_clocks *clocks) |
1601 | { | 1603 | { |
1602 | struct cz_hwmgr *data = hwmgr->backend; | 1604 | struct smu8_hwmgr *data = hwmgr->backend; |
1603 | int i; | 1605 | int i; |
1604 | struct phm_clock_voltage_dependency_table *table; | 1606 | struct phm_clock_voltage_dependency_table *table; |
1605 | 1607 | ||
1606 | clocks->count = cz_get_max_sclk_level(hwmgr); | 1608 | clocks->count = smu8_get_max_sclk_level(hwmgr); |
1607 | switch (type) { | 1609 | switch (type) { |
1608 | case amd_pp_disp_clock: | 1610 | case amd_pp_disp_clock: |
1609 | for (i = 0; i < clocks->count; i++) | 1611 | for (i = 0; i < clocks->count; i++) |
@@ -1615,7 +1617,7 @@ static int cz_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type t | |||
1615 | clocks->clock[i] = table->entries[i].clk; | 1617 | clocks->clock[i] = table->entries[i].clk; |
1616 | break; | 1618 | break; |
1617 | case amd_pp_mem_clock: | 1619 | case amd_pp_mem_clock: |
1618 | clocks->count = CZ_NUM_NBPMEMORYCLOCK; | 1620 | clocks->count = SMU8_NUM_NBPMEMORYCLOCK; |
1619 | for (i = 0; i < clocks->count; i++) | 1621 | for (i = 0; i < clocks->count; i++) |
1620 | clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i]; | 1622 | clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i]; |
1621 | break; | 1623 | break; |
@@ -1626,7 +1628,7 @@ static int cz_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type t | |||
1626 | return 0; | 1628 | return 0; |
1627 | } | 1629 | } |
1628 | 1630 | ||
1629 | static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks) | 1631 | static int smu8_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks) |
1630 | { | 1632 | { |
1631 | struct phm_clock_voltage_dependency_table *table = | 1633 | struct phm_clock_voltage_dependency_table *table = |
1632 | hwmgr->dyn_state.vddc_dependency_on_sclk; | 1634 | hwmgr->dyn_state.vddc_dependency_on_sclk; |
@@ -1637,7 +1639,7 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c | |||
1637 | if ((NULL == table) || (table->count <= 0) || (clocks == NULL)) | 1639 | if ((NULL == table) || (table->count <= 0) || (clocks == NULL)) |
1638 | return -EINVAL; | 1640 | return -EINVAL; |
1639 | 1641 | ||
1640 | level = cz_get_max_sclk_level(hwmgr) - 1; | 1642 | level = smu8_get_max_sclk_level(hwmgr) - 1; |
1641 | 1643 | ||
1642 | if (level < table->count) | 1644 | if (level < table->count) |
1643 | clocks->engine_max_clock = table->entries[level].clk; | 1645 | clocks->engine_max_clock = table->entries[level].clk; |
@@ -1649,7 +1651,7 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c | |||
1649 | return 0; | 1651 | return 0; |
1650 | } | 1652 | } |
1651 | 1653 | ||
1652 | static int cz_thermal_get_temperature(struct pp_hwmgr *hwmgr) | 1654 | static int smu8_thermal_get_temperature(struct pp_hwmgr *hwmgr) |
1653 | { | 1655 | { |
1654 | int actual_temp = 0; | 1656 | int actual_temp = 0; |
1655 | uint32_t val = cgs_read_ind_register(hwmgr->device, | 1657 | uint32_t val = cgs_read_ind_register(hwmgr->device, |
@@ -1664,10 +1666,10 @@ static int cz_thermal_get_temperature(struct pp_hwmgr *hwmgr) | |||
1664 | return actual_temp; | 1666 | return actual_temp; |
1665 | } | 1667 | } |
1666 | 1668 | ||
1667 | static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, | 1669 | static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx, |
1668 | void *value, int *size) | 1670 | void *value, int *size) |
1669 | { | 1671 | { |
1670 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 1672 | struct smu8_hwmgr *data = hwmgr->backend; |
1671 | 1673 | ||
1672 | struct phm_clock_voltage_dependency_table *table = | 1674 | struct phm_clock_voltage_dependency_table *table = |
1673 | hwmgr->dyn_state.vddc_dependency_on_sclk; | 1675 | hwmgr->dyn_state.vddc_dependency_on_sclk; |
@@ -1705,18 +1707,18 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, | |||
1705 | case AMDGPU_PP_SENSOR_VDDNB: | 1707 | case AMDGPU_PP_SENSOR_VDDNB: |
1706 | tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) & | 1708 | tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) & |
1707 | CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT; | 1709 | CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT; |
1708 | vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp); | 1710 | vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp); |
1709 | *((uint32_t *)value) = vddnb; | 1711 | *((uint32_t *)value) = vddnb; |
1710 | return 0; | 1712 | return 0; |
1711 | case AMDGPU_PP_SENSOR_VDDGFX: | 1713 | case AMDGPU_PP_SENSOR_VDDGFX: |
1712 | tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) & | 1714 | tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) & |
1713 | CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT; | 1715 | CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT; |
1714 | vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp); | 1716 | vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp); |
1715 | *((uint32_t *)value) = vddgfx; | 1717 | *((uint32_t *)value) = vddgfx; |
1716 | return 0; | 1718 | return 0; |
1717 | case AMDGPU_PP_SENSOR_UVD_VCLK: | 1719 | case AMDGPU_PP_SENSOR_UVD_VCLK: |
1718 | if (!cz_hwmgr->uvd_power_gated) { | 1720 | if (!data->uvd_power_gated) { |
1719 | if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) { | 1721 | if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) { |
1720 | return -EINVAL; | 1722 | return -EINVAL; |
1721 | } else { | 1723 | } else { |
1722 | vclk = uvd_table->entries[uvd_index].vclk; | 1724 | vclk = uvd_table->entries[uvd_index].vclk; |
@@ -1727,8 +1729,8 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, | |||
1727 | *((uint32_t *)value) = 0; | 1729 | *((uint32_t *)value) = 0; |
1728 | return 0; | 1730 | return 0; |
1729 | case AMDGPU_PP_SENSOR_UVD_DCLK: | 1731 | case AMDGPU_PP_SENSOR_UVD_DCLK: |
1730 | if (!cz_hwmgr->uvd_power_gated) { | 1732 | if (!data->uvd_power_gated) { |
1731 | if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) { | 1733 | if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) { |
1732 | return -EINVAL; | 1734 | return -EINVAL; |
1733 | } else { | 1735 | } else { |
1734 | dclk = uvd_table->entries[uvd_index].dclk; | 1736 | dclk = uvd_table->entries[uvd_index].dclk; |
@@ -1739,8 +1741,8 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, | |||
1739 | *((uint32_t *)value) = 0; | 1741 | *((uint32_t *)value) = 0; |
1740 | return 0; | 1742 | return 0; |
1741 | case AMDGPU_PP_SENSOR_VCE_ECCLK: | 1743 | case AMDGPU_PP_SENSOR_VCE_ECCLK: |
1742 | if (!cz_hwmgr->vce_power_gated) { | 1744 | if (!data->vce_power_gated) { |
1743 | if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) { | 1745 | if (vce_index >= SMU8_MAX_HARDWARE_POWERLEVELS) { |
1744 | return -EINVAL; | 1746 | return -EINVAL; |
1745 | } else { | 1747 | } else { |
1746 | ecclk = vce_table->entries[vce_index].ecclk; | 1748 | ecclk = vce_table->entries[vce_index].ecclk; |
@@ -1761,20 +1763,20 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, | |||
1761 | *((uint32_t *)value) = activity_percent; | 1763 | *((uint32_t *)value) = activity_percent; |
1762 | return 0; | 1764 | return 0; |
1763 | case AMDGPU_PP_SENSOR_UVD_POWER: | 1765 | case AMDGPU_PP_SENSOR_UVD_POWER: |
1764 | *((uint32_t *)value) = cz_hwmgr->uvd_power_gated ? 0 : 1; | 1766 | *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1; |
1765 | return 0; | 1767 | return 0; |
1766 | case AMDGPU_PP_SENSOR_VCE_POWER: | 1768 | case AMDGPU_PP_SENSOR_VCE_POWER: |
1767 | *((uint32_t *)value) = cz_hwmgr->vce_power_gated ? 0 : 1; | 1769 | *((uint32_t *)value) = data->vce_power_gated ? 0 : 1; |
1768 | return 0; | 1770 | return 0; |
1769 | case AMDGPU_PP_SENSOR_GPU_TEMP: | 1771 | case AMDGPU_PP_SENSOR_GPU_TEMP: |
1770 | *((uint32_t *)value) = cz_thermal_get_temperature(hwmgr); | 1772 | *((uint32_t *)value) = smu8_thermal_get_temperature(hwmgr); |
1771 | return 0; | 1773 | return 0; |
1772 | default: | 1774 | default: |
1773 | return -EINVAL; | 1775 | return -EINVAL; |
1774 | } | 1776 | } |
1775 | } | 1777 | } |
1776 | 1778 | ||
1777 | static int cz_notify_cac_buffer_info(struct pp_hwmgr *hwmgr, | 1779 | static int smu8_notify_cac_buffer_info(struct pp_hwmgr *hwmgr, |
1778 | uint32_t virtual_addr_low, | 1780 | uint32_t virtual_addr_low, |
1779 | uint32_t virtual_addr_hi, | 1781 | uint32_t virtual_addr_hi, |
1780 | uint32_t mc_addr_low, | 1782 | uint32_t mc_addr_low, |
@@ -1800,44 +1802,44 @@ static int cz_notify_cac_buffer_info(struct pp_hwmgr *hwmgr, | |||
1800 | return 0; | 1802 | return 0; |
1801 | } | 1803 | } |
1802 | 1804 | ||
1803 | static int cz_get_thermal_temperature_range(struct pp_hwmgr *hwmgr, | 1805 | static int smu8_get_thermal_temperature_range(struct pp_hwmgr *hwmgr, |
1804 | struct PP_TemperatureRange *thermal_data) | 1806 | struct PP_TemperatureRange *thermal_data) |
1805 | { | 1807 | { |
1806 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 1808 | struct smu8_hwmgr *data = hwmgr->backend; |
1807 | 1809 | ||
1808 | memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange)); | 1810 | memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange)); |
1809 | 1811 | ||
1810 | thermal_data->max = (cz_hwmgr->thermal_auto_throttling_treshold + | 1812 | thermal_data->max = (data->thermal_auto_throttling_treshold + |
1811 | cz_hwmgr->sys_info.htc_hyst_lmt) * | 1813 | data->sys_info.htc_hyst_lmt) * |
1812 | PP_TEMPERATURE_UNITS_PER_CENTIGRADES; | 1814 | PP_TEMPERATURE_UNITS_PER_CENTIGRADES; |
1813 | 1815 | ||
1814 | return 0; | 1816 | return 0; |
1815 | } | 1817 | } |
1816 | 1818 | ||
1817 | static int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) | 1819 | static int smu8_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) |
1818 | { | 1820 | { |
1819 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 1821 | struct smu8_hwmgr *data = hwmgr->backend; |
1820 | uint32_t dpm_features = 0; | 1822 | uint32_t dpm_features = 0; |
1821 | 1823 | ||
1822 | if (enable && | 1824 | if (enable && |
1823 | phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 1825 | phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
1824 | PHM_PlatformCaps_UVDDPM)) { | 1826 | PHM_PlatformCaps_UVDDPM)) { |
1825 | cz_hwmgr->dpm_flags |= DPMFlags_UVD_Enabled; | 1827 | data->dpm_flags |= DPMFlags_UVD_Enabled; |
1826 | dpm_features |= UVD_DPM_MASK; | 1828 | dpm_features |= UVD_DPM_MASK; |
1827 | smum_send_msg_to_smc_with_parameter(hwmgr, | 1829 | smum_send_msg_to_smc_with_parameter(hwmgr, |
1828 | PPSMC_MSG_EnableAllSmuFeatures, dpm_features); | 1830 | PPSMC_MSG_EnableAllSmuFeatures, dpm_features); |
1829 | } else { | 1831 | } else { |
1830 | dpm_features |= UVD_DPM_MASK; | 1832 | dpm_features |= UVD_DPM_MASK; |
1831 | cz_hwmgr->dpm_flags &= ~DPMFlags_UVD_Enabled; | 1833 | data->dpm_flags &= ~DPMFlags_UVD_Enabled; |
1832 | smum_send_msg_to_smc_with_parameter(hwmgr, | 1834 | smum_send_msg_to_smc_with_parameter(hwmgr, |
1833 | PPSMC_MSG_DisableAllSmuFeatures, dpm_features); | 1835 | PPSMC_MSG_DisableAllSmuFeatures, dpm_features); |
1834 | } | 1836 | } |
1835 | return 0; | 1837 | return 0; |
1836 | } | 1838 | } |
1837 | 1839 | ||
1838 | int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) | 1840 | int smu8_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) |
1839 | { | 1841 | { |
1840 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 1842 | struct smu8_hwmgr *data = hwmgr->backend; |
1841 | struct phm_uvd_clock_voltage_dependency_table *ptable = | 1843 | struct phm_uvd_clock_voltage_dependency_table *ptable = |
1842 | hwmgr->dyn_state.uvd_clock_voltage_dependency_table; | 1844 | hwmgr->dyn_state.uvd_clock_voltage_dependency_table; |
1843 | 1845 | ||
@@ -1845,41 +1847,41 @@ int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) | |||
1845 | /* Stable Pstate is enabled and we need to set the UVD DPM to highest level */ | 1847 | /* Stable Pstate is enabled and we need to set the UVD DPM to highest level */ |
1846 | if (PP_CAP(PHM_PlatformCaps_StablePState) || | 1848 | if (PP_CAP(PHM_PlatformCaps_StablePState) || |
1847 | hwmgr->en_umd_pstate) { | 1849 | hwmgr->en_umd_pstate) { |
1848 | cz_hwmgr->uvd_dpm.hard_min_clk = | 1850 | data->uvd_dpm.hard_min_clk = |
1849 | ptable->entries[ptable->count - 1].vclk; | 1851 | ptable->entries[ptable->count - 1].vclk; |
1850 | 1852 | ||
1851 | smum_send_msg_to_smc_with_parameter(hwmgr, | 1853 | smum_send_msg_to_smc_with_parameter(hwmgr, |
1852 | PPSMC_MSG_SetUvdHardMin, | 1854 | PPSMC_MSG_SetUvdHardMin, |
1853 | cz_get_uvd_level(hwmgr, | 1855 | smu8_get_uvd_level(hwmgr, |
1854 | cz_hwmgr->uvd_dpm.hard_min_clk, | 1856 | data->uvd_dpm.hard_min_clk, |
1855 | PPSMC_MSG_SetUvdHardMin)); | 1857 | PPSMC_MSG_SetUvdHardMin)); |
1856 | 1858 | ||
1857 | cz_enable_disable_uvd_dpm(hwmgr, true); | 1859 | smu8_enable_disable_uvd_dpm(hwmgr, true); |
1858 | } else { | 1860 | } else { |
1859 | cz_enable_disable_uvd_dpm(hwmgr, true); | 1861 | smu8_enable_disable_uvd_dpm(hwmgr, true); |
1860 | } | 1862 | } |
1861 | } else { | 1863 | } else { |
1862 | cz_enable_disable_uvd_dpm(hwmgr, false); | 1864 | smu8_enable_disable_uvd_dpm(hwmgr, false); |
1863 | } | 1865 | } |
1864 | 1866 | ||
1865 | return 0; | 1867 | return 0; |
1866 | } | 1868 | } |
1867 | 1869 | ||
1868 | static int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) | 1870 | static int smu8_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) |
1869 | { | 1871 | { |
1870 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 1872 | struct smu8_hwmgr *data = hwmgr->backend; |
1871 | uint32_t dpm_features = 0; | 1873 | uint32_t dpm_features = 0; |
1872 | 1874 | ||
1873 | if (enable && phm_cap_enabled( | 1875 | if (enable && phm_cap_enabled( |
1874 | hwmgr->platform_descriptor.platformCaps, | 1876 | hwmgr->platform_descriptor.platformCaps, |
1875 | PHM_PlatformCaps_VCEDPM)) { | 1877 | PHM_PlatformCaps_VCEDPM)) { |
1876 | cz_hwmgr->dpm_flags |= DPMFlags_VCE_Enabled; | 1878 | data->dpm_flags |= DPMFlags_VCE_Enabled; |
1877 | dpm_features |= VCE_DPM_MASK; | 1879 | dpm_features |= VCE_DPM_MASK; |
1878 | smum_send_msg_to_smc_with_parameter(hwmgr, | 1880 | smum_send_msg_to_smc_with_parameter(hwmgr, |
1879 | PPSMC_MSG_EnableAllSmuFeatures, dpm_features); | 1881 | PPSMC_MSG_EnableAllSmuFeatures, dpm_features); |
1880 | } else { | 1882 | } else { |
1881 | dpm_features |= VCE_DPM_MASK; | 1883 | dpm_features |= VCE_DPM_MASK; |
1882 | cz_hwmgr->dpm_flags &= ~DPMFlags_VCE_Enabled; | 1884 | data->dpm_flags &= ~DPMFlags_VCE_Enabled; |
1883 | smum_send_msg_to_smc_with_parameter(hwmgr, | 1885 | smum_send_msg_to_smc_with_parameter(hwmgr, |
1884 | PPSMC_MSG_DisableAllSmuFeatures, dpm_features); | 1886 | PPSMC_MSG_DisableAllSmuFeatures, dpm_features); |
1885 | } | 1887 | } |
@@ -1888,11 +1890,11 @@ static int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) | |||
1888 | } | 1890 | } |
1889 | 1891 | ||
1890 | 1892 | ||
1891 | static void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) | 1893 | static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) |
1892 | { | 1894 | { |
1893 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 1895 | struct smu8_hwmgr *data = hwmgr->backend; |
1894 | 1896 | ||
1895 | cz_hwmgr->uvd_power_gated = bgate; | 1897 | data->uvd_power_gated = bgate; |
1896 | 1898 | ||
1897 | if (bgate) { | 1899 | if (bgate) { |
1898 | cgs_set_powergating_state(hwmgr->device, | 1900 | cgs_set_powergating_state(hwmgr->device, |
@@ -1901,24 +1903,24 @@ static void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) | |||
1901 | cgs_set_clockgating_state(hwmgr->device, | 1903 | cgs_set_clockgating_state(hwmgr->device, |
1902 | AMD_IP_BLOCK_TYPE_UVD, | 1904 | AMD_IP_BLOCK_TYPE_UVD, |
1903 | AMD_CG_STATE_GATE); | 1905 | AMD_CG_STATE_GATE); |
1904 | cz_dpm_update_uvd_dpm(hwmgr, true); | 1906 | smu8_dpm_update_uvd_dpm(hwmgr, true); |
1905 | cz_dpm_powerdown_uvd(hwmgr); | 1907 | smu8_dpm_powerdown_uvd(hwmgr); |
1906 | } else { | 1908 | } else { |
1907 | cz_dpm_powerup_uvd(hwmgr); | 1909 | smu8_dpm_powerup_uvd(hwmgr); |
1908 | cgs_set_clockgating_state(hwmgr->device, | 1910 | cgs_set_clockgating_state(hwmgr->device, |
1909 | AMD_IP_BLOCK_TYPE_UVD, | 1911 | AMD_IP_BLOCK_TYPE_UVD, |
1910 | AMD_CG_STATE_UNGATE); | 1912 | AMD_CG_STATE_UNGATE); |
1911 | cgs_set_powergating_state(hwmgr->device, | 1913 | cgs_set_powergating_state(hwmgr->device, |
1912 | AMD_IP_BLOCK_TYPE_UVD, | 1914 | AMD_IP_BLOCK_TYPE_UVD, |
1913 | AMD_PG_STATE_UNGATE); | 1915 | AMD_PG_STATE_UNGATE); |
1914 | cz_dpm_update_uvd_dpm(hwmgr, false); | 1916 | smu8_dpm_update_uvd_dpm(hwmgr, false); |
1915 | } | 1917 | } |
1916 | 1918 | ||
1917 | } | 1919 | } |
1918 | 1920 | ||
1919 | static void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) | 1921 | static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) |
1920 | { | 1922 | { |
1921 | struct cz_hwmgr *cz_hwmgr = hwmgr->backend; | 1923 | struct smu8_hwmgr *data = hwmgr->backend; |
1922 | 1924 | ||
1923 | if (bgate) { | 1925 | if (bgate) { |
1924 | cgs_set_powergating_state( | 1926 | cgs_set_powergating_state( |
@@ -1929,12 +1931,12 @@ static void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) | |||
1929 | hwmgr->device, | 1931 | hwmgr->device, |
1930 | AMD_IP_BLOCK_TYPE_VCE, | 1932 | AMD_IP_BLOCK_TYPE_VCE, |
1931 | AMD_CG_STATE_GATE); | 1933 | AMD_CG_STATE_GATE); |
1932 | cz_enable_disable_vce_dpm(hwmgr, false); | 1934 | smu8_enable_disable_vce_dpm(hwmgr, false); |
1933 | cz_dpm_powerdown_vce(hwmgr); | 1935 | smu8_dpm_powerdown_vce(hwmgr); |
1934 | cz_hwmgr->vce_power_gated = true; | 1936 | data->vce_power_gated = true; |
1935 | } else { | 1937 | } else { |
1936 | cz_dpm_powerup_vce(hwmgr); | 1938 | smu8_dpm_powerup_vce(hwmgr); |
1937 | cz_hwmgr->vce_power_gated = false; | 1939 | data->vce_power_gated = false; |
1938 | cgs_set_clockgating_state( | 1940 | cgs_set_clockgating_state( |
1939 | hwmgr->device, | 1941 | hwmgr->device, |
1940 | AMD_IP_BLOCK_TYPE_VCE, | 1942 | AMD_IP_BLOCK_TYPE_VCE, |
@@ -1943,47 +1945,47 @@ static void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) | |||
1943 | hwmgr->device, | 1945 | hwmgr->device, |
1944 | AMD_IP_BLOCK_TYPE_VCE, | 1946 | AMD_IP_BLOCK_TYPE_VCE, |
1945 | AMD_PG_STATE_UNGATE); | 1947 | AMD_PG_STATE_UNGATE); |
1946 | cz_dpm_update_vce_dpm(hwmgr); | 1948 | smu8_dpm_update_vce_dpm(hwmgr); |
1947 | cz_enable_disable_vce_dpm(hwmgr, true); | 1949 | smu8_enable_disable_vce_dpm(hwmgr, true); |
1948 | } | 1950 | } |
1949 | } | 1951 | } |
1950 | 1952 | ||
1951 | static const struct pp_hwmgr_func cz_hwmgr_funcs = { | 1953 | static const struct pp_hwmgr_func smu8_hwmgr_funcs = { |
1952 | .backend_init = cz_hwmgr_backend_init, | 1954 | .backend_init = smu8_hwmgr_backend_init, |
1953 | .backend_fini = cz_hwmgr_backend_fini, | 1955 | .backend_fini = smu8_hwmgr_backend_fini, |
1954 | .apply_state_adjust_rules = cz_apply_state_adjust_rules, | 1956 | .apply_state_adjust_rules = smu8_apply_state_adjust_rules, |
1955 | .force_dpm_level = cz_dpm_force_dpm_level, | 1957 | .force_dpm_level = smu8_dpm_force_dpm_level, |
1956 | .get_power_state_size = cz_get_power_state_size, | 1958 | .get_power_state_size = smu8_get_power_state_size, |
1957 | .powerdown_uvd = cz_dpm_powerdown_uvd, | 1959 | .powerdown_uvd = smu8_dpm_powerdown_uvd, |
1958 | .powergate_uvd = cz_dpm_powergate_uvd, | 1960 | .powergate_uvd = smu8_dpm_powergate_uvd, |
1959 | .powergate_vce = cz_dpm_powergate_vce, | 1961 | .powergate_vce = smu8_dpm_powergate_vce, |
1960 | .get_mclk = cz_dpm_get_mclk, | 1962 | .get_mclk = smu8_dpm_get_mclk, |
1961 | .get_sclk = cz_dpm_get_sclk, | 1963 | .get_sclk = smu8_dpm_get_sclk, |
1962 | .patch_boot_state = cz_dpm_patch_boot_state, | 1964 | .patch_boot_state = smu8_dpm_patch_boot_state, |
1963 | .get_pp_table_entry = cz_dpm_get_pp_table_entry, | 1965 | .get_pp_table_entry = smu8_dpm_get_pp_table_entry, |
1964 | .get_num_of_pp_table_entries = cz_dpm_get_num_of_pp_table_entries, | 1966 | .get_num_of_pp_table_entries = smu8_dpm_get_num_of_pp_table_entries, |
1965 | .set_cpu_power_state = cz_set_cpu_power_state, | 1967 | .set_cpu_power_state = smu8_set_cpu_power_state, |
1966 | .store_cc6_data = cz_store_cc6_data, | 1968 | .store_cc6_data = smu8_store_cc6_data, |
1967 | .force_clock_level = cz_force_clock_level, | 1969 | .force_clock_level = smu8_force_clock_level, |
1968 | .print_clock_levels = cz_print_clock_levels, | 1970 | .print_clock_levels = smu8_print_clock_levels, |
1969 | .get_dal_power_level = cz_get_dal_power_level, | 1971 | .get_dal_power_level = smu8_get_dal_power_level, |
1970 | .get_performance_level = cz_get_performance_level, | 1972 | .get_performance_level = smu8_get_performance_level, |
1971 | .get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks, | 1973 | .get_current_shallow_sleep_clocks = smu8_get_current_shallow_sleep_clocks, |
1972 | .get_clock_by_type = cz_get_clock_by_type, | 1974 | .get_clock_by_type = smu8_get_clock_by_type, |
1973 | .get_max_high_clocks = cz_get_max_high_clocks, | 1975 | .get_max_high_clocks = smu8_get_max_high_clocks, |
1974 | .read_sensor = cz_read_sensor, | 1976 | .read_sensor = smu8_read_sensor, |
1975 | .power_off_asic = cz_power_off_asic, | 1977 | .power_off_asic = smu8_power_off_asic, |
1976 | .asic_setup = cz_setup_asic_task, | 1978 | .asic_setup = smu8_setup_asic_task, |
1977 | .dynamic_state_management_enable = cz_enable_dpm_tasks, | 1979 | .dynamic_state_management_enable = smu8_enable_dpm_tasks, |
1978 | .power_state_set = cz_set_power_state_tasks, | 1980 | .power_state_set = smu8_set_power_state_tasks, |
1979 | .dynamic_state_management_disable = cz_disable_dpm_tasks, | 1981 | .dynamic_state_management_disable = smu8_disable_dpm_tasks, |
1980 | .notify_cac_buffer_info = cz_notify_cac_buffer_info, | 1982 | .notify_cac_buffer_info = smu8_notify_cac_buffer_info, |
1981 | .get_thermal_temperature_range = cz_get_thermal_temperature_range, | 1983 | .get_thermal_temperature_range = smu8_get_thermal_temperature_range, |
1982 | }; | 1984 | }; |
1983 | 1985 | ||
1984 | int cz_init_function_pointers(struct pp_hwmgr *hwmgr) | 1986 | int smu8_init_function_pointers(struct pp_hwmgr *hwmgr) |
1985 | { | 1987 | { |
1986 | hwmgr->hwmgr_func = &cz_hwmgr_funcs; | 1988 | hwmgr->hwmgr_func = &smu8_hwmgr_funcs; |
1987 | hwmgr->pptable_func = &pptable_funcs; | 1989 | hwmgr->pptable_func = &pptable_funcs; |
1988 | return 0; | 1990 | return 0; |
1989 | } | 1991 | } |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h index 604102b885c4..05a06083e1b8 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h | |||
@@ -21,18 +21,18 @@ | |||
21 | * | 21 | * |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #ifndef _CZ_HWMGR_H_ | 24 | #ifndef _SMU8_HWMGR_H_ |
25 | #define _CZ_HWMGR_H_ | 25 | #define _SMU8_HWMGR_H_ |
26 | 26 | ||
27 | #include "cgs_common.h" | 27 | #include "cgs_common.h" |
28 | #include "ppatomctrl.h" | 28 | #include "ppatomctrl.h" |
29 | 29 | ||
30 | #define CZ_NUM_NBPSTATES 4 | 30 | #define SMU8_NUM_NBPSTATES 4 |
31 | #define CZ_NUM_NBPMEMORYCLOCK 2 | 31 | #define SMU8_NUM_NBPMEMORYCLOCK 2 |
32 | #define MAX_DISPLAY_CLOCK_LEVEL 8 | 32 | #define MAX_DISPLAY_CLOCK_LEVEL 8 |
33 | #define CZ_MAX_HARDWARE_POWERLEVELS 8 | 33 | #define SMU8_MAX_HARDWARE_POWERLEVELS 8 |
34 | #define PPCZ_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 | 34 | #define SMU8_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 |
35 | #define CZ_MIN_DEEP_SLEEP_SCLK 800 | 35 | #define SMU8_MIN_DEEP_SLEEP_SCLK 800 |
36 | 36 | ||
37 | /* Carrizo device IDs */ | 37 | /* Carrizo device IDs */ |
38 | #define DEVICE_ID_CZ_9870 0x9870 | 38 | #define DEVICE_ID_CZ_9870 0x9870 |
@@ -41,24 +41,21 @@ | |||
41 | #define DEVICE_ID_CZ_9876 0x9876 | 41 | #define DEVICE_ID_CZ_9876 0x9876 |
42 | #define DEVICE_ID_CZ_9877 0x9877 | 42 | #define DEVICE_ID_CZ_9877 0x9877 |
43 | 43 | ||
44 | #define PHMCZ_WRITE_SMC_REGISTER(device, reg, value) \ | 44 | struct smu8_dpm_entry { |
45 | cgs_write_ind_register(device, CGS_IND_REG__SMC, ix##reg, value) | ||
46 | |||
47 | struct cz_dpm_entry { | ||
48 | uint32_t soft_min_clk; | 45 | uint32_t soft_min_clk; |
49 | uint32_t hard_min_clk; | 46 | uint32_t hard_min_clk; |
50 | uint32_t soft_max_clk; | 47 | uint32_t soft_max_clk; |
51 | uint32_t hard_max_clk; | 48 | uint32_t hard_max_clk; |
52 | }; | 49 | }; |
53 | 50 | ||
54 | struct cz_sys_info { | 51 | struct smu8_sys_info { |
55 | uint32_t bootup_uma_clock; | 52 | uint32_t bootup_uma_clock; |
56 | uint32_t bootup_engine_clock; | 53 | uint32_t bootup_engine_clock; |
57 | uint32_t dentist_vco_freq; | 54 | uint32_t dentist_vco_freq; |
58 | uint32_t nb_dpm_enable; | 55 | uint32_t nb_dpm_enable; |
59 | uint32_t nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK]; | 56 | uint32_t nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK]; |
60 | uint32_t nbp_n_clock[CZ_NUM_NBPSTATES]; | 57 | uint32_t nbp_n_clock[SMU8_NUM_NBPSTATES]; |
61 | uint16_t nbp_voltage_index[CZ_NUM_NBPSTATES]; | 58 | uint16_t nbp_voltage_index[SMU8_NUM_NBPSTATES]; |
62 | uint32_t display_clock[MAX_DISPLAY_CLOCK_LEVEL]; | 59 | uint32_t display_clock[MAX_DISPLAY_CLOCK_LEVEL]; |
63 | uint16_t bootup_nb_voltage_index; | 60 | uint16_t bootup_nb_voltage_index; |
64 | uint8_t htc_tmp_lmt; | 61 | uint8_t htc_tmp_lmt; |
@@ -85,21 +82,21 @@ struct cz_sys_info { | |||
85 | ((tx) ? DISPLAYPHY_TX_SELECT : 0) | \ | 82 | ((tx) ? DISPLAYPHY_TX_SELECT : 0) | \ |
86 | ((core) ? DISPLAYPHY_CORE_SELECT : 0)) | 83 | ((core) ? DISPLAYPHY_CORE_SELECT : 0)) |
87 | 84 | ||
88 | struct cz_display_phy_info_entry { | 85 | struct smu8_display_phy_info_entry { |
89 | uint8_t phy_present; | 86 | uint8_t phy_present; |
90 | uint8_t active_lane_mapping; | 87 | uint8_t active_lane_mapping; |
91 | uint8_t display_config_type; | 88 | uint8_t display_config_type; |
92 | uint8_t active_number_of_lanes; | 89 | uint8_t active_number_of_lanes; |
93 | }; | 90 | }; |
94 | 91 | ||
95 | #define CZ_MAX_DISPLAYPHY_IDS 10 | 92 | #define SMU8_MAX_DISPLAYPHY_IDS 10 |
96 | 93 | ||
97 | struct cz_display_phy_info { | 94 | struct smu8_display_phy_info { |
98 | bool display_phy_access_initialized; | 95 | bool display_phy_access_initialized; |
99 | struct cz_display_phy_info_entry entries[CZ_MAX_DISPLAYPHY_IDS]; | 96 | struct smu8_display_phy_info_entry entries[SMU8_MAX_DISPLAYPHY_IDS]; |
100 | }; | 97 | }; |
101 | 98 | ||
102 | struct cz_power_level { | 99 | struct smu8_power_level { |
103 | uint32_t engineClock; | 100 | uint32_t engineClock; |
104 | uint8_t vddcIndex; | 101 | uint8_t vddcIndex; |
105 | uint8_t dsDividerIndex; | 102 | uint8_t dsDividerIndex; |
@@ -113,7 +110,7 @@ struct cz_power_level { | |||
113 | uint8_t rsv[3]; | 110 | uint8_t rsv[3]; |
114 | }; | 111 | }; |
115 | 112 | ||
116 | struct cz_uvd_clocks { | 113 | struct smu8_uvd_clocks { |
117 | uint32_t vclk; | 114 | uint32_t vclk; |
118 | uint32_t dclk; | 115 | uint32_t dclk; |
119 | uint32_t vclk_low_divider; | 116 | uint32_t vclk_low_divider; |
@@ -122,7 +119,7 @@ struct cz_uvd_clocks { | |||
122 | uint32_t dclk_high_divider; | 119 | uint32_t dclk_high_divider; |
123 | }; | 120 | }; |
124 | 121 | ||
125 | enum cz_pstate_previous_action { | 122 | enum smu8_pstate_previous_action { |
126 | DO_NOTHING = 1, | 123 | DO_NOTHING = 1, |
127 | FORCE_HIGH, | 124 | FORCE_HIGH, |
128 | CANCEL_FORCE_HIGH | 125 | CANCEL_FORCE_HIGH |
@@ -143,10 +140,10 @@ struct pp_disable_nb_ps_flags { | |||
143 | }; | 140 | }; |
144 | }; | 141 | }; |
145 | 142 | ||
146 | struct cz_power_state { | 143 | struct smu8_power_state { |
147 | unsigned int magic; | 144 | unsigned int magic; |
148 | uint32_t level; | 145 | uint32_t level; |
149 | struct cz_uvd_clocks uvd_clocks; | 146 | struct smu8_uvd_clocks uvd_clocks; |
150 | uint32_t evclk; | 147 | uint32_t evclk; |
151 | uint32_t ecclk; | 148 | uint32_t ecclk; |
152 | uint32_t samclk; | 149 | uint32_t samclk; |
@@ -158,8 +155,8 @@ struct cz_power_state { | |||
158 | uint8_t dpm_0_pg_nb_ps_high; | 155 | uint8_t dpm_0_pg_nb_ps_high; |
159 | uint8_t dpm_x_nb_ps_low; | 156 | uint8_t dpm_x_nb_ps_low; |
160 | uint8_t dpm_x_nb_ps_high; | 157 | uint8_t dpm_x_nb_ps_high; |
161 | enum cz_pstate_previous_action action; | 158 | enum smu8_pstate_previous_action action; |
162 | struct cz_power_level levels[CZ_MAX_HARDWARE_POWERLEVELS]; | 159 | struct smu8_power_level levels[SMU8_MAX_HARDWARE_POWERLEVELS]; |
163 | struct pp_disable_nb_ps_flags disable_nb_ps_flag; | 160 | struct pp_disable_nb_ps_flags disable_nb_ps_flag; |
164 | }; | 161 | }; |
165 | 162 | ||
@@ -182,7 +179,7 @@ struct cc6_settings { | |||
182 | uint32_t cpu_pstate_separation_time; | 179 | uint32_t cpu_pstate_separation_time; |
183 | }; | 180 | }; |
184 | 181 | ||
185 | struct cz_hwmgr { | 182 | struct smu8_hwmgr { |
186 | uint32_t dpm_interval; | 183 | uint32_t dpm_interval; |
187 | 184 | ||
188 | uint32_t voltage_drop_threshold; | 185 | uint32_t voltage_drop_threshold; |
@@ -202,11 +199,11 @@ struct cz_hwmgr { | |||
202 | 199 | ||
203 | uint32_t thermal_auto_throttling_treshold; | 200 | uint32_t thermal_auto_throttling_treshold; |
204 | 201 | ||
205 | struct cz_sys_info sys_info; | 202 | struct smu8_sys_info sys_info; |
206 | 203 | ||
207 | struct cz_power_level boot_power_level; | 204 | struct smu8_power_level boot_power_level; |
208 | struct cz_power_state *cz_current_ps; | 205 | struct smu8_power_state *smu8_current_ps; |
209 | struct cz_power_state *cz_requested_ps; | 206 | struct smu8_power_state *smu8_requested_ps; |
210 | 207 | ||
211 | uint32_t mgcg_cgtt_local0; | 208 | uint32_t mgcg_cgtt_local0; |
212 | uint32_t mgcg_cgtt_local1; | 209 | uint32_t mgcg_cgtt_local1; |
@@ -219,7 +216,7 @@ struct cz_hwmgr { | |||
219 | 216 | ||
220 | uint32_t lock_nb_ps_in_uvd_play_back; | 217 | uint32_t lock_nb_ps_in_uvd_play_back; |
221 | 218 | ||
222 | struct cz_display_phy_info display_phy_info; | 219 | struct smu8_display_phy_info display_phy_info; |
223 | uint32_t vce_slow_sclk_threshold; /* default 200mhz */ | 220 | uint32_t vce_slow_sclk_threshold; /* default 200mhz */ |
224 | uint32_t dce_slow_sclk_threshold; /* default 300mhz */ | 221 | uint32_t dce_slow_sclk_threshold; /* default 300mhz */ |
225 | uint32_t min_sclk_did; /* minimum sclk divider */ | 222 | uint32_t min_sclk_did; /* minimum sclk divider */ |
@@ -270,10 +267,10 @@ struct cz_hwmgr { | |||
270 | uint32_t fps_low_threshold; | 267 | uint32_t fps_low_threshold; |
271 | 268 | ||
272 | uint32_t dpm_flags; | 269 | uint32_t dpm_flags; |
273 | struct cz_dpm_entry sclk_dpm; | 270 | struct smu8_dpm_entry sclk_dpm; |
274 | struct cz_dpm_entry uvd_dpm; | 271 | struct smu8_dpm_entry uvd_dpm; |
275 | struct cz_dpm_entry vce_dpm; | 272 | struct smu8_dpm_entry vce_dpm; |
276 | struct cz_dpm_entry acp_dpm; | 273 | struct smu8_dpm_entry acp_dpm; |
277 | 274 | ||
278 | uint8_t uvd_boot_level; | 275 | uint8_t uvd_boot_level; |
279 | uint8_t vce_boot_level; | 276 | uint8_t vce_boot_level; |
@@ -311,4 +308,4 @@ struct cz_hwmgr { | |||
311 | uint32_t num_of_clk_entries; | 308 | uint32_t num_of_clk_entries; |
312 | }; | 309 | }; |
313 | 310 | ||
314 | #endif /* _CZ_HWMGR_H_ */ | 311 | #endif /* _SMU8_HWMGR_H_ */ |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 238dd59caf63..229030027f3e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | |||
@@ -35,7 +35,7 @@ | |||
35 | #include "pp_psm.h" | 35 | #include "pp_psm.h" |
36 | 36 | ||
37 | extern const struct pp_smumgr_func ci_smu_funcs; | 37 | extern const struct pp_smumgr_func ci_smu_funcs; |
38 | extern const struct pp_smumgr_func cz_smu_funcs; | 38 | extern const struct pp_smumgr_func smu8_smu_funcs; |
39 | extern const struct pp_smumgr_func iceland_smu_funcs; | 39 | extern const struct pp_smumgr_func iceland_smu_funcs; |
40 | extern const struct pp_smumgr_func tonga_smu_funcs; | 40 | extern const struct pp_smumgr_func tonga_smu_funcs; |
41 | extern const struct pp_smumgr_func fiji_smu_funcs; | 41 | extern const struct pp_smumgr_func fiji_smu_funcs; |
@@ -44,7 +44,7 @@ extern const struct pp_smumgr_func vega10_smu_funcs; | |||
44 | extern const struct pp_smumgr_func smu10_smu_funcs; | 44 | extern const struct pp_smumgr_func smu10_smu_funcs; |
45 | 45 | ||
46 | extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr); | 46 | extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr); |
47 | extern int cz_init_function_pointers(struct pp_hwmgr *hwmgr); | 47 | extern int smu8_init_function_pointers(struct pp_hwmgr *hwmgr); |
48 | extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr); | 48 | extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr); |
49 | extern int smu10_init_function_pointers(struct pp_hwmgr *hwmgr); | 49 | extern int smu10_init_function_pointers(struct pp_hwmgr *hwmgr); |
50 | 50 | ||
@@ -144,8 +144,8 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) | |||
144 | break; | 144 | break; |
145 | case AMDGPU_FAMILY_CZ: | 145 | case AMDGPU_FAMILY_CZ: |
146 | hwmgr->od_enabled = false; | 146 | hwmgr->od_enabled = false; |
147 | hwmgr->smumgr_funcs = &cz_smu_funcs; | 147 | hwmgr->smumgr_funcs = &smu8_smu_funcs; |
148 | cz_init_function_pointers(hwmgr); | 148 | smu8_init_function_pointers(hwmgr); |
149 | break; | 149 | break; |
150 | case AMDGPU_FAMILY_VI: | 150 | case AMDGPU_FAMILY_VI: |
151 | switch (hwmgr->chip_id) { | 151 | switch (hwmgr->chip_id) { |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c index 871b8a3c9b0c..b998a16e5a2c 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c | |||
@@ -41,18 +41,18 @@ | |||
41 | 41 | ||
42 | #define SIZE_ALIGN_32(x) (((x) + 31) / 32 * 32) | 42 | #define SIZE_ALIGN_32(x) (((x) + 31) / 32 * 32) |
43 | 43 | ||
44 | static const enum cz_scratch_entry firmware_list[] = { | 44 | static const enum smu8_scratch_entry firmware_list[] = { |
45 | CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, | 45 | SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0, |
46 | CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, | 46 | SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1, |
47 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, | 47 | SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE, |
48 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, | 48 | SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP, |
49 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, | 49 | SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME, |
50 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, | 50 | SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, |
51 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, | 51 | SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, |
52 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, | 52 | SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, |
53 | }; | 53 | }; |
54 | 54 | ||
55 | static int cz_smum_get_argument(struct pp_hwmgr *hwmgr) | 55 | static int smu8_smum_get_argument(struct pp_hwmgr *hwmgr) |
56 | { | 56 | { |
57 | if (hwmgr == NULL || hwmgr->device == NULL) | 57 | if (hwmgr == NULL || hwmgr->device == NULL) |
58 | return -EINVAL; | 58 | return -EINVAL; |
@@ -61,7 +61,7 @@ static int cz_smum_get_argument(struct pp_hwmgr *hwmgr) | |||
61 | mmSMU_MP1_SRBM2P_ARG_0); | 61 | mmSMU_MP1_SRBM2P_ARG_0); |
62 | } | 62 | } |
63 | 63 | ||
64 | static int cz_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg) | 64 | static int smu8_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg) |
65 | { | 65 | { |
66 | int result = 0; | 66 | int result = 0; |
67 | 67 | ||
@@ -71,7 +71,7 @@ static int cz_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg) | |||
71 | result = PHM_WAIT_FIELD_UNEQUAL(hwmgr, | 71 | result = PHM_WAIT_FIELD_UNEQUAL(hwmgr, |
72 | SMU_MP1_SRBM2P_RESP_0, CONTENT, 0); | 72 | SMU_MP1_SRBM2P_RESP_0, CONTENT, 0); |
73 | if (result != 0) { | 73 | if (result != 0) { |
74 | pr_err("cz_send_msg_to_smc_async (0x%04x) failed\n", msg); | 74 | pr_err("smu8_send_msg_to_smc_async (0x%04x) failed\n", msg); |
75 | return result; | 75 | return result; |
76 | } | 76 | } |
77 | 77 | ||
@@ -82,11 +82,11 @@ static int cz_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg) | |||
82 | } | 82 | } |
83 | 83 | ||
84 | /* Send a message to the SMC, and wait for its response.*/ | 84 | /* Send a message to the SMC, and wait for its response.*/ |
85 | static int cz_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) | 85 | static int smu8_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) |
86 | { | 86 | { |
87 | int result = 0; | 87 | int result = 0; |
88 | 88 | ||
89 | result = cz_send_msg_to_smc_async(hwmgr, msg); | 89 | result = smu8_send_msg_to_smc_async(hwmgr, msg); |
90 | if (result != 0) | 90 | if (result != 0) |
91 | return result; | 91 | return result; |
92 | 92 | ||
@@ -94,7 +94,7 @@ static int cz_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) | |||
94 | SMU_MP1_SRBM2P_RESP_0, CONTENT, 0); | 94 | SMU_MP1_SRBM2P_RESP_0, CONTENT, 0); |
95 | } | 95 | } |
96 | 96 | ||
97 | static int cz_set_smc_sram_address(struct pp_hwmgr *hwmgr, | 97 | static int smu8_set_smc_sram_address(struct pp_hwmgr *hwmgr, |
98 | uint32_t smc_address, uint32_t limit) | 98 | uint32_t smc_address, uint32_t limit) |
99 | { | 99 | { |
100 | if (hwmgr == NULL || hwmgr->device == NULL) | 100 | if (hwmgr == NULL || hwmgr->device == NULL) |
@@ -116,7 +116,7 @@ static int cz_set_smc_sram_address(struct pp_hwmgr *hwmgr, | |||
116 | return 0; | 116 | return 0; |
117 | } | 117 | } |
118 | 118 | ||
119 | static int cz_write_smc_sram_dword(struct pp_hwmgr *hwmgr, | 119 | static int smu8_write_smc_sram_dword(struct pp_hwmgr *hwmgr, |
120 | uint32_t smc_address, uint32_t value, uint32_t limit) | 120 | uint32_t smc_address, uint32_t value, uint32_t limit) |
121 | { | 121 | { |
122 | int result; | 122 | int result; |
@@ -124,14 +124,14 @@ static int cz_write_smc_sram_dword(struct pp_hwmgr *hwmgr, | |||
124 | if (hwmgr == NULL || hwmgr->device == NULL) | 124 | if (hwmgr == NULL || hwmgr->device == NULL) |
125 | return -EINVAL; | 125 | return -EINVAL; |
126 | 126 | ||
127 | result = cz_set_smc_sram_address(hwmgr, smc_address, limit); | 127 | result = smu8_set_smc_sram_address(hwmgr, smc_address, limit); |
128 | if (!result) | 128 | if (!result) |
129 | cgs_write_register(hwmgr->device, mmMP0PUB_IND_DATA_0, value); | 129 | cgs_write_register(hwmgr->device, mmMP0PUB_IND_DATA_0, value); |
130 | 130 | ||
131 | return result; | 131 | return result; |
132 | } | 132 | } |
133 | 133 | ||
134 | static int cz_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, | 134 | static int smu8_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, |
135 | uint16_t msg, uint32_t parameter) | 135 | uint16_t msg, uint32_t parameter) |
136 | { | 136 | { |
137 | if (hwmgr == NULL || hwmgr->device == NULL) | 137 | if (hwmgr == NULL || hwmgr->device == NULL) |
@@ -139,10 +139,10 @@ static int cz_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, | |||
139 | 139 | ||
140 | cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter); | 140 | cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter); |
141 | 141 | ||
142 | return cz_send_msg_to_smc(hwmgr, msg); | 142 | return smu8_send_msg_to_smc(hwmgr, msg); |
143 | } | 143 | } |
144 | 144 | ||
145 | static int cz_check_fw_load_finish(struct pp_hwmgr *hwmgr, | 145 | static int smu8_check_fw_load_finish(struct pp_hwmgr *hwmgr, |
146 | uint32_t firmware) | 146 | uint32_t firmware) |
147 | { | 147 | { |
148 | int i; | 148 | int i; |
@@ -170,18 +170,18 @@ static int cz_check_fw_load_finish(struct pp_hwmgr *hwmgr, | |||
170 | return 0; | 170 | return 0; |
171 | } | 171 | } |
172 | 172 | ||
173 | static int cz_load_mec_firmware(struct pp_hwmgr *hwmgr) | 173 | static int smu8_load_mec_firmware(struct pp_hwmgr *hwmgr) |
174 | { | 174 | { |
175 | uint32_t reg_data; | 175 | uint32_t reg_data; |
176 | uint32_t tmp; | 176 | uint32_t tmp; |
177 | int ret = 0; | 177 | int ret = 0; |
178 | struct cgs_firmware_info info = {0}; | 178 | struct cgs_firmware_info info = {0}; |
179 | struct cz_smumgr *cz_smu; | 179 | struct smu8_smumgr *smu8_smu; |
180 | 180 | ||
181 | if (hwmgr == NULL || hwmgr->device == NULL) | 181 | if (hwmgr == NULL || hwmgr->device == NULL) |
182 | return -EINVAL; | 182 | return -EINVAL; |
183 | 183 | ||
184 | cz_smu = hwmgr->smu_backend; | 184 | smu8_smu = hwmgr->smu_backend; |
185 | ret = cgs_get_firmware_info(hwmgr->device, | 185 | ret = cgs_get_firmware_info(hwmgr->device, |
186 | CGS_UCODE_ID_CP_MEC, &info); | 186 | CGS_UCODE_ID_CP_MEC, &info); |
187 | 187 | ||
@@ -215,72 +215,72 @@ static int cz_load_mec_firmware(struct pp_hwmgr *hwmgr) | |||
215 | return 0; | 215 | return 0; |
216 | } | 216 | } |
217 | 217 | ||
218 | static uint8_t cz_translate_firmware_enum_to_arg(struct pp_hwmgr *hwmgr, | 218 | static uint8_t smu8_translate_firmware_enum_to_arg(struct pp_hwmgr *hwmgr, |
219 | enum cz_scratch_entry firmware_enum) | 219 | enum smu8_scratch_entry firmware_enum) |
220 | { | 220 | { |
221 | uint8_t ret = 0; | 221 | uint8_t ret = 0; |
222 | 222 | ||
223 | switch (firmware_enum) { | 223 | switch (firmware_enum) { |
224 | case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0: | 224 | case SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0: |
225 | ret = UCODE_ID_SDMA0; | 225 | ret = UCODE_ID_SDMA0; |
226 | break; | 226 | break; |
227 | case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1: | 227 | case SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1: |
228 | if (hwmgr->chip_id == CHIP_STONEY) | 228 | if (hwmgr->chip_id == CHIP_STONEY) |
229 | ret = UCODE_ID_SDMA0; | 229 | ret = UCODE_ID_SDMA0; |
230 | else | 230 | else |
231 | ret = UCODE_ID_SDMA1; | 231 | ret = UCODE_ID_SDMA1; |
232 | break; | 232 | break; |
233 | case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE: | 233 | case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE: |
234 | ret = UCODE_ID_CP_CE; | 234 | ret = UCODE_ID_CP_CE; |
235 | break; | 235 | break; |
236 | case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP: | 236 | case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP: |
237 | ret = UCODE_ID_CP_PFP; | 237 | ret = UCODE_ID_CP_PFP; |
238 | break; | 238 | break; |
239 | case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME: | 239 | case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME: |
240 | ret = UCODE_ID_CP_ME; | 240 | ret = UCODE_ID_CP_ME; |
241 | break; | 241 | break; |
242 | case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1: | 242 | case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1: |
243 | ret = UCODE_ID_CP_MEC_JT1; | 243 | ret = UCODE_ID_CP_MEC_JT1; |
244 | break; | 244 | break; |
245 | case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2: | 245 | case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2: |
246 | if (hwmgr->chip_id == CHIP_STONEY) | 246 | if (hwmgr->chip_id == CHIP_STONEY) |
247 | ret = UCODE_ID_CP_MEC_JT1; | 247 | ret = UCODE_ID_CP_MEC_JT1; |
248 | else | 248 | else |
249 | ret = UCODE_ID_CP_MEC_JT2; | 249 | ret = UCODE_ID_CP_MEC_JT2; |
250 | break; | 250 | break; |
251 | case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG: | 251 | case SMU8_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG: |
252 | ret = UCODE_ID_GMCON_RENG; | 252 | ret = UCODE_ID_GMCON_RENG; |
253 | break; | 253 | break; |
254 | case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G: | 254 | case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G: |
255 | ret = UCODE_ID_RLC_G; | 255 | ret = UCODE_ID_RLC_G; |
256 | break; | 256 | break; |
257 | case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH: | 257 | case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH: |
258 | ret = UCODE_ID_RLC_SCRATCH; | 258 | ret = UCODE_ID_RLC_SCRATCH; |
259 | break; | 259 | break; |
260 | case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM: | 260 | case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM: |
261 | ret = UCODE_ID_RLC_SRM_ARAM; | 261 | ret = UCODE_ID_RLC_SRM_ARAM; |
262 | break; | 262 | break; |
263 | case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM: | 263 | case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM: |
264 | ret = UCODE_ID_RLC_SRM_DRAM; | 264 | ret = UCODE_ID_RLC_SRM_DRAM; |
265 | break; | 265 | break; |
266 | case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM: | 266 | case SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM: |
267 | ret = UCODE_ID_DMCU_ERAM; | 267 | ret = UCODE_ID_DMCU_ERAM; |
268 | break; | 268 | break; |
269 | case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM: | 269 | case SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM: |
270 | ret = UCODE_ID_DMCU_IRAM; | 270 | ret = UCODE_ID_DMCU_IRAM; |
271 | break; | 271 | break; |
272 | case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING: | 272 | case SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING: |
273 | ret = TASK_ARG_INIT_MM_PWR_LOG; | 273 | ret = TASK_ARG_INIT_MM_PWR_LOG; |
274 | break; | 274 | break; |
275 | case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT: | 275 | case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_HALT: |
276 | case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING: | 276 | case SMU8_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING: |
277 | case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS: | 277 | case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS: |
278 | case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT: | 278 | case SMU8_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT: |
279 | case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START: | 279 | case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_START: |
280 | case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS: | 280 | case SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS: |
281 | ret = TASK_ARG_REG_MMIO; | 281 | ret = TASK_ARG_REG_MMIO; |
282 | break; | 282 | break; |
283 | case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE: | 283 | case SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE: |
284 | ret = TASK_ARG_INIT_CLK_TABLE; | 284 | ret = TASK_ARG_INIT_CLK_TABLE; |
285 | break; | 285 | break; |
286 | } | 286 | } |
@@ -288,7 +288,7 @@ static uint8_t cz_translate_firmware_enum_to_arg(struct pp_hwmgr *hwmgr, | |||
288 | return ret; | 288 | return ret; |
289 | } | 289 | } |
290 | 290 | ||
291 | static enum cgs_ucode_id cz_convert_fw_type_to_cgs(uint32_t fw_type) | 291 | static enum cgs_ucode_id smu8_convert_fw_type_to_cgs(uint32_t fw_type) |
292 | { | 292 | { |
293 | enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM; | 293 | enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM; |
294 | 294 | ||
@@ -324,36 +324,36 @@ static enum cgs_ucode_id cz_convert_fw_type_to_cgs(uint32_t fw_type) | |||
324 | return result; | 324 | return result; |
325 | } | 325 | } |
326 | 326 | ||
327 | static int cz_smu_populate_single_scratch_task( | 327 | static int smu8_smu_populate_single_scratch_task( |
328 | struct pp_hwmgr *hwmgr, | 328 | struct pp_hwmgr *hwmgr, |
329 | enum cz_scratch_entry fw_enum, | 329 | enum smu8_scratch_entry fw_enum, |
330 | uint8_t type, bool is_last) | 330 | uint8_t type, bool is_last) |
331 | { | 331 | { |
332 | uint8_t i; | 332 | uint8_t i; |
333 | struct cz_smumgr *cz_smu = hwmgr->smu_backend; | 333 | struct smu8_smumgr *smu8_smu = hwmgr->smu_backend; |
334 | struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; | 334 | struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr; |
335 | struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++]; | 335 | struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++]; |
336 | 336 | ||
337 | task->type = type; | 337 | task->type = type; |
338 | task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum); | 338 | task->arg = smu8_translate_firmware_enum_to_arg(hwmgr, fw_enum); |
339 | task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count; | 339 | task->next = is_last ? END_OF_TASK_LIST : smu8_smu->toc_entry_used_count; |
340 | 340 | ||
341 | for (i = 0; i < cz_smu->scratch_buffer_length; i++) | 341 | for (i = 0; i < smu8_smu->scratch_buffer_length; i++) |
342 | if (cz_smu->scratch_buffer[i].firmware_ID == fw_enum) | 342 | if (smu8_smu->scratch_buffer[i].firmware_ID == fw_enum) |
343 | break; | 343 | break; |
344 | 344 | ||
345 | if (i >= cz_smu->scratch_buffer_length) { | 345 | if (i >= smu8_smu->scratch_buffer_length) { |
346 | pr_err("Invalid Firmware Type\n"); | 346 | pr_err("Invalid Firmware Type\n"); |
347 | return -EINVAL; | 347 | return -EINVAL; |
348 | } | 348 | } |
349 | 349 | ||
350 | task->addr.low = lower_32_bits(cz_smu->scratch_buffer[i].mc_addr); | 350 | task->addr.low = lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr); |
351 | task->addr.high = upper_32_bits(cz_smu->scratch_buffer[i].mc_addr); | 351 | task->addr.high = upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr); |
352 | task->size_bytes = cz_smu->scratch_buffer[i].data_size; | 352 | task->size_bytes = smu8_smu->scratch_buffer[i].data_size; |
353 | 353 | ||
354 | if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == fw_enum) { | 354 | if (SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == fw_enum) { |
355 | struct cz_ih_meta_data *pIHReg_restore = | 355 | struct smu8_ih_meta_data *pIHReg_restore = |
356 | (struct cz_ih_meta_data *)cz_smu->scratch_buffer[i].kaddr; | 356 | (struct smu8_ih_meta_data *)smu8_smu->scratch_buffer[i].kaddr; |
357 | pIHReg_restore->command = | 357 | pIHReg_restore->command = |
358 | METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD; | 358 | METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD; |
359 | } | 359 | } |
@@ -361,53 +361,53 @@ static int cz_smu_populate_single_scratch_task( | |||
361 | return 0; | 361 | return 0; |
362 | } | 362 | } |
363 | 363 | ||
364 | static int cz_smu_populate_single_ucode_load_task( | 364 | static int smu8_smu_populate_single_ucode_load_task( |
365 | struct pp_hwmgr *hwmgr, | 365 | struct pp_hwmgr *hwmgr, |
366 | enum cz_scratch_entry fw_enum, | 366 | enum smu8_scratch_entry fw_enum, |
367 | bool is_last) | 367 | bool is_last) |
368 | { | 368 | { |
369 | uint8_t i; | 369 | uint8_t i; |
370 | struct cz_smumgr *cz_smu = hwmgr->smu_backend; | 370 | struct smu8_smumgr *smu8_smu = hwmgr->smu_backend; |
371 | struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; | 371 | struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr; |
372 | struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++]; | 372 | struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++]; |
373 | 373 | ||
374 | task->type = TASK_TYPE_UCODE_LOAD; | 374 | task->type = TASK_TYPE_UCODE_LOAD; |
375 | task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum); | 375 | task->arg = smu8_translate_firmware_enum_to_arg(hwmgr, fw_enum); |
376 | task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count; | 376 | task->next = is_last ? END_OF_TASK_LIST : smu8_smu->toc_entry_used_count; |
377 | 377 | ||
378 | for (i = 0; i < cz_smu->driver_buffer_length; i++) | 378 | for (i = 0; i < smu8_smu->driver_buffer_length; i++) |
379 | if (cz_smu->driver_buffer[i].firmware_ID == fw_enum) | 379 | if (smu8_smu->driver_buffer[i].firmware_ID == fw_enum) |
380 | break; | 380 | break; |
381 | 381 | ||
382 | if (i >= cz_smu->driver_buffer_length) { | 382 | if (i >= smu8_smu->driver_buffer_length) { |
383 | pr_err("Invalid Firmware Type\n"); | 383 | pr_err("Invalid Firmware Type\n"); |
384 | return -EINVAL; | 384 | return -EINVAL; |
385 | } | 385 | } |
386 | 386 | ||
387 | task->addr.low = lower_32_bits(cz_smu->driver_buffer[i].mc_addr); | 387 | task->addr.low = lower_32_bits(smu8_smu->driver_buffer[i].mc_addr); |
388 | task->addr.high = upper_32_bits(cz_smu->driver_buffer[i].mc_addr); | 388 | task->addr.high = upper_32_bits(smu8_smu->driver_buffer[i].mc_addr); |
389 | task->size_bytes = cz_smu->driver_buffer[i].data_size; | 389 | task->size_bytes = smu8_smu->driver_buffer[i].data_size; |
390 | 390 | ||
391 | return 0; | 391 | return 0; |
392 | } | 392 | } |
393 | 393 | ||
394 | static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr) | 394 | static int smu8_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr) |
395 | { | 395 | { |
396 | struct cz_smumgr *cz_smu = hwmgr->smu_backend; | 396 | struct smu8_smumgr *smu8_smu = hwmgr->smu_backend; |
397 | 397 | ||
398 | cz_smu->toc_entry_aram = cz_smu->toc_entry_used_count; | 398 | smu8_smu->toc_entry_aram = smu8_smu->toc_entry_used_count; |
399 | cz_smu_populate_single_scratch_task(hwmgr, | 399 | smu8_smu_populate_single_scratch_task(hwmgr, |
400 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, | 400 | SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, |
401 | TASK_TYPE_UCODE_SAVE, true); | 401 | TASK_TYPE_UCODE_SAVE, true); |
402 | 402 | ||
403 | return 0; | 403 | return 0; |
404 | } | 404 | } |
405 | 405 | ||
406 | static int cz_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr) | 406 | static int smu8_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr) |
407 | { | 407 | { |
408 | int i; | 408 | int i; |
409 | struct cz_smumgr *cz_smu = hwmgr->smu_backend; | 409 | struct smu8_smumgr *smu8_smu = hwmgr->smu_backend; |
410 | struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; | 410 | struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr; |
411 | 411 | ||
412 | for (i = 0; i < NUM_JOBLIST_ENTRIES; i++) | 412 | for (i = 0; i < NUM_JOBLIST_ENTRIES; i++) |
413 | toc->JobList[i] = (uint8_t)IGNORE_JOB; | 413 | toc->JobList[i] = (uint8_t)IGNORE_JOB; |
@@ -415,248 +415,248 @@ static int cz_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr) | |||
415 | return 0; | 415 | return 0; |
416 | } | 416 | } |
417 | 417 | ||
418 | static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr) | 418 | static int smu8_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr) |
419 | { | 419 | { |
420 | struct cz_smumgr *cz_smu = hwmgr->smu_backend; | 420 | struct smu8_smumgr *smu8_smu = hwmgr->smu_backend; |
421 | struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; | 421 | struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr; |
422 | 422 | ||
423 | toc->JobList[JOB_GFX_SAVE] = (uint8_t)cz_smu->toc_entry_used_count; | 423 | toc->JobList[JOB_GFX_SAVE] = (uint8_t)smu8_smu->toc_entry_used_count; |
424 | cz_smu_populate_single_scratch_task(hwmgr, | 424 | smu8_smu_populate_single_scratch_task(hwmgr, |
425 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, | 425 | SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, |
426 | TASK_TYPE_UCODE_SAVE, false); | 426 | TASK_TYPE_UCODE_SAVE, false); |
427 | 427 | ||
428 | cz_smu_populate_single_scratch_task(hwmgr, | 428 | smu8_smu_populate_single_scratch_task(hwmgr, |
429 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, | 429 | SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, |
430 | TASK_TYPE_UCODE_SAVE, true); | 430 | TASK_TYPE_UCODE_SAVE, true); |
431 | 431 | ||
432 | return 0; | 432 | return 0; |
433 | } | 433 | } |
434 | 434 | ||
435 | 435 | ||
436 | static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr *hwmgr) | 436 | static int smu8_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr *hwmgr) |
437 | { | 437 | { |
438 | struct cz_smumgr *cz_smu = hwmgr->smu_backend; | 438 | struct smu8_smumgr *smu8_smu = hwmgr->smu_backend; |
439 | struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; | 439 | struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr; |
440 | 440 | ||
441 | toc->JobList[JOB_GFX_RESTORE] = (uint8_t)cz_smu->toc_entry_used_count; | 441 | toc->JobList[JOB_GFX_RESTORE] = (uint8_t)smu8_smu->toc_entry_used_count; |
442 | 442 | ||
443 | cz_smu_populate_single_ucode_load_task(hwmgr, | 443 | smu8_smu_populate_single_ucode_load_task(hwmgr, |
444 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false); | 444 | SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE, false); |
445 | cz_smu_populate_single_ucode_load_task(hwmgr, | 445 | smu8_smu_populate_single_ucode_load_task(hwmgr, |
446 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false); | 446 | SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false); |
447 | cz_smu_populate_single_ucode_load_task(hwmgr, | 447 | smu8_smu_populate_single_ucode_load_task(hwmgr, |
448 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); | 448 | SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); |
449 | cz_smu_populate_single_ucode_load_task(hwmgr, | 449 | smu8_smu_populate_single_ucode_load_task(hwmgr, |
450 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); | 450 | SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); |
451 | 451 | ||
452 | if (hwmgr->chip_id == CHIP_STONEY) | 452 | if (hwmgr->chip_id == CHIP_STONEY) |
453 | cz_smu_populate_single_ucode_load_task(hwmgr, | 453 | smu8_smu_populate_single_ucode_load_task(hwmgr, |
454 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); | 454 | SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); |
455 | else | 455 | else |
456 | cz_smu_populate_single_ucode_load_task(hwmgr, | 456 | smu8_smu_populate_single_ucode_load_task(hwmgr, |
457 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); | 457 | SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); |
458 | 458 | ||
459 | cz_smu_populate_single_ucode_load_task(hwmgr, | 459 | smu8_smu_populate_single_ucode_load_task(hwmgr, |
460 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false); | 460 | SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, false); |
461 | 461 | ||
462 | /* populate scratch */ | 462 | /* populate scratch */ |
463 | cz_smu_populate_single_scratch_task(hwmgr, | 463 | smu8_smu_populate_single_scratch_task(hwmgr, |
464 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, | 464 | SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, |
465 | TASK_TYPE_UCODE_LOAD, false); | 465 | TASK_TYPE_UCODE_LOAD, false); |
466 | 466 | ||
467 | cz_smu_populate_single_scratch_task(hwmgr, | 467 | smu8_smu_populate_single_scratch_task(hwmgr, |
468 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, | 468 | SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, |
469 | TASK_TYPE_UCODE_LOAD, false); | 469 | TASK_TYPE_UCODE_LOAD, false); |
470 | 470 | ||
471 | cz_smu_populate_single_scratch_task(hwmgr, | 471 | smu8_smu_populate_single_scratch_task(hwmgr, |
472 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, | 472 | SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, |
473 | TASK_TYPE_UCODE_LOAD, true); | 473 | TASK_TYPE_UCODE_LOAD, true); |
474 | 474 | ||
475 | return 0; | 475 | return 0; |
476 | } | 476 | } |
477 | 477 | ||
478 | static int cz_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr) | 478 | static int smu8_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr) |
479 | { | 479 | { |
480 | struct cz_smumgr *cz_smu = hwmgr->smu_backend; | 480 | struct smu8_smumgr *smu8_smu = hwmgr->smu_backend; |
481 | 481 | ||
482 | cz_smu->toc_entry_power_profiling_index = cz_smu->toc_entry_used_count; | 482 | smu8_smu->toc_entry_power_profiling_index = smu8_smu->toc_entry_used_count; |
483 | 483 | ||
484 | cz_smu_populate_single_scratch_task(hwmgr, | 484 | smu8_smu_populate_single_scratch_task(hwmgr, |
485 | CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, | 485 | SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, |
486 | TASK_TYPE_INITIALIZE, true); | 486 | TASK_TYPE_INITIALIZE, true); |
487 | return 0; | 487 | return 0; |
488 | } | 488 | } |
489 | 489 | ||
490 | static int cz_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr) | 490 | static int smu8_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr) |
491 | { | 491 | { |
492 | struct cz_smumgr *cz_smu = hwmgr->smu_backend; | 492 | struct smu8_smumgr *smu8_smu = hwmgr->smu_backend; |
493 | 493 | ||
494 | cz_smu->toc_entry_initialize_index = cz_smu->toc_entry_used_count; | 494 | smu8_smu->toc_entry_initialize_index = smu8_smu->toc_entry_used_count; |
495 | 495 | ||
496 | cz_smu_populate_single_ucode_load_task(hwmgr, | 496 | smu8_smu_populate_single_ucode_load_task(hwmgr, |
497 | CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false); | 497 | SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0, false); |
498 | if (hwmgr->chip_id != CHIP_STONEY) | 498 | if (hwmgr->chip_id != CHIP_STONEY) |
499 | cz_smu_populate_single_ucode_load_task(hwmgr, | 499 | smu8_smu_populate_single_ucode_load_task(hwmgr, |
500 | CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false); | 500 | SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1, false); |
501 | cz_smu_populate_single_ucode_load_task(hwmgr, | 501 | smu8_smu_populate_single_ucode_load_task(hwmgr, |
502 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false); | 502 | SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE, false); |
503 | cz_smu_populate_single_ucode_load_task(hwmgr, | 503 | smu8_smu_populate_single_ucode_load_task(hwmgr, |
504 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false); | 504 | SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false); |
505 | cz_smu_populate_single_ucode_load_task(hwmgr, | 505 | smu8_smu_populate_single_ucode_load_task(hwmgr, |
506 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); | 506 | SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); |
507 | cz_smu_populate_single_ucode_load_task(hwmgr, | 507 | smu8_smu_populate_single_ucode_load_task(hwmgr, |
508 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); | 508 | SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); |
509 | if (hwmgr->chip_id != CHIP_STONEY) | 509 | if (hwmgr->chip_id != CHIP_STONEY) |
510 | cz_smu_populate_single_ucode_load_task(hwmgr, | 510 | smu8_smu_populate_single_ucode_load_task(hwmgr, |
511 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); | 511 | SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); |
512 | cz_smu_populate_single_ucode_load_task(hwmgr, | 512 | smu8_smu_populate_single_ucode_load_task(hwmgr, |
513 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true); | 513 | SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, true); |
514 | 514 | ||
515 | return 0; | 515 | return 0; |
516 | } | 516 | } |
517 | 517 | ||
518 | static int cz_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr) | 518 | static int smu8_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr) |
519 | { | 519 | { |
520 | struct cz_smumgr *cz_smu = hwmgr->smu_backend; | 520 | struct smu8_smumgr *smu8_smu = hwmgr->smu_backend; |
521 | 521 | ||
522 | cz_smu->toc_entry_clock_table = cz_smu->toc_entry_used_count; | 522 | smu8_smu->toc_entry_clock_table = smu8_smu->toc_entry_used_count; |
523 | 523 | ||
524 | cz_smu_populate_single_scratch_task(hwmgr, | 524 | smu8_smu_populate_single_scratch_task(hwmgr, |
525 | CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE, | 525 | SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE, |
526 | TASK_TYPE_INITIALIZE, true); | 526 | TASK_TYPE_INITIALIZE, true); |
527 | 527 | ||
528 | return 0; | 528 | return 0; |
529 | } | 529 | } |
530 | 530 | ||
531 | static int cz_smu_construct_toc(struct pp_hwmgr *hwmgr) | 531 | static int smu8_smu_construct_toc(struct pp_hwmgr *hwmgr) |
532 | { | 532 | { |
533 | struct cz_smumgr *cz_smu = hwmgr->smu_backend; | 533 | struct smu8_smumgr *smu8_smu = hwmgr->smu_backend; |
534 | 534 | ||
535 | cz_smu->toc_entry_used_count = 0; | 535 | smu8_smu->toc_entry_used_count = 0; |
536 | cz_smu_initialize_toc_empty_job_list(hwmgr); | 536 | smu8_smu_initialize_toc_empty_job_list(hwmgr); |
537 | cz_smu_construct_toc_for_rlc_aram_save(hwmgr); | 537 | smu8_smu_construct_toc_for_rlc_aram_save(hwmgr); |
538 | cz_smu_construct_toc_for_vddgfx_enter(hwmgr); | 538 | smu8_smu_construct_toc_for_vddgfx_enter(hwmgr); |
539 | cz_smu_construct_toc_for_vddgfx_exit(hwmgr); | 539 | smu8_smu_construct_toc_for_vddgfx_exit(hwmgr); |
540 | cz_smu_construct_toc_for_power_profiling(hwmgr); | 540 | smu8_smu_construct_toc_for_power_profiling(hwmgr); |
541 | cz_smu_construct_toc_for_bootup(hwmgr); | 541 | smu8_smu_construct_toc_for_bootup(hwmgr); |
542 | cz_smu_construct_toc_for_clock_table(hwmgr); | 542 | smu8_smu_construct_toc_for_clock_table(hwmgr); |
543 | 543 | ||
544 | return 0; | 544 | return 0; |
545 | } | 545 | } |
546 | 546 | ||
547 | static int cz_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr) | 547 | static int smu8_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr) |
548 | { | 548 | { |
549 | struct cz_smumgr *cz_smu = hwmgr->smu_backend; | 549 | struct smu8_smumgr *smu8_smu = hwmgr->smu_backend; |
550 | uint32_t firmware_type; | 550 | uint32_t firmware_type; |
551 | uint32_t i; | 551 | uint32_t i; |
552 | int ret; | 552 | int ret; |
553 | enum cgs_ucode_id ucode_id; | 553 | enum cgs_ucode_id ucode_id; |
554 | struct cgs_firmware_info info = {0}; | 554 | struct cgs_firmware_info info = {0}; |
555 | 555 | ||
556 | cz_smu->driver_buffer_length = 0; | 556 | smu8_smu->driver_buffer_length = 0; |
557 | 557 | ||
558 | for (i = 0; i < ARRAY_SIZE(firmware_list); i++) { | 558 | for (i = 0; i < ARRAY_SIZE(firmware_list); i++) { |
559 | 559 | ||
560 | firmware_type = cz_translate_firmware_enum_to_arg(hwmgr, | 560 | firmware_type = smu8_translate_firmware_enum_to_arg(hwmgr, |
561 | firmware_list[i]); | 561 | firmware_list[i]); |
562 | 562 | ||
563 | ucode_id = cz_convert_fw_type_to_cgs(firmware_type); | 563 | ucode_id = smu8_convert_fw_type_to_cgs(firmware_type); |
564 | 564 | ||
565 | ret = cgs_get_firmware_info(hwmgr->device, | 565 | ret = cgs_get_firmware_info(hwmgr->device, |
566 | ucode_id, &info); | 566 | ucode_id, &info); |
567 | 567 | ||
568 | if (ret == 0) { | 568 | if (ret == 0) { |
569 | cz_smu->driver_buffer[i].mc_addr = info.mc_addr; | 569 | smu8_smu->driver_buffer[i].mc_addr = info.mc_addr; |
570 | 570 | ||
571 | cz_smu->driver_buffer[i].data_size = info.image_size; | 571 | smu8_smu->driver_buffer[i].data_size = info.image_size; |
572 | 572 | ||
573 | cz_smu->driver_buffer[i].firmware_ID = firmware_list[i]; | 573 | smu8_smu->driver_buffer[i].firmware_ID = firmware_list[i]; |
574 | cz_smu->driver_buffer_length++; | 574 | smu8_smu->driver_buffer_length++; |
575 | } | 575 | } |
576 | } | 576 | } |
577 | 577 | ||
578 | return 0; | 578 | return 0; |
579 | } | 579 | } |
580 | 580 | ||
581 | static int cz_smu_populate_single_scratch_entry( | 581 | static int smu8_smu_populate_single_scratch_entry( |
582 | struct pp_hwmgr *hwmgr, | 582 | struct pp_hwmgr *hwmgr, |
583 | enum cz_scratch_entry scratch_type, | 583 | enum smu8_scratch_entry scratch_type, |
584 | uint32_t ulsize_byte, | 584 | uint32_t ulsize_byte, |
585 | struct cz_buffer_entry *entry) | 585 | struct smu8_buffer_entry *entry) |
586 | { | 586 | { |
587 | struct cz_smumgr *cz_smu = hwmgr->smu_backend; | 587 | struct smu8_smumgr *smu8_smu = hwmgr->smu_backend; |
588 | uint32_t ulsize_aligned = SIZE_ALIGN_32(ulsize_byte); | 588 | uint32_t ulsize_aligned = SIZE_ALIGN_32(ulsize_byte); |
589 | 589 | ||
590 | entry->data_size = ulsize_byte; | 590 | entry->data_size = ulsize_byte; |
591 | entry->kaddr = (char *) cz_smu->smu_buffer.kaddr + | 591 | entry->kaddr = (char *) smu8_smu->smu_buffer.kaddr + |
592 | cz_smu->smu_buffer_used_bytes; | 592 | smu8_smu->smu_buffer_used_bytes; |
593 | entry->mc_addr = cz_smu->smu_buffer.mc_addr + cz_smu->smu_buffer_used_bytes; | 593 | entry->mc_addr = smu8_smu->smu_buffer.mc_addr + smu8_smu->smu_buffer_used_bytes; |
594 | entry->firmware_ID = scratch_type; | 594 | entry->firmware_ID = scratch_type; |
595 | 595 | ||
596 | cz_smu->smu_buffer_used_bytes += ulsize_aligned; | 596 | smu8_smu->smu_buffer_used_bytes += ulsize_aligned; |
597 | 597 | ||
598 | return 0; | 598 | return 0; |
599 | } | 599 | } |
600 | 600 | ||
601 | static int cz_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table) | 601 | static int smu8_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table) |
602 | { | 602 | { |
603 | struct cz_smumgr *cz_smu = hwmgr->smu_backend; | 603 | struct smu8_smumgr *smu8_smu = hwmgr->smu_backend; |
604 | unsigned long i; | 604 | unsigned long i; |
605 | 605 | ||
606 | for (i = 0; i < cz_smu->scratch_buffer_length; i++) { | 606 | for (i = 0; i < smu8_smu->scratch_buffer_length; i++) { |
607 | if (cz_smu->scratch_buffer[i].firmware_ID | 607 | if (smu8_smu->scratch_buffer[i].firmware_ID |
608 | == CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE) | 608 | == SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE) |
609 | break; | 609 | break; |
610 | } | 610 | } |
611 | 611 | ||
612 | *table = (struct SMU8_Fusion_ClkTable *)cz_smu->scratch_buffer[i].kaddr; | 612 | *table = (struct SMU8_Fusion_ClkTable *)smu8_smu->scratch_buffer[i].kaddr; |
613 | 613 | ||
614 | cz_send_msg_to_smc_with_parameter(hwmgr, | 614 | smu8_send_msg_to_smc_with_parameter(hwmgr, |
615 | PPSMC_MSG_SetClkTableAddrHi, | 615 | PPSMC_MSG_SetClkTableAddrHi, |
616 | upper_32_bits(cz_smu->scratch_buffer[i].mc_addr)); | 616 | upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr)); |
617 | 617 | ||
618 | cz_send_msg_to_smc_with_parameter(hwmgr, | 618 | smu8_send_msg_to_smc_with_parameter(hwmgr, |
619 | PPSMC_MSG_SetClkTableAddrLo, | 619 | PPSMC_MSG_SetClkTableAddrLo, |
620 | lower_32_bits(cz_smu->scratch_buffer[i].mc_addr)); | 620 | lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr)); |
621 | 621 | ||
622 | cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, | 622 | smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, |
623 | cz_smu->toc_entry_clock_table); | 623 | smu8_smu->toc_entry_clock_table); |
624 | 624 | ||
625 | cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram); | 625 | smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram); |
626 | 626 | ||
627 | return 0; | 627 | return 0; |
628 | } | 628 | } |
629 | 629 | ||
630 | static int cz_upload_pptable_settings(struct pp_hwmgr *hwmgr) | 630 | static int smu8_upload_pptable_settings(struct pp_hwmgr *hwmgr) |
631 | { | 631 | { |
632 | struct cz_smumgr *cz_smu = hwmgr->smu_backend; | 632 | struct smu8_smumgr *smu8_smu = hwmgr->smu_backend; |
633 | unsigned long i; | 633 | unsigned long i; |
634 | 634 | ||
635 | for (i = 0; i < cz_smu->scratch_buffer_length; i++) { | 635 | for (i = 0; i < smu8_smu->scratch_buffer_length; i++) { |
636 | if (cz_smu->scratch_buffer[i].firmware_ID | 636 | if (smu8_smu->scratch_buffer[i].firmware_ID |
637 | == CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE) | 637 | == SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE) |
638 | break; | 638 | break; |
639 | } | 639 | } |
640 | 640 | ||
641 | cz_send_msg_to_smc_with_parameter(hwmgr, | 641 | smu8_send_msg_to_smc_with_parameter(hwmgr, |
642 | PPSMC_MSG_SetClkTableAddrHi, | 642 | PPSMC_MSG_SetClkTableAddrHi, |
643 | upper_32_bits(cz_smu->scratch_buffer[i].mc_addr)); | 643 | upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr)); |
644 | 644 | ||
645 | cz_send_msg_to_smc_with_parameter(hwmgr, | 645 | smu8_send_msg_to_smc_with_parameter(hwmgr, |
646 | PPSMC_MSG_SetClkTableAddrLo, | 646 | PPSMC_MSG_SetClkTableAddrLo, |
647 | lower_32_bits(cz_smu->scratch_buffer[i].mc_addr)); | 647 | lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr)); |
648 | 648 | ||
649 | cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, | 649 | smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, |
650 | cz_smu->toc_entry_clock_table); | 650 | smu8_smu->toc_entry_clock_table); |
651 | 651 | ||
652 | cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu); | 652 | smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu); |
653 | 653 | ||
654 | return 0; | 654 | return 0; |
655 | } | 655 | } |
656 | 656 | ||
657 | static int cz_request_smu_load_fw(struct pp_hwmgr *hwmgr) | 657 | static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr) |
658 | { | 658 | { |
659 | struct cz_smumgr *cz_smu = hwmgr->smu_backend; | 659 | struct smu8_smumgr *smu8_smu = hwmgr->smu_backend; |
660 | uint32_t smc_address; | 660 | uint32_t smc_address; |
661 | 661 | ||
662 | if (!hwmgr->reload_fw) { | 662 | if (!hwmgr->reload_fw) { |
@@ -664,37 +664,37 @@ static int cz_request_smu_load_fw(struct pp_hwmgr *hwmgr) | |||
664 | return 0; | 664 | return 0; |
665 | } | 665 | } |
666 | 666 | ||
667 | cz_smu_populate_firmware_entries(hwmgr); | 667 | smu8_smu_populate_firmware_entries(hwmgr); |
668 | 668 | ||
669 | cz_smu_construct_toc(hwmgr); | 669 | smu8_smu_construct_toc(hwmgr); |
670 | 670 | ||
671 | smc_address = SMU8_FIRMWARE_HEADER_LOCATION + | 671 | smc_address = SMU8_FIRMWARE_HEADER_LOCATION + |
672 | offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus); | 672 | offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus); |
673 | 673 | ||
674 | cz_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4); | 674 | smu8_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4); |
675 | 675 | ||
676 | cz_send_msg_to_smc_with_parameter(hwmgr, | 676 | smu8_send_msg_to_smc_with_parameter(hwmgr, |
677 | PPSMC_MSG_DriverDramAddrHi, | 677 | PPSMC_MSG_DriverDramAddrHi, |
678 | upper_32_bits(cz_smu->toc_buffer.mc_addr)); | 678 | upper_32_bits(smu8_smu->toc_buffer.mc_addr)); |
679 | 679 | ||
680 | cz_send_msg_to_smc_with_parameter(hwmgr, | 680 | smu8_send_msg_to_smc_with_parameter(hwmgr, |
681 | PPSMC_MSG_DriverDramAddrLo, | 681 | PPSMC_MSG_DriverDramAddrLo, |
682 | lower_32_bits(cz_smu->toc_buffer.mc_addr)); | 682 | lower_32_bits(smu8_smu->toc_buffer.mc_addr)); |
683 | 683 | ||
684 | cz_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs); | 684 | smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs); |
685 | 685 | ||
686 | cz_send_msg_to_smc_with_parameter(hwmgr, | 686 | smu8_send_msg_to_smc_with_parameter(hwmgr, |
687 | PPSMC_MSG_ExecuteJob, | 687 | PPSMC_MSG_ExecuteJob, |
688 | cz_smu->toc_entry_aram); | 688 | smu8_smu->toc_entry_aram); |
689 | cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, | 689 | smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, |
690 | cz_smu->toc_entry_power_profiling_index); | 690 | smu8_smu->toc_entry_power_profiling_index); |
691 | 691 | ||
692 | return cz_send_msg_to_smc_with_parameter(hwmgr, | 692 | return smu8_send_msg_to_smc_with_parameter(hwmgr, |
693 | PPSMC_MSG_ExecuteJob, | 693 | PPSMC_MSG_ExecuteJob, |
694 | cz_smu->toc_entry_initialize_index); | 694 | smu8_smu->toc_entry_initialize_index); |
695 | } | 695 | } |
696 | 696 | ||
697 | static int cz_start_smu(struct pp_hwmgr *hwmgr) | 697 | static int smu8_start_smu(struct pp_hwmgr *hwmgr) |
698 | { | 698 | { |
699 | int ret = 0; | 699 | int ret = 0; |
700 | uint32_t fw_to_check = 0; | 700 | uint32_t fw_to_check = 0; |
@@ -724,32 +724,32 @@ static int cz_start_smu(struct pp_hwmgr *hwmgr) | |||
724 | if (hwmgr->chip_id == CHIP_STONEY) | 724 | if (hwmgr->chip_id == CHIP_STONEY) |
725 | fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK); | 725 | fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK); |
726 | 726 | ||
727 | ret = cz_request_smu_load_fw(hwmgr); | 727 | ret = smu8_request_smu_load_fw(hwmgr); |
728 | if (ret) | 728 | if (ret) |
729 | pr_err("SMU firmware load failed\n"); | 729 | pr_err("SMU firmware load failed\n"); |
730 | 730 | ||
731 | cz_check_fw_load_finish(hwmgr, fw_to_check); | 731 | smu8_check_fw_load_finish(hwmgr, fw_to_check); |
732 | 732 | ||
733 | ret = cz_load_mec_firmware(hwmgr); | 733 | ret = smu8_load_mec_firmware(hwmgr); |
734 | if (ret) | 734 | if (ret) |
735 | pr_err("Mec Firmware load failed\n"); | 735 | pr_err("Mec Firmware load failed\n"); |
736 | 736 | ||
737 | return ret; | 737 | return ret; |
738 | } | 738 | } |
739 | 739 | ||
740 | static int cz_smu_init(struct pp_hwmgr *hwmgr) | 740 | static int smu8_smu_init(struct pp_hwmgr *hwmgr) |
741 | { | 741 | { |
742 | int ret = 0; | 742 | int ret = 0; |
743 | struct cz_smumgr *cz_smu; | 743 | struct smu8_smumgr *smu8_smu; |
744 | 744 | ||
745 | cz_smu = kzalloc(sizeof(struct cz_smumgr), GFP_KERNEL); | 745 | smu8_smu = kzalloc(sizeof(struct smu8_smumgr), GFP_KERNEL); |
746 | if (cz_smu == NULL) | 746 | if (smu8_smu == NULL) |
747 | return -ENOMEM; | 747 | return -ENOMEM; |
748 | 748 | ||
749 | hwmgr->smu_backend = cz_smu; | 749 | hwmgr->smu_backend = smu8_smu; |
750 | 750 | ||
751 | cz_smu->toc_buffer.data_size = 4096; | 751 | smu8_smu->toc_buffer.data_size = 4096; |
752 | cz_smu->smu_buffer.data_size = | 752 | smu8_smu->smu_buffer.data_size = |
753 | ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) + | 753 | ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) + |
754 | ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) + | 754 | ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) + |
755 | ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) + | 755 | ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) + |
@@ -757,60 +757,60 @@ static int cz_smu_init(struct pp_hwmgr *hwmgr) | |||
757 | ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32); | 757 | ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32); |
758 | 758 | ||
759 | ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, | 759 | ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, |
760 | cz_smu->toc_buffer.data_size, | 760 | smu8_smu->toc_buffer.data_size, |
761 | PAGE_SIZE, | 761 | PAGE_SIZE, |
762 | AMDGPU_GEM_DOMAIN_VRAM, | 762 | AMDGPU_GEM_DOMAIN_VRAM, |
763 | &cz_smu->toc_buffer.handle, | 763 | &smu8_smu->toc_buffer.handle, |
764 | &cz_smu->toc_buffer.mc_addr, | 764 | &smu8_smu->toc_buffer.mc_addr, |
765 | &cz_smu->toc_buffer.kaddr); | 765 | &smu8_smu->toc_buffer.kaddr); |
766 | if (ret) | 766 | if (ret) |
767 | goto err2; | 767 | goto err2; |
768 | 768 | ||
769 | ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, | 769 | ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, |
770 | cz_smu->smu_buffer.data_size, | 770 | smu8_smu->smu_buffer.data_size, |
771 | PAGE_SIZE, | 771 | PAGE_SIZE, |
772 | AMDGPU_GEM_DOMAIN_VRAM, | 772 | AMDGPU_GEM_DOMAIN_VRAM, |
773 | &cz_smu->smu_buffer.handle, | 773 | &smu8_smu->smu_buffer.handle, |
774 | &cz_smu->smu_buffer.mc_addr, | 774 | &smu8_smu->smu_buffer.mc_addr, |
775 | &cz_smu->smu_buffer.kaddr); | 775 | &smu8_smu->smu_buffer.kaddr); |
776 | if (ret) | 776 | if (ret) |
777 | goto err1; | 777 | goto err1; |
778 | 778 | ||
779 | if (0 != cz_smu_populate_single_scratch_entry(hwmgr, | 779 | if (0 != smu8_smu_populate_single_scratch_entry(hwmgr, |
780 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, | 780 | SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, |
781 | UCODE_ID_RLC_SCRATCH_SIZE_BYTE, | 781 | UCODE_ID_RLC_SCRATCH_SIZE_BYTE, |
782 | &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { | 782 | &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) { |
783 | pr_err("Error when Populate Firmware Entry.\n"); | 783 | pr_err("Error when Populate Firmware Entry.\n"); |
784 | goto err0; | 784 | goto err0; |
785 | } | 785 | } |
786 | 786 | ||
787 | if (0 != cz_smu_populate_single_scratch_entry(hwmgr, | 787 | if (0 != smu8_smu_populate_single_scratch_entry(hwmgr, |
788 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, | 788 | SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, |
789 | UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, | 789 | UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, |
790 | &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { | 790 | &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) { |
791 | pr_err("Error when Populate Firmware Entry.\n"); | 791 | pr_err("Error when Populate Firmware Entry.\n"); |
792 | goto err0; | 792 | goto err0; |
793 | } | 793 | } |
794 | if (0 != cz_smu_populate_single_scratch_entry(hwmgr, | 794 | if (0 != smu8_smu_populate_single_scratch_entry(hwmgr, |
795 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, | 795 | SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, |
796 | UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, | 796 | UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, |
797 | &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { | 797 | &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) { |
798 | pr_err("Error when Populate Firmware Entry.\n"); | 798 | pr_err("Error when Populate Firmware Entry.\n"); |
799 | goto err0; | 799 | goto err0; |
800 | } | 800 | } |
801 | 801 | ||
802 | if (0 != cz_smu_populate_single_scratch_entry(hwmgr, | 802 | if (0 != smu8_smu_populate_single_scratch_entry(hwmgr, |
803 | CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, | 803 | SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, |
804 | sizeof(struct SMU8_MultimediaPowerLogData), | 804 | sizeof(struct SMU8_MultimediaPowerLogData), |
805 | &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { | 805 | &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) { |
806 | pr_err("Error when Populate Firmware Entry.\n"); | 806 | pr_err("Error when Populate Firmware Entry.\n"); |
807 | goto err0; | 807 | goto err0; |
808 | } | 808 | } |
809 | 809 | ||
810 | if (0 != cz_smu_populate_single_scratch_entry(hwmgr, | 810 | if (0 != smu8_smu_populate_single_scratch_entry(hwmgr, |
811 | CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE, | 811 | SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE, |
812 | sizeof(struct SMU8_Fusion_ClkTable), | 812 | sizeof(struct SMU8_Fusion_ClkTable), |
813 | &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { | 813 | &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) { |
814 | pr_err("Error when Populate Firmware Entry.\n"); | 814 | pr_err("Error when Populate Firmware Entry.\n"); |
815 | goto err0; | 815 | goto err0; |
816 | } | 816 | } |
@@ -818,46 +818,46 @@ static int cz_smu_init(struct pp_hwmgr *hwmgr) | |||
818 | return 0; | 818 | return 0; |
819 | 819 | ||
820 | err0: | 820 | err0: |
821 | amdgpu_bo_free_kernel(&cz_smu->smu_buffer.handle, | 821 | amdgpu_bo_free_kernel(&smu8_smu->smu_buffer.handle, |
822 | &cz_smu->smu_buffer.mc_addr, | 822 | &smu8_smu->smu_buffer.mc_addr, |
823 | &cz_smu->smu_buffer.kaddr); | 823 | &smu8_smu->smu_buffer.kaddr); |
824 | err1: | 824 | err1: |
825 | amdgpu_bo_free_kernel(&cz_smu->toc_buffer.handle, | 825 | amdgpu_bo_free_kernel(&smu8_smu->toc_buffer.handle, |
826 | &cz_smu->toc_buffer.mc_addr, | 826 | &smu8_smu->toc_buffer.mc_addr, |
827 | &cz_smu->toc_buffer.kaddr); | 827 | &smu8_smu->toc_buffer.kaddr); |
828 | err2: | 828 | err2: |
829 | kfree(cz_smu); | 829 | kfree(smu8_smu); |
830 | return -EINVAL; | 830 | return -EINVAL; |
831 | } | 831 | } |
832 | 832 | ||
833 | static int cz_smu_fini(struct pp_hwmgr *hwmgr) | 833 | static int smu8_smu_fini(struct pp_hwmgr *hwmgr) |
834 | { | 834 | { |
835 | struct cz_smumgr *cz_smu; | 835 | struct smu8_smumgr *smu8_smu; |
836 | 836 | ||
837 | if (hwmgr == NULL || hwmgr->device == NULL) | 837 | if (hwmgr == NULL || hwmgr->device == NULL) |
838 | return -EINVAL; | 838 | return -EINVAL; |
839 | 839 | ||
840 | cz_smu = hwmgr->smu_backend; | 840 | smu8_smu = hwmgr->smu_backend; |
841 | if (cz_smu) { | 841 | if (smu8_smu) { |
842 | amdgpu_bo_free_kernel(&cz_smu->toc_buffer.handle, | 842 | amdgpu_bo_free_kernel(&smu8_smu->toc_buffer.handle, |
843 | &cz_smu->toc_buffer.mc_addr, | 843 | &smu8_smu->toc_buffer.mc_addr, |
844 | &cz_smu->toc_buffer.kaddr); | 844 | &smu8_smu->toc_buffer.kaddr); |
845 | amdgpu_bo_free_kernel(&cz_smu->smu_buffer.handle, | 845 | amdgpu_bo_free_kernel(&smu8_smu->smu_buffer.handle, |
846 | &cz_smu->smu_buffer.mc_addr, | 846 | &smu8_smu->smu_buffer.mc_addr, |
847 | &cz_smu->smu_buffer.kaddr); | 847 | &smu8_smu->smu_buffer.kaddr); |
848 | kfree(cz_smu); | 848 | kfree(smu8_smu); |
849 | } | 849 | } |
850 | 850 | ||
851 | return 0; | 851 | return 0; |
852 | } | 852 | } |
853 | 853 | ||
854 | static bool cz_dpm_check_smu_features(struct pp_hwmgr *hwmgr, | 854 | static bool smu8_dpm_check_smu_features(struct pp_hwmgr *hwmgr, |
855 | unsigned long check_feature) | 855 | unsigned long check_feature) |
856 | { | 856 | { |
857 | int result; | 857 | int result; |
858 | unsigned long features; | 858 | unsigned long features; |
859 | 859 | ||
860 | result = cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0); | 860 | result = smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0); |
861 | if (result == 0) { | 861 | if (result == 0) { |
862 | features = smum_get_argument(hwmgr); | 862 | features = smum_get_argument(hwmgr); |
863 | if (features & check_feature) | 863 | if (features & check_feature) |
@@ -867,25 +867,25 @@ static bool cz_dpm_check_smu_features(struct pp_hwmgr *hwmgr, | |||
867 | return false; | 867 | return false; |
868 | } | 868 | } |
869 | 869 | ||
870 | static bool cz_is_dpm_running(struct pp_hwmgr *hwmgr) | 870 | static bool smu8_is_dpm_running(struct pp_hwmgr *hwmgr) |
871 | { | 871 | { |
872 | if (cz_dpm_check_smu_features(hwmgr, SMU_EnabledFeatureScoreboard_SclkDpmOn)) | 872 | if (smu8_dpm_check_smu_features(hwmgr, SMU_EnabledFeatureScoreboard_SclkDpmOn)) |
873 | return true; | 873 | return true; |
874 | return false; | 874 | return false; |
875 | } | 875 | } |
876 | 876 | ||
877 | const struct pp_smumgr_func cz_smu_funcs = { | 877 | const struct pp_smumgr_func smu8_smu_funcs = { |
878 | .smu_init = cz_smu_init, | 878 | .smu_init = smu8_smu_init, |
879 | .smu_fini = cz_smu_fini, | 879 | .smu_fini = smu8_smu_fini, |
880 | .start_smu = cz_start_smu, | 880 | .start_smu = smu8_start_smu, |
881 | .check_fw_load_finish = cz_check_fw_load_finish, | 881 | .check_fw_load_finish = smu8_check_fw_load_finish, |
882 | .request_smu_load_fw = NULL, | 882 | .request_smu_load_fw = NULL, |
883 | .request_smu_load_specific_fw = NULL, | 883 | .request_smu_load_specific_fw = NULL, |
884 | .get_argument = cz_smum_get_argument, | 884 | .get_argument = smu8_smum_get_argument, |
885 | .send_msg_to_smc = cz_send_msg_to_smc, | 885 | .send_msg_to_smc = smu8_send_msg_to_smc, |
886 | .send_msg_to_smc_with_parameter = cz_send_msg_to_smc_with_parameter, | 886 | .send_msg_to_smc_with_parameter = smu8_send_msg_to_smc_with_parameter, |
887 | .download_pptable_settings = cz_download_pptable_settings, | 887 | .download_pptable_settings = smu8_download_pptable_settings, |
888 | .upload_pptable_settings = cz_upload_pptable_settings, | 888 | .upload_pptable_settings = smu8_upload_pptable_settings, |
889 | .is_dpm_running = cz_is_dpm_running, | 889 | .is_dpm_running = smu8_is_dpm_running, |
890 | }; | 890 | }; |
891 | 891 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h index c13ab8377e26..c7b61222d258 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h | |||
@@ -20,63 +20,63 @@ | |||
20 | * OTHER DEALINGS IN THE SOFTWARE. | 20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * | 21 | * |
22 | */ | 22 | */ |
23 | #ifndef _CZ_SMUMGR_H_ | 23 | #ifndef _SMU8_SMUMGR_H_ |
24 | #define _CZ_SMUMGR_H_ | 24 | #define _SMU8_SMUMGR_H_ |
25 | 25 | ||
26 | 26 | ||
27 | #define MAX_NUM_FIRMWARE 8 | 27 | #define MAX_NUM_FIRMWARE 8 |
28 | #define MAX_NUM_SCRATCH 11 | 28 | #define MAX_NUM_SCRATCH 11 |
29 | #define CZ_SCRATCH_SIZE_NONGFX_CLOCKGATING 1024 | 29 | #define SMU8_SCRATCH_SIZE_NONGFX_CLOCKGATING 1024 |
30 | #define CZ_SCRATCH_SIZE_NONGFX_GOLDENSETTING 2048 | 30 | #define SMU8_SCRATCH_SIZE_NONGFX_GOLDENSETTING 2048 |
31 | #define CZ_SCRATCH_SIZE_SDMA_METADATA 1024 | 31 | #define SMU8_SCRATCH_SIZE_SDMA_METADATA 1024 |
32 | #define CZ_SCRATCH_SIZE_IH ((2*256+1)*4) | 32 | #define SMU8_SCRATCH_SIZE_IH ((2*256+1)*4) |
33 | 33 | ||
34 | #define SMU_EnabledFeatureScoreboard_SclkDpmOn 0x00200000 | 34 | #define SMU_EnabledFeatureScoreboard_SclkDpmOn 0x00200000 |
35 | 35 | ||
36 | enum cz_scratch_entry { | 36 | enum smu8_scratch_entry { |
37 | CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0 = 0, | 37 | SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0 = 0, |
38 | CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, | 38 | SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1, |
39 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, | 39 | SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE, |
40 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, | 40 | SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP, |
41 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, | 41 | SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME, |
42 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, | 42 | SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, |
43 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, | 43 | SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, |
44 | CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG, | 44 | SMU8_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG, |
45 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, | 45 | SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, |
46 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, | 46 | SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, |
47 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, | 47 | SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, |
48 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, | 48 | SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, |
49 | CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM, | 49 | SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM, |
50 | CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM, | 50 | SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM, |
51 | CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, | 51 | SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, |
52 | CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT, | 52 | SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_HALT, |
53 | CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING, | 53 | SMU8_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING, |
54 | CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS, | 54 | SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS, |
55 | CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT, | 55 | SMU8_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT, |
56 | CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START, | 56 | SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_START, |
57 | CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS, | 57 | SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS, |
58 | CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE | 58 | SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE |
59 | }; | 59 | }; |
60 | 60 | ||
61 | struct cz_buffer_entry { | 61 | struct smu8_buffer_entry { |
62 | uint32_t data_size; | 62 | uint32_t data_size; |
63 | uint64_t mc_addr; | 63 | uint64_t mc_addr; |
64 | void *kaddr; | 64 | void *kaddr; |
65 | enum cz_scratch_entry firmware_ID; | 65 | enum smu8_scratch_entry firmware_ID; |
66 | struct amdgpu_bo *handle; /* as bo handle used when release bo */ | 66 | struct amdgpu_bo *handle; /* as bo handle used when release bo */ |
67 | }; | 67 | }; |
68 | 68 | ||
69 | struct cz_register_index_data_pair { | 69 | struct smu8_register_index_data_pair { |
70 | uint32_t offset; | 70 | uint32_t offset; |
71 | uint32_t value; | 71 | uint32_t value; |
72 | }; | 72 | }; |
73 | 73 | ||
74 | struct cz_ih_meta_data { | 74 | struct smu8_ih_meta_data { |
75 | uint32_t command; | 75 | uint32_t command; |
76 | struct cz_register_index_data_pair register_index_value_pair[1]; | 76 | struct smu8_register_index_data_pair register_index_value_pair[1]; |
77 | }; | 77 | }; |
78 | 78 | ||
79 | struct cz_smumgr { | 79 | struct smu8_smumgr { |
80 | uint8_t driver_buffer_length; | 80 | uint8_t driver_buffer_length; |
81 | uint8_t scratch_buffer_length; | 81 | uint8_t scratch_buffer_length; |
82 | uint16_t toc_entry_used_count; | 82 | uint16_t toc_entry_used_count; |
@@ -88,12 +88,12 @@ struct cz_smumgr { | |||
88 | uint16_t ih_register_restore_task_size; | 88 | uint16_t ih_register_restore_task_size; |
89 | uint16_t smu_buffer_used_bytes; | 89 | uint16_t smu_buffer_used_bytes; |
90 | 90 | ||
91 | struct cz_buffer_entry toc_buffer; | 91 | struct smu8_buffer_entry toc_buffer; |
92 | struct cz_buffer_entry smu_buffer; | 92 | struct smu8_buffer_entry smu_buffer; |
93 | struct cz_buffer_entry firmware_buffer; | 93 | struct smu8_buffer_entry firmware_buffer; |
94 | struct cz_buffer_entry driver_buffer[MAX_NUM_FIRMWARE]; | 94 | struct smu8_buffer_entry driver_buffer[MAX_NUM_FIRMWARE]; |
95 | struct cz_buffer_entry meta_data_buffer[MAX_NUM_FIRMWARE]; | 95 | struct smu8_buffer_entry meta_data_buffer[MAX_NUM_FIRMWARE]; |
96 | struct cz_buffer_entry scratch_buffer[MAX_NUM_SCRATCH]; | 96 | struct smu8_buffer_entry scratch_buffer[MAX_NUM_SCRATCH]; |
97 | }; | 97 | }; |
98 | 98 | ||
99 | #endif | 99 | #endif |