author     Rex Zhu <Rex.Zhu@amd.com>                 2018-03-06 04:28:38 -0500
committer  Alex Deucher <alexander.deucher@amd.com>  2018-03-15 10:57:12 -0400
commit     c425688520990d6cec769faaa97f4af45d361fd1
tree       65de745c31e06c8cf63f2969c0e6bf11236eb78c
parent     567cc73a1be96ee84fb5b4086538297105835064
drm/amd/pp: Replace rv_* with smu10_*
Powerplay drives the SMU hardware IP; on RV the SMU is smu10, so use smu10 as the prefix of the file names and function names.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
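Apart from the Makefile updates and the hook-up in hwmgr_early_init() shown in the hwmgr.c hunk below (plus rv_display_clock_voltage_request() becoming static), the bulk of the patch is a mechanical rv_* -> smu10_* rename of files, types, macros and static functions. As a rough sketch of the one caller-visible piece, this is how the Raven case now selects the renamed entry points; raven_early_init_sketch() is a made-up illustrative name, and the extern declarations are the ones from the hwmgr.c hunk:

    extern const struct pp_smumgr_func smu10_smu_funcs;
    extern int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);

    /* Illustrative sketch only -- not the actual kernel function. */
    static int raven_early_init_sketch(struct pp_hwmgr *hwmgr)
    {
            switch (hwmgr->chip_id) {
            case CHIP_RAVEN:
                    hwmgr->od_enabled = false;
                    hwmgr->smumgr_funcs = &smu10_smu_funcs;       /* was &rv_smu_funcs */
                    return smu10_init_function_pointers(hwmgr);   /* was rv_init_function_pointers() */
            default:
                    return -EINVAL;
            }
    }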
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/Makefile                                                                      |   2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c                                                                       |   6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c)   | 470
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h)   | 130
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_inc.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/rv_inc.h)       |   4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h                                                                         |   2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/Makefile                                                                     |   2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c) | 120
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h) |   6
9 files changed, 371 insertions(+), 371 deletions(-)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index e8c5a4f84324..1fa9a97e24c1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -30,7 +30,7 @@ HARDWARE_MGR = hwmgr.o processpptables.o \
30 smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \ 30 smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \
31 smu7_clockpowergating.o \ 31 smu7_clockpowergating.o \
32 vega10_processpptables.o vega10_hwmgr.o vega10_powertune.o \ 32 vega10_processpptables.o vega10_hwmgr.o vega10_powertune.o \
33 vega10_thermal.o rv_hwmgr.o pp_psm.o\ 33 vega10_thermal.o smu10_hwmgr.o pp_psm.o\
34 pp_overdriver.o 34 pp_overdriver.o
35 35
36AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR)) 36AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index af1b22d964fd..3432dc066fe7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -43,7 +43,7 @@ extern const struct pp_smumgr_func tonga_smu_funcs;
43extern const struct pp_smumgr_func fiji_smu_funcs; 43extern const struct pp_smumgr_func fiji_smu_funcs;
44extern const struct pp_smumgr_func polaris10_smu_funcs; 44extern const struct pp_smumgr_func polaris10_smu_funcs;
45extern const struct pp_smumgr_func vega10_smu_funcs; 45extern const struct pp_smumgr_func vega10_smu_funcs;
46extern const struct pp_smumgr_func rv_smu_funcs; 46extern const struct pp_smumgr_func smu10_smu_funcs;
47 47
48extern int cz_init_function_pointers(struct pp_hwmgr *hwmgr); 48extern int cz_init_function_pointers(struct pp_hwmgr *hwmgr);
49static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr); 49static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
@@ -230,8 +230,8 @@ int hwmgr_early_init(struct pp_instance *handle)
230 switch (hwmgr->chip_id) { 230 switch (hwmgr->chip_id) {
231 case CHIP_RAVEN: 231 case CHIP_RAVEN:
232 hwmgr->od_enabled = false; 232 hwmgr->od_enabled = false;
233 hwmgr->smumgr_funcs = &rv_smu_funcs; 233 hwmgr->smumgr_funcs = &smu10_smu_funcs;
234 rv_init_function_pointers(hwmgr); 234 smu10_init_function_pointers(hwmgr);
235 break; 235 break;
236 default: 236 default:
237 return -EINVAL; 237 return -EINVAL;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 4bdb28fd287a..10253b89b3d8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -32,52 +32,52 @@
32#include "hwmgr.h" 32#include "hwmgr.h"
33#include "hardwaremanager.h" 33#include "hardwaremanager.h"
34#include "rv_ppsmc.h" 34#include "rv_ppsmc.h"
35#include "rv_hwmgr.h" 35#include "smu10_hwmgr.h"
36#include "power_state.h" 36#include "power_state.h"
37#include "pp_soc15.h" 37#include "pp_soc15.h"
38 38
39#define RAVEN_MAX_DEEPSLEEP_DIVIDER_ID 5 39#define SMU10_MAX_DEEPSLEEP_DIVIDER_ID 5
40#define RAVEN_MINIMUM_ENGINE_CLOCK 800 /* 8Mhz, the low boundary of engine clock allowed on this chip */ 40#define SMU10_MINIMUM_ENGINE_CLOCK 800 /* 8Mhz, the low boundary of engine clock allowed on this chip */
41#define SCLK_MIN_DIV_INTV_SHIFT 12 41#define SCLK_MIN_DIV_INTV_SHIFT 12
42#define RAVEN_DISPCLK_BYPASS_THRESHOLD 10000 /* 100Mhz */ 42#define SMU10_DISPCLK_BYPASS_THRESHOLD 10000 /* 100Mhz */
43#define SMC_RAM_END 0x40000 43#define SMC_RAM_END 0x40000
44 44
45static const unsigned long PhwRaven_Magic = (unsigned long) PHM_Rv_Magic; 45static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic;
46 46
47 47
48int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr, 48static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
49 struct pp_display_clock_request *clock_req); 49 struct pp_display_clock_request *clock_req);
50 50
51 51
52static struct rv_power_state *cast_rv_ps(struct pp_hw_power_state *hw_ps) 52static struct smu10_power_state *cast_smu10_ps(struct pp_hw_power_state *hw_ps)
53{ 53{
54 if (PhwRaven_Magic != hw_ps->magic) 54 if (SMU10_Magic != hw_ps->magic)
55 return NULL; 55 return NULL;
56 56
57 return (struct rv_power_state *)hw_ps; 57 return (struct smu10_power_state *)hw_ps;
58} 58}
59 59
60static const struct rv_power_state *cast_const_rv_ps( 60static const struct smu10_power_state *cast_const_smu10_ps(
61 const struct pp_hw_power_state *hw_ps) 61 const struct pp_hw_power_state *hw_ps)
62{ 62{
63 if (PhwRaven_Magic != hw_ps->magic) 63 if (SMU10_Magic != hw_ps->magic)
64 return NULL; 64 return NULL;
65 65
66 return (struct rv_power_state *)hw_ps; 66 return (struct smu10_power_state *)hw_ps;
67} 67}
68 68
69static int rv_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) 69static int smu10_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
70{ 70{
71 struct rv_hwmgr *rv_hwmgr = (struct rv_hwmgr *)(hwmgr->backend); 71 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
72 72
73 rv_hwmgr->dce_slow_sclk_threshold = 30000; 73 smu10_data->dce_slow_sclk_threshold = 30000;
74 rv_hwmgr->thermal_auto_throttling_treshold = 0; 74 smu10_data->thermal_auto_throttling_treshold = 0;
75 rv_hwmgr->is_nb_dpm_enabled = 1; 75 smu10_data->is_nb_dpm_enabled = 1;
76 rv_hwmgr->dpm_flags = 1; 76 smu10_data->dpm_flags = 1;
77 rv_hwmgr->gfx_off_controled_by_driver = false; 77 smu10_data->gfx_off_controled_by_driver = false;
78 rv_hwmgr->need_min_deep_sleep_dcefclk = true; 78 smu10_data->need_min_deep_sleep_dcefclk = true;
79 rv_hwmgr->num_active_display = 0; 79 smu10_data->num_active_display = 0;
80 rv_hwmgr->deep_sleep_dcefclk = 0; 80 smu10_data->deep_sleep_dcefclk = 0;
81 81
82 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 82 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
83 PHM_PlatformCaps_SclkDeepSleep); 83 PHM_PlatformCaps_SclkDeepSleep);
@@ -90,13 +90,13 @@ static int rv_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
90 return 0; 90 return 0;
91} 91}
92 92
93static int rv_construct_max_power_limits_table(struct pp_hwmgr *hwmgr, 93static int smu10_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
94 struct phm_clock_and_voltage_limits *table) 94 struct phm_clock_and_voltage_limits *table)
95{ 95{
96 return 0; 96 return 0;
97} 97}
98 98
99static int rv_init_dynamic_state_adjustment_rule_settings( 99static int smu10_init_dynamic_state_adjustment_rule_settings(
100 struct pp_hwmgr *hwmgr) 100 struct pp_hwmgr *hwmgr)
101{ 101{
102 uint32_t table_size = 102 uint32_t table_size =
@@ -133,30 +133,30 @@ static int rv_init_dynamic_state_adjustment_rule_settings(
133 return 0; 133 return 0;
134} 134}
135 135
136static int rv_get_system_info_data(struct pp_hwmgr *hwmgr) 136static int smu10_get_system_info_data(struct pp_hwmgr *hwmgr)
137{ 137{
138 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)hwmgr->backend; 138 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)hwmgr->backend;
139 139
140 rv_data->sys_info.htc_hyst_lmt = 5; 140 smu10_data->sys_info.htc_hyst_lmt = 5;
141 rv_data->sys_info.htc_tmp_lmt = 203; 141 smu10_data->sys_info.htc_tmp_lmt = 203;
142 142
143 if (rv_data->thermal_auto_throttling_treshold == 0) 143 if (smu10_data->thermal_auto_throttling_treshold == 0)
144 rv_data->thermal_auto_throttling_treshold = 203; 144 smu10_data->thermal_auto_throttling_treshold = 203;
145 145
146 rv_construct_max_power_limits_table (hwmgr, 146 smu10_construct_max_power_limits_table (hwmgr,
147 &hwmgr->dyn_state.max_clock_voltage_on_ac); 147 &hwmgr->dyn_state.max_clock_voltage_on_ac);
148 148
149 rv_init_dynamic_state_adjustment_rule_settings(hwmgr); 149 smu10_init_dynamic_state_adjustment_rule_settings(hwmgr);
150 150
151 return 0; 151 return 0;
152} 152}
153 153
154static int rv_construct_boot_state(struct pp_hwmgr *hwmgr) 154static int smu10_construct_boot_state(struct pp_hwmgr *hwmgr)
155{ 155{
156 return 0; 156 return 0;
157} 157}
158 158
159static int rv_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input) 159static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
160{ 160{
161 struct PP_Clocks clocks = {0}; 161 struct PP_Clocks clocks = {0};
162 struct pp_display_clock_request clock_req; 162 struct pp_display_clock_request clock_req;
@@ -165,109 +165,109 @@ static int rv_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
165 clock_req.clock_type = amd_pp_dcf_clock; 165 clock_req.clock_type = amd_pp_dcf_clock;
166 clock_req.clock_freq_in_khz = clocks.dcefClock * 10; 166 clock_req.clock_freq_in_khz = clocks.dcefClock * 10;
167 167
168 PP_ASSERT_WITH_CODE(!rv_display_clock_voltage_request(hwmgr, &clock_req), 168 PP_ASSERT_WITH_CODE(!smu10_display_clock_voltage_request(hwmgr, &clock_req),
169 "Attempt to set DCF Clock Failed!", return -EINVAL); 169 "Attempt to set DCF Clock Failed!", return -EINVAL);
170 170
171 return 0; 171 return 0;
172} 172}
173 173
174static int rv_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock) 174static int smu10_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
175{ 175{
176 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 176 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
177 177
178 if (rv_data->need_min_deep_sleep_dcefclk && rv_data->deep_sleep_dcefclk != clock/100) { 178 if (smu10_data->need_min_deep_sleep_dcefclk && smu10_data->deep_sleep_dcefclk != clock/100) {
179 rv_data->deep_sleep_dcefclk = clock/100; 179 smu10_data->deep_sleep_dcefclk = clock/100;
180 smum_send_msg_to_smc_with_parameter(hwmgr, 180 smum_send_msg_to_smc_with_parameter(hwmgr,
181 PPSMC_MSG_SetMinDeepSleepDcefclk, 181 PPSMC_MSG_SetMinDeepSleepDcefclk,
182 rv_data->deep_sleep_dcefclk); 182 smu10_data->deep_sleep_dcefclk);
183 } 183 }
184 return 0; 184 return 0;
185} 185}
186 186
187static int rv_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count) 187static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
188{ 188{
189 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 189 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
190 190
191 if (rv_data->num_active_display != count) { 191 if (smu10_data->num_active_display != count) {
192 rv_data->num_active_display = count; 192 smu10_data->num_active_display = count;
193 smum_send_msg_to_smc_with_parameter(hwmgr, 193 smum_send_msg_to_smc_with_parameter(hwmgr,
194 PPSMC_MSG_SetDisplayCount, 194 PPSMC_MSG_SetDisplayCount,
195 rv_data->num_active_display); 195 smu10_data->num_active_display);
196 } 196 }
197 197
198 return 0; 198 return 0;
199} 199}
200 200
201static int rv_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) 201static int smu10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
202{ 202{
203 return rv_set_clock_limit(hwmgr, input); 203 return smu10_set_clock_limit(hwmgr, input);
204} 204}
205 205
206static int rv_init_power_gate_state(struct pp_hwmgr *hwmgr) 206static int smu10_init_power_gate_state(struct pp_hwmgr *hwmgr)
207{ 207{
208 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 208 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
209 209
210 rv_data->vcn_power_gated = true; 210 smu10_data->vcn_power_gated = true;
211 rv_data->isp_tileA_power_gated = true; 211 smu10_data->isp_tileA_power_gated = true;
212 rv_data->isp_tileB_power_gated = true; 212 smu10_data->isp_tileB_power_gated = true;
213 213
214 return 0; 214 return 0;
215} 215}
216 216
217 217
218static int rv_setup_asic_task(struct pp_hwmgr *hwmgr) 218static int smu10_setup_asic_task(struct pp_hwmgr *hwmgr)
219{ 219{
220 return rv_init_power_gate_state(hwmgr); 220 return smu10_init_power_gate_state(hwmgr);
221} 221}
222 222
223static int rv_reset_cc6_data(struct pp_hwmgr *hwmgr) 223static int smu10_reset_cc6_data(struct pp_hwmgr *hwmgr)
224{ 224{
225 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 225 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
226 226
227 rv_data->separation_time = 0; 227 smu10_data->separation_time = 0;
228 rv_data->cc6_disable = false; 228 smu10_data->cc6_disable = false;
229 rv_data->pstate_disable = false; 229 smu10_data->pstate_disable = false;
230 rv_data->cc6_setting_changed = false; 230 smu10_data->cc6_setting_changed = false;
231 231
232 return 0; 232 return 0;
233} 233}
234 234
235static int rv_power_off_asic(struct pp_hwmgr *hwmgr) 235static int smu10_power_off_asic(struct pp_hwmgr *hwmgr)
236{ 236{
237 return rv_reset_cc6_data(hwmgr); 237 return smu10_reset_cc6_data(hwmgr);
238} 238}
239 239
240static int rv_disable_gfx_off(struct pp_hwmgr *hwmgr) 240static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
241{ 241{
242 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 242 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
243 243
244 if (rv_data->gfx_off_controled_by_driver) 244 if (smu10_data->gfx_off_controled_by_driver)
245 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff); 245 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff);
246 246
247 return 0; 247 return 0;
248} 248}
249 249
250static int rv_disable_dpm_tasks(struct pp_hwmgr *hwmgr) 250static int smu10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
251{ 251{
252 return rv_disable_gfx_off(hwmgr); 252 return smu10_disable_gfx_off(hwmgr);
253} 253}
254 254
255static int rv_enable_gfx_off(struct pp_hwmgr *hwmgr) 255static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
256{ 256{
257 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 257 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
258 258
259 if (rv_data->gfx_off_controled_by_driver) 259 if (smu10_data->gfx_off_controled_by_driver)
260 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff); 260 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff);
261 261
262 return 0; 262 return 0;
263} 263}
264 264
265static int rv_enable_dpm_tasks(struct pp_hwmgr *hwmgr) 265static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
266{ 266{
267 return rv_enable_gfx_off(hwmgr); 267 return smu10_enable_gfx_off(hwmgr);
268} 268}
269 269
270static int rv_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, 270static int smu10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
271 struct pp_power_state *prequest_ps, 271 struct pp_power_state *prequest_ps,
272 const struct pp_power_state *pcurrent_ps) 272 const struct pp_power_state *pcurrent_ps)
273{ 273{
@@ -311,14 +311,14 @@ static const DpmClock_t VddPhyClk[]= {
311 { 810, 3600}, 311 { 810, 3600},
312}; 312};
313 313
314static int rv_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr, 314static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
315 struct rv_voltage_dependency_table **pptable, 315 struct smu10_voltage_dependency_table **pptable,
316 uint32_t num_entry, const DpmClock_t *pclk_dependency_table) 316 uint32_t num_entry, const DpmClock_t *pclk_dependency_table)
317{ 317{
318 uint32_t table_size, i; 318 uint32_t table_size, i;
319 struct rv_voltage_dependency_table *ptable; 319 struct smu10_voltage_dependency_table *ptable;
320 320
321 table_size = sizeof(uint32_t) + sizeof(struct rv_voltage_dependency_table) * num_entry; 321 table_size = sizeof(uint32_t) + sizeof(struct smu10_voltage_dependency_table) * num_entry;
322 ptable = kzalloc(table_size, GFP_KERNEL); 322 ptable = kzalloc(table_size, GFP_KERNEL);
323 323
324 if (NULL == ptable) 324 if (NULL == ptable)
@@ -338,13 +338,13 @@ static int rv_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
338} 338}
339 339
340 340
341static int rv_populate_clock_table(struct pp_hwmgr *hwmgr) 341static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr)
342{ 342{
343 int result; 343 int result;
344 344
345 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 345 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
346 DpmClocks_t *table = &(rv_data->clock_table); 346 DpmClocks_t *table = &(smu10_data->clock_table);
347 struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info); 347 struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
348 348
349 result = smum_smc_table_manager(hwmgr, (uint8_t *)table, SMU10_CLOCKTABLE, true); 349 result = smum_smc_table_manager(hwmgr, (uint8_t *)table, SMU10_CLOCKTABLE, true);
350 350
@@ -353,80 +353,80 @@ static int rv_populate_clock_table(struct pp_hwmgr *hwmgr)
353 return result); 353 return result);
354 354
355 if (0 == result && table->DcefClocks[0].Freq != 0) { 355 if (0 == result && table->DcefClocks[0].Freq != 0) {
356 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk, 356 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
357 NUM_DCEFCLK_DPM_LEVELS, 357 NUM_DCEFCLK_DPM_LEVELS,
358 &rv_data->clock_table.DcefClocks[0]); 358 &smu10_data->clock_table.DcefClocks[0]);
359 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk, 359 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
360 NUM_SOCCLK_DPM_LEVELS, 360 NUM_SOCCLK_DPM_LEVELS,
361 &rv_data->clock_table.SocClocks[0]); 361 &smu10_data->clock_table.SocClocks[0]);
362 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk, 362 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
363 NUM_FCLK_DPM_LEVELS, 363 NUM_FCLK_DPM_LEVELS,
364 &rv_data->clock_table.FClocks[0]); 364 &smu10_data->clock_table.FClocks[0]);
365 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_mclk, 365 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_mclk,
366 NUM_MEMCLK_DPM_LEVELS, 366 NUM_MEMCLK_DPM_LEVELS,
367 &rv_data->clock_table.MemClocks[0]); 367 &smu10_data->clock_table.MemClocks[0]);
368 } else { 368 } else {
369 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk, 369 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
370 ARRAY_SIZE(VddDcfClk), 370 ARRAY_SIZE(VddDcfClk),
371 &VddDcfClk[0]); 371 &VddDcfClk[0]);
372 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk, 372 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
373 ARRAY_SIZE(VddSocClk), 373 ARRAY_SIZE(VddSocClk),
374 &VddSocClk[0]); 374 &VddSocClk[0]);
375 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk, 375 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
376 ARRAY_SIZE(VddFClk), 376 ARRAY_SIZE(VddFClk),
377 &VddFClk[0]); 377 &VddFClk[0]);
378 } 378 }
379 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dispclk, 379 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dispclk,
380 ARRAY_SIZE(VddDispClk), 380 ARRAY_SIZE(VddDispClk),
381 &VddDispClk[0]); 381 &VddDispClk[0]);
382 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dppclk, 382 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dppclk,
383 ARRAY_SIZE(VddDppClk), &VddDppClk[0]); 383 ARRAY_SIZE(VddDppClk), &VddDppClk[0]);
384 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk, 384 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
385 ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]); 385 ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]);
386 386
387 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency); 387 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency);
388 result = smum_get_argument(hwmgr); 388 result = smum_get_argument(hwmgr);
389 rv_data->gfx_min_freq_limit = result * 100; 389 smu10_data->gfx_min_freq_limit = result * 100;
390 390
391 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency); 391 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency);
392 result = smum_get_argument(hwmgr); 392 result = smum_get_argument(hwmgr);
393 rv_data->gfx_max_freq_limit = result * 100; 393 smu10_data->gfx_max_freq_limit = result * 100;
394 394
395 return 0; 395 return 0;
396} 396}
397 397
398static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr) 398static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
399{ 399{
400 int result = 0; 400 int result = 0;
401 struct rv_hwmgr *data; 401 struct smu10_hwmgr *data;
402 402
403 data = kzalloc(sizeof(struct rv_hwmgr), GFP_KERNEL); 403 data = kzalloc(sizeof(struct smu10_hwmgr), GFP_KERNEL);
404 if (data == NULL) 404 if (data == NULL)
405 return -ENOMEM; 405 return -ENOMEM;
406 406
407 hwmgr->backend = data; 407 hwmgr->backend = data;
408 408
409 result = rv_initialize_dpm_defaults(hwmgr); 409 result = smu10_initialize_dpm_defaults(hwmgr);
410 if (result != 0) { 410 if (result != 0) {
411 pr_err("rv_initialize_dpm_defaults failed\n"); 411 pr_err("smu10_initialize_dpm_defaults failed\n");
412 return result; 412 return result;
413 } 413 }
414 414
415 rv_populate_clock_table(hwmgr); 415 smu10_populate_clock_table(hwmgr);
416 416
417 result = rv_get_system_info_data(hwmgr); 417 result = smu10_get_system_info_data(hwmgr);
418 if (result != 0) { 418 if (result != 0) {
419 pr_err("rv_get_system_info_data failed\n"); 419 pr_err("smu10_get_system_info_data failed\n");
420 return result; 420 return result;
421 } 421 }
422 422
423 rv_construct_boot_state(hwmgr); 423 smu10_construct_boot_state(hwmgr);
424 424
425 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = 425 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
426 RAVEN_MAX_HARDWARE_POWERLEVELS; 426 SMU10_MAX_HARDWARE_POWERLEVELS;
427 427
428 hwmgr->platform_descriptor.hardwarePerformanceLevels = 428 hwmgr->platform_descriptor.hardwarePerformanceLevels =
429 RAVEN_MAX_HARDWARE_POWERLEVELS; 429 SMU10_MAX_HARDWARE_POWERLEVELS;
430 430
431 hwmgr->platform_descriptor.vbiosInterruptId = 0; 431 hwmgr->platform_descriptor.vbiosInterruptId = 0;
432 432
@@ -436,16 +436,16 @@ static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
436 436
437 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; 437 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
438 438
439 hwmgr->pstate_sclk = RAVEN_UMD_PSTATE_GFXCLK; 439 hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK;
440 hwmgr->pstate_mclk = RAVEN_UMD_PSTATE_FCLK; 440 hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK;
441 441
442 return result; 442 return result;
443} 443}
444 444
445static int rv_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) 445static int smu10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
446{ 446{
447 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 447 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
448 struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info); 448 struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
449 449
450 kfree(pinfo->vdd_dep_on_dcefclk); 450 kfree(pinfo->vdd_dep_on_dcefclk);
451 pinfo->vdd_dep_on_dcefclk = NULL; 451 pinfo->vdd_dep_on_dcefclk = NULL;
@@ -469,7 +469,7 @@ static int rv_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
469 return 0; 469 return 0;
470} 470}
471 471
472static int rv_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, 472static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
473 enum amd_dpm_forced_level level) 473 enum amd_dpm_forced_level level)
474{ 474{
475 if (hwmgr->smu_version < 0x1E3700) { 475 if (hwmgr->smu_version < 0x1E3700) {
@@ -482,113 +482,113 @@ static int rv_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
482 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 482 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
483 smum_send_msg_to_smc_with_parameter(hwmgr, 483 smum_send_msg_to_smc_with_parameter(hwmgr,
484 PPSMC_MSG_SetHardMinGfxClk, 484 PPSMC_MSG_SetHardMinGfxClk,
485 RAVEN_UMD_PSTATE_PEAK_GFXCLK); 485 SMU10_UMD_PSTATE_PEAK_GFXCLK);
486 smum_send_msg_to_smc_with_parameter(hwmgr, 486 smum_send_msg_to_smc_with_parameter(hwmgr,
487 PPSMC_MSG_SetHardMinFclkByFreq, 487 PPSMC_MSG_SetHardMinFclkByFreq,
488 RAVEN_UMD_PSTATE_PEAK_FCLK); 488 SMU10_UMD_PSTATE_PEAK_FCLK);
489 smum_send_msg_to_smc_with_parameter(hwmgr, 489 smum_send_msg_to_smc_with_parameter(hwmgr,
490 PPSMC_MSG_SetHardMinSocclkByFreq, 490 PPSMC_MSG_SetHardMinSocclkByFreq,
491 RAVEN_UMD_PSTATE_PEAK_SOCCLK); 491 SMU10_UMD_PSTATE_PEAK_SOCCLK);
492 smum_send_msg_to_smc_with_parameter(hwmgr, 492 smum_send_msg_to_smc_with_parameter(hwmgr,
493 PPSMC_MSG_SetHardMinVcn, 493 PPSMC_MSG_SetHardMinVcn,
494 RAVEN_UMD_PSTATE_VCE); 494 SMU10_UMD_PSTATE_VCE);
495 495
496 smum_send_msg_to_smc_with_parameter(hwmgr, 496 smum_send_msg_to_smc_with_parameter(hwmgr,
497 PPSMC_MSG_SetSoftMaxGfxClk, 497 PPSMC_MSG_SetSoftMaxGfxClk,
498 RAVEN_UMD_PSTATE_PEAK_GFXCLK); 498 SMU10_UMD_PSTATE_PEAK_GFXCLK);
499 smum_send_msg_to_smc_with_parameter(hwmgr, 499 smum_send_msg_to_smc_with_parameter(hwmgr,
500 PPSMC_MSG_SetSoftMaxFclkByFreq, 500 PPSMC_MSG_SetSoftMaxFclkByFreq,
501 RAVEN_UMD_PSTATE_PEAK_FCLK); 501 SMU10_UMD_PSTATE_PEAK_FCLK);
502 smum_send_msg_to_smc_with_parameter(hwmgr, 502 smum_send_msg_to_smc_with_parameter(hwmgr,
503 PPSMC_MSG_SetSoftMaxSocclkByFreq, 503 PPSMC_MSG_SetSoftMaxSocclkByFreq,
504 RAVEN_UMD_PSTATE_PEAK_SOCCLK); 504 SMU10_UMD_PSTATE_PEAK_SOCCLK);
505 smum_send_msg_to_smc_with_parameter(hwmgr, 505 smum_send_msg_to_smc_with_parameter(hwmgr,
506 PPSMC_MSG_SetSoftMaxVcn, 506 PPSMC_MSG_SetSoftMaxVcn,
507 RAVEN_UMD_PSTATE_VCE); 507 SMU10_UMD_PSTATE_VCE);
508 break; 508 break;
509 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 509 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
510 smum_send_msg_to_smc_with_parameter(hwmgr, 510 smum_send_msg_to_smc_with_parameter(hwmgr,
511 PPSMC_MSG_SetHardMinGfxClk, 511 PPSMC_MSG_SetHardMinGfxClk,
512 RAVEN_UMD_PSTATE_MIN_GFXCLK); 512 SMU10_UMD_PSTATE_MIN_GFXCLK);
513 smum_send_msg_to_smc_with_parameter(hwmgr, 513 smum_send_msg_to_smc_with_parameter(hwmgr,
514 PPSMC_MSG_SetSoftMaxGfxClk, 514 PPSMC_MSG_SetSoftMaxGfxClk,
515 RAVEN_UMD_PSTATE_MIN_GFXCLK); 515 SMU10_UMD_PSTATE_MIN_GFXCLK);
516 break; 516 break;
517 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 517 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
518 smum_send_msg_to_smc_with_parameter(hwmgr, 518 smum_send_msg_to_smc_with_parameter(hwmgr,
519 PPSMC_MSG_SetHardMinFclkByFreq, 519 PPSMC_MSG_SetHardMinFclkByFreq,
520 RAVEN_UMD_PSTATE_MIN_FCLK); 520 SMU10_UMD_PSTATE_MIN_FCLK);
521 smum_send_msg_to_smc_with_parameter(hwmgr, 521 smum_send_msg_to_smc_with_parameter(hwmgr,
522 PPSMC_MSG_SetSoftMaxFclkByFreq, 522 PPSMC_MSG_SetSoftMaxFclkByFreq,
523 RAVEN_UMD_PSTATE_MIN_FCLK); 523 SMU10_UMD_PSTATE_MIN_FCLK);
524 break; 524 break;
525 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 525 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
526 smum_send_msg_to_smc_with_parameter(hwmgr, 526 smum_send_msg_to_smc_with_parameter(hwmgr,
527 PPSMC_MSG_SetHardMinGfxClk, 527 PPSMC_MSG_SetHardMinGfxClk,
528 RAVEN_UMD_PSTATE_GFXCLK); 528 SMU10_UMD_PSTATE_GFXCLK);
529 smum_send_msg_to_smc_with_parameter(hwmgr, 529 smum_send_msg_to_smc_with_parameter(hwmgr,
530 PPSMC_MSG_SetHardMinFclkByFreq, 530 PPSMC_MSG_SetHardMinFclkByFreq,
531 RAVEN_UMD_PSTATE_FCLK); 531 SMU10_UMD_PSTATE_FCLK);
532 smum_send_msg_to_smc_with_parameter(hwmgr, 532 smum_send_msg_to_smc_with_parameter(hwmgr,
533 PPSMC_MSG_SetHardMinSocclkByFreq, 533 PPSMC_MSG_SetHardMinSocclkByFreq,
534 RAVEN_UMD_PSTATE_SOCCLK); 534 SMU10_UMD_PSTATE_SOCCLK);
535 smum_send_msg_to_smc_with_parameter(hwmgr, 535 smum_send_msg_to_smc_with_parameter(hwmgr,
536 PPSMC_MSG_SetHardMinVcn, 536 PPSMC_MSG_SetHardMinVcn,
537 RAVEN_UMD_PSTATE_VCE); 537 SMU10_UMD_PSTATE_VCE);
538 538
539 smum_send_msg_to_smc_with_parameter(hwmgr, 539 smum_send_msg_to_smc_with_parameter(hwmgr,
540 PPSMC_MSG_SetSoftMaxGfxClk, 540 PPSMC_MSG_SetSoftMaxGfxClk,
541 RAVEN_UMD_PSTATE_GFXCLK); 541 SMU10_UMD_PSTATE_GFXCLK);
542 smum_send_msg_to_smc_with_parameter(hwmgr, 542 smum_send_msg_to_smc_with_parameter(hwmgr,
543 PPSMC_MSG_SetSoftMaxFclkByFreq, 543 PPSMC_MSG_SetSoftMaxFclkByFreq,
544 RAVEN_UMD_PSTATE_FCLK); 544 SMU10_UMD_PSTATE_FCLK);
545 smum_send_msg_to_smc_with_parameter(hwmgr, 545 smum_send_msg_to_smc_with_parameter(hwmgr,
546 PPSMC_MSG_SetSoftMaxSocclkByFreq, 546 PPSMC_MSG_SetSoftMaxSocclkByFreq,
547 RAVEN_UMD_PSTATE_SOCCLK); 547 SMU10_UMD_PSTATE_SOCCLK);
548 smum_send_msg_to_smc_with_parameter(hwmgr, 548 smum_send_msg_to_smc_with_parameter(hwmgr,
549 PPSMC_MSG_SetSoftMaxVcn, 549 PPSMC_MSG_SetSoftMaxVcn,
550 RAVEN_UMD_PSTATE_VCE); 550 SMU10_UMD_PSTATE_VCE);
551 break; 551 break;
552 case AMD_DPM_FORCED_LEVEL_AUTO: 552 case AMD_DPM_FORCED_LEVEL_AUTO:
553 smum_send_msg_to_smc_with_parameter(hwmgr, 553 smum_send_msg_to_smc_with_parameter(hwmgr,
554 PPSMC_MSG_SetHardMinGfxClk, 554 PPSMC_MSG_SetHardMinGfxClk,
555 RAVEN_UMD_PSTATE_MIN_GFXCLK); 555 SMU10_UMD_PSTATE_MIN_GFXCLK);
556 smum_send_msg_to_smc_with_parameter(hwmgr, 556 smum_send_msg_to_smc_with_parameter(hwmgr,
557 PPSMC_MSG_SetHardMinFclkByFreq, 557 PPSMC_MSG_SetHardMinFclkByFreq,
558 RAVEN_UMD_PSTATE_MIN_FCLK); 558 SMU10_UMD_PSTATE_MIN_FCLK);
559 smum_send_msg_to_smc_with_parameter(hwmgr, 559 smum_send_msg_to_smc_with_parameter(hwmgr,
560 PPSMC_MSG_SetHardMinSocclkByFreq, 560 PPSMC_MSG_SetHardMinSocclkByFreq,
561 RAVEN_UMD_PSTATE_MIN_SOCCLK); 561 SMU10_UMD_PSTATE_MIN_SOCCLK);
562 smum_send_msg_to_smc_with_parameter(hwmgr, 562 smum_send_msg_to_smc_with_parameter(hwmgr,
563 PPSMC_MSG_SetHardMinVcn, 563 PPSMC_MSG_SetHardMinVcn,
564 RAVEN_UMD_PSTATE_MIN_VCE); 564 SMU10_UMD_PSTATE_MIN_VCE);
565 565
566 smum_send_msg_to_smc_with_parameter(hwmgr, 566 smum_send_msg_to_smc_with_parameter(hwmgr,
567 PPSMC_MSG_SetSoftMaxGfxClk, 567 PPSMC_MSG_SetSoftMaxGfxClk,
568 RAVEN_UMD_PSTATE_PEAK_GFXCLK); 568 SMU10_UMD_PSTATE_PEAK_GFXCLK);
569 smum_send_msg_to_smc_with_parameter(hwmgr, 569 smum_send_msg_to_smc_with_parameter(hwmgr,
570 PPSMC_MSG_SetSoftMaxFclkByFreq, 570 PPSMC_MSG_SetSoftMaxFclkByFreq,
571 RAVEN_UMD_PSTATE_PEAK_FCLK); 571 SMU10_UMD_PSTATE_PEAK_FCLK);
572 smum_send_msg_to_smc_with_parameter(hwmgr, 572 smum_send_msg_to_smc_with_parameter(hwmgr,
573 PPSMC_MSG_SetSoftMaxSocclkByFreq, 573 PPSMC_MSG_SetSoftMaxSocclkByFreq,
574 RAVEN_UMD_PSTATE_PEAK_SOCCLK); 574 SMU10_UMD_PSTATE_PEAK_SOCCLK);
575 smum_send_msg_to_smc_with_parameter(hwmgr, 575 smum_send_msg_to_smc_with_parameter(hwmgr,
576 PPSMC_MSG_SetSoftMaxVcn, 576 PPSMC_MSG_SetSoftMaxVcn,
577 RAVEN_UMD_PSTATE_VCE); 577 SMU10_UMD_PSTATE_VCE);
578 break; 578 break;
579 case AMD_DPM_FORCED_LEVEL_LOW: 579 case AMD_DPM_FORCED_LEVEL_LOW:
580 smum_send_msg_to_smc_with_parameter(hwmgr, 580 smum_send_msg_to_smc_with_parameter(hwmgr,
581 PPSMC_MSG_SetHardMinGfxClk, 581 PPSMC_MSG_SetHardMinGfxClk,
582 RAVEN_UMD_PSTATE_MIN_GFXCLK); 582 SMU10_UMD_PSTATE_MIN_GFXCLK);
583 smum_send_msg_to_smc_with_parameter(hwmgr, 583 smum_send_msg_to_smc_with_parameter(hwmgr,
584 PPSMC_MSG_SetSoftMaxGfxClk, 584 PPSMC_MSG_SetSoftMaxGfxClk,
585 RAVEN_UMD_PSTATE_MIN_GFXCLK); 585 SMU10_UMD_PSTATE_MIN_GFXCLK);
586 smum_send_msg_to_smc_with_parameter(hwmgr, 586 smum_send_msg_to_smc_with_parameter(hwmgr,
587 PPSMC_MSG_SetHardMinFclkByFreq, 587 PPSMC_MSG_SetHardMinFclkByFreq,
588 RAVEN_UMD_PSTATE_MIN_FCLK); 588 SMU10_UMD_PSTATE_MIN_FCLK);
589 smum_send_msg_to_smc_with_parameter(hwmgr, 589 smum_send_msg_to_smc_with_parameter(hwmgr,
590 PPSMC_MSG_SetSoftMaxFclkByFreq, 590 PPSMC_MSG_SetSoftMaxFclkByFreq,
591 RAVEN_UMD_PSTATE_MIN_FCLK); 591 SMU10_UMD_PSTATE_MIN_FCLK);
592 break; 592 break;
593 case AMD_DPM_FORCED_LEVEL_MANUAL: 593 case AMD_DPM_FORCED_LEVEL_MANUAL:
594 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 594 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
@@ -598,14 +598,14 @@ static int rv_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
598 return 0; 598 return 0;
599} 599}
600 600
601static uint32_t rv_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) 601static uint32_t smu10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
602{ 602{
603 struct rv_hwmgr *data; 603 struct smu10_hwmgr *data;
604 604
605 if (hwmgr == NULL) 605 if (hwmgr == NULL)
606 return -EINVAL; 606 return -EINVAL;
607 607
608 data = (struct rv_hwmgr *)(hwmgr->backend); 608 data = (struct smu10_hwmgr *)(hwmgr->backend);
609 609
610 if (low) 610 if (low)
611 return data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk; 611 return data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
@@ -614,14 +614,14 @@ static uint32_t rv_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
614 data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk; 614 data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
615} 615}
616 616
617static uint32_t rv_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) 617static uint32_t smu10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
618{ 618{
619 struct rv_hwmgr *data; 619 struct smu10_hwmgr *data;
620 620
621 if (hwmgr == NULL) 621 if (hwmgr == NULL)
622 return -EINVAL; 622 return -EINVAL;
623 623
624 data = (struct rv_hwmgr *)(hwmgr->backend); 624 data = (struct smu10_hwmgr *)(hwmgr->backend);
625 625
626 if (low) 626 if (low)
627 return data->gfx_min_freq_limit; 627 return data->gfx_min_freq_limit;
@@ -629,34 +629,34 @@ static uint32_t rv_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
629 return data->gfx_max_freq_limit; 629 return data->gfx_max_freq_limit;
630} 630}
631 631
632static int rv_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, 632static int smu10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
633 struct pp_hw_power_state *hw_ps) 633 struct pp_hw_power_state *hw_ps)
634{ 634{
635 return 0; 635 return 0;
636} 636}
637 637
638static int rv_dpm_get_pp_table_entry_callback( 638static int smu10_dpm_get_pp_table_entry_callback(
639 struct pp_hwmgr *hwmgr, 639 struct pp_hwmgr *hwmgr,
640 struct pp_hw_power_state *hw_ps, 640 struct pp_hw_power_state *hw_ps,
641 unsigned int index, 641 unsigned int index,
642 const void *clock_info) 642 const void *clock_info)
643{ 643{
644 struct rv_power_state *rv_ps = cast_rv_ps(hw_ps); 644 struct smu10_power_state *smu10_ps = cast_smu10_ps(hw_ps);
645 645
646 rv_ps->levels[index].engine_clock = 0; 646 smu10_ps->levels[index].engine_clock = 0;
647 647
648 rv_ps->levels[index].vddc_index = 0; 648 smu10_ps->levels[index].vddc_index = 0;
649 rv_ps->level = index + 1; 649 smu10_ps->level = index + 1;
650 650
651 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { 651 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
652 rv_ps->levels[index].ds_divider_index = 5; 652 smu10_ps->levels[index].ds_divider_index = 5;
653 rv_ps->levels[index].ss_divider_index = 5; 653 smu10_ps->levels[index].ss_divider_index = 5;
654 } 654 }
655 655
656 return 0; 656 return 0;
657} 657}
658 658
659static int rv_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr) 659static int smu10_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
660{ 660{
661 int result; 661 int result;
662 unsigned long ret = 0; 662 unsigned long ret = 0;
@@ -666,59 +666,59 @@ static int rv_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
666 return result ? 0 : ret; 666 return result ? 0 : ret;
667} 667}
668 668
669static int rv_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr, 669static int smu10_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
670 unsigned long entry, struct pp_power_state *ps) 670 unsigned long entry, struct pp_power_state *ps)
671{ 671{
672 int result; 672 int result;
673 struct rv_power_state *rv_ps; 673 struct smu10_power_state *smu10_ps;
674 674
675 ps->hardware.magic = PhwRaven_Magic; 675 ps->hardware.magic = SMU10_Magic;
676 676
677 rv_ps = cast_rv_ps(&(ps->hardware)); 677 smu10_ps = cast_smu10_ps(&(ps->hardware));
678 678
679 result = pp_tables_get_entry(hwmgr, entry, ps, 679 result = pp_tables_get_entry(hwmgr, entry, ps,
680 rv_dpm_get_pp_table_entry_callback); 680 smu10_dpm_get_pp_table_entry_callback);
681 681
682 rv_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK; 682 smu10_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
683 rv_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK; 683 smu10_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
684 684
685 return result; 685 return result;
686} 686}
687 687
688static int rv_get_power_state_size(struct pp_hwmgr *hwmgr) 688static int smu10_get_power_state_size(struct pp_hwmgr *hwmgr)
689{ 689{
690 return sizeof(struct rv_power_state); 690 return sizeof(struct smu10_power_state);
691} 691}
692 692
693static int rv_set_cpu_power_state(struct pp_hwmgr *hwmgr) 693static int smu10_set_cpu_power_state(struct pp_hwmgr *hwmgr)
694{ 694{
695 return 0; 695 return 0;
696} 696}
697 697
698 698
699static int rv_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time, 699static int smu10_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
700 bool cc6_disable, bool pstate_disable, bool pstate_switch_disable) 700 bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
701{ 701{
702 return 0; 702 return 0;
703} 703}
704 704
705static int rv_get_dal_power_level(struct pp_hwmgr *hwmgr, 705static int smu10_get_dal_power_level(struct pp_hwmgr *hwmgr,
706 struct amd_pp_simple_clock_info *info) 706 struct amd_pp_simple_clock_info *info)
707{ 707{
708 return -EINVAL; 708 return -EINVAL;
709} 709}
710 710
711static int rv_force_clock_level(struct pp_hwmgr *hwmgr, 711static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
712 enum pp_clock_type type, uint32_t mask) 712 enum pp_clock_type type, uint32_t mask)
713{ 713{
714 return 0; 714 return 0;
715} 715}
716 716
717static int rv_print_clock_levels(struct pp_hwmgr *hwmgr, 717static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
718 enum pp_clock_type type, char *buf) 718 enum pp_clock_type type, char *buf)
719{ 719{
720 struct rv_hwmgr *data = (struct rv_hwmgr *)(hwmgr->backend); 720 struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);
721 struct rv_voltage_dependency_table *mclk_table = 721 struct smu10_voltage_dependency_table *mclk_table =
722 data->clock_vol_info.vdd_dep_on_fclk; 722 data->clock_vol_info.vdd_dep_on_fclk;
723 int i, now, size = 0; 723 int i, now, size = 0;
724 724
@@ -754,16 +754,16 @@ static int rv_print_clock_levels(struct pp_hwmgr *hwmgr,
754 return size; 754 return size;
755} 755}
756 756
757static int rv_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, 757static int smu10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
758 PHM_PerformanceLevelDesignation designation, uint32_t index, 758 PHM_PerformanceLevelDesignation designation, uint32_t index,
759 PHM_PerformanceLevel *level) 759 PHM_PerformanceLevel *level)
760{ 760{
761 struct rv_hwmgr *data; 761 struct smu10_hwmgr *data;
762 762
763 if (level == NULL || hwmgr == NULL || state == NULL) 763 if (level == NULL || hwmgr == NULL || state == NULL)
764 return -EINVAL; 764 return -EINVAL;
765 765
766 data = (struct rv_hwmgr *)(hwmgr->backend); 766 data = (struct smu10_hwmgr *)(hwmgr->backend);
767 767
768 if (index == 0) { 768 if (index == 0) {
769 level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk; 769 level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
@@ -780,10 +780,10 @@ static int rv_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_p
780 return 0; 780 return 0;
781} 781}
782 782
783static int rv_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, 783static int smu10_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
784 const struct pp_hw_power_state *state, struct pp_clock_info *clock_info) 784 const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
785{ 785{
786 const struct rv_power_state *ps = cast_const_rv_ps(state); 786 const struct smu10_power_state *ps = cast_const_smu10_ps(state);
787 787
788 clock_info->min_eng_clk = ps->levels[0].engine_clock / (1 << (ps->levels[0].ss_divider_index)); 788 clock_info->min_eng_clk = ps->levels[0].engine_clock / (1 << (ps->levels[0].ss_divider_index));
789 clock_info->max_eng_clk = ps->levels[ps->level - 1].engine_clock / (1 << (ps->levels[ps->level - 1].ss_divider_index)); 789 clock_info->max_eng_clk = ps->levels[ps->level - 1].engine_clock / (1 << (ps->levels[ps->level - 1].ss_divider_index));
@@ -798,7 +798,7 @@ static int rv_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
798#define MEM_LATENCY_ERR 0xFFFF 798#define MEM_LATENCY_ERR 0xFFFF
799 799
800 800
801static uint32_t rv_get_mem_latency(struct pp_hwmgr *hwmgr, 801static uint32_t smu10_get_mem_latency(struct pp_hwmgr *hwmgr,
802 uint32_t clock) 802 uint32_t clock)
803{ 803{
804 if (clock >= MEM_FREQ_LOW_LATENCY && 804 if (clock >= MEM_FREQ_LOW_LATENCY &&
@@ -810,14 +810,14 @@ static uint32_t rv_get_mem_latency(struct pp_hwmgr *hwmgr,
810 return MEM_LATENCY_ERR; 810 return MEM_LATENCY_ERR;
811} 811}
812 812
813static int rv_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, 813static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
814 enum amd_pp_clock_type type, 814 enum amd_pp_clock_type type,
815 struct pp_clock_levels_with_latency *clocks) 815 struct pp_clock_levels_with_latency *clocks)
816{ 816{
817 uint32_t i; 817 uint32_t i;
818 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 818 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
819 struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info); 819 struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
820 struct rv_voltage_dependency_table *pclk_vol_table; 820 struct smu10_voltage_dependency_table *pclk_vol_table;
821 bool latency_required = false; 821 bool latency_required = false;
822 822
823 if (pinfo == NULL) 823 if (pinfo == NULL)
@@ -854,7 +854,7 @@ static int rv_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
854 for (i = 0; i < pclk_vol_table->count; i++) { 854 for (i = 0; i < pclk_vol_table->count; i++) {
855 clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk; 855 clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
856 clocks->data[i].latency_in_us = latency_required ? 856 clocks->data[i].latency_in_us = latency_required ?
857 rv_get_mem_latency(hwmgr, 857 smu10_get_mem_latency(hwmgr,
858 pclk_vol_table->entries[i].clk) : 858 pclk_vol_table->entries[i].clk) :
859 0; 859 0;
860 clocks->num_levels++; 860 clocks->num_levels++;
@@ -863,14 +863,14 @@ static int rv_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
863 return 0; 863 return 0;
864} 864}
865 865
866static int rv_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, 866static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
867 enum amd_pp_clock_type type, 867 enum amd_pp_clock_type type,
868 struct pp_clock_levels_with_voltage *clocks) 868 struct pp_clock_levels_with_voltage *clocks)
869{ 869{
870 uint32_t i; 870 uint32_t i;
871 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 871 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
872 struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info); 872 struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
873 struct rv_voltage_dependency_table *pclk_vol_table = NULL; 873 struct smu10_voltage_dependency_table *pclk_vol_table = NULL;
874 874
875 if (pinfo == NULL) 875 if (pinfo == NULL)
876 return -EINVAL; 876 return -EINVAL;
@@ -905,28 +905,28 @@ static int rv_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
905 return 0; 905 return 0;
906} 906}
907 907
908int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr, 908static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
909 struct pp_display_clock_request *clock_req) 909 struct pp_display_clock_request *clock_req)
910{ 910{
911 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 911 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
912 enum amd_pp_clock_type clk_type = clock_req->clock_type; 912 enum amd_pp_clock_type clk_type = clock_req->clock_type;
913 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000; 913 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
914 PPSMC_Msg msg; 914 PPSMC_Msg msg;
915 915
916 switch (clk_type) { 916 switch (clk_type) {
917 case amd_pp_dcf_clock: 917 case amd_pp_dcf_clock:
918 if (clk_freq == rv_data->dcf_actual_hard_min_freq) 918 if (clk_freq == smu10_data->dcf_actual_hard_min_freq)
919 return 0; 919 return 0;
920 msg = PPSMC_MSG_SetHardMinDcefclkByFreq; 920 msg = PPSMC_MSG_SetHardMinDcefclkByFreq;
921 rv_data->dcf_actual_hard_min_freq = clk_freq; 921 smu10_data->dcf_actual_hard_min_freq = clk_freq;
922 break; 922 break;
923 case amd_pp_soc_clock: 923 case amd_pp_soc_clock:
924 msg = PPSMC_MSG_SetHardMinSocclkByFreq; 924 msg = PPSMC_MSG_SetHardMinSocclkByFreq;
925 break; 925 break;
926 case amd_pp_f_clock: 926 case amd_pp_f_clock:
927 if (clk_freq == rv_data->f_actual_hard_min_freq) 927 if (clk_freq == smu10_data->f_actual_hard_min_freq)
928 return 0; 928 return 0;
929 rv_data->f_actual_hard_min_freq = clk_freq; 929 smu10_data->f_actual_hard_min_freq = clk_freq;
930 msg = PPSMC_MSG_SetHardMinFclkByFreq; 930 msg = PPSMC_MSG_SetHardMinFclkByFreq;
931 break; 931 break;
932 default: 932 default:
@@ -939,13 +939,13 @@ int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
939 return 0; 939 return 0;
940} 940}
941 941
942static int rv_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks) 942static int smu10_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
943{ 943{
944 clocks->engine_max_clock = 80000; /* driver can't get engine clock, temp hard code to 800MHz */ 944 clocks->engine_max_clock = 80000; /* driver can't get engine clock, temp hard code to 800MHz */
945 return 0; 945 return 0;
946} 946}
947 947
948static int rv_thermal_get_temperature(struct pp_hwmgr *hwmgr) 948static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
949{ 949{
950 uint32_t reg_offset = soc15_get_register_offset(THM_HWID, 0, 950 uint32_t reg_offset = soc15_get_register_offset(THM_HWID, 0,
951 mmTHM_TCON_CUR_TMP_BASE_IDX, mmTHM_TCON_CUR_TMP); 951 mmTHM_TCON_CUR_TMP_BASE_IDX, mmTHM_TCON_CUR_TMP);
@@ -961,7 +961,7 @@ static int rv_thermal_get_temperature(struct pp_hwmgr *hwmgr)
961 return cur_temp; 961 return cur_temp;
962} 962}
963 963
964static int rv_read_sensor(struct pp_hwmgr *hwmgr, int idx, 964static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
965 void *value, int *size) 965 void *value, int *size)
966{ 966{
967 uint32_t sclk, mclk; 967 uint32_t sclk, mclk;
@@ -983,7 +983,7 @@ static int rv_read_sensor(struct pp_hwmgr *hwmgr, int idx,
983 *size = 4; 983 *size = 4;
984 break; 984 break;
985 case AMDGPU_PP_SENSOR_GPU_TEMP: 985 case AMDGPU_PP_SENSOR_GPU_TEMP:
986 *((uint32_t *)value) = rv_thermal_get_temperature(hwmgr); 986 *((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr);
987 break; 987 break;
988 default: 988 default:
989 ret = -EINVAL; 989 ret = -EINVAL;
@@ -993,50 +993,50 @@ static int rv_read_sensor(struct pp_hwmgr *hwmgr, int idx,
993 return ret; 993 return ret;
994} 994}
995 995
996static int rv_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr) 996static int smu10_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr)
997{ 997{
998 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub); 998 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
999} 999}
1000 1000
1001static const struct pp_hwmgr_func rv_hwmgr_funcs = { 1001static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
1002 .backend_init = rv_hwmgr_backend_init, 1002 .backend_init = smu10_hwmgr_backend_init,
1003 .backend_fini = rv_hwmgr_backend_fini, 1003 .backend_fini = smu10_hwmgr_backend_fini,
1004 .asic_setup = NULL, 1004 .asic_setup = NULL,
1005 .apply_state_adjust_rules = rv_apply_state_adjust_rules, 1005 .apply_state_adjust_rules = smu10_apply_state_adjust_rules,
1006 .force_dpm_level = rv_dpm_force_dpm_level, 1006 .force_dpm_level = smu10_dpm_force_dpm_level,
1007 .get_power_state_size = rv_get_power_state_size, 1007 .get_power_state_size = smu10_get_power_state_size,
1008 .powerdown_uvd = NULL, 1008 .powerdown_uvd = NULL,
1009 .powergate_uvd = NULL, 1009 .powergate_uvd = NULL,
1010 .powergate_vce = NULL, 1010 .powergate_vce = NULL,
1011 .get_mclk = rv_dpm_get_mclk, 1011 .get_mclk = smu10_dpm_get_mclk,
1012 .get_sclk = rv_dpm_get_sclk, 1012 .get_sclk = smu10_dpm_get_sclk,
1013 .patch_boot_state = rv_dpm_patch_boot_state, 1013 .patch_boot_state = smu10_dpm_patch_boot_state,
1014 .get_pp_table_entry = rv_dpm_get_pp_table_entry, 1014 .get_pp_table_entry = smu10_dpm_get_pp_table_entry,
1015 .get_num_of_pp_table_entries = rv_dpm_get_num_of_pp_table_entries, 1015 .get_num_of_pp_table_entries = smu10_dpm_get_num_of_pp_table_entries,
1016 .set_cpu_power_state = rv_set_cpu_power_state, 1016 .set_cpu_power_state = smu10_set_cpu_power_state,
1017 .store_cc6_data = rv_store_cc6_data, 1017 .store_cc6_data = smu10_store_cc6_data,
1018 .force_clock_level = rv_force_clock_level, 1018 .force_clock_level = smu10_force_clock_level,
1019 .print_clock_levels = rv_print_clock_levels, 1019 .print_clock_levels = smu10_print_clock_levels,
1020 .get_dal_power_level = rv_get_dal_power_level, 1020 .get_dal_power_level = smu10_get_dal_power_level,
1021 .get_performance_level = rv_get_performance_level, 1021 .get_performance_level = smu10_get_performance_level,
1022 .get_current_shallow_sleep_clocks = rv_get_current_shallow_sleep_clocks, 1022 .get_current_shallow_sleep_clocks = smu10_get_current_shallow_sleep_clocks,
1023 .get_clock_by_type_with_latency = rv_get_clock_by_type_with_latency, 1023 .get_clock_by_type_with_latency = smu10_get_clock_by_type_with_latency,
1024 .get_clock_by_type_with_voltage = rv_get_clock_by_type_with_voltage, 1024 .get_clock_by_type_with_voltage = smu10_get_clock_by_type_with_voltage,
1025 .get_max_high_clocks = rv_get_max_high_clocks, 1025 .get_max_high_clocks = smu10_get_max_high_clocks,
1026 .read_sensor = rv_read_sensor, 1026 .read_sensor = smu10_read_sensor,
1027 .set_active_display_count = rv_set_active_display_count, 1027 .set_active_display_count = smu10_set_active_display_count,
1028 .set_deep_sleep_dcefclk = rv_set_deep_sleep_dcefclk, 1028 .set_deep_sleep_dcefclk = smu10_set_deep_sleep_dcefclk,
1029 .dynamic_state_management_enable = rv_enable_dpm_tasks, 1029 .dynamic_state_management_enable = smu10_enable_dpm_tasks,
1030 .power_off_asic = rv_power_off_asic, 1030 .power_off_asic = smu10_power_off_asic,
1031 .asic_setup = rv_setup_asic_task, 1031 .asic_setup = smu10_setup_asic_task,
1032 .power_state_set = rv_set_power_state_tasks, 1032 .power_state_set = smu10_set_power_state_tasks,
1033 .dynamic_state_management_disable = rv_disable_dpm_tasks, 1033 .dynamic_state_management_disable = smu10_disable_dpm_tasks,
1034 .set_mmhub_powergating_by_smu = rv_set_mmhub_powergating_by_smu, 1034 .set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu,
1035}; 1035};
1036 1036
1037int rv_init_function_pointers(struct pp_hwmgr *hwmgr) 1037int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
1038{ 1038{
1039 hwmgr->hwmgr_func = &rv_hwmgr_funcs; 1039 hwmgr->hwmgr_func = &smu10_hwmgr_funcs;
1040 hwmgr->pptable_func = &pptable_funcs; 1040 hwmgr->pptable_func = &pptable_funcs;
1041 return 0; 1041 return 0;
1042} 1042}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
index c3bc311dc59f..175c3a592b6c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
@@ -21,17 +21,17 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef RAVEN_HWMGR_H 24#ifndef SMU10_HWMGR_H
25#define RAVEN_HWMGR_H 25#define SMU10_HWMGR_H
26 26
27#include "hwmgr.h" 27#include "hwmgr.h"
28#include "rv_inc.h" 28#include "smu10_inc.h"
29#include "smu10_driver_if.h" 29#include "smu10_driver_if.h"
30#include "rv_ppsmc.h" 30#include "rv_ppsmc.h"
31 31
32 32
33#define RAVEN_MAX_HARDWARE_POWERLEVELS 8 33#define SMU10_MAX_HARDWARE_POWERLEVELS 8
34#define PHMRAVEN_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15 34#define SMU10_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15
35 35
36#define DPMFlags_SCLK_Enabled 0x00000001 36#define DPMFlags_SCLK_Enabled 0x00000001
37#define DPMFlags_UVD_Enabled 0x00000002 37#define DPMFlags_UVD_Enabled 0x00000002
@@ -47,10 +47,10 @@
47 47
48#define SMU_PHYID_SHIFT 8 48#define SMU_PHYID_SHIFT 8
49 49
50#define RAVEN_PCIE_POWERGATING_TARGET_GFX 0 50#define SMU10_PCIE_POWERGATING_TARGET_GFX 0
51#define RAVEN_PCIE_POWERGATING_TARGET_DDI 1 51#define SMU10_PCIE_POWERGATING_TARGET_DDI 1
52#define RAVEN_PCIE_POWERGATING_TARGET_PLLCASCADE 2 52#define SMU10_PCIE_POWERGATING_TARGET_PLLCASCADE 2
53#define RAVEN_PCIE_POWERGATING_TARGET_PHY 3 53#define SMU10_PCIE_POWERGATING_TARGET_PHY 3
54 54
55enum VQ_TYPE { 55enum VQ_TYPE {
56 CLOCK_TYPE_DCLK = 0L, 56 CLOCK_TYPE_DCLK = 0L,
@@ -65,14 +65,14 @@ enum VQ_TYPE {
65#define SUSTAINABLE_CU_MASK 0xff000000 65#define SUSTAINABLE_CU_MASK 0xff000000
66#define SUSTAINABLE_CU_SHIFT 24 66#define SUSTAINABLE_CU_SHIFT 24
67 67
68struct rv_dpm_entry { 68struct smu10_dpm_entry {
69 uint32_t soft_min_clk; 69 uint32_t soft_min_clk;
70 uint32_t hard_min_clk; 70 uint32_t hard_min_clk;
71 uint32_t soft_max_clk; 71 uint32_t soft_max_clk;
72 uint32_t hard_max_clk; 72 uint32_t hard_max_clk;
73}; 73};
74 74
75struct rv_power_level { 75struct smu10_power_level {
76 uint32_t engine_clock; 76 uint32_t engine_clock;
77 uint8_t vddc_index; 77 uint8_t vddc_index;
78 uint8_t ds_divider_index; 78 uint8_t ds_divider_index;
@@ -86,14 +86,14 @@ struct rv_power_level {
86 uint8_t rsv[3]; 86 uint8_t rsv[3];
87}; 87};
88 88
89/*used for the nbpsFlags field in rv_power state*/ 89/*used for the nbpsFlags field in smu10_power state*/
90#define RAVEN_POWERSTATE_FLAGS_NBPS_FORCEHIGH (1<<0) 90#define SMU10_POWERSTATE_FLAGS_NBPS_FORCEHIGH (1<<0)
91#define RAVEN_POWERSTATE_FLAGS_NBPS_LOCKTOHIGH (1<<1) 91#define SMU10_POWERSTATE_FLAGS_NBPS_LOCKTOHIGH (1<<1)
92#define RAVEN_POWERSTATE_FLAGS_NBPS_LOCKTOLOW (1<<2) 92#define SMU10_POWERSTATE_FLAGS_NBPS_LOCKTOLOW (1<<2)
93 93
94#define RAVEN_POWERSTATE_FLAGS_BAPM_DISABLE (1<<0) 94#define SMU10_POWERSTATE_FLAGS_BAPM_DISABLE (1<<0)
95 95
96struct rv_uvd_clocks { 96struct smu10_uvd_clocks {
97 uint32_t vclk; 97 uint32_t vclk;
98 uint32_t dclk; 98 uint32_t dclk;
99 uint32_t vclk_low_divider; 99 uint32_t vclk_low_divider;
@@ -118,16 +118,16 @@ struct pp_disable_nbpslo_flags {
118}; 118};
119 119
120 120
121enum rv_pstate_previous_action { 121enum smu10_pstate_previous_action {
122 DO_NOTHING = 1, 122 DO_NOTHING = 1,
123 FORCE_HIGH, 123 FORCE_HIGH,
124 CANCEL_FORCE_HIGH 124 CANCEL_FORCE_HIGH
125}; 125};
126 126
127struct rv_power_state { 127struct smu10_power_state {
128 unsigned int magic; 128 unsigned int magic;
129 uint32_t level; 129 uint32_t level;
130 struct rv_uvd_clocks uvd_clocks; 130 struct smu10_uvd_clocks uvd_clocks;
131 uint32_t evclk; 131 uint32_t evclk;
132 uint32_t ecclk; 132 uint32_t ecclk;
133 uint32_t samclk; 133 uint32_t samclk;
@@ -141,79 +141,79 @@ struct rv_power_state {
141 uint8_t dpm_x_nbps_low; 141 uint8_t dpm_x_nbps_low;
142 uint8_t dpm_x_nbps_high; 142 uint8_t dpm_x_nbps_high;
143 143
144 enum rv_pstate_previous_action action; 144 enum smu10_pstate_previous_action action;
145 145
146 struct rv_power_level levels[RAVEN_MAX_HARDWARE_POWERLEVELS]; 146 struct smu10_power_level levels[SMU10_MAX_HARDWARE_POWERLEVELS];
147 struct pp_disable_nbpslo_flags nbpslo_flags; 147 struct pp_disable_nbpslo_flags nbpslo_flags;
148}; 148};
149 149
150#define RAVEN_NUM_NBPSTATES 4 150#define SMU10_NUM_NBPSTATES 4
151#define RAVEN_NUM_NBPMEMORYCLOCK 2 151#define SMU10_NUM_NBPMEMORYCLOCK 2
152 152
153 153
154struct rv_display_phy_info_entry { 154struct smu10_display_phy_info_entry {
155 uint8_t phy_present; 155 uint8_t phy_present;
156 uint8_t active_lane_mapping; 156 uint8_t active_lane_mapping;
157 uint8_t display_config_type; 157 uint8_t display_config_type;
158 uint8_t active_num_of_lanes; 158 uint8_t active_num_of_lanes;
159}; 159};
160 160
161#define RAVEN_MAX_DISPLAYPHY_IDS 10 161#define SMU10_MAX_DISPLAYPHY_IDS 10
162 162
163struct rv_display_phy_info { 163struct smu10_display_phy_info {
164 bool display_phy_access_initialized; 164 bool display_phy_access_initialized;
165 struct rv_display_phy_info_entry entries[RAVEN_MAX_DISPLAYPHY_IDS]; 165 struct smu10_display_phy_info_entry entries[SMU10_MAX_DISPLAYPHY_IDS];
166}; 166};
167 167
168#define MAX_DISPLAY_CLOCK_LEVEL 8 168#define MAX_DISPLAY_CLOCK_LEVEL 8
169 169
170struct rv_system_info{ 170struct smu10_system_info{
171 uint8_t htc_tmp_lmt; 171 uint8_t htc_tmp_lmt;
172 uint8_t htc_hyst_lmt; 172 uint8_t htc_hyst_lmt;
173}; 173};
174 174
175#define MAX_REGULAR_DPM_NUMBER 8 175#define MAX_REGULAR_DPM_NUMBER 8
176 176
177struct rv_mclk_latency_entries { 177struct smu10_mclk_latency_entries {
178 uint32_t frequency; 178 uint32_t frequency;
179 uint32_t latency; 179 uint32_t latency;
180}; 180};
181 181
182struct rv_mclk_latency_table { 182struct smu10_mclk_latency_table {
183 uint32_t count; 183 uint32_t count;
184 struct rv_mclk_latency_entries entries[MAX_REGULAR_DPM_NUMBER]; 184 struct smu10_mclk_latency_entries entries[MAX_REGULAR_DPM_NUMBER];
185}; 185};
186 186
187struct rv_clock_voltage_dependency_record { 187struct smu10_clock_voltage_dependency_record {
188 uint32_t clk; 188 uint32_t clk;
189 uint32_t vol; 189 uint32_t vol;
190}; 190};
191 191
192 192
193struct rv_voltage_dependency_table { 193struct smu10_voltage_dependency_table {
194 uint32_t count; 194 uint32_t count;
195 struct rv_clock_voltage_dependency_record entries[1]; 195 struct smu10_clock_voltage_dependency_record entries[1];
196}; 196};
197 197
198struct rv_clock_voltage_information { 198struct smu10_clock_voltage_information {
199 struct rv_voltage_dependency_table *vdd_dep_on_dcefclk; 199 struct smu10_voltage_dependency_table *vdd_dep_on_dcefclk;
200 struct rv_voltage_dependency_table *vdd_dep_on_socclk; 200 struct smu10_voltage_dependency_table *vdd_dep_on_socclk;
201 struct rv_voltage_dependency_table *vdd_dep_on_fclk; 201 struct smu10_voltage_dependency_table *vdd_dep_on_fclk;
202 struct rv_voltage_dependency_table *vdd_dep_on_mclk; 202 struct smu10_voltage_dependency_table *vdd_dep_on_mclk;
203 struct rv_voltage_dependency_table *vdd_dep_on_dispclk; 203 struct smu10_voltage_dependency_table *vdd_dep_on_dispclk;
204 struct rv_voltage_dependency_table *vdd_dep_on_dppclk; 204 struct smu10_voltage_dependency_table *vdd_dep_on_dppclk;
205 struct rv_voltage_dependency_table *vdd_dep_on_phyclk; 205 struct smu10_voltage_dependency_table *vdd_dep_on_phyclk;
206}; 206};
207 207
208struct rv_hwmgr { 208struct smu10_hwmgr {
209 uint32_t disable_driver_thermal_policy; 209 uint32_t disable_driver_thermal_policy;
210 uint32_t thermal_auto_throttling_treshold; 210 uint32_t thermal_auto_throttling_treshold;
211 struct rv_system_info sys_info; 211 struct smu10_system_info sys_info;
212 struct rv_mclk_latency_table mclk_latency_table; 212 struct smu10_mclk_latency_table mclk_latency_table;
213 213
214 uint32_t ddi_power_gating_disabled; 214 uint32_t ddi_power_gating_disabled;
215 215
216 struct rv_display_phy_info_entry display_phy_info; 216 struct smu10_display_phy_info_entry display_phy_info;
217 uint32_t dce_slow_sclk_threshold; 217 uint32_t dce_slow_sclk_threshold;
218 218
219 bool disp_clk_bypass; 219 bool disp_clk_bypass;
@@ -255,10 +255,10 @@ struct rv_hwmgr {
255 uint32_t fps_low_threshold; 255 uint32_t fps_low_threshold;
256 256
257 uint32_t dpm_flags; 257 uint32_t dpm_flags;
258 struct rv_dpm_entry sclk_dpm; 258 struct smu10_dpm_entry sclk_dpm;
259 struct rv_dpm_entry uvd_dpm; 259 struct smu10_dpm_entry uvd_dpm;
260 struct rv_dpm_entry vce_dpm; 260 struct smu10_dpm_entry vce_dpm;
261 struct rv_dpm_entry acp_dpm; 261 struct smu10_dpm_entry acp_dpm;
262 bool acp_power_up_no_dsp; 262 bool acp_power_up_no_dsp;
263 263
264 uint32_t max_sclk_level; 264 uint32_t max_sclk_level;
@@ -291,7 +291,7 @@ struct rv_hwmgr {
291 291
292 bool gfx_off_controled_by_driver; 292 bool gfx_off_controled_by_driver;
293 Watermarks_t water_marks_table; 293 Watermarks_t water_marks_table;
294 struct rv_clock_voltage_information clock_vol_info; 294 struct smu10_clock_voltage_information clock_vol_info;
295 DpmClocks_t clock_table; 295 DpmClocks_t clock_table;
296 296
297 uint32_t active_process_mask; 297 uint32_t active_process_mask;
@@ -302,21 +302,21 @@ struct rv_hwmgr {
302 302
303struct pp_hwmgr; 303struct pp_hwmgr;
304 304
305int rv_init_function_pointers(struct pp_hwmgr *hwmgr); 305int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);
306 306
307/* UMD PState Raven Msg Parameters in MHz */ 307/* UMD PState SMU10 Msg Parameters in MHz */
308#define RAVEN_UMD_PSTATE_GFXCLK 700 308#define SMU10_UMD_PSTATE_GFXCLK 700
309#define RAVEN_UMD_PSTATE_SOCCLK 626 309#define SMU10_UMD_PSTATE_SOCCLK 626
310#define RAVEN_UMD_PSTATE_FCLK 933 310#define SMU10_UMD_PSTATE_FCLK 933
311#define RAVEN_UMD_PSTATE_VCE 0x03C00320 311#define SMU10_UMD_PSTATE_VCE 0x03C00320
312 312
313#define RAVEN_UMD_PSTATE_PEAK_GFXCLK 1100 313#define SMU10_UMD_PSTATE_PEAK_GFXCLK 1100
314#define RAVEN_UMD_PSTATE_PEAK_SOCCLK 757 314#define SMU10_UMD_PSTATE_PEAK_SOCCLK 757
315#define RAVEN_UMD_PSTATE_PEAK_FCLK 1200 315#define SMU10_UMD_PSTATE_PEAK_FCLK 1200
316 316
317#define RAVEN_UMD_PSTATE_MIN_GFXCLK 200 317#define SMU10_UMD_PSTATE_MIN_GFXCLK 200
318#define RAVEN_UMD_PSTATE_MIN_FCLK 400 318#define SMU10_UMD_PSTATE_MIN_FCLK 400
319#define RAVEN_UMD_PSTATE_MIN_SOCCLK 200 319#define SMU10_UMD_PSTATE_MIN_SOCCLK 200
320#define RAVEN_UMD_PSTATE_MIN_VCE 0x0190012C 320#define SMU10_UMD_PSTATE_MIN_VCE 0x0190012C
321 321
322#endif 322#endif
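The renamed struct smu10_hwmgr above is the hwmgr's private data. A minimal sketch of how callers would reach it, assuming the usual powerplay convention that hwmgr->backend holds the structure allocated during hwmgr init (that allocation is outside this hunk):

/* Hedged sketch, not part of this patch: fetch the renamed private data.
 * Assumes hwmgr->backend points at the struct smu10_hwmgr set up during
 * hwmgr initialization, per the usual powerplay convention. */
static struct smu10_hwmgr *smu10_get_pp_data(struct pp_hwmgr *hwmgr)
{
	return (struct smu10_hwmgr *)hwmgr->backend;
}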
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_inc.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_inc.h
index ae59a3fdea8a..edb68e302f6f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_inc.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_inc.h
@@ -21,8 +21,8 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef RAVEN_INC_H 24#ifndef SMU10_INC_H
25#define RAVEN_INC_H 25#define SMU10_INC_H
26 26
27 27
28#include "asic_reg/mp/mp_10_0_default.h" 28#include "asic_reg/mp/mp_10_0_default.h"
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 494f8914fdd2..9bdad4875fc4 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -820,7 +820,7 @@ extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
820 820
821extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr); 821extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr);
822extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr); 822extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
823extern int rv_init_function_pointers(struct pp_hwmgr *hwmgr); 823extern int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);
824 824
825extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, 825extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
826 uint32_t sclk, uint16_t id, uint16_t *voltage); 826 uint32_t sclk, uint16_t id, uint16_t *voltage);
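smu10_init_function_pointers() declared above is the entry point the hwmgr core calls for Raven parts. A hedged sketch of that hookup, assuming the chip-id selection in hwmgr_early_init() (hwmgr.c, not shown in this hunk) pairs it with the renamed smu10_smu_funcs table:

/* Hedged sketch, not part of this patch: attach the renamed SMU10 smumgr
 * table and hwmgr callbacks, as the Raven branch of hwmgr_early_init()
 * would. Assumes smu10_smu_funcs is visible at this point. */
static int example_attach_smu10(struct pp_hwmgr *hwmgr)
{
	hwmgr->smumgr_funcs = &smu10_smu_funcs;
	return smu10_init_function_pointers(hwmgr);
}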
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index 98e701e4f553..f5c45425fcc7 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -25,7 +25,7 @@
25 25
26SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o \ 26SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o \
27 polaris10_smumgr.o iceland_smumgr.o \ 27 polaris10_smumgr.o iceland_smumgr.o \
28 smu7_smumgr.o vega10_smumgr.o rv_smumgr.o ci_smumgr.o 28 smu7_smumgr.o vega10_smumgr.o smu10_smumgr.o ci_smumgr.o
29 29
30AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR)) 30AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
31 31
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index 3dec4819dc7f..bef0b2dcac9a 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -22,9 +22,9 @@
22 */ 22 */
23 23
24#include "smumgr.h" 24#include "smumgr.h"
25#include "rv_inc.h" 25#include "smu10_inc.h"
26#include "pp_soc15.h" 26#include "pp_soc15.h"
27#include "rv_smumgr.h" 27#include "smu10_smumgr.h"
28#include "ppatomctrl.h" 28#include "ppatomctrl.h"
29#include "rv_ppsmc.h" 29#include "rv_ppsmc.h"
30#include "smu10_driver_if.h" 30#include "smu10_driver_if.h"
@@ -47,7 +47,7 @@
47#define smnMP1_FIRMWARE_FLAGS 0x3010028 47#define smnMP1_FIRMWARE_FLAGS 0x3010028
48 48
49 49
50static uint32_t rv_wait_for_response(struct pp_hwmgr *hwmgr) 50static uint32_t smu10_wait_for_response(struct pp_hwmgr *hwmgr)
51{ 51{
52 uint32_t reg; 52 uint32_t reg;
53 53
@@ -60,7 +60,7 @@ static uint32_t rv_wait_for_response(struct pp_hwmgr *hwmgr)
60 return cgs_read_register(hwmgr->device, reg); 60 return cgs_read_register(hwmgr->device, reg);
61} 61}
62 62
63static int rv_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, 63static int smu10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
64 uint16_t msg) 64 uint16_t msg)
65{ 65{
66 uint32_t reg; 66 uint32_t reg;
@@ -72,7 +72,7 @@ static int rv_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
72 return 0; 72 return 0;
73} 73}
74 74
75static int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr) 75static int smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr)
76{ 76{
77 uint32_t reg; 77 uint32_t reg;
78 78
@@ -82,31 +82,31 @@ static int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr)
82 return cgs_read_register(hwmgr->device, reg); 82 return cgs_read_register(hwmgr->device, reg);
83} 83}
84 84
85static int rv_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) 85static int smu10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
86{ 86{
87 uint32_t reg; 87 uint32_t reg;
88 88
89 rv_wait_for_response(hwmgr); 89 smu10_wait_for_response(hwmgr);
90 90
91 reg = soc15_get_register_offset(MP1_HWID, 0, 91 reg = soc15_get_register_offset(MP1_HWID, 0,
92 mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); 92 mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
93 cgs_write_register(hwmgr->device, reg, 0); 93 cgs_write_register(hwmgr->device, reg, 0);
94 94
95 rv_send_msg_to_smc_without_waiting(hwmgr, msg); 95 smu10_send_msg_to_smc_without_waiting(hwmgr, msg);
96 96
97 if (rv_wait_for_response(hwmgr) == 0) 97 if (smu10_wait_for_response(hwmgr) == 0)
98 printk("Failed to send Message %x.\n", msg); 98 printk("Failed to send Message %x.\n", msg);
99 99
100 return 0; 100 return 0;
101} 101}
102 102
103 103
104static int rv_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, 104static int smu10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
105 uint16_t msg, uint32_t parameter) 105 uint16_t msg, uint32_t parameter)
106{ 106{
107 uint32_t reg; 107 uint32_t reg;
108 108
109 rv_wait_for_response(hwmgr); 109 smu10_wait_for_response(hwmgr);
110 110
111 reg = soc15_get_register_offset(MP1_HWID, 0, 111 reg = soc15_get_register_offset(MP1_HWID, 0,
112 mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); 112 mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
@@ -116,20 +116,20 @@ static int rv_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
116 mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); 116 mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
117 cgs_write_register(hwmgr->device, reg, parameter); 117 cgs_write_register(hwmgr->device, reg, parameter);
118 118
119 rv_send_msg_to_smc_without_waiting(hwmgr, msg); 119 smu10_send_msg_to_smc_without_waiting(hwmgr, msg);
120 120
121 121
122 if (rv_wait_for_response(hwmgr) == 0) 122 if (smu10_wait_for_response(hwmgr) == 0)
123 printk("Failed to send Message %x.\n", msg); 123 printk("Failed to send Message %x.\n", msg);
124 124
125 return 0; 125 return 0;
126} 126}
127 127
128static int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr, 128static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
129 uint8_t *table, int16_t table_id) 129 uint8_t *table, int16_t table_id)
130{ 130{
131 struct rv_smumgr *priv = 131 struct smu10_smumgr *priv =
132 (struct rv_smumgr *)(hwmgr->smu_backend); 132 (struct smu10_smumgr *)(hwmgr->smu_backend);
133 133
134 PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, 134 PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
135 "Invalid SMU Table ID!", return -EINVAL;); 135 "Invalid SMU Table ID!", return -EINVAL;);
@@ -137,13 +137,13 @@ static int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr,
137 "Invalid SMU Table version!", return -EINVAL;); 137 "Invalid SMU Table version!", return -EINVAL;);
138 PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0, 138 PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
139 "Invalid SMU Table Length!", return -EINVAL;); 139 "Invalid SMU Table Length!", return -EINVAL;);
140 rv_send_msg_to_smc_with_parameter(hwmgr, 140 smu10_send_msg_to_smc_with_parameter(hwmgr,
141 PPSMC_MSG_SetDriverDramAddrHigh, 141 PPSMC_MSG_SetDriverDramAddrHigh,
142 upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)); 142 upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
143 rv_send_msg_to_smc_with_parameter(hwmgr, 143 smu10_send_msg_to_smc_with_parameter(hwmgr,
144 PPSMC_MSG_SetDriverDramAddrLow, 144 PPSMC_MSG_SetDriverDramAddrLow,
145 lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)); 145 lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
146 rv_send_msg_to_smc_with_parameter(hwmgr, 146 smu10_send_msg_to_smc_with_parameter(hwmgr,
147 PPSMC_MSG_TransferTableSmu2Dram, 147 PPSMC_MSG_TransferTableSmu2Dram,
148 priv->smu_tables.entry[table_id].table_id); 148 priv->smu_tables.entry[table_id].table_id);
149 149
@@ -153,11 +153,11 @@ static int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr,
153 return 0; 153 return 0;
154} 154}
155 155
156static int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr, 156static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
157 uint8_t *table, int16_t table_id) 157 uint8_t *table, int16_t table_id)
158{ 158{
159 struct rv_smumgr *priv = 159 struct smu10_smumgr *priv =
160 (struct rv_smumgr *)(hwmgr->smu_backend); 160 (struct smu10_smumgr *)(hwmgr->smu_backend);
161 161
162 PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, 162 PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
163 "Invalid SMU Table ID!", return -EINVAL;); 163 "Invalid SMU Table ID!", return -EINVAL;);
@@ -169,26 +169,26 @@ static int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr,
169 memcpy(priv->smu_tables.entry[table_id].table, table, 169 memcpy(priv->smu_tables.entry[table_id].table, table,
170 priv->smu_tables.entry[table_id].size); 170 priv->smu_tables.entry[table_id].size);
171 171
172 rv_send_msg_to_smc_with_parameter(hwmgr, 172 smu10_send_msg_to_smc_with_parameter(hwmgr,
173 PPSMC_MSG_SetDriverDramAddrHigh, 173 PPSMC_MSG_SetDriverDramAddrHigh,
174 upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)); 174 upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
175 rv_send_msg_to_smc_with_parameter(hwmgr, 175 smu10_send_msg_to_smc_with_parameter(hwmgr,
176 PPSMC_MSG_SetDriverDramAddrLow, 176 PPSMC_MSG_SetDriverDramAddrLow,
177 lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)); 177 lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
178 rv_send_msg_to_smc_with_parameter(hwmgr, 178 smu10_send_msg_to_smc_with_parameter(hwmgr,
179 PPSMC_MSG_TransferTableDram2Smu, 179 PPSMC_MSG_TransferTableDram2Smu,
180 priv->smu_tables.entry[table_id].table_id); 180 priv->smu_tables.entry[table_id].table_id);
181 181
182 return 0; 182 return 0;
183} 183}
184 184
185static int rv_verify_smc_interface(struct pp_hwmgr *hwmgr) 185static int smu10_verify_smc_interface(struct pp_hwmgr *hwmgr)
186{ 186{
187 uint32_t smc_driver_if_version; 187 uint32_t smc_driver_if_version;
188 188
189 rv_send_msg_to_smc(hwmgr, 189 smu10_send_msg_to_smc(hwmgr,
190 PPSMC_MSG_GetDriverIfVersion); 190 PPSMC_MSG_GetDriverIfVersion);
191 smc_driver_if_version = rv_read_arg_from_smc(hwmgr); 191 smc_driver_if_version = smu10_read_arg_from_smc(hwmgr);
192 192
193 if (smc_driver_if_version != SMU10_DRIVER_IF_VERSION) { 193 if (smc_driver_if_version != SMU10_DRIVER_IF_VERSION) {
194 pr_err("Attempt to read SMC IF Version Number Failed!\n"); 194 pr_err("Attempt to read SMC IF Version Number Failed!\n");
@@ -199,39 +199,39 @@ static int rv_verify_smc_interface(struct pp_hwmgr *hwmgr)
199} 199}
200 200
201/* sdma is disabled by default in vbios, need to re-enable in driver */ 201/* sdma is disabled by default in vbios, need to re-enable in driver */
202static void rv_smc_enable_sdma(struct pp_hwmgr *hwmgr) 202static void smu10_smc_enable_sdma(struct pp_hwmgr *hwmgr)
203{ 203{
204 rv_send_msg_to_smc(hwmgr, 204 smu10_send_msg_to_smc(hwmgr,
205 PPSMC_MSG_PowerUpSdma); 205 PPSMC_MSG_PowerUpSdma);
206} 206}
207 207
208static void rv_smc_disable_sdma(struct pp_hwmgr *hwmgr) 208static void smu10_smc_disable_sdma(struct pp_hwmgr *hwmgr)
209{ 209{
210 rv_send_msg_to_smc(hwmgr, 210 smu10_send_msg_to_smc(hwmgr,
211 PPSMC_MSG_PowerDownSdma); 211 PPSMC_MSG_PowerDownSdma);
212} 212}
213 213
214/* vcn is disabled by default in vbios, need to re-enable in driver */ 214/* vcn is disabled by default in vbios, need to re-enable in driver */
215static void rv_smc_enable_vcn(struct pp_hwmgr *hwmgr) 215static void smu10_smc_enable_vcn(struct pp_hwmgr *hwmgr)
216{ 216{
217 rv_send_msg_to_smc_with_parameter(hwmgr, 217 smu10_send_msg_to_smc_with_parameter(hwmgr,
218 PPSMC_MSG_PowerUpVcn, 0); 218 PPSMC_MSG_PowerUpVcn, 0);
219} 219}
220 220
221static void rv_smc_disable_vcn(struct pp_hwmgr *hwmgr) 221static void smu10_smc_disable_vcn(struct pp_hwmgr *hwmgr)
222{ 222{
223 rv_send_msg_to_smc_with_parameter(hwmgr, 223 smu10_send_msg_to_smc_with_parameter(hwmgr,
224 PPSMC_MSG_PowerDownVcn, 0); 224 PPSMC_MSG_PowerDownVcn, 0);
225} 225}
226 226
227static int rv_smu_fini(struct pp_hwmgr *hwmgr) 227static int smu10_smu_fini(struct pp_hwmgr *hwmgr)
228{ 228{
229 struct rv_smumgr *priv = 229 struct smu10_smumgr *priv =
230 (struct rv_smumgr *)(hwmgr->smu_backend); 230 (struct smu10_smumgr *)(hwmgr->smu_backend);
231 231
232 if (priv) { 232 if (priv) {
233 rv_smc_disable_sdma(hwmgr); 233 smu10_smc_disable_sdma(hwmgr);
234 rv_smc_disable_vcn(hwmgr); 234 smu10_smc_disable_vcn(hwmgr);
235 amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_WMTABLE].handle, 235 amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_WMTABLE].handle,
236 &priv->smu_tables.entry[SMU10_WMTABLE].mc_addr, 236 &priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
237 &priv->smu_tables.entry[SMU10_WMTABLE].table); 237 &priv->smu_tables.entry[SMU10_WMTABLE].table);
@@ -245,29 +245,29 @@ static int rv_smu_fini(struct pp_hwmgr *hwmgr)
245 return 0; 245 return 0;
246} 246}
247 247
248static int rv_start_smu(struct pp_hwmgr *hwmgr) 248static int smu10_start_smu(struct pp_hwmgr *hwmgr)
249{ 249{
250 struct cgs_firmware_info info = {0}; 250 struct cgs_firmware_info info = {0};
251 251
252 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion); 252 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
253 hwmgr->smu_version = rv_read_arg_from_smc(hwmgr); 253 hwmgr->smu_version = smu10_read_arg_from_smc(hwmgr);
254 info.version = hwmgr->smu_version >> 8; 254 info.version = hwmgr->smu_version >> 8;
255 255
256 cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info); 256 cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
257 257
258 if (rv_verify_smc_interface(hwmgr)) 258 if (smu10_verify_smc_interface(hwmgr))
259 return -EINVAL; 259 return -EINVAL;
260 rv_smc_enable_sdma(hwmgr); 260 smu10_smc_enable_sdma(hwmgr);
261 rv_smc_enable_vcn(hwmgr); 261 smu10_smc_enable_vcn(hwmgr);
262 return 0; 262 return 0;
263} 263}
264 264
265static int rv_smu_init(struct pp_hwmgr *hwmgr) 265static int smu10_smu_init(struct pp_hwmgr *hwmgr)
266{ 266{
267 struct rv_smumgr *priv; 267 struct smu10_smumgr *priv;
268 int r; 268 int r;
269 269
270 priv = kzalloc(sizeof(struct rv_smumgr), GFP_KERNEL); 270 priv = kzalloc(sizeof(struct smu10_smumgr), GFP_KERNEL);
271 271
272 if (!priv) 272 if (!priv)
273 return -ENOMEM; 273 return -ENOMEM;
@@ -317,30 +317,30 @@ err0:
317 return -EINVAL; 317 return -EINVAL;
318} 318}
319 319
320static int rv_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw) 320static int smu10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw)
321{ 321{
322 int ret; 322 int ret;
323 323
324 if (rw) 324 if (rw)
325 ret = rv_copy_table_from_smc(hwmgr, table, table_id); 325 ret = smu10_copy_table_from_smc(hwmgr, table, table_id);
326 else 326 else
327 ret = rv_copy_table_to_smc(hwmgr, table, table_id); 327 ret = smu10_copy_table_to_smc(hwmgr, table, table_id);
328 328
329 return ret; 329 return ret;
330} 330}
331 331
332 332
333const struct pp_smumgr_func rv_smu_funcs = { 333const struct pp_smumgr_func smu10_smu_funcs = {
334 .smu_init = &rv_smu_init, 334 .smu_init = &smu10_smu_init,
335 .smu_fini = &rv_smu_fini, 335 .smu_fini = &smu10_smu_fini,
336 .start_smu = &rv_start_smu, 336 .start_smu = &smu10_start_smu,
337 .request_smu_load_specific_fw = NULL, 337 .request_smu_load_specific_fw = NULL,
338 .send_msg_to_smc = &rv_send_msg_to_smc, 338 .send_msg_to_smc = &smu10_send_msg_to_smc,
339 .send_msg_to_smc_with_parameter = &rv_send_msg_to_smc_with_parameter, 339 .send_msg_to_smc_with_parameter = &smu10_send_msg_to_smc_with_parameter,
340 .download_pptable_settings = NULL, 340 .download_pptable_settings = NULL,
341 .upload_pptable_settings = NULL, 341 .upload_pptable_settings = NULL,
342 .get_argument = rv_read_arg_from_smc, 342 .get_argument = smu10_read_arg_from_smc,
343 .smc_table_manager = rv_smc_table_manager, 343 .smc_table_manager = smu10_smc_table_manager,
344}; 344};
345 345
346 346
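The helpers above implement the MP1 mailbox handshake: wait for a non-zero value in MP1_SMN_C2PMSG_90, clear it, write the optional parameter to C2PMSG_82, post the message id, then poll C2PMSG_90 again; the returned argument is read back from the parameter register. A minimal same-file sketch of that flow, assuming PPSMC_MSG_GetGfxclkFrequency is among the message ids defined in rv_ppsmc.h (already included above):

/* Hedged sketch, not part of this patch (assumed symbol:
 * PPSMC_MSG_GetGfxclkFrequency). Reuses the renamed helpers above;
 * error handling omitted for brevity. */
static uint32_t smu10_example_get_gfxclk_mhz(struct pp_hwmgr *hwmgr)
{
	smu10_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
	return smu10_read_arg_from_smc(hwmgr);	/* value returned via C2PMSG_82 */
}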
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h
index 7b537981d0f6..9c2be74a2b2f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h
@@ -21,8 +21,8 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef PP_RAVEN_SMUMANAGER_H 24#ifndef PP_SMU10_SMUMANAGER_H
25#define PP_RAVEN_SMUMANAGER_H 25#define PP_SMU10_SMUMANAGER_H
26 26
27#include "rv_ppsmc.h" 27#include "rv_ppsmc.h"
28#include "smu10_driver_if.h" 28#include "smu10_driver_if.h"
@@ -42,7 +42,7 @@ struct smu_table_array {
42 struct smu_table_entry entry[MAX_SMU_TABLE]; 42 struct smu_table_entry entry[MAX_SMU_TABLE];
43}; 43};
44 44
45struct rv_smumgr { 45struct smu10_smumgr {
46 struct smu_table_array smu_tables; 46 struct smu_table_array smu_tables;
47}; 47};
48 48
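struct smu10_smumgr only carries the smu_tables array; generic code reaches it through the .smc_table_manager callback registered in smu10_smu_funcs. A hedged usage sketch, assuming the smum_smc_table_manager() wrapper in smumgr.c and a SMU10_CLOCKTABLE id defined alongside SMU10_WMTABLE (neither appears in this excerpt):

/* Hedged sketch, not part of this patch: read the DPM clock table out of
 * the SMU into a caller buffer. rw == true selects the SMC-to-DRAM
 * direction, per smu10_smc_table_manager(). Assumed symbols:
 * smum_smc_table_manager, SMU10_CLOCKTABLE. */
static int example_fetch_clock_table(struct pp_hwmgr *hwmgr, DpmClocks_t *clocks)
{
	return smum_smc_table_manager(hwmgr, (uint8_t *)clocks,
				      SMU10_CLOCKTABLE, true);
}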