-rw-r--r--  drivers/gpu/drm/amd/include/dm_pp_interface.h                    2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/Makefile                     4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c                      6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c            2444
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h             470
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.c        1364
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.h          53
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c   430
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.h    58
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c           324
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.h            66
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h             14
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h                       21
13 files changed, 5254 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/amd/include/dm_pp_interface.h b/drivers/gpu/drm/amd/include/dm_pp_interface.h
index 721473199921..7852952d1fde 100644
--- a/drivers/gpu/drm/amd/include/dm_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/dm_pp_interface.h
@@ -23,7 +23,7 @@
 #ifndef _DM_PP_INTERFACE_
 #define _DM_PP_INTERFACE_
 
-#define PP_MAX_CLOCK_LEVELS 8
+#define PP_MAX_CLOCK_LEVELS 16
 
 enum amd_pp_display_config_type{
 	AMD_PP_DisplayConfigType_None = 0,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index c1249e03c912..9446dbc47551 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -31,7 +31,9 @@ HARDWARE_MGR = hwmgr.o processpptables.o \
 	smu7_clockpowergating.o \
 	vega10_processpptables.o vega10_hwmgr.o vega10_powertune.o \
 	vega10_thermal.o smu10_hwmgr.o pp_psm.o\
-	pp_overdriver.o smu_helper.o pp_psm_legacy.o pp_psm_new.o
+	pp_overdriver.o smu_helper.o pp_psm_legacy.o pp_psm_new.o \
+	vega12_processpptables.o vega12_hwmgr.o \
+	vega12_powertune.o vega12_thermal.o
 
 AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 229030027f3e..8f032e693842 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -41,11 +41,13 @@ extern const struct pp_smumgr_func tonga_smu_funcs;
 extern const struct pp_smumgr_func fiji_smu_funcs;
 extern const struct pp_smumgr_func polaris10_smu_funcs;
 extern const struct pp_smumgr_func vega10_smu_funcs;
+extern const struct pp_smumgr_func vega12_smu_funcs;
 extern const struct pp_smumgr_func smu10_smu_funcs;
 
 extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr);
 extern int smu8_init_function_pointers(struct pp_hwmgr *hwmgr);
 extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
+extern int vega12_hwmgr_init(struct pp_hwmgr *hwmgr);
 extern int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);
 
 static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
@@ -186,6 +188,10 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
 		hwmgr->smumgr_funcs = &vega10_smu_funcs;
 		vega10_hwmgr_init(hwmgr);
 		break;
+	case CHIP_VEGA12:
+		hwmgr->smumgr_funcs = &vega12_smu_funcs;
+		vega12_hwmgr_init(hwmgr);
+		break;
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
new file mode 100644
index 000000000000..66633b6375f3
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -0,0 +1,2444 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/delay.h>
25#include <linux/fb.h>
26#include <linux/module.h>
27#include <linux/slab.h>
28
29#include "hwmgr.h"
30#include "amd_powerplay.h"
31#include "vega12_smumgr.h"
32#include "hardwaremanager.h"
33#include "ppatomfwctrl.h"
34#include "atomfirmware.h"
35#include "cgs_common.h"
36#include "vega12_powertune.h"
37#include "vega12_inc.h"
38#include "pp_soc15.h"
39#include "pppcielanes.h"
40#include "vega12_hwmgr.h"
41#include "vega12_processpptables.h"
42#include "vega12_pptable.h"
43#include "vega12_thermal.h"
44#include "vega12_ppsmc.h"
45#include "pp_debug.h"
46#include "amd_pcie_helpers.h"
47#include "cgs_linux.h"
48#include "ppinterrupt.h"
49#include "pp_overdriver.h"
50#include "pp_thermal.h"
51
52static const ULONG PhwVega12_Magic = (ULONG)(PHM_VIslands_Magic);
53
54static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
55 enum pp_clock_type type, uint32_t mask);
56static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr,
57 uint32_t *clock,
58 PPCLK_e clock_select,
59 bool max);
60
61struct vega12_power_state *cast_phw_vega12_power_state(
62 struct pp_hw_power_state *hw_ps)
63{
64 PP_ASSERT_WITH_CODE((PhwVega12_Magic == hw_ps->magic),
65 "Invalid Powerstate Type!",
66 return NULL;);
67
68 return (struct vega12_power_state *)hw_ps;
69}
70
71const struct vega12_power_state *cast_const_phw_vega12_power_state(
72 const struct pp_hw_power_state *hw_ps)
73{
74 PP_ASSERT_WITH_CODE((PhwVega12_Magic == hw_ps->magic),
75 "Invalid Powerstate Type!",
76 return NULL;);
77
78 return (const struct vega12_power_state *)hw_ps;
79}
80
81static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
82{
83 struct vega12_hwmgr *data =
84 (struct vega12_hwmgr *)(hwmgr->backend);
85
86 data->gfxclk_average_alpha = PPVEGA12_VEGA12GFXCLKAVERAGEALPHA_DFLT;
87 data->socclk_average_alpha = PPVEGA12_VEGA12SOCCLKAVERAGEALPHA_DFLT;
88 data->uclk_average_alpha = PPVEGA12_VEGA12UCLKCLKAVERAGEALPHA_DFLT;
89 data->gfx_activity_average_alpha = PPVEGA12_VEGA12GFXACTIVITYAVERAGEALPHA_DFLT;
90 data->lowest_uclk_reserved_for_ulv = PPVEGA12_VEGA12LOWESTUCLKRESERVEDFORULV_DFLT;
91
92 data->display_voltage_mode = PPVEGA12_VEGA12DISPLAYVOLTAGEMODE_DFLT;
93 data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
94 data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
95 data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
96 data->disp_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
97 data->disp_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
98 data->disp_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
99 data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
100 data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
101 data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
102 data->phy_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
103 data->phy_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
104 data->phy_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
105
106 data->registry_data.disallowed_features = 0x0;
107 data->registry_data.od_state_in_dc_support = 0;
108 data->registry_data.skip_baco_hardware = 0;
109
110 data->registry_data.log_avfs_param = 0;
111 data->registry_data.sclk_throttle_low_notification = 1;
112 data->registry_data.force_dpm_high = 0;
113 data->registry_data.stable_pstate_sclk_dpm_percentage = 75;
114
115 data->registry_data.didt_support = 0;
116 if (data->registry_data.didt_support) {
117 data->registry_data.didt_mode = 6;
118 data->registry_data.sq_ramping_support = 1;
119 data->registry_data.db_ramping_support = 0;
120 data->registry_data.td_ramping_support = 0;
121 data->registry_data.tcp_ramping_support = 0;
122 data->registry_data.dbr_ramping_support = 0;
123 data->registry_data.edc_didt_support = 1;
124 data->registry_data.gc_didt_support = 0;
125 data->registry_data.psm_didt_support = 0;
126 }
127
128 data->registry_data.pcie_lane_override = 0xff;
129 data->registry_data.pcie_speed_override = 0xff;
130 data->registry_data.pcie_clock_override = 0xffffffff;
131 data->registry_data.regulator_hot_gpio_support = 1;
132 data->registry_data.ac_dc_switch_gpio_support = 0;
133 data->registry_data.quick_transition_support = 0;
134 data->registry_data.zrpm_start_temp = 0xffff;
135 data->registry_data.zrpm_stop_temp = 0xffff;
136 data->registry_data.odn_feature_enable = 1;
137 data->registry_data.disable_water_mark = 0;
138 data->registry_data.disable_pp_tuning = 0;
139 data->registry_data.disable_xlpp_tuning = 0;
140 data->registry_data.disable_workload_policy = 0;
141 data->registry_data.perf_ui_tuning_profile_turbo = 0x19190F0F;
142 data->registry_data.perf_ui_tuning_profile_powerSave = 0x19191919;
143 data->registry_data.perf_ui_tuning_profile_xl = 0x00000F0A;
144 data->registry_data.force_workload_policy_mask = 0;
145 data->registry_data.disable_3d_fs_detection = 0;
146 data->registry_data.fps_support = 1;
147 data->registry_data.disable_auto_wattman = 1;
148 data->registry_data.auto_wattman_debug = 0;
149 data->registry_data.auto_wattman_sample_period = 100;
150 data->registry_data.auto_wattman_threshold = 50;
151}
152
153static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr)
154{
155 struct vega12_hwmgr *data =
156 (struct vega12_hwmgr *)(hwmgr->backend);
157 struct amdgpu_device *adev = hwmgr->adev;
158
159 if (data->vddci_control == VEGA12_VOLTAGE_CONTROL_NONE)
160 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
161 PHM_PlatformCaps_ControlVDDCI);
162
163 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
164 PHM_PlatformCaps_TablelessHardwareInterface);
165
166 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
167 PHM_PlatformCaps_EnableSMU7ThermalManagement);
168
169 if (adev->pg_flags & AMD_PG_SUPPORT_UVD) {
170 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
171 PHM_PlatformCaps_UVDPowerGating);
172 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
173 PHM_PlatformCaps_UVDDynamicPowerGating);
174 }
175
176 if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
177 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
178 PHM_PlatformCaps_VCEPowerGating);
179
180 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
181 PHM_PlatformCaps_UnTabledHardwareInterface);
182
183 if (data->registry_data.odn_feature_enable)
184 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
185 PHM_PlatformCaps_ODNinACSupport);
186 else {
187 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
188 PHM_PlatformCaps_OD6inACSupport);
189 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
190 PHM_PlatformCaps_OD6PlusinACSupport);
191 }
192
193 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
194 PHM_PlatformCaps_ActivityReporting);
195 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
196 PHM_PlatformCaps_FanSpeedInTableIsRPM);
197
198 if (data->registry_data.od_state_in_dc_support) {
199 if (data->registry_data.odn_feature_enable)
200 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
201 PHM_PlatformCaps_ODNinDCSupport);
202 else {
203 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
204 PHM_PlatformCaps_OD6inDCSupport);
205 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
206 PHM_PlatformCaps_OD6PlusinDCSupport);
207 }
208 }
209
210 if (data->registry_data.thermal_support
211 && data->registry_data.fuzzy_fan_control_support
212 && hwmgr->thermal_controller.advanceFanControlParameters.usTMax)
213 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
214 PHM_PlatformCaps_ODFuzzyFanControlSupport);
215
216 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
217 PHM_PlatformCaps_DynamicPowerManagement);
218 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
219 PHM_PlatformCaps_SMC);
220 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
221 PHM_PlatformCaps_ThermalPolicyDelay);
222
223 if (data->registry_data.force_dpm_high)
224 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
225 PHM_PlatformCaps_ExclusiveModeAlwaysHigh);
226
227 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
228 PHM_PlatformCaps_DynamicUVDState);
229
230 if (data->registry_data.sclk_throttle_low_notification)
231 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
232 PHM_PlatformCaps_SclkThrottleLowNotification);
233
234 /* power tune caps */
235 /* assume disabled */
236 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
237 PHM_PlatformCaps_PowerContainment);
238 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
239 PHM_PlatformCaps_DiDtSupport);
240 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
241 PHM_PlatformCaps_SQRamping);
242 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
243 PHM_PlatformCaps_DBRamping);
244 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
245 PHM_PlatformCaps_TDRamping);
246 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
247 PHM_PlatformCaps_TCPRamping);
248 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
249 PHM_PlatformCaps_DBRRamping);
250 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
251 PHM_PlatformCaps_DiDtEDCEnable);
252 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
253 PHM_PlatformCaps_GCEDC);
254 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
255 PHM_PlatformCaps_PSM);
256
257 if (data->registry_data.didt_support) {
258 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport);
259 if (data->registry_data.sq_ramping_support)
260 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping);
261 if (data->registry_data.db_ramping_support)
262 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping);
263 if (data->registry_data.td_ramping_support)
264 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping);
265 if (data->registry_data.tcp_ramping_support)
266 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping);
267 if (data->registry_data.dbr_ramping_support)
268 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping);
269 if (data->registry_data.edc_didt_support)
270 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable);
271 if (data->registry_data.gc_didt_support)
272 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC);
273 if (data->registry_data.psm_didt_support)
274 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
275 }
276
277 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
278 PHM_PlatformCaps_RegulatorHot);
279
280 if (data->registry_data.ac_dc_switch_gpio_support) {
281 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
282 PHM_PlatformCaps_AutomaticDCTransition);
283 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
284 PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
285 }
286
287 if (data->registry_data.quick_transition_support) {
288 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
289 PHM_PlatformCaps_AutomaticDCTransition);
290 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
291 PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
292 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
293 PHM_PlatformCaps_Falcon_QuickTransition);
294 }
295
296 if (data->lowest_uclk_reserved_for_ulv != PPVEGA12_VEGA12LOWESTUCLKRESERVEDFORULV_DFLT) {
297 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
298 PHM_PlatformCaps_LowestUclkReservedForUlv);
299 if (data->lowest_uclk_reserved_for_ulv == 1)
300 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
301 PHM_PlatformCaps_LowestUclkReservedForUlv);
302 }
303
304 if (data->registry_data.custom_fan_support)
305 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
306 PHM_PlatformCaps_CustomFanControlSupport);
307
308 return 0;
309}
310
311static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
312{
313 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
314 int i;
315
316 data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
317 FEATURE_DPM_PREFETCHER_BIT;
318 data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
319 FEATURE_DPM_GFXCLK_BIT;
320 data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
321 FEATURE_DPM_UCLK_BIT;
322 data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
323 FEATURE_DPM_SOCCLK_BIT;
324 data->smu_features[GNLD_DPM_UVD].smu_feature_id =
325 FEATURE_DPM_UVD_BIT;
326 data->smu_features[GNLD_DPM_VCE].smu_feature_id =
327 FEATURE_DPM_VCE_BIT;
328 data->smu_features[GNLD_ULV].smu_feature_id =
329 FEATURE_ULV_BIT;
330 data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
331 FEATURE_DPM_MP0CLK_BIT;
332 data->smu_features[GNLD_DPM_LINK].smu_feature_id =
333 FEATURE_DPM_LINK_BIT;
334 data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
335 FEATURE_DPM_DCEFCLK_BIT;
336 data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
337 FEATURE_DS_GFXCLK_BIT;
338 data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
339 FEATURE_DS_SOCCLK_BIT;
340 data->smu_features[GNLD_DS_LCLK].smu_feature_id =
341 FEATURE_DS_LCLK_BIT;
342 data->smu_features[GNLD_PPT].smu_feature_id =
343 FEATURE_PPT_BIT;
344 data->smu_features[GNLD_TDC].smu_feature_id =
345 FEATURE_TDC_BIT;
346 data->smu_features[GNLD_THERMAL].smu_feature_id =
347 FEATURE_THERMAL_BIT;
348 data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
349 FEATURE_GFX_PER_CU_CG_BIT;
350 data->smu_features[GNLD_RM].smu_feature_id =
351 FEATURE_RM_BIT;
352 data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
353 FEATURE_DS_DCEFCLK_BIT;
354 data->smu_features[GNLD_ACDC].smu_feature_id =
355 FEATURE_ACDC_BIT;
356 data->smu_features[GNLD_VR0HOT].smu_feature_id =
357 FEATURE_VR0HOT_BIT;
358 data->smu_features[GNLD_VR1HOT].smu_feature_id =
359 FEATURE_VR1HOT_BIT;
360 data->smu_features[GNLD_FW_CTF].smu_feature_id =
361 FEATURE_FW_CTF_BIT;
362 data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
363 FEATURE_LED_DISPLAY_BIT;
364 data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
365 FEATURE_FAN_CONTROL_BIT;
366 data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
367 data->smu_features[GNLD_GFXOFF].smu_feature_id = FEATURE_GFXOFF_BIT;
368 data->smu_features[GNLD_CG].smu_feature_id = FEATURE_CG_BIT;
369 data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;
370
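	/*
	 * Derive each feature's 64-bit bitmap from its SMU feature id and
	 * mark it as allowed unless the corresponding bit is set in the
	 * registry disallowed_features mask.
	 */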
371 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
372 data->smu_features[i].smu_feature_bitmap =
373 (uint64_t)(1ULL << data->smu_features[i].smu_feature_id);
374 data->smu_features[i].allowed =
375 ((data->registry_data.disallowed_features >> i) & 1) ?
376 false : true;
377 }
378}
379
380static int vega12_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
381{
382 return 0;
383}
384
385static int vega12_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
386{
387 kfree(hwmgr->backend);
388 hwmgr->backend = NULL;
389
390 return 0;
391}
392
393static int vega12_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
394{
395 int result = 0;
396 struct vega12_hwmgr *data;
397 struct amdgpu_device *adev = hwmgr->adev;
398
399 data = kzalloc(sizeof(struct vega12_hwmgr), GFP_KERNEL);
400 if (data == NULL)
401 return -ENOMEM;
402
403 hwmgr->backend = data;
404
405 vega12_set_default_registry_data(hwmgr);
406
407 data->disable_dpm_mask = 0xff;
408 data->workload_mask = 0xff;
409
410 /* need to set voltage control types before EVV patching */
411 data->vddc_control = VEGA12_VOLTAGE_CONTROL_NONE;
412 data->mvdd_control = VEGA12_VOLTAGE_CONTROL_NONE;
413 data->vddci_control = VEGA12_VOLTAGE_CONTROL_NONE;
414
415 data->water_marks_bitmap = 0;
416 data->avfs_exist = false;
417
418 vega12_set_features_platform_caps(hwmgr);
419
420 vega12_init_dpm_defaults(hwmgr);
421
422 /* Parse pptable data read from VBIOS */
423 vega12_set_private_data_based_on_pptable(hwmgr);
424
425 data->is_tlu_enabled = false;
426
427 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
428 VEGA12_MAX_HARDWARE_POWERLEVELS;
429 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
430 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
431
432 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
433 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
434 hwmgr->platform_descriptor.clockStep.engineClock = 500;
435 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
436
437 data->total_active_cus = adev->gfx.cu_info.number;
438 /* Setup default Overdrive Fan control settings */
439 data->odn_fan_table.target_fan_speed =
440 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
441 data->odn_fan_table.target_temperature =
442 hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature;
443 data->odn_fan_table.min_performance_clock =
444 hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit;
445 data->odn_fan_table.min_fan_limit =
446 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit *
447 hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
448
449 return result;
450}
451
452static int vega12_init_sclk_threshold(struct pp_hwmgr *hwmgr)
453{
454 struct vega12_hwmgr *data =
455 (struct vega12_hwmgr *)(hwmgr->backend);
456
457 data->low_sclk_interrupt_threshold = 0;
458
459 return 0;
460}
461
462static int vega12_setup_asic_task(struct pp_hwmgr *hwmgr)
463{
464 PP_ASSERT_WITH_CODE(!vega12_init_sclk_threshold(hwmgr),
465 "Failed to init sclk threshold!",
466 return -EINVAL);
467
468 return 0;
469}
470
471/*
472 * @fn vega12_init_dpm_state
473 * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
474 *
 475 * @param dpm_state - the address of the DPM Table to initialize.
476 * @return None.
477 */
478static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state)
479{
480 dpm_state->soft_min_level = 0xff;
481 dpm_state->soft_max_level = 0xff;
482 dpm_state->hard_min_level = 0xff;
483 dpm_state->hard_max_level = 0xff;
484}
485
 486/*
 487 * Initialize all DPM state tables for the SMU based on
 488 * the dependency table.
 489 * The dynamic state patching function will later trim
 490 * these tables to the allowed range according to the
 491 * power policy or external client requests, such as a
 492 * UVD request.
 493 */
494static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
495{
496 struct vega12_hwmgr *data =
497 (struct vega12_hwmgr *)(hwmgr->backend);
498 struct vega12_single_dpm_table *dpm_table;
499
500 memset(&data->dpm_table, 0, sizeof(data->dpm_table));
501
 502 /* Initialize Sclk DPM table based on allowed Sclk values */
503 dpm_table = &(data->dpm_table.soc_table);
504 vega12_init_dpm_state(&(dpm_table->dpm_state));
505
506 dpm_table = &(data->dpm_table.gfx_table);
507 vega12_init_dpm_state(&(dpm_table->dpm_state));
508
 509 /* Initialize Mclk DPM table based on allowed Mclk values */
510 dpm_table = &(data->dpm_table.mem_table);
511 vega12_init_dpm_state(&(dpm_table->dpm_state));
512
513 dpm_table = &(data->dpm_table.eclk_table);
514 vega12_init_dpm_state(&(dpm_table->dpm_state));
515
516 dpm_table = &(data->dpm_table.vclk_table);
517 vega12_init_dpm_state(&(dpm_table->dpm_state));
518
519 dpm_table = &(data->dpm_table.dclk_table);
520 vega12_init_dpm_state(&(dpm_table->dpm_state));
521
522 /* Assume there is no headless Vega12 for now */
523 dpm_table = &(data->dpm_table.dcef_table);
524 vega12_init_dpm_state(&(dpm_table->dpm_state));
525
526 dpm_table = &(data->dpm_table.pixel_table);
527 vega12_init_dpm_state(&(dpm_table->dpm_state));
528
529 dpm_table = &(data->dpm_table.display_table);
530 vega12_init_dpm_state(&(dpm_table->dpm_state));
531
532 dpm_table = &(data->dpm_table.phy_table);
533 vega12_init_dpm_state(&(dpm_table->dpm_state));
534
535 /* save a copy of the default DPM table */
536 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
537 sizeof(struct vega12_dpm_table));
538
539 return 0;
540}
541
542#if 0
543static int vega12_save_default_power_profile(struct pp_hwmgr *hwmgr)
544{
545 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
546 struct vega12_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
547 uint32_t min_level;
548
549 hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
550 hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
551
552 /* Optimize compute power profile: Use only highest
553 * 2 power levels (if more than 2 are available)
554 */
555 if (dpm_table->count > 2)
556 min_level = dpm_table->count - 2;
557 else if (dpm_table->count == 2)
558 min_level = 1;
559 else
560 min_level = 0;
561
562 hwmgr->default_compute_power_profile.min_sclk =
563 dpm_table->dpm_levels[min_level].value;
564
565 hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
566 hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
567
568 return 0;
569}
570#endif
571
 572/**
 573 * Initializes the SMC table and uploads it to the SMU.
 574 *
 575 * @param hwmgr the address of the powerplay hardware manager.
 576 *
 577 * @return 0 on success; otherwise the error code of the failing step.
 578 */
579static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
580{
581 int result;
582 struct vega12_hwmgr *data =
583 (struct vega12_hwmgr *)(hwmgr->backend);
584 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
585 struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
586 struct phm_ppt_v3_information *pptable_information =
587 (struct phm_ppt_v3_information *)hwmgr->pptable;
588
589 result = vega12_setup_default_dpm_tables(hwmgr);
590 PP_ASSERT_WITH_CODE(!result,
591 "Failed to setup default DPM tables!",
592 return result);
593
594 result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
595 if (!result) {
596 data->vbios_boot_state.vddc = boot_up_values.usVddc;
597 data->vbios_boot_state.vddci = boot_up_values.usVddci;
598 data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
599 data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
600 data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
601 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
602 data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
603 data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
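		/*
		 * When the VBIOS reports a boot VDDC, lock the SoC voltage
		 * floor to it (the message parameter is the boot VDDC value
		 * scaled by 4).
		 */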
604 if (0 != boot_up_values.usVddc) {
605 smum_send_msg_to_smc_with_parameter(hwmgr,
606 PPSMC_MSG_SetFloorSocVoltage,
607 (boot_up_values.usVddc * 4));
608 data->vbios_boot_state.bsoc_vddc_lock = true;
609 } else {
610 data->vbios_boot_state.bsoc_vddc_lock = false;
611 }
612 smum_send_msg_to_smc_with_parameter(hwmgr,
613 PPSMC_MSG_SetMinDeepSleepDcefclk,
614 (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
615 }
616
617 memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));
618
619 result = vega12_copy_table_to_smc(hwmgr,
620 (uint8_t *)pp_table, TABLE_PPTABLE);
621 PP_ASSERT_WITH_CODE(!result,
622 "Failed to upload PPtable!", return result);
623
624 return 0;
625}
626
627static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
628{
629 struct vega12_hwmgr *data =
630 (struct vega12_hwmgr *)(hwmgr->backend);
631 int i;
632 uint32_t allowed_features_low = 0, allowed_features_high = 0;
633
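	/*
	 * The SMC messages below take 32-bit arguments, so the 64-bit
	 * allowed-feature bitmap is split: features with ids above 31 go
	 * into the high word, the rest into the low word.
	 */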
634 for (i = 0; i < GNLD_FEATURES_MAX; i++)
635 if (data->smu_features[i].allowed)
636 data->smu_features[i].smu_feature_id > 31 ?
637 (allowed_features_high |= ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_HIGH_SHIFT) & 0xFFFFFFFF)) :
638 (allowed_features_low |= ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT) & 0xFFFFFFFF));
639
640 PP_ASSERT_WITH_CODE(
641 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high) == 0,
642 "[SetAllowedFeaturesMask] Attempt to set allowed features mask (high) failed!",
643 return -1);
644
645 PP_ASSERT_WITH_CODE(
646 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low) == 0,
647 "[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
648 return -1);
649
650 return 0;
651}
652
653static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
654{
655 struct vega12_hwmgr *data =
656 (struct vega12_hwmgr *)(hwmgr->backend);
657 uint64_t features_enabled;
658 int i;
659 bool enabled;
660
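	/*
	 * Ask the SMU to enable all features, then read back the enabled
	 * feature mask and record per-feature enabled/supported state;
	 * warn if a feature that was allowed did not come up.
	 */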
661 PP_ASSERT_WITH_CODE(
662 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAllSmuFeatures) == 0,
663 "[EnableAllSMUFeatures] Failed to enable all smu features!",
664 return -1);
665
666 if (vega12_get_enabled_smc_features(hwmgr, &features_enabled) == 0) {
667 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
668 enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
669 data->smu_features[i].enabled = enabled;
670 data->smu_features[i].supported = enabled;
671 PP_ASSERT(
672 !data->smu_features[i].allowed || enabled,
673 "[EnableAllSMUFeatures] Enabled feature is different from allowed, expected disabled!");
674 }
675 }
676
677 return 0;
678}
679
680static int vega12_disable_all_smu_features(struct pp_hwmgr *hwmgr)
681{
682 struct vega12_hwmgr *data =
683 (struct vega12_hwmgr *)(hwmgr->backend);
684 uint64_t features_enabled;
685 int i;
686 bool enabled;
687
688 PP_ASSERT_WITH_CODE(
689 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures) == 0,
690 "[DisableAllSMUFeatures] Failed to disable all smu features!",
691 return -1);
692
693 if (vega12_get_enabled_smc_features(hwmgr, &features_enabled) == 0) {
694 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
695 enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
696 data->smu_features[i].enabled = enabled;
697 data->smu_features[i].supported = enabled;
698 }
699 }
700
701 return 0;
702}
703
704static int vega12_odn_initialize_default_settings(
705 struct pp_hwmgr *hwmgr)
706{
707 return 0;
708}
709
710static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
711{
712 int tmp_result, result = 0;
713
714 smum_send_msg_to_smc_with_parameter(hwmgr,
715 PPSMC_MSG_NumOfDisplays, 0);
716
717 result = vega12_set_allowed_featuresmask(hwmgr);
718 PP_ASSERT_WITH_CODE(result == 0,
719 "[EnableDPMTasks] Failed to set allowed featuresmask!\n",
720 return result);
721
722 tmp_result = vega12_init_smc_table(hwmgr);
723 PP_ASSERT_WITH_CODE(!tmp_result,
724 "Failed to initialize SMC table!",
725 result = tmp_result);
726
727 result = vega12_enable_all_smu_features(hwmgr);
728 PP_ASSERT_WITH_CODE(!result,
729 "Failed to enable all smu features!",
730 return result);
731
732 tmp_result = vega12_power_control_set_level(hwmgr);
733 PP_ASSERT_WITH_CODE(!tmp_result,
734 "Failed to power control set level!",
735 result = tmp_result);
736
737 result = vega12_odn_initialize_default_settings(hwmgr);
738 PP_ASSERT_WITH_CODE(!result,
739 "Failed to power control set level!",
740 return result);
741
742 return result;
743}
744
745static int vega12_get_power_state_size(struct pp_hwmgr *hwmgr)
746{
747 return sizeof(struct vega12_power_state);
748}
749
750static int vega12_get_number_of_pp_table_entries(struct pp_hwmgr *hwmgr)
751{
752 return 0;
753}
754
755static int vega12_patch_boot_state(struct pp_hwmgr *hwmgr,
756 struct pp_hw_power_state *hw_ps)
757{
758 return 0;
759}
760
761static int vega12_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
762 struct pp_power_state *request_ps,
763 const struct pp_power_state *current_ps)
764{
765 struct vega12_power_state *vega12_ps =
766 cast_phw_vega12_power_state(&request_ps->hardware);
767 uint32_t sclk;
768 uint32_t mclk;
769 struct PP_Clocks minimum_clocks = {0};
770 bool disable_mclk_switching;
771 bool disable_mclk_switching_for_frame_lock;
772 bool disable_mclk_switching_for_vr;
773 bool force_mclk_high;
774 struct cgs_display_info info = {0};
775 const struct phm_clock_and_voltage_limits *max_limits;
776 uint32_t i;
777 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
778 struct phm_ppt_v2_information *table_info =
779 (struct phm_ppt_v2_information *)(hwmgr->pptable);
780 int32_t count;
781 uint32_t stable_pstate_sclk_dpm_percentage;
782 uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
783 uint32_t latency;
784
785 data->battery_state = (PP_StateUILabel_Battery ==
786 request_ps->classification.ui_label);
787
788 if (vega12_ps->performance_level_count != 2)
789 pr_info("VI should always have 2 performance levels");
790
791 max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
792 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
793 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
794
795 /* Cap clock DPM tables at DC MAX if it is in DC. */
796 if (PP_PowerSource_DC == hwmgr->power_source) {
797 for (i = 0; i < vega12_ps->performance_level_count; i++) {
798 if (vega12_ps->performance_levels[i].mem_clock >
799 max_limits->mclk)
800 vega12_ps->performance_levels[i].mem_clock =
801 max_limits->mclk;
802 if (vega12_ps->performance_levels[i].gfx_clock >
803 max_limits->sclk)
804 vega12_ps->performance_levels[i].gfx_clock =
805 max_limits->sclk;
806 }
807 }
808
809 cgs_get_active_displays_info(hwmgr->device, &info);
810
811 /* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
812 minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
813 minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
814
815 if (PP_CAP(PHM_PlatformCaps_StablePState)) {
816 PP_ASSERT_WITH_CODE(
817 data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 &&
818 data->registry_data.stable_pstate_sclk_dpm_percentage <= 100,
819 "percent sclk value must range from 1% to 100%, setting default value",
820 stable_pstate_sclk_dpm_percentage = 75);
821
822 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
823 stable_pstate_sclk = (max_limits->sclk *
824 stable_pstate_sclk_dpm_percentage) / 100;
825
826 for (count = table_info->vdd_dep_on_sclk->count - 1;
827 count >= 0; count--) {
828 if (stable_pstate_sclk >=
829 table_info->vdd_dep_on_sclk->entries[count].clk) {
830 stable_pstate_sclk =
831 table_info->vdd_dep_on_sclk->entries[count].clk;
832 break;
833 }
834 }
835
836 if (count < 0)
837 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
838
839 stable_pstate_mclk = max_limits->mclk;
840
841 minimum_clocks.engineClock = stable_pstate_sclk;
842 minimum_clocks.memoryClock = stable_pstate_mclk;
843 }
844
845 disable_mclk_switching_for_frame_lock = phm_cap_enabled(
846 hwmgr->platform_descriptor.platformCaps,
847 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
848 disable_mclk_switching_for_vr = PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
849 force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);
850
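	/*
	 * MCLK switching is only left enabled with at most one active
	 * display and when neither frame lock, VR nor a forced-high MCLK
	 * request forbids it.
	 */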
851 if (info.display_count == 0)
852 disable_mclk_switching = false;
853 else
854 disable_mclk_switching = (info.display_count > 1) ||
855 disable_mclk_switching_for_frame_lock ||
856 disable_mclk_switching_for_vr ||
857 force_mclk_high;
858
859 sclk = vega12_ps->performance_levels[0].gfx_clock;
860 mclk = vega12_ps->performance_levels[0].mem_clock;
861
862 if (sclk < minimum_clocks.engineClock)
863 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
864 max_limits->sclk : minimum_clocks.engineClock;
865
866 if (mclk < minimum_clocks.memoryClock)
867 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
868 max_limits->mclk : minimum_clocks.memoryClock;
869
870 vega12_ps->performance_levels[0].gfx_clock = sclk;
871 vega12_ps->performance_levels[0].mem_clock = mclk;
872
873 if (vega12_ps->performance_levels[1].gfx_clock <
874 vega12_ps->performance_levels[0].gfx_clock)
875 vega12_ps->performance_levels[0].gfx_clock =
876 vega12_ps->performance_levels[1].gfx_clock;
877
878 if (disable_mclk_switching) {
879 /* Set Mclk the max of level 0 and level 1 */
880 if (mclk < vega12_ps->performance_levels[1].mem_clock)
881 mclk = vega12_ps->performance_levels[1].mem_clock;
882 /* Find the lowest MCLK frequency that is within
883 * the tolerable latency defined in DAL
884 */
885 latency = 0;
886 for (i = 0; i < data->mclk_latency_table.count; i++) {
887 if ((data->mclk_latency_table.entries[i].latency <= latency) &&
888 (data->mclk_latency_table.entries[i].frequency >=
889 vega12_ps->performance_levels[0].mem_clock) &&
890 (data->mclk_latency_table.entries[i].frequency <=
891 vega12_ps->performance_levels[1].mem_clock))
892 mclk = data->mclk_latency_table.entries[i].frequency;
893 }
894 vega12_ps->performance_levels[0].mem_clock = mclk;
895 } else {
896 if (vega12_ps->performance_levels[1].mem_clock <
897 vega12_ps->performance_levels[0].mem_clock)
898 vega12_ps->performance_levels[0].mem_clock =
899 vega12_ps->performance_levels[1].mem_clock;
900 }
901
902 if (PP_CAP(PHM_PlatformCaps_StablePState)) {
903 for (i = 0; i < vega12_ps->performance_level_count; i++) {
904 vega12_ps->performance_levels[i].gfx_clock = stable_pstate_sclk;
905 vega12_ps->performance_levels[i].mem_clock = stable_pstate_mclk;
906 }
907 }
908
909 return 0;
910}
911
912static int vega12_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
913{
914 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
915 struct PP_Clocks min_clocks = {0};
916 struct cgs_display_info info = {0};
917
918 data->need_update_dpm_table = 0;
919
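	/*
	 * Request an SCLK table update when the deep-sleep minimum engine
	 * clock asked for by the display configuration has changed, and an
	 * MCLK update when the number of active displays has changed.
	 */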
920 min_clocks.engineClockInSR = hwmgr->display_config.min_core_set_clock_in_sr;
921 if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
922 (min_clocks.engineClockInSR >= VEGA12_MINIMUM_ENGINE_CLOCK ||
923 data->display_timing.min_clock_in_sr >= VEGA12_MINIMUM_ENGINE_CLOCK))
924 data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
925
926 cgs_get_active_displays_info(hwmgr->device, &info);
927 if (data->display_timing.num_existing_displays != info.display_count)
928 data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
929
930 return 0;
931}
932
933static int vega12_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
934 struct vega12_single_dpm_table *dpm_table,
935 uint32_t low_limit, uint32_t high_limit)
936{
937 uint32_t i;
938
939 for (i = 0; i < dpm_table->count; i++) {
940 if ((dpm_table->dpm_levels[i].value < low_limit) ||
941 (dpm_table->dpm_levels[i].value > high_limit))
942 dpm_table->dpm_levels[i].enabled = false;
943 else
944 dpm_table->dpm_levels[i].enabled = true;
945 }
946 return 0;
947}
948
949static int vega12_trim_single_dpm_states_with_mask(struct pp_hwmgr *hwmgr,
950 struct vega12_single_dpm_table *dpm_table,
951 uint32_t low_limit, uint32_t high_limit,
952 uint32_t disable_dpm_mask)
953{
954 uint32_t i;
955
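	/*
	 * Despite its name, disable_dpm_mask acts as an enable mask here:
	 * a level inside [low_limit, high_limit] stays enabled only if its
	 * bit is set, unless the range collapses to a single value.
	 */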
956 for (i = 0; i < dpm_table->count; i++) {
957 if ((dpm_table->dpm_levels[i].value < low_limit) ||
958 (dpm_table->dpm_levels[i].value > high_limit))
959 dpm_table->dpm_levels[i].enabled = false;
960 else if ((!((1 << i) & disable_dpm_mask)) &&
961 !(low_limit == high_limit))
962 dpm_table->dpm_levels[i].enabled = false;
963 else
964 dpm_table->dpm_levels[i].enabled = true;
965 }
966 return 0;
967}
968
969static int vega12_trim_dpm_states(struct pp_hwmgr *hwmgr,
970 const struct vega12_power_state *vega12_ps)
971{
972 struct vega12_hwmgr *data =
973 (struct vega12_hwmgr *)(hwmgr->backend);
974 uint32_t high_limit_count;
975
976 PP_ASSERT_WITH_CODE((vega12_ps->performance_level_count >= 1),
977 "power state did not have any performance level",
978 return -1);
979
980 high_limit_count = (vega12_ps->performance_level_count == 1) ? 0 : 1;
981
982 vega12_trim_single_dpm_states(hwmgr,
983 &(data->dpm_table.soc_table),
984 vega12_ps->performance_levels[0].soc_clock,
985 vega12_ps->performance_levels[high_limit_count].soc_clock);
986
987 vega12_trim_single_dpm_states_with_mask(hwmgr,
988 &(data->dpm_table.gfx_table),
989 vega12_ps->performance_levels[0].gfx_clock,
990 vega12_ps->performance_levels[high_limit_count].gfx_clock,
991 data->disable_dpm_mask);
992
993 vega12_trim_single_dpm_states(hwmgr,
994 &(data->dpm_table.mem_table),
995 vega12_ps->performance_levels[0].mem_clock,
996 vega12_ps->performance_levels[high_limit_count].mem_clock);
997
998 return 0;
999}
1000
1001static uint32_t vega12_find_lowest_dpm_level(
1002 struct vega12_single_dpm_table *table)
1003{
1004 uint32_t i;
1005
1006 for (i = 0; i < table->count; i++) {
1007 if (table->dpm_levels[i].enabled)
1008 break;
1009 }
1010
1011 return i;
1012}
1013
1014static uint32_t vega12_find_highest_dpm_level(
1015 struct vega12_single_dpm_table *table)
1016{
1017 uint32_t i = 0;
1018
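	/*
	 * Scan from the top of the table down and return the highest
	 * enabled level; oversized tables are clamped to
	 * MAX_REGULAR_DPM_NUMBER - 1.
	 */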
1019 if (table->count <= MAX_REGULAR_DPM_NUMBER) {
1020 for (i = table->count; i > 0; i--) {
1021 if (table->dpm_levels[i - 1].enabled)
1022 return i - 1;
1023 }
1024 } else {
1025 pr_info("DPM Table Has Too Many Entries!");
1026 return MAX_REGULAR_DPM_NUMBER - 1;
1027 }
1028
1029 return i;
1030}
1031
1032static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
1033{
1034 return 0;
1035}
1036
1037static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
1038{
1039 return 0;
1040}
1041
1042static int vega12_generate_dpm_level_enable_mask(
1043 struct pp_hwmgr *hwmgr, const void *input)
1044{
1045 struct vega12_hwmgr *data =
1046 (struct vega12_hwmgr *)(hwmgr->backend);
1047 const struct phm_set_power_state_input *states =
1048 (const struct phm_set_power_state_input *)input;
1049 const struct vega12_power_state *vega12_ps =
1050 cast_const_phw_vega12_power_state(states->pnew_state);
1051 int i;
1052
1053 PP_ASSERT_WITH_CODE(!vega12_trim_dpm_states(hwmgr, vega12_ps),
1054 "Attempt to Trim DPM States Failed!",
1055 return -1);
1056
1057 data->smc_state_table.gfx_boot_level =
1058 vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
1059 data->smc_state_table.gfx_max_level =
1060 vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
1061 data->smc_state_table.mem_boot_level =
1062 vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
1063 data->smc_state_table.mem_max_level =
1064 vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
1065
1066 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
1067 "Attempt to upload DPM Bootup Levels Failed!",
1068 return -1);
1069 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
1070 "Attempt to upload DPM Max Levels Failed!",
1071 return -1);
1072 for (i = data->smc_state_table.gfx_boot_level; i < data->smc_state_table.gfx_max_level; i++)
1073 data->dpm_table.gfx_table.dpm_levels[i].enabled = true;
1074
1075
1076 for (i = data->smc_state_table.mem_boot_level; i < data->smc_state_table.mem_max_level; i++)
1077 data->dpm_table.mem_table.dpm_levels[i].enabled = true;
1078
1079 return 0;
1080}
1081
1082int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
1083{
1084 struct vega12_hwmgr *data =
1085 (struct vega12_hwmgr *)(hwmgr->backend);
1086
1087 if (data->smu_features[GNLD_DPM_VCE].supported) {
1088 PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
1089 enable,
1090 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
1091 "Attempt to Enable/Disable DPM VCE Failed!",
1092 return -1);
1093 data->smu_features[GNLD_DPM_VCE].enabled = enable;
1094 }
1095
1096 return 0;
1097}
1098
1099static int vega12_update_sclk_threshold(struct pp_hwmgr *hwmgr)
1100{
1101 return 0;
1102}
1103
1104static int vega12_set_power_state_tasks(struct pp_hwmgr *hwmgr,
1105 const void *input)
1106{
1107 int tmp_result, result = 0;
1108 struct vega12_hwmgr *data =
1109 (struct vega12_hwmgr *)(hwmgr->backend);
1110 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1111
1112 tmp_result = vega12_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
1113 PP_ASSERT_WITH_CODE(!tmp_result,
1114 "Failed to find DPM states clocks in DPM table!",
1115 result = tmp_result);
1116
1117 tmp_result = vega12_generate_dpm_level_enable_mask(hwmgr, input);
1118 PP_ASSERT_WITH_CODE(!tmp_result,
1119 "Failed to generate DPM level enabled mask!",
1120 result = tmp_result);
1121
1122 tmp_result = vega12_update_sclk_threshold(hwmgr);
1123 PP_ASSERT_WITH_CODE(!tmp_result,
1124 "Failed to update SCLK threshold!",
1125 result = tmp_result);
1126
1127 result = vega12_copy_table_to_smc(hwmgr,
1128 (uint8_t *)pp_table, TABLE_PPTABLE);
1129 PP_ASSERT_WITH_CODE(!result,
1130 "Failed to upload PPtable!", return result);
1131
1132 data->apply_optimized_settings = false;
1133 data->apply_overdrive_next_settings_mask = 0;
1134
1135 return 0;
1136}
1137
1138static uint32_t vega12_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
1139{
1140 struct vega12_hwmgr *data =
1141 (struct vega12_hwmgr *)(hwmgr->backend);
1142 uint32_t gfx_clk;
1143
1144 if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
1145 return -1;
1146
1147 if (low)
1148 PP_ASSERT_WITH_CODE(
1149 vega12_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, false) == 0,
1150 "[GetSclks]: fail to get min PPCLK_GFXCLK\n",
1151 return -1);
1152 else
1153 PP_ASSERT_WITH_CODE(
1154 vega12_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, true) == 0,
1155 "[GetSclks]: fail to get max PPCLK_GFXCLK\n",
1156 return -1);
1157
1158 return (gfx_clk * 100);
1159}
1160
1161static uint32_t vega12_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
1162{
1163 struct vega12_hwmgr *data =
1164 (struct vega12_hwmgr *)(hwmgr->backend);
1165 uint32_t mem_clk;
1166
1167 if (!data->smu_features[GNLD_DPM_UCLK].enabled)
1168 return -1;
1169
1170 if (low)
1171 PP_ASSERT_WITH_CODE(
1172 vega12_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, false) == 0,
1173 "[GetMclks]: fail to get min PPCLK_UCLK\n",
1174 return -1);
1175 else
1176 PP_ASSERT_WITH_CODE(
1177 vega12_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, true) == 0,
1178 "[GetMclks]: fail to get max PPCLK_UCLK\n",
1179 return -1);
1180
1181 return (mem_clk * 100);
1182}
1183
1184static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr,
1185 struct pp_gpu_power *query)
1186{
1187#if 0
1188 uint32_t value;
1189
1190 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
1191 PPSMC_MSG_GetCurrPkgPwr),
1192 "Failed to get current package power!",
1193 return -EINVAL);
1194
1195 vega12_read_arg_from_smc(hwmgr, &value);
1196 /* power value is an integer */
1197 query->average_gpu_power = value << 8;
1198#endif
1199 return 0;
1200}
1201
1202static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx_freq)
1203{
1204 uint32_t gfx_clk = 0;
1205
1206 *gfx_freq = 0;
1207
1208 PP_ASSERT_WITH_CODE(
1209 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
1210 "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
1211 return -1);
1212 PP_ASSERT_WITH_CODE(
1213 vega12_read_arg_from_smc(hwmgr, &gfx_clk) == 0,
1214 "[GetCurrentGfxClkFreq] Attempt to read arg from SMC Failed",
1215 return -1);
1216
1217 *gfx_freq = gfx_clk * 100;
1218
1219 return 0;
1220}
1221
1222static int vega12_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_freq)
1223{
1224 uint32_t mem_clk = 0;
1225
1226 *mclk_freq = 0;
1227
1228 PP_ASSERT_WITH_CODE(
1229 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16)) == 0,
1230 "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
1231 return -1);
1232 PP_ASSERT_WITH_CODE(
1233 vega12_read_arg_from_smc(hwmgr, &mem_clk) == 0,
1234 "[GetCurrentMClkFreq] Attempt to read arg from SMC Failed",
1235 return -1);
1236
1237 *mclk_freq = mem_clk * 100;
1238
1239 return 0;
1240}
1241
1242static int vega12_get_current_activity_percent(
1243 struct pp_hwmgr *hwmgr,
1244 uint32_t *activity_percent)
1245{
1246 int ret = 0;
1247 uint32_t current_activity = 50;
1248
1249#if 0
1250 ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
1251 if (!ret) {
1252 ret = vega12_read_arg_from_smc(hwmgr, &current_activity);
1253 if (!ret) {
1254 if (current_activity > 100) {
1255 PP_ASSERT(false,
1256 "[GetCurrentActivityPercent] Activity Percentage Exceeds 100!");
1257 current_activity = 100;
1258 }
1259 } else
1260 PP_ASSERT(false,
1261 "[GetCurrentActivityPercent] Attempt To Read Average Graphics Activity from SMU Failed!");
1262 } else
1263 PP_ASSERT(false,
1264 "[GetCurrentActivityPercent] Attempt To Send Get Average Graphics Activity to SMU Failed!");
1265#endif
1266 *activity_percent = current_activity;
1267
1268 return ret;
1269}
1270
1271static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1272 void *value, int *size)
1273{
1274 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1275 int ret = 0;
1276
1277 switch (idx) {
1278 case AMDGPU_PP_SENSOR_GFX_SCLK:
1279 ret = vega12_get_current_gfx_clk_freq(hwmgr, (uint32_t *)value);
1280 if (!ret)
1281 *size = 4;
1282 break;
1283 case AMDGPU_PP_SENSOR_GFX_MCLK:
1284 ret = vega12_get_current_mclk_freq(hwmgr, (uint32_t *)value);
1285 if (!ret)
1286 *size = 4;
1287 break;
1288 case AMDGPU_PP_SENSOR_GPU_LOAD:
1289 ret = vega12_get_current_activity_percent(hwmgr, (uint32_t *)value);
1290 if (!ret)
1291 *size = 4;
1292 break;
1293 case AMDGPU_PP_SENSOR_GPU_TEMP:
1294 *((uint32_t *)value) = vega12_thermal_get_temperature(hwmgr);
1295 *size = 4;
1296 break;
1297 case AMDGPU_PP_SENSOR_UVD_POWER:
1298 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
1299 *size = 4;
1300 break;
1301 case AMDGPU_PP_SENSOR_VCE_POWER:
1302 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
1303 *size = 4;
1304 break;
1305 case AMDGPU_PP_SENSOR_GPU_POWER:
1306 if (*size < sizeof(struct pp_gpu_power))
1307 ret = -EINVAL;
1308 else {
1309 *size = sizeof(struct pp_gpu_power);
1310 ret = vega12_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
1311 }
1312 break;
1313 default:
1314 ret = -EINVAL;
1315 break;
1316 }
1317 return ret;
1318}
1319
1320static int vega12_notify_smc_display_change(struct pp_hwmgr *hwmgr,
1321 bool has_disp)
1322{
1323 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1324
1325 if (data->smu_features[GNLD_DPM_UCLK].enabled)
1326 return smum_send_msg_to_smc_with_parameter(hwmgr,
1327 PPSMC_MSG_SetUclkFastSwitch,
1328 has_disp ? 0 : 1);
1329
1330 return 0;
1331}
1332
1333int vega12_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
1334 struct pp_display_clock_request *clock_req)
1335{
1336 int result = 0;
1337 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1338 enum amd_pp_clock_type clk_type = clock_req->clock_type;
1339 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
1340 PPCLK_e clk_select = 0;
1341 uint32_t clk_request = 0;
1342
1343 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
1344 switch (clk_type) {
1345 case amd_pp_dcef_clock:
1346 clk_freq = clock_req->clock_freq_in_khz / 100;
1347 clk_select = PPCLK_DCEFCLK;
1348 break;
1349 case amd_pp_disp_clock:
1350 clk_select = PPCLK_DISPCLK;
1351 break;
1352 case amd_pp_pixel_clock:
1353 clk_select = PPCLK_PIXCLK;
1354 break;
1355 case amd_pp_phy_clock:
1356 clk_select = PPCLK_PHYCLK;
1357 break;
1358 default:
1359 pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
1360 result = -1;
1361 break;
1362 }
1363
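		/*
		 * The SetHardMinByFreq argument packs the clock id into the
		 * upper 16 bits and the requested frequency into the lower
		 * 16 bits.
		 */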
1364 if (!result) {
1365 clk_request = (clk_select << 16) | clk_freq;
1366 result = smum_send_msg_to_smc_with_parameter(hwmgr,
1367 PPSMC_MSG_SetHardMinByFreq,
1368 clk_request);
1369 }
1370 }
1371
1372 return result;
1373}
1374
1375static int vega12_notify_smc_display_config_after_ps_adjustment(
1376 struct pp_hwmgr *hwmgr)
1377{
1378 struct vega12_hwmgr *data =
1379 (struct vega12_hwmgr *)(hwmgr->backend);
1380 uint32_t num_active_disps = 0;
1381 struct cgs_display_info info = {0};
1382 struct PP_Clocks min_clocks = {0};
1383 struct pp_display_clock_request clock_req;
1384 uint32_t clk_request;
1385
1386 info.mode_info = NULL;
1387 cgs_get_active_displays_info(hwmgr->device, &info);
1388 num_active_disps = info.display_count;
1389 if (num_active_disps > 1)
1390 vega12_notify_smc_display_change(hwmgr, false);
1391 else
1392 vega12_notify_smc_display_change(hwmgr, true);
1393
1394 min_clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
1395 min_clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk;
1396 min_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
1397
1398 if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
1399 clock_req.clock_type = amd_pp_dcef_clock;
1400 clock_req.clock_freq_in_khz = min_clocks.dcefClock;
1401 if (!vega12_display_clock_voltage_request(hwmgr, &clock_req)) {
1402 if (data->smu_features[GNLD_DS_DCEFCLK].supported)
1403 PP_ASSERT_WITH_CODE(
1404 !smum_send_msg_to_smc_with_parameter(
1405 hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
1406 min_clocks.dcefClockInSR /100),
1407 "Attempt to set divider for DCEFCLK Failed!",
1408 return -1);
1409 } else {
1410 pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
1411 }
1412 }
1413
1414 if (data->smu_features[GNLD_DPM_UCLK].enabled) {
1415 clk_request = (PPCLK_UCLK << 16) | (min_clocks.memoryClock) / 100;
1416 PP_ASSERT_WITH_CODE(
1417 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinByFreq, clk_request) == 0,
1418 "[PhwVega12_NotifySMCDisplayConfigAfterPowerStateAdjustment] Attempt to set UCLK HardMin Failed!",
1419 return -1);
1420 data->dpm_table.mem_table.dpm_state.hard_min_level = min_clocks.memoryClock;
1421 }
1422
1423 return 0;
1424}
1425
1426static int vega12_force_dpm_highest(struct pp_hwmgr *hwmgr)
1427{
1428 struct vega12_hwmgr *data =
1429 (struct vega12_hwmgr *)(hwmgr->backend);
1430
1431 data->smc_state_table.gfx_boot_level =
1432 data->smc_state_table.gfx_max_level =
1433 vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
1434 data->smc_state_table.mem_boot_level =
1435 data->smc_state_table.mem_max_level =
1436 vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
1437
1438 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
1439 "Failed to upload boot level to highest!",
1440 return -1);
1441
1442 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
1443 "Failed to upload dpm max level to highest!",
1444 return -1);
1445
1446 return 0;
1447}
1448
1449static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
1450{
1451 struct vega12_hwmgr *data =
1452 (struct vega12_hwmgr *)(hwmgr->backend);
1453
1454 data->smc_state_table.gfx_boot_level =
1455 data->smc_state_table.gfx_max_level =
1456 vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
1457 data->smc_state_table.mem_boot_level =
1458 data->smc_state_table.mem_max_level =
1459 vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
1460
1461 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
1462 "Failed to upload boot level to highest!",
1463 return -1);
1464
1465 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
1466 "Failed to upload dpm max level to highest!",
1467 return -1);
1468
1469 return 0;
1470
1471}
1472
1473static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
1474{
1475 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1476
1477 data->smc_state_table.gfx_boot_level =
1478 vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
1479 data->smc_state_table.gfx_max_level =
1480 vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
1481 data->smc_state_table.mem_boot_level =
1482 vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
1483 data->smc_state_table.mem_max_level =
1484 vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
1485
1486 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
1487 "Failed to upload DPM Bootup Levels!",
1488 return -1);
1489
1490 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
1491 "Failed to upload DPM Max Levels!",
1492 return -1);
1493 return 0;
1494}
1495
1496#if 0
1497static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
1498 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
1499{
1500 struct phm_ppt_v2_information *table_info =
1501 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1502
1503 if (table_info->vdd_dep_on_sclk->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
1504 table_info->vdd_dep_on_socclk->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL &&
1505 table_info->vdd_dep_on_mclk->count > VEGA12_UMD_PSTATE_MCLK_LEVEL) {
1506 *sclk_mask = VEGA12_UMD_PSTATE_GFXCLK_LEVEL;
1507 *soc_mask = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
1508 *mclk_mask = VEGA12_UMD_PSTATE_MCLK_LEVEL;
1509 }
1510
1511 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
1512 *sclk_mask = 0;
1513 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
1514 *mclk_mask = 0;
1515 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
1516 *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
1517 *soc_mask = table_info->vdd_dep_on_socclk->count - 1;
1518 *mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
1519 }
1520 return 0;
1521}
1522#endif
1523
1524static void vega12_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
1525{
1526 switch (mode) {
1527 case AMD_FAN_CTRL_NONE:
1528 break;
1529 case AMD_FAN_CTRL_MANUAL:
1530 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
1531 vega12_fan_ctrl_stop_smc_fan_control(hwmgr);
1532 break;
1533 case AMD_FAN_CTRL_AUTO:
1534 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
1535 vega12_fan_ctrl_start_smc_fan_control(hwmgr);
1536 break;
1537 default:
1538 break;
1539 }
1540}
1541
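/* Dispatch a forced-performance-level request from the power layer. Only
 * HIGH, LOW and AUTO are handled for now; the profiling levels are stubbed
 * out together with vega12_get_profiling_clk_mask() above.
 */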
1542static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
1543 enum amd_dpm_forced_level level)
1544{
1545 int ret = 0;
1546#if 0
1547 uint32_t sclk_mask = 0;
1548 uint32_t mclk_mask = 0;
1549 uint32_t soc_mask = 0;
1550#endif
1551
1552 switch (level) {
1553 case AMD_DPM_FORCED_LEVEL_HIGH:
1554 ret = vega12_force_dpm_highest(hwmgr);
1555 break;
1556 case AMD_DPM_FORCED_LEVEL_LOW:
1557 ret = vega12_force_dpm_lowest(hwmgr);
1558 break;
1559 case AMD_DPM_FORCED_LEVEL_AUTO:
1560 ret = vega12_unforce_dpm_levels(hwmgr);
1561 break;
1562 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1563 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1564 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1565 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1566#if 0
1567 ret = vega12_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
1568 if (ret)
1569 return ret;
1570 vega12_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
1571 vega12_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
1572#endif
1573 break;
1574 case AMD_DPM_FORCED_LEVEL_MANUAL:
1575 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1576 default:
1577 break;
1578 }
1579#if 0
1580 if (!ret) {
1581 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
1582 vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
1583 else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
1584 vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
1585 }
1586#endif
1587 return ret;
1588}
1589
1590static uint32_t vega12_get_fan_control_mode(struct pp_hwmgr *hwmgr)
1591{
1592 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1593
1594 	if (!data->smu_features[GNLD_FAN_CONTROL].enabled)
1595 return AMD_FAN_CTRL_MANUAL;
1596 else
1597 return AMD_FAN_CTRL_AUTO;
1598}
1599
1600static int vega12_get_dal_power_level(struct pp_hwmgr *hwmgr,
1601 struct amd_pp_simple_clock_info *info)
1602{
1603#if 0
1604 struct phm_ppt_v2_information *table_info =
1605 (struct phm_ppt_v2_information *)hwmgr->pptable;
1606 struct phm_clock_and_voltage_limits *max_limits =
1607 &table_info->max_clock_voltage_on_ac;
1608
1609 info->engine_max_clock = max_limits->sclk;
1610 info->memory_max_clock = max_limits->mclk;
1611#endif
1612 return 0;
1613}
1614
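/* Ask the SMC for either the maximum or the minimum supported frequency of
 * the given clock domain (PPSMC_MSG_GetMaxDpmFreq / PPSMC_MSG_GetMinDpmFreq)
 * and read the result back from the SMC argument register.
 */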
1615static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr,
1616 uint32_t *clock,
1617 PPCLK_e clock_select,
1618 bool max)
1619{
1620 int result;
1621 *clock = 0;
1622
1623 if (max) {
1624 PP_ASSERT_WITH_CODE(
1625 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16)) == 0,
1626 "[GetClockRanges] Failed to get max clock from SMC!",
1627 return -1);
1628 result = vega12_read_arg_from_smc(hwmgr, clock);
1629 } else {
1630 PP_ASSERT_WITH_CODE(
1631 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clock_select << 16)) == 0,
1632 "[GetClockRanges] Failed to get min clock from SMC!",
1633 return -1);
1634 result = vega12_read_arg_from_smc(hwmgr, clock);
1635 }
1636
1637 return result;
1638}
1639
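/* The SMC only reports a min/max range for GFXCLK, so synthesize
 * VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS evenly spaced levels between the two
 * for reporting to the rest of the driver.
 */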
1640static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
1641 struct pp_clock_levels_with_latency *clocks)
1642{
1643 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1644 int i;
1645 uint32_t min, max, increments;
1646
1647 if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
1648 return -1;
1649
1650 PP_ASSERT_WITH_CODE(
1651 vega12_get_clock_ranges(hwmgr, &min, PPCLK_GFXCLK, false) == 0,
1652 "[GetSclks]: fail to get min PPCLK_GFXCLK\n",
1653 return -1);
1654 PP_ASSERT_WITH_CODE(
1655 vega12_get_clock_ranges(hwmgr, &max, PPCLK_GFXCLK, true) == 0,
1656 "[GetSclks]: fail to get max PPCLK_GFXCLK\n",
1657 return -1);
1658
1659 clocks->data[0].clocks_in_khz = min * 100;
1660 increments = (max - min) / (VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS - 1);
1661
1662 for (i = 1; i < (VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS - 1); i++) {
1663 if ((min + (increments * i)) != 0) {
1664 clocks->data[i].clocks_in_khz =
1665 (min + increments * i) * 100;
1666 clocks->data[i].latency_in_us = 0;
1667 }
1668 }
1669 clocks->data[i].clocks_in_khz = max * 100;
1670 clocks->num_levels = i + 1;
1671
1672 return 0;
1673}
1674
1675static uint32_t vega12_get_mem_latency(struct pp_hwmgr *hwmgr,
1676 uint32_t clock)
1677{
1678 return 25;
1679}
1680
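/* Same pseudo-level construction as vega12_get_sclks(), but for UCLK; the
 * per-level latency is also recorded in the mclk latency table (a fixed
 * 25 us from vega12_get_mem_latency()).
 */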
1681static int vega12_get_memclocks(struct pp_hwmgr *hwmgr,
1682 struct pp_clock_levels_with_latency *clocks)
1683{
1684 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1685 uint32_t min, max, increments;
1686 int i;
1687
1688 if (!data->smu_features[GNLD_DPM_UCLK].enabled)
1689 return -1;
1690
1691 PP_ASSERT_WITH_CODE(
1692 vega12_get_clock_ranges(hwmgr, &min, PPCLK_UCLK, false) == 0,
1693 "[GetMclks]: fail to get min PPCLK_UCLK\n",
1694 return -1);
1695 PP_ASSERT_WITH_CODE(
1696 vega12_get_clock_ranges(hwmgr, &max, PPCLK_UCLK, true) == 0,
1697 "[GetMclks]: fail to get max PPCLK_UCLK\n",
1698 return -1);
1699
1700 clocks->data[0].clocks_in_khz = min * 100;
1701 clocks->data[0].latency_in_us =
1702 data->mclk_latency_table.entries[0].latency =
1703 vega12_get_mem_latency(hwmgr, min);
1704
1705 increments = (max - min) / (VG12_PSUEDO_NUM_UCLK_DPM_LEVELS - 1);
1706
1707 for (i = 1; i < (VG12_PSUEDO_NUM_UCLK_DPM_LEVELS - 1); i++) {
1708 if ((min + (increments * i)) != 0) {
1709 clocks->data[i].clocks_in_khz =
1710 (min + (increments * i)) * 100;
1711 clocks->data[i].latency_in_us =
1712 data->mclk_latency_table.entries[i].latency =
1713 vega12_get_mem_latency(hwmgr, min + increments * i);
1714 }
1715 }
1716
1717 clocks->data[i].clocks_in_khz = max * 100;
1718 clocks->data[i].latency_in_us =
1719 data->mclk_latency_table.entries[i].latency =
1720 vega12_get_mem_latency(hwmgr, max);
1721
1722 clocks->num_levels = data->mclk_latency_table.count = i + 1;
1723
1724 return 0;
1725}
1726
1727static int vega12_get_dcefclocks(struct pp_hwmgr *hwmgr,
1728 struct pp_clock_levels_with_latency *clocks)
1729{
1730 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1731 int i;
1732 uint32_t min, max, increments;
1733
1734 if (!data->smu_features[GNLD_DPM_DCEFCLK].enabled)
1735 return -1;
1736
1737 PP_ASSERT_WITH_CODE(
1738 vega12_get_clock_ranges(hwmgr, &min, PPCLK_DCEFCLK, false) == 0,
1739 "[GetDcfclocks]: fail to get min PPCLK_DCEFCLK\n",
1740 return -1);
1741 PP_ASSERT_WITH_CODE(
1742 vega12_get_clock_ranges(hwmgr, &max, PPCLK_DCEFCLK, true) == 0,
1743 "[GetDcfclocks]: fail to get max PPCLK_DCEFCLK\n",
1744 return -1);
1745
1746 clocks->data[0].clocks_in_khz = min * 100;
1747 increments = (max - min) / (VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS - 1);
1748
1749 for (i = 1; i < (VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS - 1); i++) {
1750 if ((min + (increments * i)) != 0) {
1751 clocks->data[i].clocks_in_khz =
1752 (min + increments * i) * 100;
1753 clocks->data[i].latency_in_us = 0;
1754 }
1755 }
1756 clocks->data[i].clocks_in_khz = max * 100;
1757 clocks->num_levels = i + 1;
1758
1759 return 0;
1760}
1761
1762static int vega12_get_socclocks(struct pp_hwmgr *hwmgr,
1763 struct pp_clock_levels_with_latency *clocks)
1764{
1765 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1766 int i;
1767 uint32_t min, max, increments;
1768
1769 if (!data->smu_features[GNLD_DPM_SOCCLK].enabled)
1770 return -1;
1771
1772 PP_ASSERT_WITH_CODE(
1773 vega12_get_clock_ranges(hwmgr, &min, PPCLK_SOCCLK, false) == 0,
1774 "[GetSocclks]: fail to get min PPCLK_SOCCLK\n",
1775 return -1);
1776 PP_ASSERT_WITH_CODE(
1777 vega12_get_clock_ranges(hwmgr, &max, PPCLK_SOCCLK, true) == 0,
1778 "[GetSocclks]: fail to get max PPCLK_SOCCLK\n",
1779 return -1);
1780
1781 clocks->data[0].clocks_in_khz = min * 100;
1782 increments = (max - min) / (VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS - 1);
1783
1784 for (i = 1; i < (VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS - 1); i++) {
1785 if ((min + (increments * i)) != 0) {
1786 clocks->data[i].clocks_in_khz =
1787 (min + increments * i) * 100;
1788 clocks->data[i].latency_in_us = 0;
1789 }
1790 }
1791
1792 clocks->data[i].clocks_in_khz = max * 100;
1793 clocks->num_levels = i + 1;
1794
1795 return 0;
1796
1797}
1798
1799static int vega12_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
1800 enum amd_pp_clock_type type,
1801 struct pp_clock_levels_with_latency *clocks)
1802{
1803 int ret;
1804
1805 switch (type) {
1806 case amd_pp_sys_clock:
1807 ret = vega12_get_sclks(hwmgr, clocks);
1808 break;
1809 case amd_pp_mem_clock:
1810 ret = vega12_get_memclocks(hwmgr, clocks);
1811 break;
1812 case amd_pp_dcef_clock:
1813 ret = vega12_get_dcefclocks(hwmgr, clocks);
1814 break;
1815 case amd_pp_soc_clock:
1816 ret = vega12_get_socclocks(hwmgr, clocks);
1817 break;
1818 default:
1819 return -EINVAL;
1820 }
1821
1822 return ret;
1823}
1824
1825static int vega12_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
1826 enum amd_pp_clock_type type,
1827 struct pp_clock_levels_with_voltage *clocks)
1828{
1829 clocks->num_levels = 0;
1830
1831 return 0;
1832}
1833
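/* Translate the DMIF and MCIF watermark clock ranges handed down by the
 * display code (in kHz) into the SMU Watermarks_t table (values stored
 * divided by 100), then mark the table as needing a re-upload.
 */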
1834static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
1835 struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
1836{
1837 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1838 Watermarks_t *table = &(data->smc_state_table.water_marks_table);
1839 int result = 0;
1840 uint32_t i;
1841
1842 if (!data->registry_data.disable_water_mark &&
1843 data->smu_features[GNLD_DPM_DCEFCLK].supported &&
1844 data->smu_features[GNLD_DPM_SOCCLK].supported) {
1845 for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
1846 table->WatermarkRow[WM_DCEFCLK][i].MinClock =
1847 cpu_to_le16((uint16_t)
1848 (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
1849 100);
1850 table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
1851 cpu_to_le16((uint16_t)
1852 (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
1853 100);
1854 table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
1855 cpu_to_le16((uint16_t)
1856 (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
1857 100);
1858 table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
1859 cpu_to_le16((uint16_t)
1860 (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
1861 100);
1862 table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t)
1863 wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
1864 }
1865
1866 for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
1867 table->WatermarkRow[WM_SOCCLK][i].MinClock =
1868 cpu_to_le16((uint16_t)
1869 (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
1870 100);
1871 table->WatermarkRow[WM_SOCCLK][i].MaxClock =
1872 cpu_to_le16((uint16_t)
1873 (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
1874 100);
1875 table->WatermarkRow[WM_SOCCLK][i].MinUclk =
1876 cpu_to_le16((uint16_t)
1877 (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
1878 100);
1879 table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
1880 cpu_to_le16((uint16_t)
1881 (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
1882 100);
1883 table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
1884 wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
1885 }
1886 data->water_marks_bitmap |= WaterMarksExist;
1887 data->water_marks_bitmap &= ~WaterMarksLoaded;
1888 }
1889
1890 return result;
1891}
1892
1893static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
1894 enum pp_clock_type type, uint32_t mask)
1895{
1896 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1897
1898 if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
1899 AMD_DPM_FORCED_LEVEL_LOW |
1900 AMD_DPM_FORCED_LEVEL_HIGH))
1901 return -EINVAL;
1902
1903 switch (type) {
1904 case PP_SCLK:
1905 data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
1906 data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
1907
1908 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
1909 "Failed to upload boot level to lowest!",
1910 return -EINVAL);
1911
1912 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
1913 "Failed to upload dpm max level to highest!",
1914 return -EINVAL);
1915 break;
1916
1917 case PP_MCLK:
1918 data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
1919 data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
1920
1921 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
1922 "Failed to upload boot level to lowest!",
1923 return -EINVAL);
1924
1925 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
1926 "Failed to upload dpm max level to highest!",
1927 return -EINVAL);
1928
1929 break;
1930
1931 case PP_PCIE:
1932 break;
1933
1934 default:
1935 break;
1936 }
1937
1938 return 0;
1939}
1940
1941static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
1942 enum pp_clock_type type, char *buf)
1943{
1944 int i, now, size = 0;
1945 struct pp_clock_levels_with_latency clocks;
1946
1947 switch (type) {
1948 case PP_SCLK:
1949 PP_ASSERT_WITH_CODE(
1950 vega12_get_current_gfx_clk_freq(hwmgr, &now) == 0,
1951 "Attempt to get current gfx clk Failed!",
1952 return -1);
1953
1954 PP_ASSERT_WITH_CODE(
1955 vega12_get_sclks(hwmgr, &clocks) == 0,
1956 "Attempt to get gfx clk levels Failed!",
1957 return -1);
1958 for (i = 0; i < clocks.num_levels; i++)
1959 			size += sprintf(buf + size, "%d: %uMHz %s\n",
1960 i, clocks.data[i].clocks_in_khz / 100,
1961 (clocks.data[i].clocks_in_khz == now) ? "*" : "");
1962 break;
1963
1964 case PP_MCLK:
1965 PP_ASSERT_WITH_CODE(
1966 vega12_get_current_mclk_freq(hwmgr, &now) == 0,
1967 "Attempt to get current mclk freq Failed!",
1968 return -1);
1969
1970 PP_ASSERT_WITH_CODE(
1971 vega12_get_memclocks(hwmgr, &clocks) == 0,
1972 "Attempt to get memory clk levels Failed!",
1973 return -1);
1974 for (i = 0; i < clocks.num_levels; i++)
1975 			size += sprintf(buf + size, "%d: %uMHz %s\n",
1976 i, clocks.data[i].clocks_in_khz / 100,
1977 (clocks.data[i].clocks_in_khz == now) ? "*" : "");
1978 break;
1979
1980 case PP_PCIE:
1981 break;
1982
1983 default:
1984 break;
1985 }
1986 return size;
1987}
1988
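/* Upload the watermark table to the SMU if it has changed since the last
 * upload, and tell the SMU how many displays are currently active.
 */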
1989static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
1990{
1991 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1992 int result = 0;
1993 uint32_t num_turned_on_displays = 1;
1994 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
1995 struct cgs_display_info info = {0};
1996
1997 if ((data->water_marks_bitmap & WaterMarksExist) &&
1998 !(data->water_marks_bitmap & WaterMarksLoaded)) {
1999 result = vega12_copy_table_to_smc(hwmgr,
2000 (uint8_t *)wm_table, TABLE_WATERMARKS);
2001 		PP_ASSERT_WITH_CODE(!result, "Failed to update WMTABLE!", return -EINVAL);
2002 data->water_marks_bitmap |= WaterMarksLoaded;
2003 }
2004
2005 if ((data->water_marks_bitmap & WaterMarksExist) &&
2006 data->smu_features[GNLD_DPM_DCEFCLK].supported &&
2007 data->smu_features[GNLD_DPM_SOCCLK].supported) {
2008 cgs_get_active_displays_info(hwmgr->device, &info);
2009 num_turned_on_displays = info.display_count;
2010 smum_send_msg_to_smc_with_parameter(hwmgr,
2011 PPSMC_MSG_NumOfDisplays, num_turned_on_displays);
2012 }
2013
2014 return result;
2015}
2016
2017int vega12_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
2018{
2019 struct vega12_hwmgr *data =
2020 (struct vega12_hwmgr *)(hwmgr->backend);
2021
2022 if (data->smu_features[GNLD_DPM_UVD].supported) {
2023 PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
2024 enable,
2025 data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
2026 "Attempt to Enable/Disable DPM UVD Failed!",
2027 return -1);
2028 data->smu_features[GNLD_DPM_UVD].enabled = enable;
2029 }
2030
2031 return 0;
2032}
2033
2034static void vega12_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
2035{
2036 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2037
2038 data->vce_power_gated = bgate;
2039 vega12_enable_disable_vce_dpm(hwmgr, !bgate);
2040}
2041
2042static void vega12_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
2043{
2044 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2045
2046 data->uvd_power_gated = bgate;
2047 vega12_enable_disable_uvd_dpm(hwmgr, !bgate);
2048}
2049
2050static inline bool vega12_are_power_levels_equal(
2051 const struct vega12_performance_level *pl1,
2052 const struct vega12_performance_level *pl2)
2053{
2054 return ((pl1->soc_clock == pl2->soc_clock) &&
2055 (pl1->gfx_clock == pl2->gfx_clock) &&
2056 (pl1->mem_clock == pl2->mem_clock));
2057}
2058
2059static int vega12_check_states_equal(struct pp_hwmgr *hwmgr,
2060 const struct pp_hw_power_state *pstate1,
2061 const struct pp_hw_power_state *pstate2, bool *equal)
2062{
2063 const struct vega12_power_state *psa;
2064 const struct vega12_power_state *psb;
2065 int i;
2066
2067 if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
2068 return -EINVAL;
2069
2070 psa = cast_const_phw_vega12_power_state(pstate1);
2071 psb = cast_const_phw_vega12_power_state(pstate2);
2072 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
2073 if (psa->performance_level_count != psb->performance_level_count) {
2074 *equal = false;
2075 return 0;
2076 }
2077
2078 for (i = 0; i < psa->performance_level_count; i++) {
2079 if (!vega12_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
2080 /* If we have found even one performance level pair that is different the states are different. */
2081 *equal = false;
2082 return 0;
2083 }
2084 }
2085
2086 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
2087 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
2088 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
2089 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
2090
2091 return 0;
2092}
2093
2094static bool
2095vega12_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
2096{
2097 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2098 bool is_update_required = false;
2099 struct cgs_display_info info = {0, 0, NULL};
2100
2101 cgs_get_active_displays_info(hwmgr->device, &info);
2102
2103 if (data->display_timing.num_existing_displays != info.display_count)
2104 is_update_required = true;
2105
2106 if (data->registry_data.gfx_clk_deep_sleep_support) {
2107 if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr)
2108 is_update_required = true;
2109 }
2110
2111 return is_update_required;
2112}
2113
2114static int vega12_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
2115{
2116 int tmp_result, result = 0;
2117
2118 tmp_result = vega12_disable_all_smu_features(hwmgr);
2119 PP_ASSERT_WITH_CODE((tmp_result == 0),
2120 "Failed to disable all smu features!", result = tmp_result);
2121
2122 return result;
2123}
2124
2125static int vega12_power_off_asic(struct pp_hwmgr *hwmgr)
2126{
2127 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2128 int result;
2129
2130 result = vega12_disable_dpm_tasks(hwmgr);
2131 PP_ASSERT_WITH_CODE((0 == result),
2132 "[disable_dpm_tasks] Failed to disable DPM!",
2133 );
2134 data->water_marks_bitmap &= ~(WaterMarksLoaded);
2135
2136 return result;
2137}
2138
2139#if 0
2140static void vega12_find_min_clock_index(struct pp_hwmgr *hwmgr,
2141 uint32_t *sclk_idx, uint32_t *mclk_idx,
2142 uint32_t min_sclk, uint32_t min_mclk)
2143{
2144 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2145 struct vega12_dpm_table *dpm_table = &(data->dpm_table);
2146 uint32_t i;
2147
2148 for (i = 0; i < dpm_table->gfx_table.count; i++) {
2149 if (dpm_table->gfx_table.dpm_levels[i].enabled &&
2150 dpm_table->gfx_table.dpm_levels[i].value >= min_sclk) {
2151 *sclk_idx = i;
2152 break;
2153 }
2154 }
2155
2156 for (i = 0; i < dpm_table->mem_table.count; i++) {
2157 if (dpm_table->mem_table.dpm_levels[i].enabled &&
2158 dpm_table->mem_table.dpm_levels[i].value >= min_mclk) {
2159 *mclk_idx = i;
2160 break;
2161 }
2162 }
2163}
2164#endif
2165
2166#if 0
2167static int vega12_set_power_profile_state(struct pp_hwmgr *hwmgr,
2168 struct amd_pp_profile *request)
2169{
2170 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2171 uint32_t sclk_idx = ~0, mclk_idx = ~0;
2172
2173 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO)
2174 return -EINVAL;
2175
2176 vega12_find_min_clock_index(hwmgr, &sclk_idx, &mclk_idx,
2177 request->min_sclk, request->min_mclk);
2178
2179 if (sclk_idx != ~0) {
2180 if (!data->registry_data.sclk_dpm_key_disabled)
2181 PP_ASSERT_WITH_CODE(
2182 !smum_send_msg_to_smc_with_parameter(
2183 hwmgr,
2184 PPSMC_MSG_SetSoftMinGfxclkByIndex,
2185 sclk_idx),
2186 "Failed to set soft min sclk index!",
2187 return -EINVAL);
2188 }
2189
2190 if (mclk_idx != ~0) {
2191 if (!data->registry_data.mclk_dpm_key_disabled)
2192 PP_ASSERT_WITH_CODE(
2193 !smum_send_msg_to_smc_with_parameter(
2194 hwmgr,
2195 PPSMC_MSG_SetSoftMinUclkByIndex,
2196 mclk_idx),
2197 "Failed to set soft min mclk index!",
2198 return -EINVAL);
2199 }
2200
2201 return 0;
2202}
2203
2204static int vega12_get_sclk_od(struct pp_hwmgr *hwmgr)
2205{
2206 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2207 struct vega12_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
2208 struct vega12_single_dpm_table *golden_sclk_table =
2209 &(data->golden_dpm_table.gfx_table);
2210 int value;
2211
2212 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
2213 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
2214 100 /
2215 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
2216
2217 return value;
2218}
2219
2220static int vega12_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
2221{
2222 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2223 struct vega12_single_dpm_table *golden_sclk_table =
2224 &(data->golden_dpm_table.gfx_table);
2225 struct pp_power_state *ps;
2226 struct vega12_power_state *vega12_ps;
2227
2228 ps = hwmgr->request_ps;
2229
2230 if (ps == NULL)
2231 return -EINVAL;
2232
2233 vega12_ps = cast_phw_vega12_power_state(&ps->hardware);
2234
2235 vega12_ps->performance_levels[vega12_ps->performance_level_count - 1].gfx_clock =
2236 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * value / 100 +
2237 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
2238
2239 if (vega12_ps->performance_levels[vega12_ps->performance_level_count - 1].gfx_clock >
2240 hwmgr->platform_descriptor.overdriveLimit.engineClock)
2241 vega12_ps->performance_levels[vega12_ps->performance_level_count - 1].gfx_clock =
2242 hwmgr->platform_descriptor.overdriveLimit.engineClock;
2243
2244 return 0;
2245}
2246
2247static int vega12_get_mclk_od(struct pp_hwmgr *hwmgr)
2248{
2249 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2250 struct vega12_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
2251 struct vega12_single_dpm_table *golden_mclk_table =
2252 &(data->golden_dpm_table.mem_table);
2253 int value;
2254
2255 value = (mclk_table->dpm_levels
2256 [mclk_table->count - 1].value -
2257 golden_mclk_table->dpm_levels
2258 [golden_mclk_table->count - 1].value) *
2259 100 /
2260 golden_mclk_table->dpm_levels
2261 [golden_mclk_table->count - 1].value;
2262
2263 return value;
2264}
2265
2266static int vega12_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
2267{
2268 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2269 struct vega12_single_dpm_table *golden_mclk_table =
2270 &(data->golden_dpm_table.mem_table);
2271 struct pp_power_state *ps;
2272 struct vega12_power_state *vega12_ps;
2273
2274 ps = hwmgr->request_ps;
2275
2276 if (ps == NULL)
2277 return -EINVAL;
2278
2279 vega12_ps = cast_phw_vega12_power_state(&ps->hardware);
2280
2281 vega12_ps->performance_levels
2282 [vega12_ps->performance_level_count - 1].mem_clock =
2283 golden_mclk_table->dpm_levels
2284 [golden_mclk_table->count - 1].value *
2285 value / 100 +
2286 golden_mclk_table->dpm_levels
2287 [golden_mclk_table->count - 1].value;
2288
2289 if (vega12_ps->performance_levels
2290 [vega12_ps->performance_level_count - 1].mem_clock >
2291 hwmgr->platform_descriptor.overdriveLimit.memoryClock)
2292 vega12_ps->performance_levels
2293 [vega12_ps->performance_level_count - 1].mem_clock =
2294 hwmgr->platform_descriptor.overdriveLimit.memoryClock;
2295
2296 return 0;
2297}
2298#endif
2299
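/* Hand the SMU the system virtual address, MC address and size of the
 * buffer used for DRAM logging.
 */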
2300static int vega12_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
2301 uint32_t virtual_addr_low,
2302 uint32_t virtual_addr_hi,
2303 uint32_t mc_addr_low,
2304 uint32_t mc_addr_hi,
2305 uint32_t size)
2306{
2307 smum_send_msg_to_smc_with_parameter(hwmgr,
2308 PPSMC_MSG_SetSystemVirtualDramAddrHigh,
2309 virtual_addr_hi);
2310 smum_send_msg_to_smc_with_parameter(hwmgr,
2311 PPSMC_MSG_SetSystemVirtualDramAddrLow,
2312 virtual_addr_low);
2313 smum_send_msg_to_smc_with_parameter(hwmgr,
2314 PPSMC_MSG_DramLogSetDramAddrHigh,
2315 mc_addr_hi);
2316
2317 smum_send_msg_to_smc_with_parameter(hwmgr,
2318 PPSMC_MSG_DramLogSetDramAddrLow,
2319 mc_addr_low);
2320
2321 smum_send_msg_to_smc_with_parameter(hwmgr,
2322 PPSMC_MSG_DramLogSetDramSize,
2323 size);
2324 return 0;
2325}
2326
2327static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
2328 struct PP_TemperatureRange *thermal_data)
2329{
2330 struct phm_ppt_v3_information *pptable_information =
2331 (struct phm_ppt_v3_information *)hwmgr->pptable;
2332
2333 memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
2334
2335 thermal_data->max = pptable_information->us_software_shutdown_temp *
2336 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2337
2338 return 0;
2339}
2340
2341static int vega12_is_hardware_ctf_enabled(struct pp_hwmgr *hwmgr)
2342{
2343 uint32_t reg;
2344
2345 reg = soc15_get_register_offset(THM_HWID, 0,
2346 mmTHM_TCON_THERM_TRIP_BASE_IDX,
2347 mmTHM_TCON_THERM_TRIP);
2348
2349 return (((cgs_read_register(hwmgr->device, reg) &
2350 THM_TCON_THERM_TRIP__THERM_TP_EN_MASK) >>
2351 THM_TCON_THERM_TRIP__THERM_TP_EN__SHIFT) == 1);
2352}
2353
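/* Register the high/low thermal interrupts with the THM block and, when
 * hardware CTF is enabled, the CTF (GPIO_19) interrupt as well.
 */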
2354static int vega12_register_thermal_interrupt(struct pp_hwmgr *hwmgr,
2355 const void *info)
2356{
2357 struct cgs_irq_src_funcs *irq_src =
2358 (struct cgs_irq_src_funcs *)info;
2359
2360 if (hwmgr->thermal_controller.ucType ==
2361 ATOM_VEGA12_PP_THERMALCONTROLLER_VEGA12) {
2362 PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
2363 0xf, /* AMDGPU_IH_CLIENTID_THM */
2364 0, 0, irq_src[0].set, irq_src[0].handler, hwmgr),
2365 "Failed to register high thermal interrupt!",
2366 return -EINVAL);
2367 PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
2368 0xf, /* AMDGPU_IH_CLIENTID_THM */
2369 1, 0, irq_src[1].set, irq_src[1].handler, hwmgr),
2370 "Failed to register low thermal interrupt!",
2371 return -EINVAL);
2372 }
2373
2374 if (vega12_is_hardware_ctf_enabled(hwmgr))
2375 /* Register CTF(GPIO_19) interrupt */
2376 PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
2377 0x16, /* AMDGPU_IH_CLIENTID_ROM_SMUIO, */
2378 83, 0, irq_src[2].set, irq_src[2].handler, hwmgr),
2379 "Failed to register CTF thermal interrupt!",
2380 return -EINVAL);
2381
2382 return 0;
2383}
2384
2385static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
2386 .backend_init = vega12_hwmgr_backend_init,
2387 .backend_fini = vega12_hwmgr_backend_fini,
2388 .asic_setup = vega12_setup_asic_task,
2389 .dynamic_state_management_enable = vega12_enable_dpm_tasks,
2390 .dynamic_state_management_disable = vega12_disable_dpm_tasks,
2391 .get_num_of_pp_table_entries =
2392 vega12_get_number_of_pp_table_entries,
2393 .get_power_state_size = vega12_get_power_state_size,
2394 .patch_boot_state = vega12_patch_boot_state,
2395 .apply_state_adjust_rules = vega12_apply_state_adjust_rules,
2396 .power_state_set = vega12_set_power_state_tasks,
2397 .get_sclk = vega12_dpm_get_sclk,
2398 .get_mclk = vega12_dpm_get_mclk,
2399 .notify_smc_display_config_after_ps_adjustment =
2400 vega12_notify_smc_display_config_after_ps_adjustment,
2401 .force_dpm_level = vega12_dpm_force_dpm_level,
2402 .stop_thermal_controller = vega12_thermal_stop_thermal_controller,
2403 .get_fan_speed_info = vega12_fan_ctrl_get_fan_speed_info,
2404 .reset_fan_speed_to_default =
2405 vega12_fan_ctrl_reset_fan_speed_to_default,
2406 .get_fan_speed_rpm = vega12_fan_ctrl_get_fan_speed_rpm,
2407 .set_fan_control_mode = vega12_set_fan_control_mode,
2408 .get_fan_control_mode = vega12_get_fan_control_mode,
2409 .read_sensor = vega12_read_sensor,
2410 .get_dal_power_level = vega12_get_dal_power_level,
2411 .get_clock_by_type_with_latency = vega12_get_clock_by_type_with_latency,
2412 .get_clock_by_type_with_voltage = vega12_get_clock_by_type_with_voltage,
2413 .set_watermarks_for_clocks_ranges = vega12_set_watermarks_for_clocks_ranges,
2414 .display_clock_voltage_request = vega12_display_clock_voltage_request,
2415 .force_clock_level = vega12_force_clock_level,
2416 .print_clock_levels = vega12_print_clock_levels,
2417 .display_config_changed = vega12_display_configuration_changed_task,
2418 .powergate_uvd = vega12_power_gate_uvd,
2419 .powergate_vce = vega12_power_gate_vce,
2420 .check_states_equal = vega12_check_states_equal,
2421 .check_smc_update_required_for_display_configuration =
2422 vega12_check_smc_update_required_for_display_configuration,
2423 .power_off_asic = vega12_power_off_asic,
2424 .disable_smc_firmware_ctf = vega12_thermal_disable_alert,
2425#if 0
2426 .set_power_profile_state = vega12_set_power_profile_state,
2427 .get_sclk_od = vega12_get_sclk_od,
2428 .set_sclk_od = vega12_set_sclk_od,
2429 .get_mclk_od = vega12_get_mclk_od,
2430 .set_mclk_od = vega12_set_mclk_od,
2431#endif
2432 .notify_cac_buffer_info = vega12_notify_cac_buffer_info,
2433 .get_thermal_temperature_range = vega12_get_thermal_temperature_range,
2434 .register_internal_thermal_interrupt = vega12_register_thermal_interrupt,
2435 .start_thermal_controller = vega12_start_thermal_controller,
2436};
2437
2438int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)
2439{
2440 hwmgr->hwmgr_func = &vega12_hwmgr_funcs;
2441 hwmgr->pptable_func = &vega12_pptable_funcs;
2442
2443 return 0;
2444}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
new file mode 100644
index 000000000000..80791d6e433c
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
@@ -0,0 +1,470 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef _VEGA12_HWMGR_H_
25#define _VEGA12_HWMGR_H_
26
27#include "hwmgr.h"
28#include "vega12/smu9_driver_if.h"
29#include "ppatomfwctrl.h"
30
31#define VEGA12_MAX_HARDWARE_POWERLEVELS 2
32
33#define WaterMarksExist 1
34#define WaterMarksLoaded 2
35
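/* Number of synthetic DPM levels reported for clocks where the SMU only
 * exposes a min/max frequency range (see vega12_get_sclks() and friends).
 */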
36#define VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS 8
37#define VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS 8
38#define VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS 8
39#define VG12_PSUEDO_NUM_UCLK_DPM_LEVELS 4
40
41enum
42{
43 GNLD_DPM_PREFETCHER = 0,
44 GNLD_DPM_GFXCLK,
45 GNLD_DPM_UCLK,
46 GNLD_DPM_SOCCLK,
47 GNLD_DPM_UVD,
48 GNLD_DPM_VCE,
49 GNLD_ULV,
50 GNLD_DPM_MP0CLK,
51 GNLD_DPM_LINK,
52 GNLD_DPM_DCEFCLK,
53 GNLD_DS_GFXCLK,
54 GNLD_DS_SOCCLK,
55 GNLD_DS_LCLK,
56 GNLD_PPT,
57 GNLD_TDC,
58 GNLD_THERMAL,
59 GNLD_GFX_PER_CU_CG,
60 GNLD_RM,
61 GNLD_DS_DCEFCLK,
62 GNLD_ACDC,
63 GNLD_VR0HOT,
64 GNLD_VR1HOT,
65 GNLD_FW_CTF,
66 GNLD_LED_DISPLAY,
67 GNLD_FAN_CONTROL,
68 GNLD_DIDT,
69 GNLD_GFXOFF,
70 GNLD_CG,
71 GNLD_ACG,
72
73 GNLD_FEATURES_MAX
74};
75
76
77#define GNLD_DPM_MAX (GNLD_DPM_DCEFCLK + 1)
78
79#define SMC_DPM_FEATURES 0x30F
80
81struct smu_features {
82 bool supported;
83 bool enabled;
84 bool allowed;
85 uint32_t smu_feature_id;
86 uint64_t smu_feature_bitmap;
87};
88
89struct vega12_performance_level {
90 uint32_t soc_clock;
91 uint32_t gfx_clock;
92 uint32_t mem_clock;
93};
94
95struct vega12_bacos {
96 uint32_t baco_flags;
97 /* struct vega12_performance_level performance_level; */
98};
99
100struct vega12_uvd_clocks {
101 uint32_t vclk;
102 uint32_t dclk;
103};
104
105struct vega12_vce_clocks {
106 uint32_t evclk;
107 uint32_t ecclk;
108};
109
110struct vega12_power_state {
111 uint32_t magic;
112 struct vega12_uvd_clocks uvd_clks;
113 struct vega12_vce_clocks vce_clks;
114 uint16_t performance_level_count;
115 bool dc_compatible;
116 uint32_t sclk_threshold;
117 struct vega12_performance_level performance_levels[VEGA12_MAX_HARDWARE_POWERLEVELS];
118};
119
120struct vega12_dpm_level {
121 bool enabled;
122 uint32_t value;
123 uint32_t param1;
124};
125
126#define VEGA12_MAX_DEEPSLEEP_DIVIDER_ID 5
127#define MAX_REGULAR_DPM_NUMBER 8
128#define MAX_PCIE_CONF 2
129#define VEGA12_MINIMUM_ENGINE_CLOCK 2500
130
131struct vega12_dpm_state {
132 uint32_t soft_min_level;
133 uint32_t soft_max_level;
134 uint32_t hard_min_level;
135 uint32_t hard_max_level;
136};
137
138struct vega12_single_dpm_table {
139 uint32_t count;
140 struct vega12_dpm_state dpm_state;
141 struct vega12_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
142};
143
144struct vega12_odn_dpm_control {
145 uint32_t count;
146 uint32_t entries[MAX_REGULAR_DPM_NUMBER];
147};
148
149struct vega12_pcie_table {
150 uint16_t count;
151 uint8_t pcie_gen[MAX_PCIE_CONF];
152 uint8_t pcie_lane[MAX_PCIE_CONF];
153 uint32_t lclk[MAX_PCIE_CONF];
154};
155
156struct vega12_dpm_table {
157 struct vega12_single_dpm_table soc_table;
158 struct vega12_single_dpm_table gfx_table;
159 struct vega12_single_dpm_table mem_table;
160 struct vega12_single_dpm_table eclk_table;
161 struct vega12_single_dpm_table vclk_table;
162 struct vega12_single_dpm_table dclk_table;
163 struct vega12_single_dpm_table dcef_table;
164 struct vega12_single_dpm_table pixel_table;
165 struct vega12_single_dpm_table display_table;
166 struct vega12_single_dpm_table phy_table;
167 struct vega12_pcie_table pcie_table;
168};
169
170#define VEGA12_MAX_LEAKAGE_COUNT 8
171struct vega12_leakage_voltage {
172 uint16_t count;
173 uint16_t leakage_id[VEGA12_MAX_LEAKAGE_COUNT];
174 uint16_t actual_voltage[VEGA12_MAX_LEAKAGE_COUNT];
175};
176
177struct vega12_display_timing {
178 uint32_t min_clock_in_sr;
179 uint32_t num_existing_displays;
180};
181
182struct vega12_dpmlevel_enable_mask {
183 uint32_t uvd_dpm_enable_mask;
184 uint32_t vce_dpm_enable_mask;
185 uint32_t samu_dpm_enable_mask;
186 uint32_t sclk_dpm_enable_mask;
187 uint32_t mclk_dpm_enable_mask;
188};
189
190struct vega12_vbios_boot_state {
191 bool bsoc_vddc_lock;
192 uint8_t uc_cooling_id;
193 uint16_t vddc;
194 uint16_t vddci;
195 uint16_t mvddc;
196 uint16_t vdd_gfx;
197 uint32_t gfx_clock;
198 uint32_t mem_clock;
199 uint32_t soc_clock;
200 uint32_t dcef_clock;
201};
202
203#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
204#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
205#define DPMTABLE_UPDATE_SCLK 0x00000004
206#define DPMTABLE_UPDATE_MCLK 0x00000008
207#define DPMTABLE_OD_UPDATE_VDDC 0x00000010
208
209struct vega12_smc_state_table {
210 uint32_t soc_boot_level;
211 uint32_t gfx_boot_level;
212 uint32_t dcef_boot_level;
213 uint32_t mem_boot_level;
214 uint32_t uvd_boot_level;
215 uint32_t vce_boot_level;
216 uint32_t gfx_max_level;
217 uint32_t mem_max_level;
218 uint8_t vr_hot_gpio;
219 uint8_t ac_dc_gpio;
220 uint8_t therm_out_gpio;
221 uint8_t therm_out_polarity;
222 uint8_t therm_out_mode;
223 PPTable_t pp_table;
224 Watermarks_t water_marks_table;
225 AvfsDebugTable_t avfs_debug_table;
226 AvfsFuseOverride_t avfs_fuse_override_table;
227 SmuMetrics_t smu_metrics;
228 DriverSmuConfig_t driver_smu_config;
229 DpmActivityMonitorCoeffInt_t dpm_activity_monitor_coeffint;
230 OverDriveTable_t overdrive_table;
231};
232
233struct vega12_mclk_latency_entries {
234 uint32_t frequency;
235 uint32_t latency;
236};
237
238struct vega12_mclk_latency_table {
239 uint32_t count;
240 struct vega12_mclk_latency_entries entries[MAX_REGULAR_DPM_NUMBER];
241};
242
243struct vega12_registry_data {
244 uint64_t disallowed_features;
245 uint8_t ac_dc_switch_gpio_support;
246 uint8_t acg_loop_support;
247 uint8_t clock_stretcher_support;
248 uint8_t db_ramping_support;
249 uint8_t didt_mode;
250 uint8_t didt_support;
251 uint8_t edc_didt_support;
252 uint8_t force_dpm_high;
253 uint8_t fuzzy_fan_control_support;
254 uint8_t mclk_dpm_key_disabled;
255 uint8_t od_state_in_dc_support;
256 uint8_t pcie_lane_override;
257 uint8_t pcie_speed_override;
258 uint32_t pcie_clock_override;
259 uint8_t pcie_dpm_key_disabled;
260 uint8_t dcefclk_dpm_key_disabled;
261 uint8_t prefetcher_dpm_key_disabled;
262 uint8_t quick_transition_support;
263 uint8_t regulator_hot_gpio_support;
264 uint8_t master_deep_sleep_support;
265 uint8_t gfx_clk_deep_sleep_support;
266 uint8_t sclk_deep_sleep_support;
267 uint8_t lclk_deep_sleep_support;
268 uint8_t dce_fclk_deep_sleep_support;
269 uint8_t sclk_dpm_key_disabled;
270 uint8_t sclk_throttle_low_notification;
271 uint8_t skip_baco_hardware;
272 uint8_t socclk_dpm_key_disabled;
273 uint8_t sq_ramping_support;
274 uint8_t tcp_ramping_support;
275 uint8_t td_ramping_support;
276 uint8_t dbr_ramping_support;
277 uint8_t gc_didt_support;
278 uint8_t psm_didt_support;
279 uint8_t thermal_support;
280 uint8_t fw_ctf_enabled;
281 uint8_t led_dpm_enabled;
282 uint8_t fan_control_support;
283 uint8_t ulv_support;
284 uint8_t odn_feature_enable;
285 uint8_t disable_water_mark;
286 uint8_t disable_workload_policy;
287 uint32_t force_workload_policy_mask;
288 uint8_t disable_3d_fs_detection;
289 uint8_t disable_pp_tuning;
290 uint8_t disable_xlpp_tuning;
291 uint32_t perf_ui_tuning_profile_turbo;
292 uint32_t perf_ui_tuning_profile_powerSave;
293 uint32_t perf_ui_tuning_profile_xl;
294 uint16_t zrpm_stop_temp;
295 uint16_t zrpm_start_temp;
296 uint32_t stable_pstate_sclk_dpm_percentage;
297 uint8_t fps_support;
298 uint8_t vr0hot;
299 uint8_t vr1hot;
300 uint8_t disable_auto_wattman;
301 uint32_t auto_wattman_debug;
302 uint32_t auto_wattman_sample_period;
303 uint8_t auto_wattman_threshold;
304 uint8_t log_avfs_param;
305 uint8_t enable_enginess;
306 uint8_t custom_fan_support;
307 uint8_t disable_pcc_limit_control;
308};
309
310struct vega12_odn_clock_voltage_dependency_table {
311 uint32_t count;
312 struct phm_ppt_v1_clock_voltage_dependency_record
313 entries[MAX_REGULAR_DPM_NUMBER];
314};
315
316struct vega12_odn_dpm_table {
317 struct vega12_odn_dpm_control control_gfxclk_state;
318 struct vega12_odn_dpm_control control_memclk_state;
319 struct phm_odn_clock_levels odn_core_clock_dpm_levels;
320 struct phm_odn_clock_levels odn_memory_clock_dpm_levels;
321 struct vega12_odn_clock_voltage_dependency_table vdd_dependency_on_sclk;
322 struct vega12_odn_clock_voltage_dependency_table vdd_dependency_on_mclk;
323 struct vega12_odn_clock_voltage_dependency_table vdd_dependency_on_socclk;
324 uint32_t odn_mclk_min_limit;
325};
326
327struct vega12_odn_fan_table {
328 uint32_t target_fan_speed;
329 uint32_t target_temperature;
330 uint32_t min_performance_clock;
331 uint32_t min_fan_limit;
332 bool force_fan_pwm;
333};
334
335struct vega12_hwmgr {
336 struct vega12_dpm_table dpm_table;
337 struct vega12_dpm_table golden_dpm_table;
338 struct vega12_registry_data registry_data;
339 struct vega12_vbios_boot_state vbios_boot_state;
340 struct vega12_mclk_latency_table mclk_latency_table;
341
342 struct vega12_leakage_voltage vddc_leakage;
343
344 uint32_t vddc_control;
345 struct pp_atomfwctrl_voltage_table vddc_voltage_table;
346 uint32_t mvdd_control;
347 struct pp_atomfwctrl_voltage_table mvdd_voltage_table;
348 uint32_t vddci_control;
349 struct pp_atomfwctrl_voltage_table vddci_voltage_table;
350
351 uint32_t active_auto_throttle_sources;
352 uint32_t water_marks_bitmap;
353 struct vega12_bacos bacos;
354
355 struct vega12_odn_dpm_table odn_dpm_table;
356 struct vega12_odn_fan_table odn_fan_table;
357
358 /* ---- General data ---- */
359 uint8_t need_update_dpm_table;
360
361 bool cac_enabled;
362 bool battery_state;
363 bool is_tlu_enabled;
364 bool avfs_exist;
365
366 uint32_t low_sclk_interrupt_threshold;
367
368 uint32_t total_active_cus;
369
370 struct vega12_display_timing display_timing;
371
372 /* ---- Vega12 Dyn Register Settings ---- */
373
374 uint32_t debug_settings;
375 uint32_t lowest_uclk_reserved_for_ulv;
376 uint32_t gfxclk_average_alpha;
377 uint32_t socclk_average_alpha;
378 uint32_t uclk_average_alpha;
379 uint32_t gfx_activity_average_alpha;
380 uint32_t display_voltage_mode;
381 uint32_t dcef_clk_quad_eqn_a;
382 uint32_t dcef_clk_quad_eqn_b;
383 uint32_t dcef_clk_quad_eqn_c;
384 uint32_t disp_clk_quad_eqn_a;
385 uint32_t disp_clk_quad_eqn_b;
386 uint32_t disp_clk_quad_eqn_c;
387 uint32_t pixel_clk_quad_eqn_a;
388 uint32_t pixel_clk_quad_eqn_b;
389 uint32_t pixel_clk_quad_eqn_c;
390 uint32_t phy_clk_quad_eqn_a;
391 uint32_t phy_clk_quad_eqn_b;
392 uint32_t phy_clk_quad_eqn_c;
393
394 /* ---- Thermal Temperature Setting ---- */
395 struct vega12_dpmlevel_enable_mask dpm_level_enable_mask;
396
397 /* ---- Power Gating States ---- */
398 bool uvd_power_gated;
399 bool vce_power_gated;
400 bool samu_power_gated;
401 bool need_long_memory_training;
402
403 /* Internal settings to apply the application power optimization parameters */
404 bool apply_optimized_settings;
405 uint32_t disable_dpm_mask;
406
407 /* ---- Overdrive next setting ---- */
408 uint32_t apply_overdrive_next_settings_mask;
409
410 /* ---- Workload Mask ---- */
411 uint32_t workload_mask;
412
413 /* ---- SMU9 ---- */
414 uint32_t smu_version;
415 struct smu_features smu_features[GNLD_FEATURES_MAX];
416 struct vega12_smc_state_table smc_state_table;
417};
418
419#define VEGA12_DPM2_NEAR_TDP_DEC 10
420#define VEGA12_DPM2_ABOVE_SAFE_INC 5
421#define VEGA12_DPM2_BELOW_SAFE_INC 20
422
423#define VEGA12_DPM2_LTA_WINDOW_SIZE 7
424
425#define VEGA12_DPM2_LTS_TRUNCATE 0
426
427#define VEGA12_DPM2_TDP_SAFE_LIMIT_PERCENT 80
428
429#define VEGA12_DPM2_MAXPS_PERCENT_M 90
430#define VEGA12_DPM2_MAXPS_PERCENT_H 90
431
432#define VEGA12_DPM2_PWREFFICIENCYRATIO_MARGIN 50
433
434#define VEGA12_DPM2_SQ_RAMP_MAX_POWER 0x3FFF
435#define VEGA12_DPM2_SQ_RAMP_MIN_POWER 0x12
436#define VEGA12_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15
437#define VEGA12_DPM2_SQ_RAMP_SHORT_TERM_INTERVAL_SIZE 0x1E
438#define VEGA12_DPM2_SQ_RAMP_LONG_TERM_INTERVAL_RATIO 0xF
439
440#define VEGA12_VOLTAGE_CONTROL_NONE 0x0
441#define VEGA12_VOLTAGE_CONTROL_BY_GPIO 0x1
442#define VEGA12_VOLTAGE_CONTROL_BY_SVID2 0x2
443#define VEGA12_VOLTAGE_CONTROL_MERGED 0x3
444/* To convert to Q8.8 format for firmware */
445#define VEGA12_Q88_FORMAT_CONVERSION_UNIT 256
446
447#define VEGA12_UNUSED_GPIO_PIN 0x7F
448
449#define VEGA12_THERM_OUT_MODE_DISABLE 0x0
450#define VEGA12_THERM_OUT_MODE_THERM_ONLY 0x1
451#define VEGA12_THERM_OUT_MODE_THERM_VRHOT 0x2
452
453#define PPVEGA12_VEGA12DISPLAYVOLTAGEMODE_DFLT 0xffffffff
454#define PPREGKEY_VEGA12QUADRATICEQUATION_DFLT 0xffffffff
455
456#define PPVEGA12_VEGA12GFXCLKAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */
457#define PPVEGA12_VEGA12SOCCLKAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */
458#define PPVEGA12_VEGA12UCLKCLKAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */
459#define PPVEGA12_VEGA12GFXACTIVITYAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */
460#define PPVEGA12_VEGA12LOWESTUCLKRESERVEDFORULV_DFLT 0xffffffff
461#define PPVEGA12_VEGA12DISPLAYVOLTAGEMODE_DFLT 0xffffffff
462#define PPREGKEY_VEGA12QUADRATICEQUATION_DFLT 0xffffffff
463
464#define VEGA12_UMD_PSTATE_GFXCLK_LEVEL 0x3
465#define VEGA12_UMD_PSTATE_SOCCLK_LEVEL 0x3
466#define VEGA12_UMD_PSTATE_MCLK_LEVEL 0x2
467
468int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
469
470#endif /* _VEGA12_HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.c
new file mode 100644
index 000000000000..76e60c0181ac
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.c
@@ -0,0 +1,1364 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "hwmgr.h"
25#include "vega12_hwmgr.h"
26#include "vega12_powertune.h"
27#include "vega12_smumgr.h"
28#include "vega12_ppsmc.h"
29#include "vega12_inc.h"
30#include "pp_debug.h"
31#include "pp_soc15.h"
32
33static const struct vega12_didt_config_reg SEDiDtTuningCtrlConfig_Vega12[] =
34{
35/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
36 * Offset Mask Shift Value
37 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
38 */
39 /* DIDT_SQ */
40 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853 },
41 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153 },
42
43 /* DIDT_TD */
44 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde },
45 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde },
46
47 /* DIDT_TCP */
48 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde },
49 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde },
50
51 /* DIDT_DB */
52 { ixDIDT_DB_TUNING_CTRL, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde },
53 { ixDIDT_DB_TUNING_CTRL, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde },
54
55 { 0xFFFFFFFF } /* End of list */
56};
57
58static const struct vega12_didt_config_reg SEDiDtCtrl3Config_vega12[] =
59{
60/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
61 * Offset Mask Shift Value
62 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
63 */
64 /*DIDT_SQ_CTRL3 */
65 { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_SQ_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
66 { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
67 { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__THROTTLE_POLICY_MASK, DIDT_SQ_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
68 { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
69 { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
70 { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
71 { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
72 { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
73 { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_SQ_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
74 { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_SEL_MASK, DIDT_SQ_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
75 { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_SQ_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
76 { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
77
78 /*DIDT_TCP_CTRL3 */
79 { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_TCP_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
80 { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
81 { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__THROTTLE_POLICY_MASK, DIDT_TCP_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
82 { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
83 { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
84 { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
85 { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
86 { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
87 { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_TCP_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
88 { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_SEL_MASK, DIDT_TCP_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
89 { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_TCP_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
90 { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
91
92 /*DIDT_TD_CTRL3 */
93 { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_TD_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
94 { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
95 { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__THROTTLE_POLICY_MASK, DIDT_TD_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
96 { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
97 { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
98 { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
99 { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
100 { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
101 { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_TD_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
102 { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_SEL_MASK, DIDT_TD_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
103 { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_TD_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
104 { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
105
106 /*DIDT_DB_CTRL3 */
107 { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_DB_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
108 { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
109 { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__THROTTLE_POLICY_MASK, DIDT_DB_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
110 { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
111 { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
112 { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
113 { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
114 { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
115 { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_DB_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
116 { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_SEL_MASK, DIDT_DB_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
117 { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_DB_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
118 { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
119
120 { 0xFFFFFFFF } /* End of list */
121};
122
123static const struct vega12_didt_config_reg SEDiDtCtrl2Config_Vega12[] =
124{
125/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
126 * Offset Mask Shift Value
127 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
128 */
129 /* DIDT_SQ */
130 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853 },
131 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
132 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000 },
133
134 /* DIDT_TD */
135 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff },
136 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
137 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 },
138
139 /* DIDT_TCP */
140 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde },
141 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
142 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 },
143
144 /* DIDT_DB */
145 { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__MAX_POWER_DELTA_MASK, DIDT_DB_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde },
146 { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
147 { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 },
148
149 { 0xFFFFFFFF } /* End of list */
150};
151
152static const struct vega12_didt_config_reg SEDiDtCtrl1Config_Vega12[] =
153{
154/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
155 * Offset Mask Shift Value
156 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
157 */
158 /* DIDT_SQ */
159 { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000 },
160 { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff },
161 /* DIDT_TD */
162 { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000 },
163 { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff },
164 /* DIDT_TCP */
165 { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000 },
166 { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff },
167 /* DIDT_DB */
168 { ixDIDT_DB_CTRL1, DIDT_DB_CTRL1__MIN_POWER_MASK, DIDT_DB_CTRL1__MIN_POWER__SHIFT, 0x0000 },
169 { ixDIDT_DB_CTRL1, DIDT_DB_CTRL1__MAX_POWER_MASK, DIDT_DB_CTRL1__MAX_POWER__SHIFT, 0xffff },
170
171 { 0xFFFFFFFF } /* End of list */
172};
173
174
175static const struct vega12_didt_config_reg SEDiDtWeightConfig_Vega12[] =
176{
177/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
178 * Offset Mask Shift Value
179 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
180 */
181 /* DIDT_SQ */
182 { ixDIDT_SQ_WEIGHT0_3, 0xFFFFFFFF, 0, 0x2B363B1A },
183 { ixDIDT_SQ_WEIGHT4_7, 0xFFFFFFFF, 0, 0x270B2432 },
184 { ixDIDT_SQ_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000018 },
185
186 /* DIDT_TD */
187 { ixDIDT_TD_WEIGHT0_3, 0xFFFFFFFF, 0, 0x2B1D220F },
188 { ixDIDT_TD_WEIGHT4_7, 0xFFFFFFFF, 0, 0x00007558 },
189 { ixDIDT_TD_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000000 },
190
191 /* DIDT_TCP */
192 { ixDIDT_TCP_WEIGHT0_3, 0xFFFFFFFF, 0, 0x5ACE160D },
193 { ixDIDT_TCP_WEIGHT4_7, 0xFFFFFFFF, 0, 0x00000000 },
194 { ixDIDT_TCP_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000000 },
195
196 /* DIDT_DB */
197 { ixDIDT_DB_WEIGHT0_3, 0xFFFFFFFF, 0, 0x0E152A0F },
198 { ixDIDT_DB_WEIGHT4_7, 0xFFFFFFFF, 0, 0x09061813 },
199 { ixDIDT_DB_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000013 },
200
201 { 0xFFFFFFFF } /* End of list */
202};
203
204static const struct vega12_didt_config_reg SEDiDtCtrl0Config_Vega12[] =
205{
206/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
207 * Offset Mask Shift Value
208 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
209 */
210 /* DIDT_SQ */
211 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
212 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
213 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
214 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
215 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
216 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
217 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
218 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
219 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
220 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
221 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
222 /* DIDT_TD */
223 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
224 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
225 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
226 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
227 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
228 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
229 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
230 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
231 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
232 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
233 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
234 /* DIDT_TCP */
235 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
236 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
237 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
238 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
239 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
240 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
241 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
242 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
243 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
244 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
245 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
246 /* DIDT_DB */
247 { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
248 { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__PHASE_OFFSET_MASK, DIDT_DB_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
249 { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CTRL_RST_MASK, DIDT_DB_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
250 { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
251 { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
252 { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
253 { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
254 { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
255 { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
256 { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
257 { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
258
259 { 0xFFFFFFFF } /* End of list */
260};
261
262
263static const struct vega12_didt_config_reg SEDiDtStallCtrlConfig_vega12[] =
264{
265/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
266 * Offset Mask Shift Value
267 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
268 */
269 /* DIDT_SQ */
270 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0004 },
271 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0004 },
272 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
273 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
274
275 /* DIDT_TD */
276 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001 },
277 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001 },
278 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
279 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
280
281 /* DIDT_TCP */
282 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001 },
283 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001 },
284 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
285 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
286
287 /* DIDT_DB */
288 { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0004 },
289 { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0004 },
290 { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
291 { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
292
293 { 0xFFFFFFFF } /* End of list */
294};
295
296static const struct vega12_didt_config_reg SEDiDtStallPatternConfig_vega12[] =
297{
298/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
299 * Offset Mask Shift Value
300 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
301 */
302 /* DIDT_SQ_STALL_PATTERN_1_2 */
303 { ixDIDT_SQ_STALL_PATTERN_1_2, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
304 { ixDIDT_SQ_STALL_PATTERN_1_2, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
305
306 /* DIDT_SQ_STALL_PATTERN_3_4 */
307 { ixDIDT_SQ_STALL_PATTERN_3_4, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
308 { ixDIDT_SQ_STALL_PATTERN_3_4, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
309
310 /* DIDT_SQ_STALL_PATTERN_5_6 */
311 { ixDIDT_SQ_STALL_PATTERN_5_6, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
312 { ixDIDT_SQ_STALL_PATTERN_5_6, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
313
314 /* DIDT_SQ_STALL_PATTERN_7 */
315 { ixDIDT_SQ_STALL_PATTERN_7, DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
316
317 /* DIDT_TCP_STALL_PATTERN_1_2 */
318 { ixDIDT_TCP_STALL_PATTERN_1_2, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
319 { ixDIDT_TCP_STALL_PATTERN_1_2, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
320
321 /* DIDT_TCP_STALL_PATTERN_3_4 */
322 { ixDIDT_TCP_STALL_PATTERN_3_4, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
323 { ixDIDT_TCP_STALL_PATTERN_3_4, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
324
325 /* DIDT_TCP_STALL_PATTERN_5_6 */
326 { ixDIDT_TCP_STALL_PATTERN_5_6, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
327 { ixDIDT_TCP_STALL_PATTERN_5_6, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
328
329 /* DIDT_TCP_STALL_PATTERN_7 */
330 { ixDIDT_TCP_STALL_PATTERN_7, DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
331
332 /* DIDT_TD_STALL_PATTERN_1_2 */
333 { ixDIDT_TD_STALL_PATTERN_1_2, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
334 { ixDIDT_TD_STALL_PATTERN_1_2, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
335
336 /* DIDT_TD_STALL_PATTERN_3_4 */
337 { ixDIDT_TD_STALL_PATTERN_3_4, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
338 { ixDIDT_TD_STALL_PATTERN_3_4, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
339
340 /* DIDT_TD_STALL_PATTERN_5_6 */
341 { ixDIDT_TD_STALL_PATTERN_5_6, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
342 { ixDIDT_TD_STALL_PATTERN_5_6, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
343
344 /* DIDT_TD_STALL_PATTERN_7 */
345 { ixDIDT_TD_STALL_PATTERN_7, DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
346
347 /* DIDT_DB_STALL_PATTERN_1_2 */
348 { ixDIDT_DB_STALL_PATTERN_1_2, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
349 { ixDIDT_DB_STALL_PATTERN_1_2, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
350
351 /* DIDT_DB_STALL_PATTERN_3_4 */
352 { ixDIDT_DB_STALL_PATTERN_3_4, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
353 { ixDIDT_DB_STALL_PATTERN_3_4, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
354
355 /* DIDT_DB_STALL_PATTERN_5_6 */
356 { ixDIDT_DB_STALL_PATTERN_5_6, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
357 { ixDIDT_DB_STALL_PATTERN_5_6, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
358
359 /* DIDT_DB_STALL_PATTERN_7 */
360 { ixDIDT_DB_STALL_PATTERN_7, DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
361
362 { 0xFFFFFFFF } /* End of list */
363};
364
365static const struct vega12_didt_config_reg SELCacConfig_Vega12[] =
366{
367/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
368 * Offset Mask Shift Value
369 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
370 */
371 /* SQ */
372 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060021 },
373 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860021 },
374 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060021 },
375 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860021 },
376 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060021 },
377 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860021 },
378 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060021 },
379 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860021 },
380 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060021 },
381 /* TD */
382 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0020 },
383 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0020 },
384 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0020 },
385 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0020 },
386 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0020 },
387 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x028E0020 },
388 /* TCP */
389 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x001c0020 },
390 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x009c0020 },
391 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x011c0020 },
392 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x019c0020 },
393 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x021c0020 },
394 /* DB */
395 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00200008 },
396 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00820008 },
397 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01020008 },
398 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01820008 },
399
400 { 0xFFFFFFFF } /* End of list */
401};
402
403
404static const struct vega12_didt_config_reg SEEDCStallPatternConfig_Vega12[] =
405{
406/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
407 * Offset Mask Shift Value
408 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
409 */
410 /* SQ */
411 { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00030001 },
412 { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x000F0007 },
413 { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x003F001F },
414 { ixDIDT_SQ_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x0000007F },
415 /* TD */
416 { ixDIDT_TD_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 },
417 { ixDIDT_TD_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
418 { ixDIDT_TD_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
419 { ixDIDT_TD_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
420 /* TCP */
421 { ixDIDT_TCP_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 },
422 { ixDIDT_TCP_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
423 { ixDIDT_TCP_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
424 { ixDIDT_TCP_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
425 /* DB */
426 { ixDIDT_DB_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 },
427 { ixDIDT_DB_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
428 { ixDIDT_DB_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
429 { ixDIDT_DB_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
430
431 { 0xFFFFFFFF } /* End of list */
432};
433
434static const struct vega12_didt_config_reg SEEDCForceStallPatternConfig_Vega12[] =
435{
436/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
437 * Offset Mask Shift Value
438 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
439 */
440 /* SQ */
441 { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000015 },
442 { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
443 { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
444 { ixDIDT_SQ_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
445 /* TD */
446 { ixDIDT_TD_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000015 },
447 { ixDIDT_TD_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
448 { ixDIDT_TD_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
449 { ixDIDT_TD_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
450
451 { 0xFFFFFFFF } /* End of list */
452};
453
454static const struct vega12_didt_config_reg SEEDCStallDelayConfig_Vega12[] =
455{
456/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
457 * Offset Mask Shift Value
458 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
459 */
460 /* SQ */
461 { ixDIDT_SQ_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
462 { ixDIDT_SQ_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 },
463 /* TD */
464 { ixDIDT_TD_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
465 { ixDIDT_TD_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 },
466 /* TCP */
467 { ixDIDT_TCP_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
468 { ixDIDT_TCP_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 },
469 /* DB */
470 { ixDIDT_DB_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
471
472 { 0xFFFFFFFF } /* End of list */
473};
474
475static const struct vega12_didt_config_reg SEEDCThresholdConfig_Vega12[] =
476{
477/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
478 * Offset Mask Shift Value
479 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
480 */
481 { ixDIDT_SQ_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0x0000010E },
482 { ixDIDT_TD_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF },
483 { ixDIDT_TCP_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF },
484 { ixDIDT_DB_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF },
485
486 { 0xFFFFFFFF } /* End of list */
487};
488
489static const struct vega12_didt_config_reg SEEDCCtrlResetConfig_Vega12[] =
490{
491/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
492 * Offset Mask Shift Value
493 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
494 */
495 /* SQ */
496 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
497 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 },
498 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
499 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
500 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
501 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0000 },
502 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
503 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
504 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
505 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
506 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
507
508 { 0xFFFFFFFF } /* End of list */
509};
510
511static const struct vega12_didt_config_reg SEEDCCtrlConfig_Vega12[] =
512{
513/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
514 * Offset Mask Shift Value
515 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
516 */
517 /* SQ */
518 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0001 },
519 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
520 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
521 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
522 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0004 },
523 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0006 },
524 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
525 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
526 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
527 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
528 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
529
530 { 0xFFFFFFFF } /* End of list */
531};
532
533static const struct vega12_didt_config_reg SEEDCCtrlForceStallConfig_Vega12[] =
534{
535/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
536 * Offset Mask Shift Value
537 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
538 */
539 /* SQ */
540 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
541 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
542 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
543 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 },
544 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0001 },
545 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000C },
546 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
547 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
548 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
549 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
550 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
551
552 /* TD */
553 { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_EN_MASK, DIDT_TD_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
554 { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK, DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
555 { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
556 { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 },
557 { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0001 },
558 { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000E },
559 { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
560 { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_EN_MASK, DIDT_TD_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
561 { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
562 { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
563 { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
564
565 { 0xFFFFFFFF } /* End of list */
566};
567
568static const struct vega12_didt_config_reg GCDiDtDroopCtrlConfig_vega12[] =
569{
570/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
571 * Offset Mask Shift Value
572 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
573 */
574 { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN__SHIFT, 0x0000 },
575 { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD__SHIFT, 0x0000 },
576 { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX__SHIFT, 0x0000 },
577 { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL_MASK, GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL__SHIFT, 0x0000 },
578 { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW__SHIFT, 0x0000 },
579
580 { 0xFFFFFFFF } /* End of list */
581};
582
583static const struct vega12_didt_config_reg GCDiDtCtrl0Config_vega12[] =
584{
585/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
586 * Offset Mask Shift Value
587 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
588 */
589 { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_CTRL_EN_MASK, GC_DIDT_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
590 { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__PHASE_OFFSET_MASK, GC_DIDT_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
591 { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_SW_RST_MASK, GC_DIDT_CTRL0__DIDT_SW_RST__SHIFT, 0x0000 },
592 { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
593 { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
594 { 0xFFFFFFFF } /* End of list */
595};
596
597
598static const struct vega12_didt_config_reg PSMSEEDCStallPatternConfig_Vega12[] =
599{
600/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
601 * Offset Mask Shift Value
602 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
603 */
604 /* SQ EDC STALL PATTERNs */
605 { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT, 0x0101 },
606 { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT, 0x0101 },
607 { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT, 0x1111 },
608 { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT, 0x1111 },
609
610 { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT, 0x1515 },
611 { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT, 0x1515 },
612
613 { ixDIDT_SQ_EDC_STALL_PATTERN_7, DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK, DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT, 0x5555 },
614
615 { 0xFFFFFFFF } /* End of list */
616};
617
618static const struct vega12_didt_config_reg PSMSEEDCStallDelayConfig_Vega12[] =
619{
620/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
621 * Offset Mask Shift Value
622 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
623 */
624 /* SQ EDC STALL DELAYs */
625 { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0__SHIFT, 0x0000 },
626 { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1__SHIFT, 0x0000 },
627 { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2__SHIFT, 0x0000 },
628 { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3__SHIFT, 0x0000 },
629
630 { ixDIDT_SQ_EDC_STALL_DELAY_2, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4_MASK, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4__SHIFT, 0x0000 },
631
632 { 0xFFFFFFFF } /* End of list */
633};
634
635static const struct vega12_didt_config_reg PSMSEEDCThresholdConfig_Vega12[] =
636{
637/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
638 * Offset Mask Shift Value
639 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
640 */
641 /* SQ EDC THRESHOLD */
642 { ixDIDT_SQ_EDC_THRESHOLD, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD_MASK, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x0000 },
643
644 { 0xFFFFFFFF } /* End of list */
645};
646
647static const struct vega12_didt_config_reg PSMSEEDCCtrlResetConfig_Vega12[] =
648{
649/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
650 * Offset Mask Shift Value
651 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
652 */
653 /* SQ EDC CTRL */
654 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
655 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 },
656 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
657 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
658 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
659 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0000 },
660 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
661 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
662 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
663 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
664 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
665
666 { 0xFFFFFFFF } /* End of list */
667};
668
669static const struct vega12_didt_config_reg PSMSEEDCCtrlConfig_Vega12[] =
670{
671/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
672 * Offset Mask Shift Value
673 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
674 */
675 /* SQ EDC CTRL */
676 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0001 },
677 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
678 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
679 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
680 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
681 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000E },
682 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
683 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0001 },
684 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0003 },
685 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
686 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
687
688 { 0xFFFFFFFF } /* End of list */
689};
690
691static const struct vega12_didt_config_reg PSMGCEDCThresholdConfig_vega12[] =
692{
693/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
694 * Offset Mask Shift Value
695 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
696 */
697 { mmGC_EDC_THRESHOLD, GC_EDC_THRESHOLD__EDC_THRESHOLD_MASK, GC_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x00000000 },
698
699 { 0xFFFFFFFF } /* End of list */
700};
701
702static const struct vega12_didt_config_reg PSMGCEDCDroopCtrlConfig_vega12[] =
703{
704/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
705 * Offset Mask Shift Value
706 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
707 */
708 { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN__SHIFT, 0x0001 },
709 { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD__SHIFT, 0x0384 },
710 { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX__SHIFT, 0x0001 },
711 { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__AVG_PSM_SEL_MASK, GC_EDC_DROOP_CTRL__AVG_PSM_SEL__SHIFT, 0x0001 },
712 { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL_MASK, GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL__SHIFT, 0x0001 },
713
714 { 0xFFFFFFFF } /* End of list */
715};
716
717static const struct vega12_didt_config_reg PSMGCEDCCtrlResetConfig_vega12[] =
718{
719/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
720 * Offset Mask Shift Value
721 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
722 */
723 { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_EN_MASK, GC_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
724 { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_SW_RST_MASK, GC_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 },
725 { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
726 { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_FORCE_STALL_MASK, GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
727 { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
728 { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
729
730 { 0xFFFFFFFF } /* End of list */
731};
732
733static const struct vega12_didt_config_reg PSMGCEDCCtrlConfig_vega12[] =
734{
735/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
736 * Offset Mask Shift Value
737 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
738 */
739 { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_EN_MASK, GC_EDC_CTRL__EDC_EN__SHIFT, 0x0001 },
740 { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_SW_RST_MASK, GC_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
741 { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
742 { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_FORCE_STALL_MASK, GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
743 { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
744 { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
745
746 { 0xFFFFFFFF } /* End of list */
747};
748
749static const struct vega12_didt_config_reg AvfsPSMResetConfig_vega12[] =
750{
751/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
752 * Offset Mask Shift Value
753 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
754 */
755 { 0x16A02, 0xFFFFFFFF, 0x0, 0x0000005F },
756 { 0x16A05, 0xFFFFFFFF, 0x0, 0x00000001 },
757 { 0x16A06, 0x00000001, 0x0, 0x02000000 },
758 { 0x16A01, 0xFFFFFFFF, 0x0, 0x00003027 },
759
760 { 0xFFFFFFFF } /* End of list */
761};
762
763static const struct vega12_didt_config_reg AvfsPSMInitConfig_vega12[] =
764{
765/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
766 * Offset Mask Shift Value
767 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
768 */
769 { 0x16A05, 0xFFFFFFFF, 0x18, 0x00000001 },
770 { 0x16A05, 0xFFFFFFFF, 0x8, 0x00000003 },
771 { 0x16A05, 0xFFFFFFFF, 0xa, 0x00000006 },
772 { 0x16A05, 0xFFFFFFFF, 0x7, 0x00000000 },
773 { 0x16A06, 0xFFFFFFFF, 0x18, 0x00000001 },
774 { 0x16A06, 0xFFFFFFFF, 0x19, 0x00000001 },
775 { 0x16A01, 0xFFFFFFFF, 0x0, 0x00003027 },
776
777 { 0xFFFFFFFF } /* End of list */
778};
779
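/*
 * Apply a table of DIDT register settings.  Each entry is a
 * read-modify-write of the field described by (mask, shift) in the
 * indirect register space selected by reg_type (DIDT, GC_CAC or
 * SE_CAC); the walk stops at the 0xFFFFFFFF sentinel entry.
 */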
780static int vega12_program_didt_config_registers(struct pp_hwmgr *hwmgr, const struct vega12_didt_config_reg *config_regs, enum vega12_didt_config_reg_type reg_type)
781{
782 uint32_t data;
783
784 PP_ASSERT_WITH_CODE((config_regs != NULL), "[vega12_program_didt_config_registers] Invalid config register table!", return -EINVAL);
785
786 while (config_regs->offset != 0xFFFFFFFF) {
787 switch (reg_type) {
788 case VEGA12_CONFIGREG_DIDT:
789 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset);
790 data &= ~config_regs->mask;
791 data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
792 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset, data);
793 break;
794 case VEGA12_CONFIGREG_GCCAC:
795 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset);
796 data &= ~config_regs->mask;
797 data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
798 cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data);
799 break;
800 case VEGA12_CONFIGREG_SECAC:
801 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_SE_CAC, config_regs->offset);
802 data &= ~config_regs->mask;
803 data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
804 cgs_write_ind_register(hwmgr->device, CGS_IND_REG_SE_CAC, config_regs->offset, data);
805 break;
806 default:
807 return -EINVAL;
808 }
809
810 config_regs++;
811 }
812
813 return 0;
814}
815
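/*
 * Same read-modify-write walk as above, but for directly addressed
 * (mm*) GC registers rather than an indirect register space.
 */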
816static int vega12_program_gc_didt_config_registers(struct pp_hwmgr *hwmgr, const struct vega12_didt_config_reg *config_regs)
817{
818 uint32_t data;
819
820 while (config_regs->offset != 0xFFFFFFFF) {
821 data = cgs_read_register(hwmgr->device, config_regs->offset);
822 data &= ~config_regs->mask;
823 data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
824 cgs_write_register(hwmgr->device, config_regs->offset, data);
825 config_regs++;
826 }
827
828 return 0;
829}
830
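/*
 * Toggle the DIDT_CTRL_EN (and, when EDC is supported, EDC_EN/EDC_SW_RST)
 * bits for every DIDT block whose platform cap is set, then report the
 * resulting block mask to the SMC via PPSMC_MSG_ConfigureGfxDidt when
 * enabling.
 */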
831static void vega12_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
832{
833 uint32_t data;
834 int result;
835 uint32_t en = (enable ? 1 : 0);
836 uint32_t didt_block_info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
837
838 if (PP_CAP(PHM_PlatformCaps_SQRamping)) {
839 CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
840 DIDT_SQ_CTRL0, DIDT_CTRL_EN, en);
841 didt_block_info &= ~SQ_Enable_MASK;
842 didt_block_info |= en << SQ_Enable_SHIFT;
843 }
844
845 if (PP_CAP(PHM_PlatformCaps_DBRamping)) {
846 CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
847 DIDT_DB_CTRL0, DIDT_CTRL_EN, en);
848 didt_block_info &= ~DB_Enable_MASK;
849 didt_block_info |= en << DB_Enable_SHIFT;
850 }
851
852 if (PP_CAP(PHM_PlatformCaps_TDRamping)) {
853 CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
854 DIDT_TD_CTRL0, DIDT_CTRL_EN, en);
855 didt_block_info &= ~TD_Enable_MASK;
856 didt_block_info |= en << TD_Enable_SHIFT;
857 }
858
859 if (PP_CAP(PHM_PlatformCaps_TCPRamping)) {
860 CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
861 DIDT_TCP_CTRL0, DIDT_CTRL_EN, en);
862 didt_block_info &= ~TCP_Enable_MASK;
863 didt_block_info |= en << TCP_Enable_SHIFT;
864 }
865
866#if 0
867 if (PP_CAP(PHM_PlatformCaps_DBRRamping)) {
868 CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
869 DIDT_DBR_CTRL0, DIDT_CTRL_EN, en);
870 }
871#endif
872
873 if (PP_CAP(PHM_PlatformCaps_DiDtEDCEnable)) {
874 if (PP_CAP(PHM_PlatformCaps_SQRamping)) {
875 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL);
876 data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_EN, en);
877 data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_SW_RST, ~en);
878 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL, data);
879 }
880
881 if (PP_CAP(PHM_PlatformCaps_DBRamping)) {
882 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL);
883 data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_EN, en);
884 data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_SW_RST, ~en);
885 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL, data);
886 }
887
888 if (PP_CAP(PHM_PlatformCaps_TDRamping)) {
889 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL);
890 data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_EN, en);
891 data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_SW_RST, ~en);
892 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL, data);
893 }
894
895 if (PP_CAP(PHM_PlatformCaps_TCPRamping)) {
896 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL);
897 data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_EN, en);
898 data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_SW_RST, ~en);
899 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL, data);
900 }
901
902#if 0
903 if (PP_CAP(PHM_PlatformCaps_DBRRamping)) {
904 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL);
905 data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_EN, en);
906 data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_SW_RST, ~en);
907 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL, data);
908 }
909#endif
910 }
911
912 if (enable) {
913 /* For Vega12, SMC does not support any mask yet. */
914 result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info);
915 PP_ASSERT((0 == result), "[EnableDiDtConfig] SMC Configure Gfx Didt Failed!");
916 }
917}
918
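/*
 * Program the per-shader-engine DIDT/CAC tables.  GRBM_GFX_INDEX is used
 * to select each SE in turn (with instance/SH broadcast), the stall,
 * weight, control and SE CAC tables are written, broadcast is restored
 * and the DIDT blocks are then enabled via vega12_didt_set_mask().
 */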
919static int vega12_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
920{
921 int result;
922 uint32_t num_se = 0, count, data;
923 struct amdgpu_device *adev = hwmgr->adev;
924 uint32_t reg;
925
926 num_se = adev->gfx.config.max_shader_engines;
927
928 cgs_enter_safe_mode(hwmgr->device, true);
929
930 cgs_lock_grbm_idx(hwmgr->device, true);
931 reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
932 for (count = 0; count < num_se; count++) {
933 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | (count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
934 cgs_write_register(hwmgr->device, reg, data);
935
936 result = vega12_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega12, VEGA12_CONFIGREG_DIDT);
937 result |= vega12_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega12, VEGA12_CONFIGREG_DIDT);
938 result |= vega12_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega12, VEGA12_CONFIGREG_DIDT);
939 result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl1Config_Vega12, VEGA12_CONFIGREG_DIDT);
940 result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl2Config_Vega12, VEGA12_CONFIGREG_DIDT);
941 result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl3Config_vega12, VEGA12_CONFIGREG_DIDT);
942 result |= vega12_program_didt_config_registers(hwmgr, SEDiDtTuningCtrlConfig_Vega12, VEGA12_CONFIGREG_DIDT);
943 result |= vega12_program_didt_config_registers(hwmgr, SELCacConfig_Vega12, VEGA12_CONFIGREG_SECAC);
944 result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl0Config_Vega12, VEGA12_CONFIGREG_DIDT);
945
946 if (0 != result)
947 break;
948 }
949 cgs_write_register(hwmgr->device, reg, 0xE0000000);
950 cgs_lock_grbm_idx(hwmgr->device, false);
951
952 vega12_didt_set_mask(hwmgr, true);
953
954 cgs_enter_safe_mode(hwmgr->device, false);
955
956 return 0;
957}
958
959static int vega12_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
960{
961 cgs_enter_safe_mode(hwmgr->device, true);
962
963 vega12_didt_set_mask(hwmgr, false);
964
965 cgs_enter_safe_mode(hwmgr->device, false);
966
967 return 0;
968}
969
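/*
 * PSM GC DIDT variant: programs only the stall and control tables per SE,
 * enables the DIDT blocks, then sets up the GC droop control and, when the
 * GCEDC/PSM caps are set, the GC DIDT control and AVFS PSM init tables.
 */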
970static int vega12_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
971{
972 int result;
973 uint32_t num_se = 0, count, data;
974 struct amdgpu_device *adev = hwmgr->adev;
975 uint32_t reg;
976
977 num_se = adev->gfx.config.max_shader_engines;
978
979 cgs_enter_safe_mode(hwmgr->device, true);
980
981 cgs_lock_grbm_idx(hwmgr->device, true);
982 reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
983 for (count = 0; count < num_se; count++) {
984 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | (count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
985 cgs_write_register(hwmgr->device, reg, data);
986
987 result = vega12_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega12, VEGA12_CONFIGREG_DIDT);
988 result |= vega12_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega12, VEGA12_CONFIGREG_DIDT);
989 result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl3Config_vega12, VEGA12_CONFIGREG_DIDT);
990 result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl0Config_Vega12, VEGA12_CONFIGREG_DIDT);
991 if (0 != result)
992 break;
993 }
994 cgs_write_register(hwmgr->device, reg, 0xE0000000);
995 cgs_lock_grbm_idx(hwmgr->device, false);
996
997 vega12_didt_set_mask(hwmgr, true);
998
999 cgs_enter_safe_mode(hwmgr->device, false);
1000
1001 vega12_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega12);
1002 if (PP_CAP(PHM_PlatformCaps_GCEDC))
1003 vega12_program_gc_didt_config_registers(hwmgr, GCDiDtCtrl0Config_vega12);
1004
1005 if (PP_CAP(PHM_PlatformCaps_PSM))
1006 vega12_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega12);
1007
1008 return 0;
1009}
1010
1011static int vega12_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
1012{
1013 uint32_t data;
1014
1015 cgs_enter_safe_mode(hwmgr->device, true);
1016
1017 vega12_didt_set_mask(hwmgr, false);
1018
1019 cgs_enter_safe_mode(hwmgr->device, false);
1020
1021 if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
1022 data = 0x00000000;
1023 cgs_write_register(hwmgr->device, mmGC_DIDT_CTRL0, data);
1024 }
1025
1026 if (PP_CAP(PHM_PlatformCaps_PSM))
1027 vega12_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega12);
1028
1029 return 0;
1030}
1031
1032static int vega12_enable_se_edc_config(struct pp_hwmgr *hwmgr)
1033{
1034 int result;
1035 uint32_t num_se = 0, count, data;
1036 struct amdgpu_device *adev = hwmgr->adev;
1037 uint32_t reg;
1038
1039 num_se = adev->gfx.config.max_shader_engines;
1040
1041 cgs_enter_safe_mode(hwmgr->device, true);
1042
1043 cgs_lock_grbm_idx(hwmgr->device, true);
1044 reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
1045 for (count = 0; count < num_se; count++) {
1046 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
1047 cgs_write_register(hwmgr->device, reg, data);
1048 result = vega12_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega12, VEGA12_CONFIGREG_DIDT);
1049 result |= vega12_program_didt_config_registers(hwmgr, SEEDCStallPatternConfig_Vega12, VEGA12_CONFIGREG_DIDT);
1050 result |= vega12_program_didt_config_registers(hwmgr, SEEDCStallDelayConfig_Vega12, VEGA12_CONFIGREG_DIDT);
1051 result |= vega12_program_didt_config_registers(hwmgr, SEEDCThresholdConfig_Vega12, VEGA12_CONFIGREG_DIDT);
1052 result |= vega12_program_didt_config_registers(hwmgr, SEEDCCtrlResetConfig_Vega12, VEGA12_CONFIGREG_DIDT);
1053 result |= vega12_program_didt_config_registers(hwmgr, SEEDCCtrlConfig_Vega12, VEGA12_CONFIGREG_DIDT);
1054
1055 if (0 != result)
1056 break;
1057 }
1058 cgs_write_register(hwmgr->device, reg, 0xE0000000);
1059 cgs_lock_grbm_idx(hwmgr->device, false);
1060
1061 vega12_didt_set_mask(hwmgr, true);
1062
1063 cgs_enter_safe_mode(hwmgr->device, false);
1064
1065 return 0;
1066}
1067
1068static int vega12_disable_se_edc_config(struct pp_hwmgr *hwmgr)
1069{
1070 cgs_enter_safe_mode(hwmgr->device, true);
1071
1072 vega12_didt_set_mask(hwmgr, false);
1073
1074 cgs_enter_safe_mode(hwmgr->device, false);
1075
1076 return 0;
1077}
1078
1079static int vega12_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
1080{
1081 int result;
1082 uint32_t num_se = 0;
1083 uint32_t count, data;
1084 struct amdgpu_device *adev = hwmgr->adev;
1085 uint32_t reg;
1086
1087 num_se = adev->gfx.config.max_shader_engines;
1088
1089 cgs_enter_safe_mode(hwmgr->device, true);
1090
1091 vega12_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega12);
1092
1093 cgs_lock_grbm_idx(hwmgr->device, true);
1094 reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
1095 for (count = 0; count < num_se; count++) {
1096 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
1097 cgs_write_register(hwmgr->device, reg, data);
1098		result = vega12_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega12, VEGA12_CONFIGREG_DIDT);
1099 result |= vega12_program_didt_config_registers(hwmgr, PSMSEEDCStallDelayConfig_Vega12, VEGA12_CONFIGREG_DIDT);
1100 result |= vega12_program_didt_config_registers(hwmgr, PSMSEEDCCtrlResetConfig_Vega12, VEGA12_CONFIGREG_DIDT);
1101 result |= vega12_program_didt_config_registers(hwmgr, PSMSEEDCCtrlConfig_Vega12, VEGA12_CONFIGREG_DIDT);
1102
1103 if (0 != result)
1104 break;
1105 }
1106 cgs_write_register(hwmgr->device, reg, 0xE0000000);
1107 cgs_lock_grbm_idx(hwmgr->device, false);
1108
1109 vega12_didt_set_mask(hwmgr, true);
1110
1111 cgs_enter_safe_mode(hwmgr->device, false);
1112
1113 vega12_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega12);
1114
1115 if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
1116 vega12_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlResetConfig_vega12);
1117 vega12_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlConfig_vega12);
1118 }
1119
1120 if (PP_CAP(PHM_PlatformCaps_PSM))
1121 vega12_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega12);
1122
1123 return 0;
1124}
1125
1126static int vega12_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
1127{
1128 uint32_t data;
1129
1130 cgs_enter_safe_mode(hwmgr->device, true);
1131
1132 vega12_didt_set_mask(hwmgr, false);
1133
1134 cgs_enter_safe_mode(hwmgr->device, false);
1135
1136 if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
1137 data = 0x00000000;
1138 cgs_write_register(hwmgr->device, mmGC_EDC_CTRL, data);
1139 }
1140
1141 if (PP_CAP(PHM_PlatformCaps_PSM))
1142 vega12_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega12);
1143
1144 return 0;
1145}
1146
1147static int vega12_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
1148{
1149 uint32_t reg;
1150 int result;
1151
1152 cgs_enter_safe_mode(hwmgr->device, true);
1153
1154 cgs_lock_grbm_idx(hwmgr->device, true);
1155 reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
1156 cgs_write_register(hwmgr->device, reg, 0xE0000000);
1157 cgs_lock_grbm_idx(hwmgr->device, false);
1158
1159 result = vega12_program_didt_config_registers(hwmgr, SEEDCForceStallPatternConfig_Vega12, VEGA12_CONFIGREG_DIDT);
1160 result |= vega12_program_didt_config_registers(hwmgr, SEEDCCtrlForceStallConfig_Vega12, VEGA12_CONFIGREG_DIDT);
1161 if (0 != result)
1162 return result;
1163
1164 vega12_didt_set_mask(hwmgr, false);
1165
1166 cgs_enter_safe_mode(hwmgr->device, false);
1167
1168 return 0;
1169}
1170
1171static int vega12_disable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
1172{
1173 int result;
1174
1175 result = vega12_disable_se_edc_config(hwmgr);
1176	PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDtForceStall] Attempt to disable SE EDC config Failed!", return result);
1177
1178 return 0;
1179}
1180
1181int vega12_enable_didt_config(struct pp_hwmgr *hwmgr)
1182{
1183 int result = 0;
1184 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1185
1186 if (data->smu_features[GNLD_DIDT].supported) {
1187 if (data->smu_features[GNLD_DIDT].enabled)
1188 PP_DBG_LOG("[EnableDiDtConfig] Feature DiDt Already enabled!\n");
1189
1190 switch (data->registry_data.didt_mode) {
1191 case 0:
1192 result = vega12_enable_cac_driving_se_didt_config(hwmgr);
1193 PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 0 Failed!", return result);
1194 break;
1195 case 2:
1196 result = vega12_enable_psm_gc_didt_config(hwmgr);
1197 PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 2 Failed!", return result);
1198 break;
1199 case 3:
1200 result = vega12_enable_se_edc_config(hwmgr);
1201 PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 3 Failed!", return result);
1202 break;
1203 case 1:
1204 case 4:
1205 case 5:
1206 result = vega12_enable_psm_gc_edc_config(hwmgr);
1207			PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 1/4/5 Failed!", return result);
1208 break;
1209 case 6:
1210 result = vega12_enable_se_edc_force_stall_config(hwmgr);
1211 PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 6 Failed!", return result);
1212 break;
1213 default:
1214 result = -EINVAL;
1215 break;
1216 }
1217
1218#if 0
1219 if (0 == result) {
1220 result = vega12_enable_smc_features(hwmgr, true, data->smu_features[GNLD_DIDT].smu_feature_bitmap);
1221 PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDtConfig] Attempt to Enable DiDt feature Failed!", return result);
1222 data->smu_features[GNLD_DIDT].enabled = true;
1223 }
1224#endif
1225 }
1226
1227 return result;
1228}
1229
1230int vega12_disable_didt_config(struct pp_hwmgr *hwmgr)
1231{
1232 int result = 0;
1233 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1234
1235 if (data->smu_features[GNLD_DIDT].supported) {
1236 if (!data->smu_features[GNLD_DIDT].enabled)
1237 PP_DBG_LOG("[DisableDiDtConfig] Feature DiDt Already Disabled!\n");
1238
1239 switch (data->registry_data.didt_mode) {
1240 case 0:
1241 result = vega12_disable_cac_driving_se_didt_config(hwmgr);
1242 PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 0 Failed!", return result);
1243 break;
1244 case 2:
1245 result = vega12_disable_psm_gc_didt_config(hwmgr);
1246 PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 2 Failed!", return result);
1247 break;
1248 case 3:
1249 result = vega12_disable_se_edc_config(hwmgr);
1250 PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 3 Failed!", return result);
1251 break;
1252 case 1:
1253 case 4:
1254 case 5:
1255 result = vega12_disable_psm_gc_edc_config(hwmgr);
1256			PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 1/4/5 Failed!", return result);
1257 break;
1258 case 6:
1259 result = vega12_disable_se_edc_force_stall_config(hwmgr);
1260 PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 6 Failed!", return result);
1261 break;
1262 default:
1263 result = -EINVAL;
1264 break;
1265 }
1266
1267 if (0 == result) {
1268 result = vega12_enable_smc_features(hwmgr, false, data->smu_features[GNLD_DIDT].smu_feature_bitmap);
1269 PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDtConfig] Attempt to Disable DiDt feature Failed!", return result);
1270 data->smu_features[GNLD_DIDT].enabled = false;
1271 }
1272 }
1273
1274 return result;
1275}
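For reference, the registry_data.didt_mode values dispatched by vega12_enable_didt_config() and vega12_disable_didt_config() above map to the following configurations, as read directly from the switch statements:

	/*
	 * didt_mode 0      -> CAC-driven SE DiDt   (vega12_*_cac_driving_se_didt_config)
	 * didt_mode 2      -> PSM GC DiDt          (vega12_*_psm_gc_didt_config)
	 * didt_mode 3      -> SE EDC               (vega12_*_se_edc_config)
	 * didt_mode 1/4/5  -> PSM GC EDC           (vega12_*_psm_gc_edc_config)
	 * didt_mode 6      -> SE EDC forced stall  (vega12_*_se_edc_force_stall_config)
	 */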
1276
1277int vega12_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
1278{
1279 struct vega12_hwmgr *data =
1280 (struct vega12_hwmgr *)(hwmgr->backend);
1281
1282 if (data->smu_features[GNLD_PPT].enabled)
1283 return smum_send_msg_to_smc_with_parameter(hwmgr,
1284 PPSMC_MSG_SetPptLimit, n);
1285
1286 return 0;
1287}
1288
1289int vega12_enable_power_containment(struct pp_hwmgr *hwmgr)
1290{
1291 struct vega12_hwmgr *data =
1292 (struct vega12_hwmgr *)(hwmgr->backend);
1293 struct phm_ppt_v2_information *table_info =
1294 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1295 struct phm_tdp_table *tdp_table = table_info->tdp_table;
1296 uint32_t default_pwr_limit =
1297 (uint32_t)(tdp_table->usMaximumPowerDeliveryLimit);
1298 int result = 0;
1299
1300 if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
1301 if (data->smu_features[GNLD_PPT].supported)
1302 PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
1303 true, data->smu_features[GNLD_PPT].smu_feature_bitmap),
1304 "Attempt to enable PPT feature Failed!",
1305 data->smu_features[GNLD_PPT].supported = false);
1306
1307 if (data->smu_features[GNLD_TDC].supported)
1308 PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
1309 true, data->smu_features[GNLD_TDC].smu_feature_bitmap),
1310				"Attempt to enable TDC feature Failed!",
1311 data->smu_features[GNLD_TDC].supported = false);
1312
1313 result = vega12_set_power_limit(hwmgr, default_pwr_limit);
1314 PP_ASSERT_WITH_CODE(!result,
1315 "Failed to set Default Power Limit in SMC!",
1316 return result);
1317 }
1318
1319 return result;
1320}
1321
1322int vega12_disable_power_containment(struct pp_hwmgr *hwmgr)
1323{
1324 struct vega12_hwmgr *data =
1325 (struct vega12_hwmgr *)(hwmgr->backend);
1326
1327 if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
1328 if (data->smu_features[GNLD_PPT].supported)
1329 PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
1330 false, data->smu_features[GNLD_PPT].smu_feature_bitmap),
1331 "Attempt to disable PPT feature Failed!",
1332 data->smu_features[GNLD_PPT].supported = false);
1333
1334 if (data->smu_features[GNLD_TDC].supported)
1335 PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
1336 false, data->smu_features[GNLD_TDC].smu_feature_bitmap),
1337				"Attempt to disable TDC feature Failed!",
1338 data->smu_features[GNLD_TDC].supported = false);
1339 }
1340
1341 return 0;
1342}
1343
1344static int vega12_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
1345 uint32_t adjust_percent)
1346{
1347 return smum_send_msg_to_smc_with_parameter(hwmgr,
1348 PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
1349}
1350
1351int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
1352{
1353 int adjust_percent, result = 0;
1354
1355 if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
1356 adjust_percent =
1357 hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
1358 hwmgr->platform_descriptor.TDPAdjustment :
1359 (-1 * hwmgr->platform_descriptor.TDPAdjustment);
1360 result = vega12_set_overdrive_target_percentage(hwmgr,
1361 (uint32_t)adjust_percent);
1362 }
1363 return result;
1364}
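As a worked example of the adjustment in vega12_power_control_set_level(): with TDPAdjustment = 10 and TDPAdjustmentPolarity = 1 the overdrive target sent to the SMC is +10 percent, while polarity 0 yields -10 percent; the signed value is cast to uint32_t when handed to vega12_set_overdrive_target_percentage(), which forwards it via PPSMC_MSG_OverDriveSetPercentage.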
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.h
new file mode 100644
index 000000000000..78d31a6747dd
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.h
@@ -0,0 +1,53 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef _VEGA12_POWERTUNE_H_
24#define _VEGA12_POWERTUNE_H_
25
26enum vega12_didt_config_reg_type {
27 VEGA12_CONFIGREG_DIDT = 0,
28 VEGA12_CONFIGREG_GCCAC,
29 VEGA12_CONFIGREG_SECAC
30};
31
32/* PowerContainment Features */
33#define POWERCONTAINMENT_FEATURE_DTE 0x00000001
34#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
35#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
36
37struct vega12_didt_config_reg {
38 uint32_t offset;
39 uint32_t mask;
40 uint32_t shift;
41 uint32_t value;
42};
43
44int vega12_enable_power_containment(struct pp_hwmgr *hwmgr);
45int vega12_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
46int vega12_power_control_set_level(struct pp_hwmgr *hwmgr);
47int vega12_disable_power_containment(struct pp_hwmgr *hwmgr);
48
49int vega12_enable_didt_config(struct pp_hwmgr *hwmgr);
50int vega12_disable_didt_config(struct pp_hwmgr *hwmgr);
51
52#endif /* _VEGA12_POWERTUNE_H_ */
53
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
new file mode 100644
index 000000000000..e7d794980b84
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
@@ -0,0 +1,430 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/module.h>
24#include <linux/slab.h>
25#include <linux/fb.h>
26
27#include "vega12/smu9_driver_if.h"
28#include "vega12_processpptables.h"
29#include "ppatomfwctrl.h"
30#include "atomfirmware.h"
31#include "pp_debug.h"
32#include "cgs_common.h"
33#include "vega12_pptable.h"
34
35static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable,
36 enum phm_platform_caps cap)
37{
38 if (enable)
39 phm_cap_set(hwmgr->platform_descriptor.platformCaps, cap);
40 else
41 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, cap);
42}
43
44static const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
45{
46 int index = GetIndexIntoMasterDataTable(powerplayinfo);
47
48 u16 size;
49 u8 frev, crev;
50 const void *table_address = hwmgr->soft_pp_table;
51
52 if (!table_address) {
53 table_address = (ATOM_Vega12_POWERPLAYTABLE *)
54 cgs_atom_get_data_table(hwmgr->device, index,
55 &size, &frev, &crev);
56
57 hwmgr->soft_pp_table = table_address; /*Cache the result in RAM.*/
58 hwmgr->soft_pp_table_size = size;
59 }
60
61 return table_address;
62}
63
64static int check_powerplay_tables(
65 struct pp_hwmgr *hwmgr,
66 const ATOM_Vega12_POWERPLAYTABLE *powerplay_table)
67{
68 PP_ASSERT_WITH_CODE((powerplay_table->sHeader.format_revision >=
69 ATOM_VEGA12_TABLE_REVISION_VEGA12),
70 "Unsupported PPTable format!", return -1);
71 PP_ASSERT_WITH_CODE(powerplay_table->sHeader.structuresize > 0,
72 "Invalid PowerPlay Table!", return -1);
73
74 return 0;
75}
76
77static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps)
78{
79 set_hw_cap(
80 hwmgr,
81 0 != (powerplay_caps & ATOM_VEGA12_PP_PLATFORM_CAP_POWERPLAY),
82 PHM_PlatformCaps_PowerPlaySupport);
83
84 set_hw_cap(
85 hwmgr,
86 0 != (powerplay_caps & ATOM_VEGA12_PP_PLATFORM_CAP_SBIOSPOWERSOURCE),
87 PHM_PlatformCaps_BiosPowerSourceControl);
88
89 set_hw_cap(
90 hwmgr,
91 0 != (powerplay_caps & ATOM_VEGA12_PP_PLATFORM_CAP_BACO),
92 PHM_PlatformCaps_BACO);
93
94 set_hw_cap(
95 hwmgr,
96 0 != (powerplay_caps & ATOM_VEGA12_PP_PLATFORM_CAP_BAMACO),
97 PHM_PlatformCaps_BAMACO);
98
99 return 0;
100}
101
102static int copy_clock_limits_array(
103 struct pp_hwmgr *hwmgr,
104 uint32_t **pptable_info_array,
105 const uint32_t *pptable_array)
106{
107 uint32_t array_size, i;
108 uint32_t *table;
109
110 array_size = sizeof(uint32_t) * ATOM_VEGA12_PPCLOCK_COUNT;
111
112 table = kzalloc(array_size, GFP_KERNEL);
113 if (NULL == table)
114 return -ENOMEM;
115
116 for (i = 0; i < ATOM_VEGA12_PPCLOCK_COUNT; i++)
117 table[i] = pptable_array[i];
118
119 *pptable_info_array = table;
120
121 return 0;
122}
123
124static int copy_overdrive_settings_limits_array(
125 struct pp_hwmgr *hwmgr,
126 uint32_t **pptable_info_array,
127 const uint32_t *pptable_array)
128{
129 uint32_t array_size, i;
130 uint32_t *table;
131
132 array_size = sizeof(uint32_t) * ATOM_VEGA12_ODSETTING_COUNT;
133
134 table = kzalloc(array_size, GFP_KERNEL);
135 if (NULL == table)
136 return -ENOMEM;
137
138 for (i = 0; i < ATOM_VEGA12_ODSETTING_COUNT; i++)
139 table[i] = pptable_array[i];
140
141 *pptable_info_array = table;
142
143 return 0;
144}
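copy_clock_limits_array() and copy_overdrive_settings_limits_array() differ only in the element count, so they could plausibly be collapsed into a single helper built on kmemdup(); a hedged sketch (the helper name is illustrative, not part of this patch):

	/* Illustrative only: duplicate a fixed-size uint32_t limits array. */
	static int copy_u32_array(uint32_t **dst, const uint32_t *src, uint32_t count)
	{
		*dst = kmemdup(src, count * sizeof(uint32_t), GFP_KERNEL);

		return *dst ? 0 : -ENOMEM;
	}

With such a helper, the two call sites would pass ATOM_VEGA12_PPCLOCK_COUNT and ATOM_VEGA12_ODSETTING_COUNT respectively as the count argument.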
145
146static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable)
147{
148 struct pp_atomfwctrl_smc_dpm_parameters smc_dpm_table;
149
150 PP_ASSERT_WITH_CODE(
151 pp_atomfwctrl_get_smc_dpm_information(hwmgr, &smc_dpm_table) == 0,
152 "[appendVbiosPPTable] Failed to retrieve Smc Dpm Table from VBIOS!",
153 return -1);
154
155 ppsmc_pptable->Liquid1_I2C_address = smc_dpm_table.liquid1_i2c_address;
156 ppsmc_pptable->Liquid2_I2C_address = smc_dpm_table.liquid2_i2c_address;
157 ppsmc_pptable->Vr_I2C_address = smc_dpm_table.vr_i2c_address;
158 ppsmc_pptable->Plx_I2C_address = smc_dpm_table.plx_i2c_address;
159
160 ppsmc_pptable->Liquid_I2C_LineSCL = smc_dpm_table.liquid_i2c_linescl;
161 ppsmc_pptable->Liquid_I2C_LineSDA = smc_dpm_table.liquid_i2c_linesda;
162 ppsmc_pptable->Vr_I2C_LineSCL = smc_dpm_table.vr_i2c_linescl;
163 ppsmc_pptable->Vr_I2C_LineSDA = smc_dpm_table.vr_i2c_linesda;
164
165 ppsmc_pptable->Plx_I2C_LineSCL = smc_dpm_table.plx_i2c_linescl;
166 ppsmc_pptable->Plx_I2C_LineSDA = smc_dpm_table.plx_i2c_linesda;
167 ppsmc_pptable->VrSensorPresent = smc_dpm_table.vrsensorpresent;
168 ppsmc_pptable->LiquidSensorPresent = smc_dpm_table.liquidsensorpresent;
169
170 ppsmc_pptable->MaxVoltageStepGfx = smc_dpm_table.maxvoltagestepgfx;
171 ppsmc_pptable->MaxVoltageStepSoc = smc_dpm_table.maxvoltagestepsoc;
172
173 ppsmc_pptable->VddGfxVrMapping = smc_dpm_table.vddgfxvrmapping;
174 ppsmc_pptable->VddSocVrMapping = smc_dpm_table.vddsocvrmapping;
175 ppsmc_pptable->VddMem0VrMapping = smc_dpm_table.vddmem0vrmapping;
176 ppsmc_pptable->VddMem1VrMapping = smc_dpm_table.vddmem1vrmapping;
177
178 ppsmc_pptable->GfxUlvPhaseSheddingMask = smc_dpm_table.gfxulvphasesheddingmask;
179 ppsmc_pptable->SocUlvPhaseSheddingMask = smc_dpm_table.soculvphasesheddingmask;
180
181 ppsmc_pptable->GfxMaxCurrent = smc_dpm_table.gfxmaxcurrent;
182 ppsmc_pptable->GfxOffset = smc_dpm_table.gfxoffset;
183 ppsmc_pptable->Padding_TelemetryGfx = smc_dpm_table.padding_telemetrygfx;
184
185 ppsmc_pptable->SocMaxCurrent = smc_dpm_table.socmaxcurrent;
186 ppsmc_pptable->SocOffset = smc_dpm_table.socoffset;
187 ppsmc_pptable->Padding_TelemetrySoc = smc_dpm_table.padding_telemetrysoc;
188
189 ppsmc_pptable->Mem0MaxCurrent = smc_dpm_table.mem0maxcurrent;
190 ppsmc_pptable->Mem0Offset = smc_dpm_table.mem0offset;
191 ppsmc_pptable->Padding_TelemetryMem0 = smc_dpm_table.padding_telemetrymem0;
192
193 ppsmc_pptable->Mem1MaxCurrent = smc_dpm_table.mem1maxcurrent;
194 ppsmc_pptable->Mem1Offset = smc_dpm_table.mem1offset;
195 ppsmc_pptable->Padding_TelemetryMem1 = smc_dpm_table.padding_telemetrymem1;
196
197 ppsmc_pptable->AcDcGpio = smc_dpm_table.acdcgpio;
198 ppsmc_pptable->AcDcPolarity = smc_dpm_table.acdcpolarity;
199 ppsmc_pptable->VR0HotGpio = smc_dpm_table.vr0hotgpio;
200 ppsmc_pptable->VR0HotPolarity = smc_dpm_table.vr0hotpolarity;
201
202 ppsmc_pptable->VR1HotGpio = smc_dpm_table.vr1hotgpio;
203 ppsmc_pptable->VR1HotPolarity = smc_dpm_table.vr1hotpolarity;
204 ppsmc_pptable->Padding1 = smc_dpm_table.padding1;
205 ppsmc_pptable->Padding2 = smc_dpm_table.padding2;
206
207 ppsmc_pptable->LedPin0 = smc_dpm_table.ledpin0;
208 ppsmc_pptable->LedPin1 = smc_dpm_table.ledpin1;
209 ppsmc_pptable->LedPin2 = smc_dpm_table.ledpin2;
210
211 ppsmc_pptable->GfxclkSpreadEnabled = smc_dpm_table.gfxclkspreadenabled;
212 ppsmc_pptable->GfxclkSpreadPercent = smc_dpm_table.gfxclkspreadpercent;
213 ppsmc_pptable->GfxclkSpreadFreq = smc_dpm_table.gfxclkspreadfreq;
214
215 ppsmc_pptable->UclkSpreadEnabled = 0;
216 ppsmc_pptable->UclkSpreadPercent = smc_dpm_table.uclkspreadpercent;
217 ppsmc_pptable->UclkSpreadFreq = smc_dpm_table.uclkspreadfreq;
218
219 ppsmc_pptable->SocclkSpreadEnabled = 0;
220 ppsmc_pptable->SocclkSpreadPercent = smc_dpm_table.socclkspreadpercent;
221 ppsmc_pptable->SocclkSpreadFreq = smc_dpm_table.socclkspreadfreq;
222
223 return 0;
224}
225
226#define VEGA12_ENGINECLOCK_HARDMAX 198000
227static int init_powerplay_table_information(
228 struct pp_hwmgr *hwmgr,
229 const ATOM_Vega12_POWERPLAYTABLE *powerplay_table)
230{
231 struct phm_ppt_v3_information *pptable_information =
232 (struct phm_ppt_v3_information *)hwmgr->pptable;
233 uint32_t disable_power_control = 0;
234 int result;
235
236 hwmgr->thermal_controller.ucType = powerplay_table->ucThermalControllerType;
237 pptable_information->uc_thermal_controller_type = powerplay_table->ucThermalControllerType;
238
239 set_hw_cap(hwmgr,
240 ATOM_VEGA12_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType,
241 PHM_PlatformCaps_ThermalController);
242
243 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
244
245 if (powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_GFXCLKFMAX] > VEGA12_ENGINECLOCK_HARDMAX)
246 hwmgr->platform_descriptor.overdriveLimit.engineClock = VEGA12_ENGINECLOCK_HARDMAX;
247 else
248 hwmgr->platform_descriptor.overdriveLimit.engineClock = powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_GFXCLKFMAX];
249 hwmgr->platform_descriptor.overdriveLimit.memoryClock = powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_UCLKFMAX];
250
251 copy_overdrive_settings_limits_array(hwmgr, &pptable_information->od_settings_max, powerplay_table->ODSettingsMax);
252 copy_overdrive_settings_limits_array(hwmgr, &pptable_information->od_settings_min, powerplay_table->ODSettingsMin);
253
254 /* hwmgr->platformDescriptor.minOverdriveVDDC = 0;
255 hwmgr->platformDescriptor.maxOverdriveVDDC = 0;
256 hwmgr->platformDescriptor.overdriveVDDCStep = 0; */
257
258 if (hwmgr->platform_descriptor.overdriveLimit.engineClock > 0
259 && hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0)
260 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ACOverdriveSupport);
261
262 pptable_information->us_small_power_limit1 = powerplay_table->usSmallPowerLimit1;
263 pptable_information->us_small_power_limit2 = powerplay_table->usSmallPowerLimit2;
264 pptable_information->us_boost_power_limit = powerplay_table->usBoostPowerLimit;
265 pptable_information->us_od_turbo_power_limit = powerplay_table->usODTurboPowerLimit;
266 pptable_information->us_od_powersave_power_limit = powerplay_table->usODPowerSavePowerLimit;
267
268 pptable_information->us_software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp;
269
270 hwmgr->platform_descriptor.TDPODLimit = (uint16_t)powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_POWERPERCENTAGE];
271
272 disable_power_control = 0;
273 if (!disable_power_control) {
274 /* enable TDP overdrive (PowerControl) feature as well if supported */
275 if (hwmgr->platform_descriptor.TDPODLimit)
276 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
277 PHM_PlatformCaps_PowerControl);
278 }
279
280 copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_max, powerplay_table->PowerSavingClockMax);
281 copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_min, powerplay_table->PowerSavingClockMin);
282
283 pptable_information->smc_pptable = (PPTable_t *)kmalloc(sizeof(PPTable_t), GFP_KERNEL);
284 if (pptable_information->smc_pptable == NULL)
285 return -ENOMEM;
286
287 memcpy(pptable_information->smc_pptable, &(powerplay_table->smcPPTable), sizeof(PPTable_t));
288
289 result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable));
290
291 return result;
292}
293
294int vega12_pp_tables_initialize(struct pp_hwmgr *hwmgr)
295{
296 int result = 0;
297 const ATOM_Vega12_POWERPLAYTABLE *powerplay_table;
298
299 hwmgr->pptable = kzalloc(sizeof(struct phm_ppt_v3_information), GFP_KERNEL);
300 PP_ASSERT_WITH_CODE((hwmgr->pptable != NULL),
301 "Failed to allocate hwmgr->pptable!", return -ENOMEM);
302
303 powerplay_table = get_powerplay_table(hwmgr);
304 PP_ASSERT_WITH_CODE((powerplay_table != NULL),
305 "Missing PowerPlay Table!", return -1);
306
307 result = check_powerplay_tables(hwmgr, powerplay_table);
308 PP_ASSERT_WITH_CODE((result == 0),
309 "check_powerplay_tables failed", return result);
310
311 result = set_platform_caps(hwmgr,
312 le32_to_cpu(powerplay_table->ulPlatformCaps));
313 PP_ASSERT_WITH_CODE((result == 0),
314 "set_platform_caps failed", return result);
315
316 result = init_powerplay_table_information(hwmgr, powerplay_table);
317 PP_ASSERT_WITH_CODE((result == 0),
318 "init_powerplay_table_information failed", return result);
319
320 return result;
321}
322
323static int vega12_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
324{
325 struct phm_ppt_v3_information *pp_table_info =
326 (struct phm_ppt_v3_information *)(hwmgr->pptable);
327
328 kfree(pp_table_info->power_saving_clock_max);
329 pp_table_info->power_saving_clock_max = NULL;
330
331 kfree(pp_table_info->power_saving_clock_min);
332 pp_table_info->power_saving_clock_min = NULL;
333
334 kfree(pp_table_info->od_settings_max);
335 pp_table_info->od_settings_max = NULL;
336
337 kfree(pp_table_info->od_settings_min);
338 pp_table_info->od_settings_min = NULL;
339
340 kfree(pp_table_info->smc_pptable);
341 pp_table_info->smc_pptable = NULL;
342
343 kfree(hwmgr->pptable);
344 hwmgr->pptable = NULL;
345
346 return 0;
347}
348
349const struct pp_table_func vega12_pptable_funcs = {
350 .pptable_init = vega12_pp_tables_initialize,
351 .pptable_fini = vega12_pp_tables_uninitialize,
352};
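vega12_pptable_funcs is the table the hwmgr front end is expected to hook up; a minimal usage sketch, assuming the pptable_func field name from hwmgr.h (the actual wiring is not part of this hunk):

	/* Sketch: how a hwmgr setup path might use the table above. */
	static int example_init_pptable(struct pp_hwmgr *hwmgr)
	{
		hwmgr->pptable_func = &vega12_pptable_funcs;	/* field name assumed */

		return hwmgr->pptable_func->pptable_init(hwmgr);
	}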
353
354#if 0
355static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr,
356 uint16_t classification, uint16_t classification2)
357{
358 uint32_t result = 0;
359
360 if (classification & ATOM_PPLIB_CLASSIFICATION_BOOT)
361 result |= PP_StateClassificationFlag_Boot;
362
363 if (classification & ATOM_PPLIB_CLASSIFICATION_THERMAL)
364 result |= PP_StateClassificationFlag_Thermal;
365
366 if (classification & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
367 result |= PP_StateClassificationFlag_LimitedPowerSource;
368
369 if (classification & ATOM_PPLIB_CLASSIFICATION_REST)
370 result |= PP_StateClassificationFlag_Rest;
371
372 if (classification & ATOM_PPLIB_CLASSIFICATION_FORCED)
373 result |= PP_StateClassificationFlag_Forced;
374
375 if (classification & ATOM_PPLIB_CLASSIFICATION_ACPI)
376 result |= PP_StateClassificationFlag_ACPI;
377
378 if (classification2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
379 result |= PP_StateClassificationFlag_LimitedPowerSource_2;
380
381 return result;
382}
383
384int vega12_get_powerplay_table_entry(struct pp_hwmgr *hwmgr,
385 uint32_t entry_index, struct pp_power_state *power_state,
386 int (*call_back_func)(struct pp_hwmgr *, void *,
387 struct pp_power_state *, void *, uint32_t))
388{
389 int result = 0;
390 const ATOM_Vega12_State_Array *state_arrays;
391 const ATOM_Vega12_State *state_entry;
392 const ATOM_Vega12_POWERPLAYTABLE *pp_table =
393 get_powerplay_table(hwmgr);
394
395 PP_ASSERT_WITH_CODE(pp_table, "Missing PowerPlay Table!",
396 return -1;);
397 power_state->classification.bios_index = entry_index;
398
399 if (pp_table->sHeader.format_revision >=
400 ATOM_Vega12_TABLE_REVISION_VEGA12) {
401 state_arrays = (ATOM_Vega12_State_Array *)
402 (((unsigned long)pp_table) +
403 le16_to_cpu(pp_table->usStateArrayOffset));
404
405 PP_ASSERT_WITH_CODE(pp_table->usStateArrayOffset > 0,
406 "Invalid PowerPlay Table State Array Offset.",
407 return -1);
408 PP_ASSERT_WITH_CODE(state_arrays->ucNumEntries > 0,
409 "Invalid PowerPlay Table State Array.",
410 return -1);
411 PP_ASSERT_WITH_CODE((entry_index <= state_arrays->ucNumEntries),
412 "Invalid PowerPlay Table State Array Entry.",
413 return -1);
414
415 state_entry = &(state_arrays->states[entry_index]);
416
417 result = call_back_func(hwmgr, (void *)state_entry, power_state,
418 (void *)pp_table,
419 make_classification_flags(hwmgr,
420 le16_to_cpu(state_entry->usClassification),
421 le16_to_cpu(state_entry->usClassification2)));
422 }
423
424 if (!result && (power_state->classification.flags &
425 PP_StateClassificationFlag_Boot))
426 result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(power_state->hardware));
427
428 return result;
429}
430#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.h
new file mode 100644
index 000000000000..65652ae65929
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.h
@@ -0,0 +1,58 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef VEGA12_PROCESSPPTABLES_H
25#define VEGA12_PROCESSPPTABLES_H
26
27#include "hwmgr.h"
28
29enum Vega12_I2CLineID {
30 Vega12_I2CLineID_DDC1 = 0x90,
31 Vega12_I2CLineID_DDC2 = 0x91,
32 Vega12_I2CLineID_DDC3 = 0x92,
33 Vega12_I2CLineID_DDC4 = 0x93,
34 Vega12_I2CLineID_DDC5 = 0x94,
35 Vega12_I2CLineID_DDC6 = 0x95,
36 Vega12_I2CLineID_SCLSDA = 0x96,
37 Vega12_I2CLineID_DDCVGA = 0x97
38};
39
40#define Vega12_I2C_DDC1DATA 0
41#define Vega12_I2C_DDC1CLK 1
42#define Vega12_I2C_DDC2DATA 2
43#define Vega12_I2C_DDC2CLK 3
44#define Vega12_I2C_DDC3DATA 4
45#define Vega12_I2C_DDC3CLK 5
46#define Vega12_I2C_SDA 40
47#define Vega12_I2C_SCL 41
48#define Vega12_I2C_DDC4DATA 65
49#define Vega12_I2C_DDC4CLK 66
50#define Vega12_I2C_DDC5DATA 0x48
51#define Vega12_I2C_DDC5CLK 0x49
52#define Vega12_I2C_DDC6DATA 0x4a
53#define Vega12_I2C_DDC6CLK 0x4b
54#define Vega12_I2C_DDCVGADATA 0x4c
55#define Vega12_I2C_DDCVGACLK 0x4d
56
57extern const struct pp_table_func vega12_pptable_funcs;
58#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
new file mode 100644
index 000000000000..df0fa815cd6e
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
@@ -0,0 +1,324 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "vega12_thermal.h"
25#include "vega12_hwmgr.h"
26#include "vega12_smumgr.h"
27#include "vega12_ppsmc.h"
28#include "vega12_inc.h"
29#include "pp_soc15.h"
30#include "pp_debug.h"
31
32static int vega12_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
33{
34 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
35 PPSMC_MSG_GetCurrentRpm),
36 "Attempt to get current RPM from SMC Failed!",
37 return -1);
38 PP_ASSERT_WITH_CODE(!vega12_read_arg_from_smc(hwmgr,
39 current_rpm),
40 "Attempt to read current RPM from SMC Failed!",
41 return -1);
42 return 0;
43}
44
45int vega12_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
46 struct phm_fan_speed_info *fan_speed_info)
47{
48 memset(fan_speed_info, 0, sizeof(*fan_speed_info));
49 fan_speed_info->supports_percent_read = false;
50 fan_speed_info->supports_percent_write = false;
51 fan_speed_info->supports_rpm_read = true;
52 fan_speed_info->supports_rpm_write = true;
53
54 return 0;
55}
56
57int vega12_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
58{
59 *speed = 0;
60
61 return vega12_get_current_rpm(hwmgr, speed);
62}
63
64/**
65 * @fn vega12_enable_fan_control_feature
66 * @brief Enables the SMC Fan Control Feature.
67 *
68 * @param hwmgr - the address of the powerplay hardware manager.
69 * @return 0 on success. -1 otherwise.
70 */
71static int vega12_enable_fan_control_feature(struct pp_hwmgr *hwmgr)
72{
73#if 0
74 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
75
76 if (data->smu_features[GNLD_FAN_CONTROL].supported) {
77 PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(
78 hwmgr, true,
79 data->smu_features[GNLD_FAN_CONTROL].
80 smu_feature_bitmap),
81 "Attempt to Enable FAN CONTROL feature Failed!",
82 return -1);
83 data->smu_features[GNLD_FAN_CONTROL].enabled = true;
84 }
85#endif
86 return 0;
87}
88
89static int vega12_disable_fan_control_feature(struct pp_hwmgr *hwmgr)
90{
91#if 0
92 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
93
94 if (data->smu_features[GNLD_FAN_CONTROL].supported) {
95 PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(
96 hwmgr, false,
97 data->smu_features[GNLD_FAN_CONTROL].
98 smu_feature_bitmap),
99 "Attempt to Enable FAN CONTROL feature Failed!",
100 return -1);
101 data->smu_features[GNLD_FAN_CONTROL].enabled = false;
102 }
103#endif
104 return 0;
105}
106
107int vega12_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
108{
109 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
110
111 if (data->smu_features[GNLD_FAN_CONTROL].supported)
112 PP_ASSERT_WITH_CODE(
113 !vega12_enable_fan_control_feature(hwmgr),
114 "Attempt to Enable SMC FAN CONTROL Feature Failed!",
115 return -1);
116
117 return 0;
118}
119
120
121int vega12_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
122{
123 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
124
125 if (data->smu_features[GNLD_FAN_CONTROL].supported)
126 PP_ASSERT_WITH_CODE(!vega12_disable_fan_control_feature(hwmgr),
127 "Attempt to Disable SMC FAN CONTROL Feature Failed!",
128 return -1);
129
130 return 0;
131}
132
133/**
134* Reset Fan Speed to default.
135* @param hwmgr the address of the powerplay hardware manager.
136* @exception Always succeeds.
137*/
138int vega12_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
139{
140 return vega12_fan_ctrl_start_smc_fan_control(hwmgr);
141}
142
143/**
144* Reads the temperature from the Vega12 thermal controller.
145*
146* @param hwmgr The address of the hardware manager.
147*/
148int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr)
149{
150 int temp = 0;
151 uint32_t reg;
152
153 reg = soc15_get_register_offset(THM_HWID, 0,
154 mmCG_MULT_THERMAL_STATUS_BASE_IDX, mmCG_MULT_THERMAL_STATUS);
155
156 temp = cgs_read_register(hwmgr->device, reg);
157
158 temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
159 CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
160
161 temp = temp & 0x1ff;
162
163 temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
164 return temp;
165}
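To make the masking in vega12_thermal_get_temperature() concrete: CTF_TEMP is extracted from CG_MULT_THERMAL_STATUS, truncated to 9 bits (0x1ff), and scaled, so a raw field value of 65 is reported as 65 * PP_TEMPERATURE_UNITS_PER_CENTIGRADES, i.e. millidegrees Celsius if that constant follows the usual powerplay convention of 1000 units per degree.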
166
167/**
168* Set the requested temperature range for high and low alert signals
169*
170* @param hwmgr The address of the hardware manager.
171* @param range Temperature range to be programmed for
172* high and low alert signals
173* @exception PP_Result_BadInput if the input data is not valid.
174*/
175static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
176 struct PP_TemperatureRange *range)
177{
178 int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP *
179 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
180 int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP *
181 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
182 uint32_t val, reg;
183
184 if (low < range->min)
185 low = range->min;
186 if (high > range->max)
187 high = range->max;
188
189 if (low > high)
190 return -EINVAL;
191
192 reg = soc15_get_register_offset(THM_HWID, 0,
193 mmTHM_THERMAL_INT_CTRL_BASE_IDX, mmTHM_THERMAL_INT_CTRL);
194
195 val = cgs_read_register(hwmgr->device, reg);
196
197 val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
198 val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
199 val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
200 val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
201 val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
202
203 cgs_write_register(hwmgr->device, reg, val);
204
205 return 0;
206}
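For example, with range->min = 10000 and range->max = 90000 (millidegrees, assuming the usual 1000-units-per-degree convention), the clamped values become low = 10000 and high = 90000, so DIG_THERM_INTH is programmed with 90 and DIG_THERM_INTL with 10; the THM_THERMAL_INT_CTRL THERM_TRIGGER mask bit is then cleared before the register is written back.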
207
208/**
209* Enable thermal alerts on the Vega12 thermal controller.
210*
211* @param hwmgr The address of the hardware manager.
212*/
213static int vega12_thermal_enable_alert(struct pp_hwmgr *hwmgr)
214{
215 uint32_t val = 0;
216 uint32_t reg;
217
218 val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
219 val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
220 val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
221
222 reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA);
223 cgs_write_register(hwmgr->device, reg, val);
224
225 return 0;
226}
227
228/**
229* Disable thermal alerts on the Vega12 thermal controller.
230* @param hwmgr The address of the hardware manager.
231*/
232int vega12_thermal_disable_alert(struct pp_hwmgr *hwmgr)
233{
234 uint32_t reg;
235
236 reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA);
237 cgs_write_register(hwmgr->device, reg, 0);
238
239 return 0;
240}
241
242/**
243* Uninitialize the thermal controller.
244* Currently just disables alerts.
245* @param hwmgr The address of the hardware manager.
246*/
247int vega12_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
248{
249 int result = vega12_thermal_disable_alert(hwmgr);
250
251 return result;
252}
253
254/**
255* Set up the fan table to control the fan using the SMC.
256* @param hwmgr the address of the powerplay hardware manager.
257* @return result of sending the fan target temperature to the SMC.
262*/
263int vega12_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
264{
265 int ret;
266 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
267 PPTable_t *table = &(data->smc_state_table.pp_table);
268
269 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
270 PPSMC_MSG_SetFanTemperatureTarget,
271 (uint32_t)table->FanTargetTemperature);
272
273 return ret;
274}
275
276/**
277* Start the fan control on the SMC.
278* @param hwmgr the address of the powerplay hardware manager.
279* @return always 0; SMC fan control is only started when
280* PHM_PlatformCaps_MicrocodeFanControl is set.
284*/
285int vega12_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr)
286{
287 /* If the fantable setup has failed we could have disabled
288 * PHM_PlatformCaps_MicrocodeFanControl even after
289 * this function was included in the table.
290 * Make sure that we still think controlling the fan is OK.
291 */
292 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
293 vega12_fan_ctrl_start_smc_fan_control(hwmgr);
294
295 return 0;
296}
297
298
299int vega12_start_thermal_controller(struct pp_hwmgr *hwmgr,
300 struct PP_TemperatureRange *range)
301{
302 int ret = 0;
303
304 if (range == NULL)
305 return -EINVAL;
306
307 ret = vega12_thermal_set_temperature_range(hwmgr, range);
308 if (ret)
309 return -EINVAL;
310
311 vega12_thermal_enable_alert(hwmgr);
312 /* We should restrict performance levels to low before we halt the SMC.
313 * On the other hand we are still in boot state when we do this
314 * so it would be pointless.
315 * If this assumption changes we have to revisit this table.
316 */
317 ret = vega12_thermal_setup_fan_table(hwmgr);
318 if (ret)
319 return -EINVAL;
320
321 vega12_thermal_start_smc_fan_control(hwmgr);
322
323 return 0;
324}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.h
new file mode 100644
index 000000000000..0d8ed039ab12
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.h
@@ -0,0 +1,66 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef VEGA12_THERMAL_H
25#define VEGA12_THERMAL_H
26
27#include "hwmgr.h"
28
29struct vega12_temperature {
30 uint16_t edge_temp;
31 uint16_t hot_spot_temp;
32 uint16_t hbm_temp;
33 uint16_t vr_soc_temp;
34 uint16_t vr_mem_temp;
35 uint16_t liquid1_temp;
36 uint16_t liquid2_temp;
37 uint16_t plx_temp;
38};
39
40#define VEGA12_THERMAL_HIGH_ALERT_MASK 0x1
41#define VEGA12_THERMAL_LOW_ALERT_MASK 0x2
42
43#define VEGA12_THERMAL_MINIMUM_TEMP_READING -256
44#define VEGA12_THERMAL_MAXIMUM_TEMP_READING 255
45
46#define VEGA12_THERMAL_MINIMUM_ALERT_TEMP 0
47#define VEGA12_THERMAL_MAXIMUM_ALERT_TEMP 255
48
49#define FDO_PWM_MODE_STATIC 1
50#define FDO_PWM_MODE_STATIC_RPM 5
51
52extern int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr);
53extern int vega12_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
54extern int vega12_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
55 struct phm_fan_speed_info *fan_speed_info);
56extern int vega12_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
57extern int vega12_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr,
58 uint32_t *speed);
59extern int vega12_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
60extern int vega12_thermal_disable_alert(struct pp_hwmgr *hwmgr);
61extern int vega12_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr);
62extern int vega12_start_thermal_controller(struct pp_hwmgr *hwmgr,
63 struct PP_TemperatureRange *range);
64
65#endif
66
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index b366a5bd2d81..19f2e43480cb 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -232,6 +232,20 @@ enum phm_platform_caps {
232 PHM_PlatformCaps_UVDClientMCTuning, 232 PHM_PlatformCaps_UVDClientMCTuning,
233 PHM_PlatformCaps_ODNinACSupport, 233 PHM_PlatformCaps_ODNinACSupport,
234 PHM_PlatformCaps_ODNinDCSupport, 234 PHM_PlatformCaps_ODNinDCSupport,
235 PHM_PlatformCaps_UMDPState,
236 PHM_PlatformCaps_AutoWattmanSupport,
237 PHM_PlatformCaps_AutoWattmanEnable_CCCState,
238 PHM_PlatformCaps_FreeSyncActive,
239 PHM_PlatformCaps_EnableShadowPstate,
240 PHM_PlatformCaps_customThermalManagement,
241 PHM_PlatformCaps_staticFanControl,
242 PHM_PlatformCaps_Virtual_System,
243 PHM_PlatformCaps_LowestUclkReservedForUlv,
244 PHM_PlatformCaps_EnableBoostState,
245 PHM_PlatformCaps_AVFSSupport,
246 PHM_PlatformCaps_ThermalPolicyDelay,
247 PHM_PlatformCaps_CustomFanControlSupport,
248 PHM_PlatformCaps_BAMACO,
235 PHM_PlatformCaps_Max 249 PHM_PlatformCaps_Max
236}; 250};
237 251
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 85b46ad68546..4b3b05747a3f 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -585,6 +585,27 @@ struct phm_ppt_v2_information {
585 uint8_t uc_dcef_dpm_voltage_mode; 585 uint8_t uc_dcef_dpm_voltage_mode;
586}; 586};
587 587
588struct phm_ppt_v3_information
589{
590 uint8_t uc_thermal_controller_type;
591
592 uint16_t us_small_power_limit1;
593 uint16_t us_small_power_limit2;
594 uint16_t us_boost_power_limit;
595
596 uint16_t us_od_turbo_power_limit;
597 uint16_t us_od_powersave_power_limit;
598 uint16_t us_software_shutdown_temp;
599
600 uint32_t *power_saving_clock_max;
601 uint32_t *power_saving_clock_min;
602
603 uint32_t *od_settings_max;
604 uint32_t *od_settings_min;
605
606 void *smc_pptable;
607};
608
588struct phm_dynamic_state_info { 609struct phm_dynamic_state_info {
589 struct phm_clock_voltage_dependency_table *vddc_dependency_on_sclk; 610 struct phm_clock_voltage_dependency_table *vddc_dependency_on_sclk;
590 struct phm_clock_voltage_dependency_table *vddci_dependency_on_mclk; 611 struct phm_clock_voltage_dependency_table *vddci_dependency_on_mclk;