author		Rex Zhu <Rex.Zhu@amd.com>	2015-11-19 05:23:32 -0500
committer	Alex Deucher <alexander.deucher@amd.com>	2016-05-04 20:25:50 -0400
commit		a23eefa2f4615af91ea496ca5b55c9e7c6fa934c (patch)
tree		e45c46c34093786fd893dd990803be91ef43d959
parent		c81726252ec99f375a226005b4e0277df3601d66 (diff)
drm/amd/powerplay: enable dpm for baffin.

Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/Makefile			3
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_dyn_defaults.h	62
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_hwmgr.c		4560
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_hwmgr.h		349
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_powertune.c	396
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_powertune.h	70
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c		111
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h		40
8 files changed, 5581 insertions, 10 deletions
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index b664e34dbcc0..2982d5c9a059 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -8,7 +8,8 @@ HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \
 		tonga_processpptables.o ppatomctrl.o \
 		tonga_hwmgr.o pppcielanes.o tonga_thermal.o\
 		fiji_powertune.o fiji_hwmgr.o tonga_clockpowergating.o \
-		fiji_clockpowergating.o fiji_thermal.o
+		fiji_clockpowergating.o fiji_thermal.o \
+		ellesmere_hwmgr.o ellesmere_powertune.o
 
 AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_dyn_defaults.h
new file mode 100644
index 000000000000..ba1187c84008
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_dyn_defaults.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef ELLESMERE_DYN_DEFAULTS_H
+#define ELLESMERE_DYN_DEFAULTS_H
+
+
+enum Ellesmeredpm_TrendDetection {
+	EllesmereAdpm_TrendDetection_AUTO,
+	EllesmereAdpm_TrendDetection_UP,
+	EllesmereAdpm_TrendDetection_DOWN
+};
+typedef enum Ellesmeredpm_TrendDetection Ellesmeredpm_TrendDetection;
+
+/* We need to fill in the default values */
+
+
+#define PPELLESMERE_VOTINGRIGHTSCLIENTS_DFLT0		0x3FFFC102
+#define PPELLESMERE_VOTINGRIGHTSCLIENTS_DFLT1		0x000400
+#define PPELLESMERE_VOTINGRIGHTSCLIENTS_DFLT2		0xC00080
+#define PPELLESMERE_VOTINGRIGHTSCLIENTS_DFLT3		0xC00200
+#define PPELLESMERE_VOTINGRIGHTSCLIENTS_DFLT4		0xC01680
+#define PPELLESMERE_VOTINGRIGHTSCLIENTS_DFLT5		0xC00033
+#define PPELLESMERE_VOTINGRIGHTSCLIENTS_DFLT6		0xC00033
+#define PPELLESMERE_VOTINGRIGHTSCLIENTS_DFLT7		0x3FFFC000
+
+
+#define PPELLESMERE_THERMALPROTECTCOUNTER_DFLT		0x200
+#define PPELLESMERE_STATICSCREENTHRESHOLDUNIT_DFLT	0
+#define PPELLESMERE_STATICSCREENTHRESHOLD_DFLT		0x00C8
+#define PPELLESMERE_GFXIDLECLOCKSTOPTHRESHOLD_DFLT	0x200
+#define PPELLESMERE_REFERENCEDIVIDER_DFLT		4
+
+#define PPELLESMERE_ULVVOLTAGECHANGEDELAY_DFLT		1687
+
+#define PPELLESMERE_CGULVPARAMETER_DFLT			0x00040035
+#define PPELLESMERE_CGULVCONTROL_DFLT			0x00007450
+#define PPELLESMERE_TARGETACTIVITY_DFLT			50
+#define PPELLESMERE_MCLK_TARGETACTIVITY_DFLT		10
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_hwmgr.c
new file mode 100644
index 000000000000..10e8e87d8384
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_hwmgr.c
@@ -0,0 +1,4560 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include "linux/delay.h"
+#include "pp_acpi.h"
+#include "hwmgr.h"
+#include "ellesmere_hwmgr.h"
+#include "ellesmere_powertune.h"
+#include "ellesmere_dyn_defaults.h"
+#include "ellesmere_smumgr.h"
+#include "pp_debug.h"
+#include "ppatomctrl.h"
+#include "atombios.h"
+#include "tonga_pptable.h"
+#include "pppcielanes.h"
+#include "amd_pcie_helpers.h"
+#include "hardwaremanager.h"
+#include "tonga_processpptables.h"
+#include "cgs_common.h"
+#include "smu74.h"
+#include "smu_ucode_xfer_vi.h"
+#include "smu74_discrete.h"
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "gca/gfx_8_0_d.h"
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#define MC_CG_ARB_FREQ_F0           0x0a
+#define MC_CG_ARB_FREQ_F1           0x0b
+#define MC_CG_ARB_FREQ_F2           0x0c
+#define MC_CG_ARB_FREQ_F3           0x0d
+
+#define MC_CG_SEQ_DRAMCONF_S0       0x05
+#define MC_CG_SEQ_DRAMCONF_S1       0x06
+#define MC_CG_SEQ_YCLK_SUSPEND      0x04
+#define MC_CG_SEQ_YCLK_RESUME       0x0a
+
+
+#define SMC_RAM_END 0x40000
+
+#define SMC_CG_IND_START 0xc0030000
+#define SMC_CG_IND_END 0xc0040000
+
+#define VOLTAGE_SCALE 4
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+
+#define VDDC_VDDCI_DELTA 200
+
+#define MEM_FREQ_LOW_LATENCY 25000
+#define MEM_FREQ_HIGH_LATENCY 80000
+
+#define MEM_LATENCY_HIGH 45
+#define MEM_LATENCY_LOW 35
+#define MEM_LATENCY_ERR 0xFFFF
+
+#define MC_SEQ_MISC0_GDDR5_SHIFT 28
+#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
+#define MC_SEQ_MISC0_GDDR5_VALUE 5
+
+
+#define PCIE_BUS_CLK 10000
+#define TCLK (PCIE_BUS_CLK / 10)
+
+
+uint16_t ellesmere_clock_stretcher_lookup_table[2][4] = { {600, 1050, 3, 0},
+					{600, 1050, 6, 1} };
+
+/* [FF, SS] type, [] 4 voltage ranges, and [Floor Freq, Boundary Freq, VID min, VID max] */
+uint32_t ellesmere_clock_stretcher_ddt_table[2][4][4] = { { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
+					{ {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
+
+/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] (coming from PWR_CKS_CNTL.stretch_amount reg spec) */
+uint8_t ellesmere_clock_stretch_amount_conversion[2][6] = { {0, 1, 3, 2, 4, 5},
+					{0, 2, 4, 5, 6, 5} };
+
+/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
+enum DPM_EVENT_SRC {
+	DPM_EVENT_SRC_ANALOG = 0,
+	DPM_EVENT_SRC_EXTERNAL = 1,
+	DPM_EVENT_SRC_DIGITAL = 2,
+	DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
+	DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
+};
+
+const unsigned long PhwEllesmere_Magic = (unsigned long)(PHM_VIslands_Magic);
+
+struct ellesmere_power_state *cast_phw_ellesmere_power_state(
+				  struct pp_hw_power_state *hw_ps)
+{
+	PP_ASSERT_WITH_CODE((PhwEllesmere_Magic == hw_ps->magic),
+				"Invalid Powerstate Type!",
+				 return NULL);
+
+	return (struct ellesmere_power_state *)hw_ps;
+}
+
+const struct ellesmere_power_state *cast_const_phw_ellesmere_power_state(
+				 const struct pp_hw_power_state *hw_ps)
+{
+	PP_ASSERT_WITH_CODE((PhwEllesmere_Magic == hw_ps->magic),
+				"Invalid Powerstate Type!",
+				 return NULL);
+
+	return (const struct ellesmere_power_state *)hw_ps;
+}
+
+static bool ellesmere_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+	return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+			CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
+			? true : false;
+}
+
+/**
+ * Find the MC microcode version and store it in the HwMgr struct
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+int phm_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
+{
+	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
+
+	hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
+
+	return 0;
+}
+
+uint16_t phm_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
+{
+	uint32_t speedCntl = 0;
+
+	/* mmPCIE_PORT_INDEX renamed to mmPCIE_INDEX */
+	speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
+			ixPCIE_LC_SPEED_CNTL);
+	return ((uint16_t)PHM_GET_FIELD(speedCntl,
+			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
+}
+
+int phm_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
+{
+	uint32_t link_width;
+
+	/* mmPCIE_PORT_INDEX renamed to mmPCIE_INDEX */
+	link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
+			PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
+
+	PP_ASSERT_WITH_CODE((7 >= link_width),
+			"Invalid PCIe lane width!", return 0);
+
+	return decode_pcie_lane_width(link_width);
+}
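
decode_pcie_lane_width() (from pppcielanes.c) turns the 3-bit LC_LINK_WIDTH_RD
encoding into a lane count. A minimal standalone sketch of that mapping,
assuming the usual SMU7 encoding (0, 1, 2, 4, 8, 12, 16 lanes); the real table
lives in pppcielanes.c, so treat this as illustration only:

#include <stdint.h>
#include <stdio.h>

/* hypothetical re-implementation for illustration only */
static const uint16_t demo_lane_width_map[7] = { 0, 1, 2, 4, 8, 12, 16 };

static uint16_t demo_decode_pcie_lane_width(uint32_t encoded)
{
	return (encoded < 7) ? demo_lane_width_map[encoded] : 0;
}

int main(void)
{
	/* LC_LINK_WIDTH_RD == 5 corresponds to a x12 link */
	printf("encoding 5 -> x%u\n", demo_decode_pcie_lane_width(5));
	return 0;
}
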
+
+void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
+{
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)hwmgr->pptable;
+	struct phm_clock_voltage_dependency_table *table =
+				table_info->vddc_dep_on_dal_pwrl;
+	struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
+	enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
+	uint32_t req_vddc = 0, req_volt, i;
+
+	if (!table || !(dal_power_level >= PP_DAL_POWERLEVEL_ULTRALOW &&
+			dal_power_level <= PP_DAL_POWERLEVEL_PERFORMANCE))
+		return;
+
+	for (i = 0; i < table->count; i++) {
+		if (dal_power_level == table->entries[i].clk) {
+			req_vddc = table->entries[i].v;
+			break;
+		}
+	}
+
+	vddc_table = table_info->vdd_dep_on_sclk;
+	for (i = 0; i < vddc_table->count; i++) {
+		if (req_vddc <= vddc_table->entries[i].vddc) {
+			req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE)
+					<< VDDC_SHIFT;
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_VddC_Request, req_volt);
+			return;
+		}
+	}
+	printk(KERN_ERR "DAL requested level cannot be"
+			" matched to an available voltage in the VDDC DPM table\n");
+}
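
The function above is a first-fit scan: look up the requested DAL level's VDDC,
then ask the SMC for the first SCLK/VDDC entry at or above it. The same pattern
on a self-contained, hypothetical table (not the driver's types or values):

#include <stdint.h>
#include <stdio.h>

struct demo_dep_entry { uint32_t clk; uint16_t vddc; };

int main(void)
{
	/* hypothetical SCLK/VDDC dependency table, ascending by clock */
	struct demo_dep_entry table[] = { {30000, 900}, {60000, 1000}, {90000, 1150} };
	uint16_t req_vddc = 950;
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (req_vddc <= table[i].vddc) {
			/* same scaling as the driver: mV -> 0.25 mV units */
			printf("request %u mV (raw %u)\n", table[i].vddc,
					table[i].vddc * 4 /* VOLTAGE_SCALE */);
			return 0;
		}
	}
	puts("no suitable voltage found");
	return 0;
}
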
+
+
+/**
+* Checks if we want to support voltage control
+*
+* @param hwmgr  the address of the powerplay hardware manager.
+*/
+static bool ellesmere_voltage_control(const struct pp_hwmgr *hwmgr)
+{
+	const struct ellesmere_hwmgr *data =
+			(const struct ellesmere_hwmgr *)(hwmgr->backend);
+
+	return (ELLESMERE_VOLTAGE_CONTROL_NONE != data->voltage_control);
+}
+
+/**
+* Enable voltage control
+*
+* @param hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+*/
+static int ellesmere_enable_voltage_control(struct pp_hwmgr *hwmgr)
+{
+	/* enable voltage control */
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
+
+	return 0;
+}
+
+/**
+* Create Voltage Tables.
+*
+* @param hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+*/
+static int ellesmere_construct_voltage_tables(struct pp_hwmgr *hwmgr)
+{
+	struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)hwmgr->pptable;
+	int result;
+
+	if (ELLESMERE_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+		result = atomctrl_get_voltage_table_v3(hwmgr,
+				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
+				&(data->mvdd_voltage_table));
+		PP_ASSERT_WITH_CODE((0 == result),
+				"Failed to retrieve MVDD table.",
+				return result);
+	} else if (ELLESMERE_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+		result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
+				table_info->vdd_dep_on_mclk);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"Failed to retrieve SVI2 MVDD table from dependency table.",
+				return result);
+	}
+
+	if (ELLESMERE_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+		result = atomctrl_get_voltage_table_v3(hwmgr,
+				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
+				&(data->vddci_voltage_table));
+		PP_ASSERT_WITH_CODE((0 == result),
+				"Failed to retrieve VDDCI table.",
+				return result);
+	} else if (ELLESMERE_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+		result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
+				table_info->vdd_dep_on_mclk);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"Failed to retrieve SVI2 VDDCI table from dependency table.",
+				return result);
+	}
+
+	if (ELLESMERE_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+		result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
+				table_info->vddc_lookup_table);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"Failed to retrieve SVI2 VDDC table from lookup table.",
+				return result);
+	}
+
+	PP_ASSERT_WITH_CODE(
+			(data->vddc_voltage_table.count <= (SMU74_MAX_LEVELS_VDDC)),
+			"Too many voltage values for VDDC. Trimming to fit state table.",
+			phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_VDDC,
+			&(data->vddc_voltage_table)));
+
+	PP_ASSERT_WITH_CODE(
+			(data->vddci_voltage_table.count <= (SMU74_MAX_LEVELS_VDDCI)),
+			"Too many voltage values for VDDCI. Trimming to fit state table.",
+			phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_VDDCI,
+			&(data->vddci_voltage_table)));
+
+	PP_ASSERT_WITH_CODE(
+			(data->mvdd_voltage_table.count <= (SMU74_MAX_LEVELS_MVDD)),
+			"Too many voltage values for MVDD. Trimming to fit state table.",
+			phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_MVDD,
+			&(data->mvdd_voltage_table)));
+
+	return 0;
+}
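
phm_trim_voltage_table_to_fit_state_table() has to drop entries when a table
exceeds the SMU limit (SMU74_MAX_LEVELS_VDDC and friends). One plausible
trimming policy, shown on a plain array; this is an illustration under the
assumption that the highest voltages are the ones worth keeping, not the
driver's actual implementation:

#include <stdio.h>

/* keep the highest max_count voltages, assuming ascending order -
 * a hypothetical policy for illustration */
static unsigned int demo_trim_to_fit(unsigned int *v, unsigned int count,
		unsigned int max_count)
{
	unsigned int drop, i;

	if (count <= max_count)
		return count;
	drop = count - max_count;
	for (i = 0; i < max_count; i++)
		v[i] = v[i + drop];
	return max_count;
}

int main(void)
{
	unsigned int volts[] = { 800, 850, 900, 950, 1000, 1100 };
	unsigned int n = demo_trim_to_fit(volts, 6, 4), i;

	for (i = 0; i < n; i++)
		printf("%u ", volts[i]);	/* 900 950 1000 1100 */
	putchar('\n');
	return 0;
}
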
+
+/**
+* Programs static screen detection parameters
+*
+* @param hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+*/
+static int ellesmere_program_static_screen_threshold_parameters(
+							struct pp_hwmgr *hwmgr)
+{
+	struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
+
+	/* Set static screen threshold unit */
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
+			data->static_screen_threshold_unit);
+	/* Set static screen threshold */
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
+			data->static_screen_threshold);
+
+	return 0;
+}
+
+/**
+* Setup display gap for glitch-free memory clock switching.
+*
+* @param hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+*/
+static int ellesmere_enable_display_gap(struct pp_hwmgr *hwmgr)
+{
+	uint32_t display_gap =
+			cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+					ixCG_DISPLAY_GAP_CNTL);
+
+	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
+			DISP_GAP, DISPLAY_GAP_IGNORE);
+
+	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
+			DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
+
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_DISPLAY_GAP_CNTL, display_gap);
+
+	return 0;
+}
+
+/**
+* Programs activity state transition voting clients
+*
+* @param hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+*/
+static int ellesmere_program_voting_clients(struct pp_hwmgr *hwmgr)
+{
+	struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
+
+	/* Clear reset for voting clients before enabling DPM */
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
+
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
+
+	return 0;
+}
+
+/**
+* Get the location of various tables inside the FW image.
+*
+* @param hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+*/
+static int ellesmere_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+	struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
+	struct ellesmere_smumgr *smu_data = (struct ellesmere_smumgr *)(hwmgr->smumgr->backend);
+	uint32_t tmp;
+	int result;
+	bool error = false;
+
+	result = ellesmere_read_smc_sram_dword(hwmgr->smumgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU74_Firmware_Header, DpmTable),
+			&tmp, data->sram_end);
+
+	if (0 == result)
+		data->dpm_table_start = tmp;
+
+	error |= (0 != result);
+
+	result = ellesmere_read_smc_sram_dword(hwmgr->smumgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU74_Firmware_Header, SoftRegisters),
+			&tmp, data->sram_end);
+
+	if (!result) {
+		data->soft_regs_start = tmp;
+		smu_data->soft_regs_start = tmp;
+	}
+
+	error |= (0 != result);
+
+	result = ellesmere_read_smc_sram_dword(hwmgr->smumgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU74_Firmware_Header, mcRegisterTable),
+			&tmp, data->sram_end);
+
+	if (!result)
+		data->mc_reg_table_start = tmp;
+
+	result = ellesmere_read_smc_sram_dword(hwmgr->smumgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU74_Firmware_Header, FanTable),
+			&tmp, data->sram_end);
+
+	if (!result)
+		data->fan_table_start = tmp;
+
+	error |= (0 != result);
+
+	result = ellesmere_read_smc_sram_dword(hwmgr->smumgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
+			&tmp, data->sram_end);
+
+	if (!result)
+		data->arb_table_start = tmp;
+
+	error |= (0 != result);
+
+	result = ellesmere_read_smc_sram_dword(hwmgr->smumgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU74_Firmware_Header, Version),
+			&tmp, data->sram_end);
+
+	if (!result)
+		hwmgr->microcode_version_info.SMC = tmp;
+
+	error |= (0 != result);
+
+	return error ? -1 : 0;
+}
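
The header walk above reads one dword per field at
SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, Field). The
same offsetof pattern on a mocked SRAM buffer, with a hypothetical cut-down
header layout for illustration:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* hypothetical, cut-down firmware header for illustration */
struct demo_fw_header {
	uint32_t Version;
	uint32_t DpmTable;
	uint32_t SoftRegisters;
};

#define DEMO_HEADER_LOCATION 0x100

/* stand-in for ellesmere_read_smc_sram_dword() */
static int demo_read_sram_dword(const uint8_t *sram, size_t sram_size,
		uint32_t addr, uint32_t *value)
{
	if (addr + sizeof(*value) > sram_size)
		return -1;
	memcpy(value, sram + addr, sizeof(*value));
	return 0;
}

int main(void)
{
	uint8_t sram[0x200] = { 0 };
	struct demo_fw_header hdr = { 0x1A2B, 0x4000, 0x4800 };
	uint32_t dpm_table_start;

	memcpy(sram + DEMO_HEADER_LOCATION, &hdr, sizeof(hdr));
	if (!demo_read_sram_dword(sram, sizeof(sram),
			DEMO_HEADER_LOCATION +
			offsetof(struct demo_fw_header, DpmTable),
			&dpm_table_start))
		printf("DpmTable at 0x%x\n", dpm_table_start);	/* 0x4000 */
	return 0;
}
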
+
+/* Copy one arb setting to another and then switch the active set.
+ * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants.
+ */
+static int ellesmere_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
+		uint32_t arb_src, uint32_t arb_dest)
+{
+	uint32_t mc_arb_dram_timing;
+	uint32_t mc_arb_dram_timing2;
+	uint32_t burst_time;
+	uint32_t mc_cg_config;
+
+	switch (arb_src) {
+	case MC_CG_ARB_FREQ_F0:
+		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+		break;
+	case MC_CG_ARB_FREQ_F1:
+		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
+		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
+		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (arb_dest) {
+	case MC_CG_ARB_FREQ_F0:
+		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
+		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
+		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
+		break;
+	case MC_CG_ARB_FREQ_F1:
+		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
+		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
+		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
+	mc_cg_config |= 0x0000000F;
+	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
+	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);
+
+	return 0;
+}
+
+/**
+* Initial switch from ARB F0->F1
+*
+* @param hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+* This function is to be called from the SetPowerState table.
+*/
+static int ellesmere_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
+{
+	return ellesmere_copy_and_switch_arb_sets(hwmgr,
+			MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
+}
+
+static int ellesmere_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
+{
+	struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+	uint32_t i, max_entry;
+
+	PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
+			data->use_pcie_power_saving_levels), "No pcie performance levels!",
+			return -EINVAL);
+
+	if (data->use_pcie_performance_levels &&
+			!data->use_pcie_power_saving_levels) {
+		data->pcie_gen_power_saving = data->pcie_gen_performance;
+		data->pcie_lane_power_saving = data->pcie_lane_performance;
+	} else if (!data->use_pcie_performance_levels &&
+			data->use_pcie_power_saving_levels) {
+		data->pcie_gen_performance = data->pcie_gen_power_saving;
+		data->pcie_lane_performance = data->pcie_lane_power_saving;
+	}
+
+	phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
+			SMU74_MAX_LEVELS_LINK,
+			MAX_REGULAR_DPM_NUMBER);
+
+	if (pcie_table != NULL) {
+		/* max_entry is used to make sure we reserve one PCIE level
+		 * for the boot level (fix for A+A PSPP issue).
+		 * If the PCIE table from the PPTable has a ULV entry + 8 entries,
+		 * then ignore the last entry. */
+		max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ?
+				SMU74_MAX_LEVELS_LINK : pcie_table->count;
+		for (i = 1; i < max_entry; i++) {
+			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
+					get_pcie_gen_support(data->pcie_gen_cap,
+							pcie_table->entries[i].gen_speed),
+					get_pcie_lane_support(data->pcie_lane_cap,
+							pcie_table->entries[i].lane_width));
+		}
+		data->dpm_table.pcie_speed_table.count = max_entry - 1;
+	} else {
+		/* Hardcode Pcie Table */
+		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
+				get_pcie_gen_support(data->pcie_gen_cap,
+						PP_Min_PCIEGen),
+				get_pcie_lane_support(data->pcie_lane_cap,
+						PP_Max_PCIELane));
+		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
+				get_pcie_gen_support(data->pcie_gen_cap,
+						PP_Min_PCIEGen),
+				get_pcie_lane_support(data->pcie_lane_cap,
+						PP_Max_PCIELane));
+		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
+				get_pcie_gen_support(data->pcie_gen_cap,
+						PP_Max_PCIEGen),
+				get_pcie_lane_support(data->pcie_lane_cap,
+						PP_Max_PCIELane));
+		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
+				get_pcie_gen_support(data->pcie_gen_cap,
+						PP_Max_PCIEGen),
+				get_pcie_lane_support(data->pcie_lane_cap,
+						PP_Max_PCIELane));
+		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
+				get_pcie_gen_support(data->pcie_gen_cap,
+						PP_Max_PCIEGen),
+				get_pcie_lane_support(data->pcie_lane_cap,
+						PP_Max_PCIELane));
+		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
+				get_pcie_gen_support(data->pcie_gen_cap,
+						PP_Max_PCIEGen),
+				get_pcie_lane_support(data->pcie_lane_cap,
+						PP_Max_PCIELane));
+
+		data->dpm_table.pcie_speed_table.count = 6;
+	}
+	/* Populate last level for boot PCIE level, but do not increment count. */
+	phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
+			data->dpm_table.pcie_speed_table.count,
+			get_pcie_gen_support(data->pcie_gen_cap,
+					PP_Min_PCIEGen),
+			get_pcie_lane_support(data->pcie_lane_cap,
+					PP_Max_PCIELane));
+
+	return 0;
+}
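
get_pcie_gen_support()/get_pcie_lane_support() (amd_pcie_helpers.h) clamp a
requested speed/width against the asic's capability mask. A rough sketch of
the clamping idea with a made-up cap encoding; the real helpers use the CAIL
PCIE defines, so this is illustration only:

#include <stdint.h>
#include <stdio.h>

/* hypothetical cap bits: bit n set => gen (n + 1) supported */
static uint32_t demo_get_pcie_gen_support(uint32_t gen_cap, uint32_t wanted_gen)
{
	while (wanted_gen > 0 && !(gen_cap & (1u << wanted_gen)))
		wanted_gen--;	/* fall back to the fastest supported gen */
	return wanted_gen;
}

int main(void)
{
	uint32_t cap = (1u << 0) | (1u << 1);	/* gen1 + gen2 only */

	/* asking for gen3 on a gen2-capable link falls back to gen2 */
	printf("want gen3, get gen%u\n", demo_get_pcie_gen_support(cap, 2) + 1);
	return 0;
}
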
+
+/*
+ * This function is to initialize all DPM state tables
+ * for SMU7 based on the dependency table.
+ * Dynamic state patching function will then trim these
+ * state tables to the allowed range based
+ * on the power policy or external client requests,
+ * such as UVD request, etc.
+ */
+int ellesmere_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+{
+	struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint32_t i;
+
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
+			table_info->vdd_dep_on_sclk;
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
+			table_info->vdd_dep_on_mclk;
+
+	PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
+			"SCLK dependency table is missing. This table is mandatory",
+			return -EINVAL);
+	PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
+			"SCLK dependency table is empty. This table is mandatory",
+			return -EINVAL);
+
+	PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
+			"MCLK dependency table is missing. This table is mandatory",
+			return -EINVAL);
+	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
+			"MCLK dependency table is empty. This table is mandatory",
+			return -EINVAL);
+
+	/* clear the state table to reset everything to default */
+	phm_reset_single_dpm_table(
+			&data->dpm_table.sclk_table, SMU74_MAX_LEVELS_GRAPHICS, MAX_REGULAR_DPM_NUMBER);
+	phm_reset_single_dpm_table(
+			&data->dpm_table.mclk_table, SMU74_MAX_LEVELS_MEMORY, MAX_REGULAR_DPM_NUMBER);
+
+
+	/* Initialize Sclk DPM table based on allowed Sclk values */
+	data->dpm_table.sclk_table.count = 0;
+	for (i = 0; i < dep_sclk_table->count; i++) {
+		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
+				dep_sclk_table->entries[i].clk) {
+
+			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
+					dep_sclk_table->entries[i].clk;
+
+			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
+					(i == 0) ? true : false;
+			data->dpm_table.sclk_table.count++;
+		}
+	}
+
+	/* Initialize Mclk DPM table based on allowed Mclk values */
+	data->dpm_table.mclk_table.count = 0;
+	for (i = 0; i < dep_mclk_table->count; i++) {
+		if (i == 0 || data->dpm_table.mclk_table.dpm_levels
+				[data->dpm_table.mclk_table.count - 1].value !=
+				dep_mclk_table->entries[i].clk) {
+			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
+					dep_mclk_table->entries[i].clk;
+			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
+					(i == 0) ? true : false;
+			data->dpm_table.mclk_table.count++;
+		}
+	}
+
+	/* setup PCIE gen speed levels */
+	ellesmere_setup_default_pcie_table(hwmgr);
+
+	/* save a copy of the default DPM table */
+	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
+			sizeof(struct ellesmere_dpm_table));
+
+	return 0;
+}
+
+uint8_t convert_to_vid(uint16_t vddc)
+{
+	return (uint8_t)((6200 - (vddc * VOLTAGE_SCALE)) / 25);
+}
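
convert_to_vid() maps millivolts to an SVI2 VID code: voltage = 1.55 V - VID x
6.25 mV, so VID = (6200 - 4*mV) / 25 once the voltage is scaled to 0.25 mV
units. A quick standalone check:

#include <stdint.h>
#include <stdio.h>

#define VOLTAGE_SCALE 4	/* mV -> 0.25 mV units */

static uint8_t convert_to_vid(uint16_t vddc)
{
	return (uint8_t)((6200 - (vddc * VOLTAGE_SCALE)) / 25);
}

int main(void)
{
	/* 1.10 V: VID = (6200 - 4400) / 25 = 72; 1.55 V - 72 * 6.25 mV = 1.10 V */
	printf("1100 mV -> VID %u\n", convert_to_vid(1100));
	/* 1.55 V is VID 0, the top of the SVI2 range */
	printf("1550 mV -> VID %u\n", convert_to_vid(1550));
	return 0;
}
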
+
+/**
+ * Mvdd table preparation for SMC.
+ *
+ * @param    *hwmgr  The address of the hardware manager.
+ * @param    *table  The SMC DPM table structure to be populated.
+ * @return   0
+ */
+static int ellesmere_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+		SMU74_Discrete_DpmTable *table)
+{
+	struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
+	uint32_t count, level;
+
+	if (ELLESMERE_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+		count = data->mvdd_voltage_table.count;
+		if (count > SMU_MAX_SMIO_LEVELS)
+			count = SMU_MAX_SMIO_LEVELS;
+		for (level = 0; level < count; level++) {
+			table->SmioTable2.Pattern[level].Voltage =
+				PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
+			/* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */
+			table->SmioTable2.Pattern[level].Smio =
+				(uint8_t) level;
+			table->Smio[level] |=
+				data->mvdd_voltage_table.entries[level].smio_low;
+		}
+		table->SmioMask2 = data->mvdd_voltage_table.mask_low;
+
+		table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
+	}
+
+	return 0;
+}
+
+static int ellesmere_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
+					 struct SMU74_Discrete_DpmTable *table)
+{
+	uint32_t count, level;
+	struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
+
+	count = data->vddci_voltage_table.count;
+
+	if (ELLESMERE_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+		if (count > SMU_MAX_SMIO_LEVELS)
+			count = SMU_MAX_SMIO_LEVELS;
+		for (level = 0; level < count; ++level) {
+			table->SmioTable1.Pattern[level].Voltage =
+				PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
+			table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
+
+			table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
+		}
+	}
+
+	table->SmioMask1 = data->vddci_voltage_table.mask_low;
+
+	return 0;
+}
+
+/**
+* Preparation of vddc and vddgfx CAC tables for SMC.
+*
+* @param    hwmgr  the address of the hardware manager
+* @param    table  the SMC DPM table structure to be populated
+* @return   always 0
+*/
+static int ellesmere_populate_cac_table(struct pp_hwmgr *hwmgr,
+		struct SMU74_Discrete_DpmTable *table)
+{
+	uint32_t count;
+	uint8_t index;
+	struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+			table_info->vddc_lookup_table;
+	/* the table is already swapped, so in order to use the value from it,
+	 * we need to swap it back.
+	 * We are populating vddc CAC data to the BapmVddc table
+	 * in split and merged mode
+	 */
+	for (count = 0; count < lookup_table->count; count++) {
+		index = phm_get_voltage_index(lookup_table,
+				data->vddc_voltage_table.entries[count].value);
+		table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
+		table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
+		table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
+	}
+
+	return 0;
+}
+
+/**
+* Preparation of voltage tables for SMC.
+*
+* @param    hwmgr  the address of the hardware manager
+* @param    table  the SMC DPM table structure to be populated
+* @return   always 0
+*/
+
+int ellesmere_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+		struct SMU74_Discrete_DpmTable *table)
+{
+	ellesmere_populate_smc_vddci_table(hwmgr, table);
+	ellesmere_populate_smc_mvdd_table(hwmgr, table);
+	ellesmere_populate_cac_table(hwmgr, table);
+
+	return 0;
+}
+
+static int ellesmere_populate_ulv_level(struct pp_hwmgr *hwmgr,
+		struct SMU74_Discrete_Ulv *state)
+{
+	struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	state->CcPwrDynRm = 0;
+	state->CcPwrDynRm1 = 0;
+
+	state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+	state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+			VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+	state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
+
+	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+	CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+	return 0;
+}
+
+static int ellesmere_populate_ulv_state(struct pp_hwmgr *hwmgr,
+		struct SMU74_Discrete_DpmTable *table)
+{
+	return ellesmere_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+static int ellesmere_populate_smc_link_level(struct pp_hwmgr *hwmgr,
+		struct SMU74_Discrete_DpmTable *table)
+{
+	struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
+	struct ellesmere_dpm_table *dpm_table = &data->dpm_table;
+	int i;
+
+	/* Index (dpm_table->pcie_speed_table.count)
+	 * is reserved for the PCIE boot level. */
+	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+		table->LinkLevel[i].PcieGenSpeed =
+				(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+		table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
+				dpm_table->pcie_speed_table.dpm_levels[i].param1);
+		table->LinkLevel[i].EnabledForActivity = 1;
+		table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
+		table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
+		table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
+	}
+
+	data->smc_state_table.LinkLevelCount =
+			(uint8_t)dpm_table->pcie_speed_table.count;
+	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+			phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+	return 0;
+}
+
+static uint32_t ellesemere_get_xclk(struct pp_hwmgr *hwmgr)
+{
+	uint32_t reference_clock, tmp;
+	struct cgs_display_info info = {0};
+	struct cgs_mode_info mode_info;
+
+	info.mode_info = &mode_info;
+
+	tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);
+
+	if (tmp)
+		return TCLK;
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+	reference_clock = mode_info.ref_clock;
+
+	tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);
+
+	if (0 != tmp)
+		return reference_clock / 4;
+
+	return reference_clock;
+}
+
+/**
+* Calculates the SCLK dividers using the provided engine clock
+*
+* @param    hwmgr  the address of the hardware manager
+* @param    clock  the engine clock to use to populate the structure
+* @param    sclk   the SMC SCLK structure to be populated
+*/
+static int ellesmere_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+		uint32_t clock, SMU_SclkSetting *sclk_setting)
+{
+	const struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
+	const SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
+	struct pp_atomctrl_clock_dividers_ai dividers;
+
+	uint32_t ref_clock;
+	uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
+	uint8_t i;
+	int result;
+	uint64_t temp;
+
+	sclk_setting->SclkFrequency = clock;
+	/* get the engine clock dividers for this clock value */
+	result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, &dividers);
+	if (result == 0) {
+		sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
+		sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
+		sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
+		sclk_setting->PllRange = dividers.ucSclkPllRange;
+		sclk_setting->SSc_En = dividers.ucSscEnable;
+		sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
+		sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
+		return result;
+	}
+
+	ref_clock = ellesemere_get_xclk(hwmgr);
+
+	for (i = 0; i < NUM_SCLK_RANGE; i++) {
+		if (clock > data->range_table[i].trans_lower_frequency
+				&& clock <= data->range_table[i].trans_upper_frequency) {
+			sclk_setting->PllRange = i;
+			break;
+		}
+	}
+
+	sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+	temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
+	temp <<= 0x10;
+	sclk_setting->Fcw_frac = (uint16_t)(0xFFFF & (temp / ref_clock));
+
+	pcc_target_percent = 10; /* Hardcode 10% for now. */
+	pcc_target_freq = clock - (clock * pcc_target_percent / 100);
+	sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+
+	ss_target_percent = 2; /* Hardcode 2% for now. */
+	sclk_setting->SSc_En = 0;
+	if (ss_target_percent) {
+		sclk_setting->SSc_En = 1;
+		ss_target_freq = clock - (clock * ss_target_percent / 100);
+		sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+		temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
+		temp <<= 0x10;
+		sclk_setting->Fcw1_frac = (uint16_t)(0xFFFF & (temp / ref_clock));
+	}
+
+	return 0;
+}
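
The fallback path builds the PLL frequency control word by hand: the integer
part is (clock << postdiv) / ref_clock and the fractional part is the
remainder carried into 16 extra bits of fixed point. The arithmetic in
isolation, with sample numbers only (clock and ref_clock are assumed to be in
the driver's 10 kHz units):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t clock = 61234;	/* 612.34 MHz in 10 kHz units (example) */
	uint32_t ref_clock = 10000;	/* 100 MHz xclk (example) */
	uint32_t postdiv = 2;	/* from the selected PLL range (example) */
	uint64_t temp;
	uint16_t fcw_int, fcw_frac;

	fcw_int = (uint16_t)(((uint64_t)clock << postdiv) / ref_clock);
	temp = (uint64_t)clock << postdiv;
	temp <<= 0x10;	/* 16.16 fixed point */
	fcw_frac = (uint16_t)(0xFFFF & (temp / ref_clock));

	/* fcw_int 24, fcw_frac 32348 ~= 0.4936 * 65536 */
	printf("Fcw_int=%u Fcw_frac=%u\n", fcw_int, fcw_frac);
	return 0;
}
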
+
+static int ellesmere_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+		struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
+		uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
+{
+	uint32_t i;
+	uint16_t vddci;
+	struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
+
+	*voltage = *mvdd = 0;
+
+	/* the clock/voltage dependency table is empty */
+	if (dep_table->count == 0)
+		return -EINVAL;
+
+	for (i = 0; i < dep_table->count; i++) {
+		/* find the first sclk not less than the requested clock */
+		if (dep_table->entries[i].clk >= clock) {
+			*voltage |= (dep_table->entries[i].vddc *
+					VOLTAGE_SCALE) << VDDC_SHIFT;
+			if (ELLESMERE_VOLTAGE_CONTROL_NONE == data->vddci_control)
+				*voltage |= (data->vbios_boot_state.vddci_bootup_value *
+						VOLTAGE_SCALE) << VDDCI_SHIFT;
+			else if (dep_table->entries[i].vddci)
+				*voltage |= (dep_table->entries[i].vddci *
+						VOLTAGE_SCALE) << VDDCI_SHIFT;
+			else {
+				vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+						(dep_table->entries[i].vddc -
+								(uint16_t)data->vddc_vddci_delta));
+				*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+			}
+
+			if (ELLESMERE_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+				*mvdd = data->vbios_boot_state.mvdd_bootup_value *
+						VOLTAGE_SCALE;
+			else if (dep_table->entries[i].mvdd)
+				*mvdd = (uint32_t) dep_table->entries[i].mvdd *
+						VOLTAGE_SCALE;
+
+			*voltage |= 1 << PHASES_SHIFT;
+			return 0;
+		}
+	}
+
+	/* the requested sclk is bigger than the max sclk in the dependency table */
+	*voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+	if (ELLESMERE_VOLTAGE_CONTROL_NONE == data->vddci_control)
+		*voltage |= (data->vbios_boot_state.vddci_bootup_value *
+				VOLTAGE_SCALE) << VDDCI_SHIFT;
+	else if (dep_table->entries[i - 1].vddci)
+		*voltage |= (dep_table->entries[i - 1].vddci *
+				VOLTAGE_SCALE) << VDDCI_SHIFT;
+	else {
+		vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+				(dep_table->entries[i - 1].vddc -
+						(uint16_t)data->vddc_vddci_delta));
+		*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+	}
+
+	if (ELLESMERE_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+		*mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
+	else if (dep_table->entries[i - 1].mvdd)
+		*mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
+
+	return 0;
+}
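
The voltage word built above packs VDDC, VDDCI and a phase count into one
32-bit value via VDDC_SHIFT/VDDCI_SHIFT/PHASES_SHIFT, with all voltages in
0.25 mV units. A sketch of the packing with illustrative shift values; the
real shifts are defined in the SMU74 headers, so these are assumptions:

#include <stdint.h>
#include <stdio.h>

/* illustrative layout only - the driver's shifts live in the SMU74 headers */
#define DEMO_VDDC_SHIFT   0
#define DEMO_VDDCI_SHIFT  15
#define DEMO_PHASES_SHIFT 30
#define VOLTAGE_SCALE     4

int main(void)
{
	uint16_t vddc_mv = 1000, vddci_mv = 850;
	uint32_t v = 0;

	v |= (uint32_t)(vddc_mv * VOLTAGE_SCALE) << DEMO_VDDC_SHIFT;
	v |= (uint32_t)(vddci_mv * VOLTAGE_SCALE) << DEMO_VDDCI_SHIFT;
	v |= 1u << DEMO_PHASES_SHIFT;	/* one voltage phase */

	printf("packed MinVoltage = 0x%08x\n", v);
	printf("vddc back = %u mV\n",
			((v >> DEMO_VDDC_SHIFT) & 0x7FFF) / VOLTAGE_SCALE);
	return 0;
}
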
+
+sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = { {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112},
+		{VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
+		{VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112},
+		{VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160},
+		{VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112},
+		{VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160},
+		{VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108},
+		{VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} };
+
+static void ellesmere_get_sclk_range_table(struct pp_hwmgr *hwmgr)
+{
+	uint32_t i, ref_clk;
+	struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
+	SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
+	struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
+
+	ref_clk = ellesemere_get_xclk(hwmgr);
+
+	if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
+		for (i = 0; i < NUM_SCLK_RANGE; i++) {
+			table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
+			table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
+			table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;
+
+			table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
+			table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;
+
+			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
+			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
+			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
+		}
+		return;
+	}
+
+	for (i = 0; i < NUM_SCLK_RANGE; i++) {
+
+		data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
+		data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;
+
+		table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
+		table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
+		table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
+
+		table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
+		table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
+
+		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
+		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
+		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
+	}
+}
+
+/**
+* Populates a single SMC SCLK structure using the provided engine clock
+*
+* @param    hwmgr  the address of the hardware manager
+* @param    clock  the engine clock to use to populate the structure
+* @param    sclk   the SMC SCLK structure to be populated
+*/
+
+static int ellesmere_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+		uint32_t clock, uint16_t sclk_al_threshold,
+		struct SMU74_Discrete_GraphicsLevel *level)
+{
+	int result, i, temp;
+	/* PP_Clocks minClocks; */
+	uint32_t mvdd;
+	struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	SMU_SclkSetting curr_sclk_setting = { 0 };
+
+	result = ellesmere_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
+
+	/* populate graphics levels */
+	result = ellesmere_get_dependency_volt_by_clk(hwmgr,
+			table_info->vdd_dep_on_sclk, clock,
+			&level->MinVoltage, &mvdd);
+
+	PP_ASSERT_WITH_CODE((0 == result),
+			"cannot find VDDC voltage value for "
+			"VDDC engine clock dependency table",
+			return result);
+	level->ActivityLevel = sclk_al_threshold;
+
+	level->CcPwrDynRm = 0;
+	level->CcPwrDynRm1 = 0;
+	level->EnabledForActivity = 0;
+	level->EnabledForThrottle = 1;
+	level->UpHyst = 10;
+	level->DownHyst = 0;
+	level->VoltageDownHyst = 0;
+	level->PowerThrottle = 0;
+
+	/*
+	 * TODO: get minimum clocks from dal configuration
+	 * PECI_GetMinClockSettings(hwmgr->pPECI, &minClocks);
+	 */
+	/* data->DisplayTiming.minClockInSR = minClocks.engineClockInSR; */
+
+	/* get level->DeepSleepDivId
+	if (phm_cap_enabled(hwmgr->platformDescriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
+		level->DeepSleepDivId = PhwFiji_GetSleepDividerIdFromClock(hwmgr, clock, minClocks.engineClockInSR);
+	*/
+	PP_ASSERT_WITH_CODE((clock >= 2500), "Engine clock can't satisfy stutter requirement!", return 0);
+	for (i = ELLESMERE_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
+		temp = clock / (1UL << i);
+
+		if (temp >= 2500 || i == 0)
+			break;
+	}
+
+	level->DeepSleepDivId = i;
+
+	/* Default to slow, highest DPM level will be
+	 * set to PPSMC_DISPLAY_WATERMARK_LOW later.
+	 */
+	if (data->update_up_hyst)
+		level->UpHyst = (uint8_t)data->up_hyst;
+	if (data->update_down_hyst)
+		level->DownHyst = (uint8_t)data->down_hyst;
+
+	level->SclkSetting = curr_sclk_setting;
+
+	CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
+	CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
+	return 0;
+}
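
The deep-sleep loop above picks the largest power-of-two divider that keeps
the divided engine clock at or above 2500 (25 MHz, since clocks here are in
10 kHz units). The same search in isolation, assuming a maximum divider ID of
5 (the real bound is ELLESMERE_MAX_DEEPSLEEP_DIVIDER_ID):

#include <stdio.h>

#define DEMO_MAX_DEEPSLEEP_DIVIDER_ID 5	/* assumed value for illustration */

int main(void)
{
	unsigned long clock = 60000;	/* 600 MHz in 10 kHz units */
	unsigned long temp;
	int i;

	for (i = DEMO_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
		temp = clock / (1UL << i);
		if (temp >= 2500 || i == 0)
			break;
	}
	/* 60000 >> 5 = 1875 (too low), 60000 >> 4 = 3750 -> DivId 4 */
	printf("DeepSleepDivId = %d (clock/2^%d = %lu)\n", i, i, temp);
	return 0;
}
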
+
+/**
+* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
+*
+* @param    hwmgr  the address of the hardware manager
+*/
+static int ellesmere_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+	struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
+	struct ellesmere_dpm_table *dpm_table = &data->dpm_table;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+	uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
+	int result = 0;
+	uint32_t array = data->dpm_table_start +
+			offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
+	uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
+			SMU74_MAX_LEVELS_GRAPHICS;
+	struct SMU74_Discrete_GraphicsLevel *levels =
+			data->smc_state_table.GraphicsLevel;
+	uint32_t i, max_entry;
+	uint8_t hightest_pcie_level_enabled = 0,
+		lowest_pcie_level_enabled = 0,
+		mid_pcie_level_enabled = 0,
+		count = 0;
+
+	ellesmere_get_sclk_range_table(hwmgr);
+
+	for (i = 0; i < dpm_table->sclk_table.count; i++) {
+
+		result = ellesmere_populate_single_graphic_level(hwmgr,
+				dpm_table->sclk_table.dpm_levels[i].value,
+				(uint16_t)data->activity_target[i],
+				&(data->smc_state_table.GraphicsLevel[i]));
+		if (result)
+			return result;
+
+		/* Making sure only DPM levels 0-1 have Deep Sleep Div ID populated. */
+		if (i > 1)
+			levels[i].DeepSleepDivId = 0;
+	}
+
+	data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+	data->smc_state_table.GraphicsDpmLevelCount =
+			(uint8_t)dpm_table->sclk_table.count;
+	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+			phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+
+	if (pcie_table != NULL) {
+		PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
+				"There must be 1 or more PCIE levels defined in PPTable.",
+				return -EINVAL);
+		max_entry = pcie_entry_cnt - 1;
+		for (i = 0; i < dpm_table->sclk_table.count; i++)
+			levels[i].pcieDpmLevel =
+					(uint8_t) ((i < max_entry) ? i : max_entry);
+	} else {
+		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+						(1 << (hightest_pcie_level_enabled + 1))) != 0))
+			hightest_pcie_level_enabled++;
+
+		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+						(1 << lowest_pcie_level_enabled)) == 0))
+			lowest_pcie_level_enabled++;
+
+		while ((count < hightest_pcie_level_enabled) &&
+				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+						(1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
+			count++;
+
+		mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
+				hightest_pcie_level_enabled ?
+						(lowest_pcie_level_enabled + 1 + count) :
+						hightest_pcie_level_enabled;
+
+		/* set pcieDpmLevel to hightest_pcie_level_enabled */
+		for (i = 2; i < dpm_table->sclk_table.count; i++)
+			levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
+
+		/* set pcieDpmLevel to lowest_pcie_level_enabled */
+		levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+		/* set pcieDpmLevel to mid_pcie_level_enabled */
+		levels[1].pcieDpmLevel = mid_pcie_level_enabled;
+	}
+	/* the level count is sent to the SMC once at SMC table init and never changes */
+	result = ellesmere_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
+			(uint32_t)array_size, data->sram_end);
+
+	return result;
+}
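
When no PCIe table exists, the code derives the lowest/highest/mid enabled
PCIe levels from the enable bitmask; the scan assumes the set bits are
contiguous. The same loops replayed on a sample mask:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mask = 0x3E;	/* levels 1..5 enabled (example) */
	uint8_t highest = 0, lowest = 0, mid, count = 0;

	while (mask && (mask & (1 << (highest + 1))))
		highest++;
	while (mask && !(mask & (1 << lowest)))
		lowest++;
	while (count < highest &&
			!(mask & (1 << (lowest + 1 + count))))
		count++;
	mid = (lowest + 1 + count) < highest ?
			(uint8_t)(lowest + 1 + count) : highest;

	/* lowest=1 highest=5 mid=2 */
	printf("lowest=%u highest=%u mid=%u\n", lowest, highest, mid);
	return 0;
}
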
+
+static int ellesmere_populate_single_memory_level(struct pp_hwmgr *hwmgr,
+		uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
+{
+	struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	int result = 0;
+	struct cgs_display_info info = {0, 0, NULL};
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+
+	if (table_info->vdd_dep_on_mclk) {
+		result = ellesmere_get_dependency_volt_by_clk(hwmgr,
+				table_info->vdd_dep_on_mclk, clock,
+				&mem_level->MinVoltage, &mem_level->MinMvdd);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"cannot find MinVddc voltage value in the memory "
+				"VDDC voltage dependency table", return result);
+	}
+
+	mem_level->MclkFrequency = clock;
+	mem_level->EnabledForThrottle = 1;
+	mem_level->EnabledForActivity = 0;
+	mem_level->UpHyst = 0;
+	mem_level->DownHyst = 100;
+	mem_level->VoltageDownHyst = 0;
+	mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+	mem_level->StutterEnable = false;
+
+	mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+	data->display_timing.num_existing_displays = info.display_count;
+
+	if ((data->mclk_stutter_mode_threshold) &&
+			(clock <= data->mclk_stutter_mode_threshold) &&
+			(PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
+					STUTTER_ENABLE) & 0x1))
+		mem_level->StutterEnable = true;
+
+	if (!result) {
+		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
+		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
+		CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
+		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
+	}
+	return result;
+}
+
+/**
+* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
+*
+* @param    hwmgr  the address of the hardware manager
+*/
+static int ellesmere_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+	struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
+	struct ellesmere_dpm_table *dpm_table = &data->dpm_table;
+	int result;
+	/* populate MCLK dpm table to SMU7 */
+	uint32_t array = data->dpm_table_start +
+			offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
+	uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
+			SMU74_MAX_LEVELS_MEMORY;
+	struct SMU74_Discrete_MemoryLevel *levels =
+			data->smc_state_table.MemoryLevel;
+	uint32_t i;
+
+	for (i = 0; i < dpm_table->mclk_table.count; i++) {
+		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+				"cannot populate memory level as memory clock is zero",
+				return -EINVAL);
+		result = ellesmere_populate_single_memory_level(hwmgr,
+				dpm_table->mclk_table.dpm_levels[i].value,
+				&levels[i]);
+		if (result)
+			return result;
+	}
+
+	/* Only enable level 0 for now. */
+	levels[0].EnabledForActivity = 1;
+
+	/* To prevent MC activity in stutter mode from pushing DPM up,
+	 * the UVD change complements this by putting the MCLK in
+	 * a higher state by default such that we are not affected by
+	 * the up threshold or MCLK DPM latency.
+	 */
+	levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
+	CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
+
+	data->smc_state_table.MemoryDpmLevelCount =
+			(uint8_t)dpm_table->mclk_table.count;
+	data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+			phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+	/* set highest level watermark to high */
+	levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
+			PPSMC_DISPLAY_WATERMARK_HIGH;
+
+	/* the level count is sent to the SMC once at SMC table init and never changes */
+	result = ellesmere_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
+			(uint32_t)array_size, data->sram_end);
+
+	return result;
+}
+
+/**
+* Populates the SMC MVDD structure using the provided memory clock.
+*
+* @param    hwmgr    the address of the hardware manager
+* @param    mclk     the MCLK value to be used in the decision if MVDD should be high or low.
+* @param    voltage  the SMC VOLTAGE structure to be populated
+*/
+int ellesmere_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+		uint32_t mclk, SMIO_Pattern *smio_pat)
+{
+	const struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint32_t i = 0;
+
+	if (ELLESMERE_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+		/* find the first MVDD entry whose clock is >= the requested mclk */
+		for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+			if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+				smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
+				break;
+			}
+		}
+		PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+				"MVDD Voltage is outside the supported range.",
+				return -EINVAL);
+	} else
+		return -EINVAL;
+
+	return 0;
+}
1410
1411static int ellesmere_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1412 SMU74_Discrete_DpmTable *table)
1413{
1414 int result = 0;
1415 uint32_t sclk_frequency;
1416 const struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
1417 struct phm_ppt_v1_information *table_info =
1418 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1419 SMIO_Pattern vol_level;
1420 uint32_t mvdd;
1421 uint16_t us_mvdd;
1422
1423 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
1424
1425 if (!data->sclk_dpm_key_disabled) {
1426 /* Get MinVoltage and Frequency from DPM0,
1427 * already converted to SMC_UL */
1428 sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value;
1429 result = ellesmere_get_dependency_volt_by_clk(hwmgr,
1430 table_info->vdd_dep_on_sclk,
1431				sclk_frequency,
1432 &table->ACPILevel.MinVoltage, &mvdd);
1433 PP_ASSERT_WITH_CODE((0 == result),
1434 "Cannot find ACPI VDDC voltage value "
1435 "in Clock Dependency Table", );
1436 } else {
1437 sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
1438 table->ACPILevel.MinVoltage =
1439 data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
1440 }
1441
1442 result = ellesmere_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting));
1443 PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
1444
1445 table->ACPILevel.DeepSleepDivId = 0;
1446 table->ACPILevel.CcPwrDynRm = 0;
1447 table->ACPILevel.CcPwrDynRm1 = 0;
1448
1449 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
1450 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
1451 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
1452 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
1453
1454 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
1455 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
1456 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
1457 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
1458 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
1459 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
1460
1461 if (!data->mclk_dpm_key_disabled) {
1462 /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
1463 table->MemoryACPILevel.MclkFrequency =
1464 data->dpm_table.mclk_table.dpm_levels[0].value;
1465 result = ellesmere_get_dependency_volt_by_clk(hwmgr,
1466 table_info->vdd_dep_on_mclk,
1467 table->MemoryACPILevel.MclkFrequency,
1468 &table->MemoryACPILevel.MinVoltage, &mvdd);
1469 PP_ASSERT_WITH_CODE((0 == result),
1470 "Cannot find ACPI VDDCI voltage value "
1471 "in Clock Dependency Table",
1472 );
1473 } else {
1474 table->MemoryACPILevel.MclkFrequency =
1475 data->vbios_boot_state.mclk_bootup_value;
1476 table->MemoryACPILevel.MinVoltage =
1477 data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
1478 }
1479
1480 us_mvdd = 0;
1481 if ((ELLESMERE_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
1482 (data->mclk_dpm_key_disabled))
1483 us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
1484 else {
1485 if (!ellesmere_populate_mvdd_value(hwmgr,
1486 data->dpm_table.mclk_table.dpm_levels[0].value,
1487 &vol_level))
1488 us_mvdd = vol_level.Voltage;
1489 }
1490
1491 if (0 == ellesmere_populate_mvdd_value(hwmgr, 0, &vol_level))
1492 table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
1493 else
1494 table->MemoryACPILevel.MinMvdd = 0;
1495
1496 table->MemoryACPILevel.StutterEnable = false;
1497
1498 table->MemoryACPILevel.EnabledForThrottle = 0;
1499 table->MemoryACPILevel.EnabledForActivity = 0;
1500 table->MemoryACPILevel.UpHyst = 0;
1501 table->MemoryACPILevel.DownHyst = 100;
1502 table->MemoryACPILevel.VoltageDownHyst = 0;
1503 table->MemoryACPILevel.ActivityLevel =
1504 PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
1505
1506 CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
1507 CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
1508
1509 return result;
1510}
1511
1512static int ellesmere_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1513 SMU74_Discrete_DpmTable *table)
1514{
1515 int result = -EINVAL;
1516 uint8_t count;
1517 struct pp_atomctrl_clock_dividers_vi dividers;
1518 struct phm_ppt_v1_information *table_info =
1519 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1520 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1521 table_info->mm_dep_table;
1522 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
1523
1524 table->VceLevelCount = (uint8_t)(mm_table->count);
1525 table->VceBootLevel = 0;
1526
1527 for (count = 0; count < table->VceLevelCount; count++) {
1528 table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
1529 table->VceLevel[count].MinVoltage |=
1530 (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
1531 table->VceLevel[count].MinVoltage |=
1532 ((mm_table->entries[count].vddc - data->vddc_vddci_delta) *
1533 VOLTAGE_SCALE) << VDDCI_SHIFT;
1534 table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1535
1536		/* retrieve divider value from VBIOS */
1537		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1538				table->VceLevel[count].Frequency, &dividers);
1539		PP_ASSERT_WITH_CODE((0 == result),
1540				"cannot find divider ID for VCE engine clock",
1541				return result);
1542
1543 table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1544
1545 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
1546 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
1547 }
1548 return result;
1549}
1550
1551static int ellesmere_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
1552 SMU74_Discrete_DpmTable *table)
1553{
1554 int result = -EINVAL;
1555 uint8_t count;
1556 struct pp_atomctrl_clock_dividers_vi dividers;
1557 struct phm_ppt_v1_information *table_info =
1558 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1559 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1560 table_info->mm_dep_table;
1561 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
1562
1563 table->SamuBootLevel = 0;
1564 table->SamuLevelCount = (uint8_t)(mm_table->count);
1565
1566 for (count = 0; count < table->SamuLevelCount; count++) {
1567 /* not sure whether we need evclk or not */
1568 table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
1569 table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
1570 VOLTAGE_SCALE) << VDDC_SHIFT;
1571 table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
1572 data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
1573 table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1574
1575		/* retrieve divider value from VBIOS */
1576		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1577				table->SamuLevel[count].Frequency, &dividers);
1578		PP_ASSERT_WITH_CODE((0 == result),
1579				"cannot find divider ID for SAMU clock", return result);
1580
1581 table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1582
1583 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
1584 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
1585 }
1586 return result;
1587}
1588
1589static int ellesmere_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
1590 int32_t eng_clock, int32_t mem_clock,
1591 SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
1592{
1593 uint32_t dram_timing;
1594 uint32_t dram_timing2;
1595 uint32_t burst_time;
1596 int result;
1597
1598 result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1599 eng_clock, mem_clock);
1600 PP_ASSERT_WITH_CODE(result == 0,
1601 "Error calling VBIOS to set DRAM_TIMING.", return result);
1602
1603 dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1604 dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1605 burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
1606
1607
1608 arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
1609 arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
1610 arb_regs->McArbBurstTime = (uint8_t)burst_time;
1611
1612 return 0;
1613}
1614
1615static int ellesmere_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1616{
1617 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
1618 struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
1619 uint32_t i, j;
1620 int result = 0;
1621
1622 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
1623 for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
1624 result = ellesmere_populate_memory_timing_parameters(hwmgr,
1625 data->dpm_table.sclk_table.dpm_levels[i].value,
1626 data->dpm_table.mclk_table.dpm_levels[j].value,
1627 &arb_regs.entries[i][j]);
1628 if (result == 0)
1629 result = atomctrl_set_ac_timing_ai(hwmgr, data->dpm_table.mclk_table.dpm_levels[j].value, j);
1630 if (result != 0)
1631 return result;
1632 }
1633 }
1634
1635 result = ellesmere_copy_bytes_to_smc(
1636 hwmgr->smumgr,
1637 data->arb_table_start,
1638 (uint8_t *)&arb_regs,
1639 sizeof(SMU74_Discrete_MCArbDramTimingTable),
1640 data->sram_end);
1641 return result;
1642}
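/* Note (assumption): arb_regs is an SCLK x MCLK matrix, i.e.
 * entries[i][j] holds the DRAM timings for running SCLK level i together
 * with MCLK level j; the whole matrix is uploaded to SMC SRAM in a
 * single copy above.
 */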
1643
1644static int ellesmere_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1645 struct SMU74_Discrete_DpmTable *table)
1646{
1647 int result = -EINVAL;
1648 uint8_t count;
1649 struct pp_atomctrl_clock_dividers_vi dividers;
1650 struct phm_ppt_v1_information *table_info =
1651 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1652 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1653 table_info->mm_dep_table;
1654 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
1655
1656 table->UvdLevelCount = (uint8_t)(mm_table->count);
1657 table->UvdBootLevel = 0;
1658
1659 for (count = 0; count < table->UvdLevelCount; count++) {
1660 table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
1661 table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
1662 table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
1663 VOLTAGE_SCALE) << VDDC_SHIFT;
1664 table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
1665 data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
1666 table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1667
1668		/* retrieve divider value from VBIOS */
1669		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1670				table->UvdLevel[count].VclkFrequency, &dividers);
1671		PP_ASSERT_WITH_CODE((0 == result),
1672				"cannot find divider ID for Vclk", return result);
1673
1674 table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
1675
1676 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1677 table->UvdLevel[count].DclkFrequency, &dividers);
1678		PP_ASSERT_WITH_CODE((0 == result),
1679				"cannot find divider ID for Dclk", return result);
1680
1681 table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
1682
1683 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
1684 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
1685 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
1686
1687 }
1688 return result;
1689}
1690
1691static int ellesmere_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
1692 struct SMU74_Discrete_DpmTable *table)
1693{
1694 int result = 0;
1695 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
1696
1697 table->GraphicsBootLevel = 0;
1698 table->MemoryBootLevel = 0;
1699
1700 /* find boot level from dpm table */
1701 result = phm_find_boot_level(&(data->dpm_table.sclk_table),
1702 data->vbios_boot_state.sclk_bootup_value,
1703 (uint32_t *)&(table->GraphicsBootLevel));
1704
1705 result = phm_find_boot_level(&(data->dpm_table.mclk_table),
1706 data->vbios_boot_state.mclk_bootup_value,
1707 (uint32_t *)&(table->MemoryBootLevel));
1708
1709 table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
1710 VOLTAGE_SCALE;
1711 table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
1712 VOLTAGE_SCALE;
1713 table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
1714 VOLTAGE_SCALE;
1715
1716 CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
1717 CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
1718 CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
1719
1720 return 0;
1721}
1722
1723
1724static int ellesmere_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
1725{
1726 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
1727 struct phm_ppt_v1_information *table_info =
1728 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1729 uint8_t count, level;
1730
1731 count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
1732
1733 for (level = 0; level < count; level++) {
1734 if (table_info->vdd_dep_on_sclk->entries[level].clk >=
1735 data->vbios_boot_state.sclk_bootup_value) {
1736 data->smc_state_table.GraphicsBootLevel = level;
1737 break;
1738 }
1739 }
1740
1741 count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
1742 for (level = 0; level < count; level++) {
1743 if (table_info->vdd_dep_on_mclk->entries[level].clk >=
1744 data->vbios_boot_state.mclk_bootup_value) {
1745 data->smc_state_table.MemoryBootLevel = level;
1746 break;
1747 }
1748 }
1749
1750 return 0;
1751}
1752
1753static int ellesmere_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
1754{
1755 uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
1756 volt_with_cks, value;
1757 uint16_t clock_freq_u16;
1758 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
1759 uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
1760 volt_offset = 0;
1761 struct phm_ppt_v1_information *table_info =
1762 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1763 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1764 table_info->vdd_dep_on_sclk;
1765
1766 stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
1767
1768	/* Read SMU_EFUSE to calculate RO and determine whether
1769	 * the part is SS or FF. If RO >= 1660 MHz, the part is FF.
1770	 */
1771 efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1772 ixSMU_EFUSE_0 + (146 * 4));
1773 efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1774 ixSMU_EFUSE_0 + (148 * 4));
1775 efuse &= 0xFF000000;
1776 efuse = efuse >> 24;
1777 efuse2 &= 0xF;
1778
1779 if (efuse2 == 1)
1780 ro = (2300 - 1350) * efuse / 255 + 1350;
1781 else
1782 ro = (2500 - 1000) * efuse / 255 + 1000;
1783
1784 if (ro >= 1660)
1785 type = 0;
1786 else
1787 type = 1;
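	/* Worked example: with efuse2 == 1 and an efuse byte of 128,
	 * ro = (2300 - 1350) * 128 / 255 + 1350 = 1826 >= 1660, so the
	 * part is classified as FF (type = 0).
	 */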
1788
1789 /* Populate Stretch amount */
1790 data->smc_state_table.ClockStretcherAmount = stretch_amount;
1791
1792 /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
1793 for (i = 0; i < sclk_table->count; i++) {
1794 data->smc_state_table.Sclk_CKS_masterEn0_7 |=
1795 sclk_table->entries[i].cks_enable << i;
1796 volt_without_cks = (uint32_t)((14041 *
1797 (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
1798 (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
1799 volt_with_cks = (uint32_t)((13946 *
1800 (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
1801 (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
1802 if (volt_without_cks >= volt_with_cks)
1803 volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
1804 sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
1805 data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
1806 }
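	/* Worked example (illustrative values, ro = 1826, clk = 100000 in
	 * 10 kHz units, cks_voffset = 0): volt_without_cks =
	 * (1404 + 3646 - 1826) * 1000 / (4026 - 1392) = 1223,
	 * volt_with_cks = (1394 + 3365 - 1826) * 1000 / (3664 - 1145) = 1164,
	 * so volt_offset = ((1223 - 1164) * 100 / 625) + 1 = 10
	 * (presumably in 6.25 mV steps).
	 */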
1807
1808 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
1809 STRETCH_ENABLE, 0x0);
1810 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
1811 masterReset, 0x1);
1812 /* PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, staticEnable, 0x1); */
1813 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
1814 masterReset, 0x0);
1815
1816 /* Populate CKS Lookup Table */
1817 if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
1818 stretch_amount2 = 0;
1819 else if (stretch_amount == 3 || stretch_amount == 4)
1820 stretch_amount2 = 1;
1821 else {
1822 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1823 PHM_PlatformCaps_ClockStretcher);
1824 PP_ASSERT_WITH_CODE(false,
1825				"Stretch Amount in PPTable not supported",
1826 return -EINVAL);
1827 }
1828
1829 value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1830 ixPWR_CKS_CNTL);
1831 value &= 0xFFC2FF87;
1832 data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
1833 ellesmere_clock_stretcher_lookup_table[stretch_amount2][0];
1834 data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
1835 ellesmere_clock_stretcher_lookup_table[stretch_amount2][1];
1836 clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(data->smc_state_table.
1837 GraphicsLevel[data->smc_state_table.GraphicsDpmLevelCount - 1].SclkSetting.SclkFrequency) / 100);
1838 if (ellesmere_clock_stretcher_lookup_table[stretch_amount2][0] < clock_freq_u16
1839 && ellesmere_clock_stretcher_lookup_table[stretch_amount2][1] > clock_freq_u16) {
1840 /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
1841 value |= (ellesmere_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
1842 /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
1843 value |= (ellesmere_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
1844 /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
1845 value |= (ellesmere_clock_stretch_amount_conversion
1846 [ellesmere_clock_stretcher_lookup_table[stretch_amount2][3]]
1847 [stretch_amount]) << 3;
1848 }
1849 CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq);
1850 CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq);
1851 data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
1852 ellesmere_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
1853 data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
1854 (ellesmere_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
1855
1856 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1857 ixPWR_CKS_CNTL, value);
1858
1859 /* Populate DDT Lookup Table */
1860 for (i = 0; i < 4; i++) {
1861 /* Assign the minimum and maximum VID stored
1862 * in the last row of Clock Stretcher Voltage Table.
1863 */
1864 data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].minVID =
1865 (uint8_t) ellesmere_clock_stretcher_ddt_table[type][i][2];
1866 data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].maxVID =
1867 (uint8_t) ellesmere_clock_stretcher_ddt_table[type][i][3];
1868 /* Loop through each SCLK and check the frequency
1869 * to see if it lies within the frequency for clock stretcher.
1870 */
1871 for (j = 0; j < data->smc_state_table.GraphicsDpmLevelCount; j++) {
1872 cks_setting = 0;
1873 clock_freq = PP_SMC_TO_HOST_UL(
1874 data->smc_state_table.GraphicsLevel[j].SclkSetting.SclkFrequency);
1875			/* Check the allowed frequency against the sclk level[j].
1876			 * The sclk's endianness has already been converted,
1877			 * and it is in 10 kHz units, as opposed to the data
1878			 * table, which is in MHz units.
1879			 */
1880 if (clock_freq >= (ellesmere_clock_stretcher_ddt_table[type][i][0]) * 100) {
1881 cks_setting |= 0x2;
1882 if (clock_freq < (ellesmere_clock_stretcher_ddt_table[type][i][1]) * 100)
1883 cks_setting |= 0x1;
1884 }
1885 data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting
1886 |= cks_setting << (j * 2);
1887 }
1888 CONVERT_FROM_HOST_TO_SMC_US(
1889 data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting);
1890 }
1891
1892 value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
1893 value &= 0xFFFFFFFE;
1894 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
1895
1896 return 0;
1897}
1898
1899/**
1900* Populates the SMC VRConfig field in the DPM table.
1901*
1902* @param hwmgr the address of the hardware manager
1903* @param table the SMC DPM table structure to be populated
1904* @return always 0
1905*/
1906static int ellesmere_populate_vr_config(struct pp_hwmgr *hwmgr,
1907 struct SMU74_Discrete_DpmTable *table)
1908{
1909 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
1910 uint16_t config;
1911
1912 config = VR_MERGED_WITH_VDDC;
1913 table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
1914
1915 /* Set Vddc Voltage Controller */
1916 if (ELLESMERE_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1917 config = VR_SVI2_PLANE_1;
1918 table->VRConfig |= config;
1919 } else {
1920 PP_ASSERT_WITH_CODE(false,
1921 "VDDC should be on SVI2 control in merged mode!",
1922 );
1923 }
1924 /* Set Vddci Voltage Controller */
1925 if (ELLESMERE_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
1926 config = VR_SVI2_PLANE_2; /* only in merged mode */
1927 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1928 } else if (ELLESMERE_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
1929 config = VR_SMIO_PATTERN_1;
1930 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1931 } else {
1932 config = VR_STATIC_VOLTAGE;
1933 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1934 }
1935 /* Set Mvdd Voltage Controller */
1936 if (ELLESMERE_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
1937 config = VR_SVI2_PLANE_2;
1938 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1939 } else if (ELLESMERE_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
1940 config = VR_SMIO_PATTERN_2;
1941 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1942 } else {
1943 config = VR_STATIC_VOLTAGE;
1944 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1945 }
1946
1947 return 0;
1948}
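/* Sketch (assumption: one 8-bit config value per rail at the
 * VRCONF_*_SHIFT bit positions): in the fully SVI2-controlled merged
 * case the function above yields
 *   VRConfig = (VR_MERGED_WITH_VDDC << VRCONF_VDDGFX_SHIFT) |
 *              (VR_SVI2_PLANE_2 << VRCONF_VDDCI_SHIFT) |
 *              (VR_SVI2_PLANE_2 << VRCONF_MVDD_SHIFT) |
 *              VR_SVI2_PLANE_1;
 */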
1949
1950/**
1951* Initializes the SMC table and uploads it
1952*
1953* @param hwmgr the address of the powerplay hardware manager.
1954* @return always 0
1955*/
1956static int ellesmere_init_smc_table(struct pp_hwmgr *hwmgr)
1957{
1958 int result;
1959 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
1960 struct phm_ppt_v1_information *table_info =
1961 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1962 struct SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
1963 const struct ellesmere_ulv_parm *ulv = &(data->ulv);
1964 uint8_t i;
1965 struct pp_atomctrl_gpio_pin_assignment gpio_pin;
1966
1967 result = ellesmere_setup_default_dpm_tables(hwmgr);
1968 PP_ASSERT_WITH_CODE(0 == result,
1969 "Failed to setup default DPM tables!", return result);
1970
1971 if (ELLESMERE_VOLTAGE_CONTROL_NONE != data->voltage_control)
1972 ellesmere_populate_smc_voltage_tables(hwmgr, table);
1973
1974 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1975 PHM_PlatformCaps_AutomaticDCTransition))
1976 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
1977
1978 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1979 PHM_PlatformCaps_StepVddc))
1980 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
1981
1982 if (data->is_memory_gddr5)
1983 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
1984
1985 if (ulv->ulv_supported && table_info->us_ulv_voltage_offset) {
1986 result = ellesmere_populate_ulv_state(hwmgr, table);
1987 PP_ASSERT_WITH_CODE(0 == result,
1988 "Failed to initialize ULV state!", return result);
1989 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1990 ixCG_ULV_PARAMETER, PPELLESMERE_CGULVPARAMETER_DFLT);
1991 }
1992
1993 result = ellesmere_populate_smc_link_level(hwmgr, table);
1994 PP_ASSERT_WITH_CODE(0 == result,
1995 "Failed to initialize Link Level!", return result);
1996
1997 result = ellesmere_populate_all_graphic_levels(hwmgr);
1998 PP_ASSERT_WITH_CODE(0 == result,
1999 "Failed to initialize Graphics Level!", return result);
2000
2001 result = ellesmere_populate_all_memory_levels(hwmgr);
2002 PP_ASSERT_WITH_CODE(0 == result,
2003 "Failed to initialize Memory Level!", return result);
2004
2005 result = ellesmere_populate_smc_acpi_level(hwmgr, table);
2006 PP_ASSERT_WITH_CODE(0 == result,
2007 "Failed to initialize ACPI Level!", return result);
2008
2009 result = ellesmere_populate_smc_vce_level(hwmgr, table);
2010 PP_ASSERT_WITH_CODE(0 == result,
2011 "Failed to initialize VCE Level!", return result);
2012
2013 result = ellesmere_populate_smc_samu_level(hwmgr, table);
2014 PP_ASSERT_WITH_CODE(0 == result,
2015 "Failed to initialize SAMU Level!", return result);
2016
2017 /* Since only the initial state is completely set up at this point
2018 * (the other states are just copies of the boot state) we only
2019 * need to populate the ARB settings for the initial state.
2020 */
2021 result = ellesmere_program_memory_timing_parameters(hwmgr);
2022 PP_ASSERT_WITH_CODE(0 == result,
2023 "Failed to Write ARB settings for the initial state.", return result);
2024
2025 result = ellesmere_populate_smc_uvd_level(hwmgr, table);
2026 PP_ASSERT_WITH_CODE(0 == result,
2027 "Failed to initialize UVD Level!", return result);
2028
2029 result = ellesmere_populate_smc_boot_level(hwmgr, table);
2030 PP_ASSERT_WITH_CODE(0 == result,
2031 "Failed to initialize Boot Level!", return result);
2032
2033	result = ellesmere_populate_smc_initial_state(hwmgr);
2034 PP_ASSERT_WITH_CODE(0 == result,
2035 "Failed to initialize Boot State!", return result);
2036
2037 result = ellesmere_populate_bapm_parameters_in_dpm_table(hwmgr);
2038 PP_ASSERT_WITH_CODE(0 == result,
2039 "Failed to populate BAPM Parameters!", return result);
2040
2041 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2042 PHM_PlatformCaps_ClockStretcher)) {
2043 result = ellesmere_populate_clock_stretcher_data_table(hwmgr);
2044 PP_ASSERT_WITH_CODE(0 == result,
2045 "Failed to populate Clock Stretcher Data Table!",
2046 return result);
2047 }
2048
2049 table->GraphicsVoltageChangeEnable = 1;
2050 table->GraphicsThermThrottleEnable = 1;
2051 table->GraphicsInterval = 1;
2052 table->VoltageInterval = 1;
2053 table->ThermalInterval = 1;
2054 table->TemperatureLimitHigh =
2055 table_info->cac_dtp_table->usTargetOperatingTemp *
2056 ELLESMERE_Q88_FORMAT_CONVERSION_UNIT;
2057 table->TemperatureLimitLow =
2058 (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
2059 ELLESMERE_Q88_FORMAT_CONVERSION_UNIT;
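	/* Assuming Q8.8 fixed point (ELLESMERE_Q88_FORMAT_CONVERSION_UNIT
	 * presumably 256), a target operating temperature of 80 degrees C
	 * gives TemperatureLimitHigh = 80 * 256 = 20480.
	 */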
2060 table->MemoryVoltageChangeEnable = 1;
2061 table->MemoryInterval = 1;
2062 table->VoltageResponseTime = 0;
2063 table->PhaseResponseTime = 0;
2064 table->MemoryThermThrottleEnable = 1;
2065 table->PCIeBootLinkLevel = 0;
2066 table->PCIeGenInterval = 1;
2067
2068 result = ellesmere_populate_vr_config(hwmgr, table);
2069 PP_ASSERT_WITH_CODE(0 == result,
2070 "Failed to populate VRConfig setting!", return result);
2071
2072 table->ThermGpio = 17;
2073 table->SclkStepSize = 0x4000;
2074
2075 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
2076 table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
2077 } else {
2078 table->VRHotGpio = ELLESMERE_UNUSED_GPIO_PIN;
2079 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2080 PHM_PlatformCaps_RegulatorHot);
2081 }
2082
2083 if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
2084 &gpio_pin)) {
2085 table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
2086 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2087 PHM_PlatformCaps_AutomaticDCTransition);
2088 } else {
2089 table->AcDcGpio = ELLESMERE_UNUSED_GPIO_PIN;
2090 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2091 PHM_PlatformCaps_AutomaticDCTransition);
2092 }
2093
2094 /* Thermal Output GPIO */
2095 if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
2096 &gpio_pin)) {
2097 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2098 PHM_PlatformCaps_ThermalOutGPIO);
2099
2100 table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
2101
2102		/* For polarity, read GPIOPAD_A with the assigned GPIO pin:
2103		 * the VBIOS programs this register to the 'inactive state',
2104		 * so the driver can derive the 'active state' from it and
2105		 * program the SMU with the correct polarity.
2106		 */
2107 table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
2108 & (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
2109 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
2110
2111 /* if required, combine VRHot/PCC with thermal out GPIO */
2112 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
2113 && phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
2114 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
2115 } else {
2116 table->ThermOutGpio = 17;
2117 table->ThermOutPolarity = 1;
2118 table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
2119 }
2120
2121 for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
2122 table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
2123
2124 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
2125 CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
2126 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
2127 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
2128 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
2129 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
2130 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
2131 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
2132 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
2133
2134	/* Upload all DPM data (levels, level counts, etc.) to SMC memory; the size deliberately excludes the three SMU74_PIDController blocks. */
2135 result = ellesmere_copy_bytes_to_smc(hwmgr->smumgr,
2136 data->dpm_table_start +
2137 offsetof(SMU74_Discrete_DpmTable, SystemFlags),
2138 (uint8_t *)&(table->SystemFlags),
2139 sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
2140 data->sram_end);
2141 PP_ASSERT_WITH_CODE(0 == result,
2142 "Failed to upload dpm data to SMC memory!", return result);
2143
2144 return 0;
2145}
2146
2147/**
2148* Initialize the ARB DRAM timing table's index field.
2149*
2150* @param hwmgr the address of the powerplay hardware manager.
2151* @return always 0
2152*/
2153static int ellesmere_init_arb_table_index(struct pp_hwmgr *hwmgr)
2154{
2155 const struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
2156 uint32_t tmp;
2157 int result;
2158
2159	/* This is a read-modify-write on the first byte of the ARB table.
2160	 * The first byte in the SMU74_Discrete_MCArbDramTimingTable structure
2161	 * is the field 'current'.
2162	 * This solution is ugly, but we never write the whole table, only
2163	 * individual fields in it.
2164	 * In reality this field should not be in that structure
2165	 * but in a soft register.
2166	 */
2167 result = ellesmere_read_smc_sram_dword(hwmgr->smumgr,
2168 data->arb_table_start, &tmp, data->sram_end);
2169
2170 if (result)
2171 return result;
2172
2173 tmp &= 0x00FFFFFF;
2174 tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
2175
2176 return ellesmere_write_smc_sram_dword(hwmgr->smumgr,
2177 data->arb_table_start, tmp, data->sram_end);
2178}
2179
2180static int ellesmere_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
2181{
2182 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2183 PHM_PlatformCaps_RegulatorHot))
2184 return smum_send_msg_to_smc(hwmgr->smumgr,
2185 PPSMC_MSG_EnableVRHotGPIOInterrupt);
2186
2187 return 0;
2188}
2189
2190static int ellesmere_enable_sclk_control(struct pp_hwmgr *hwmgr)
2191{
2192 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
2193 SCLK_PWRMGT_OFF, 0);
2194 return 0;
2195}
2196
2197static int ellesmere_enable_ulv(struct pp_hwmgr *hwmgr)
2198{
2199 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
2200 struct ellesmere_ulv_parm *ulv = &(data->ulv);
2201
2202 if (ulv->ulv_supported)
2203 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
2204
2205 return 0;
2206}
2207
2208static int ellesmere_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2209{
2210 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2211 PHM_PlatformCaps_SclkDeepSleep)) {
2212 if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
2213 PP_ASSERT_WITH_CODE(false,
2214 "Attempt to enable Master Deep Sleep switch failed!",
2215 return -1);
2216 } else {
2217 if (smum_send_msg_to_smc(hwmgr->smumgr,
2218 PPSMC_MSG_MASTER_DeepSleep_OFF)) {
2219 PP_ASSERT_WITH_CODE(false,
2220 "Attempt to disable Master Deep Sleep switch failed!",
2221 return -1);
2222 }
2223 }
2224
2225 return 0;
2226}
2227
2228static int ellesmere_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
2229{
2230 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
2231
2232 /* enable SCLK dpm */
2233 if (!data->sclk_dpm_key_disabled)
2234 PP_ASSERT_WITH_CODE(
2235 (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
2236 "Failed to enable SCLK DPM during DPM Start Function!",
2237 return -1);
2238
2239 /* enable MCLK dpm */
2240 if (0 == data->mclk_dpm_key_disabled) {
2241
2242 PP_ASSERT_WITH_CODE(
2243 (0 == smum_send_msg_to_smc(hwmgr->smumgr,
2244 PPSMC_MSG_MCLKDPM_Enable)),
2245 "Failed to enable MCLK DPM during DPM Start Function!",
2246 return -1);
2247
2248
2249 PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
2250
2251 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
2252 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
2253 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
2254 udelay(10);
2255 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
2256 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
2257 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
2258 }
2259
2260 return 0;
2261}
2262
2263static int ellesmere_start_dpm(struct pp_hwmgr *hwmgr)
2264{
2265 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
2266
2267	/* enable general power management */
2268
2269 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
2270 GLOBAL_PWRMGT_EN, 1);
2271
2272 /* enable sclk deep sleep */
2273
2274 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
2275 DYNAMIC_PM_EN, 1);
2276
2277 /* prepare for PCIE DPM */
2278
2279 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
2280 data->soft_regs_start + offsetof(SMU74_SoftRegisters,
2281 VoltageChangeTimeout), 0x1000);
2282 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
2283 SWRST_COMMAND_1, RESETLC, 0x0);
2284
2285 PP_ASSERT_WITH_CODE(
2286 (0 == smum_send_msg_to_smc(hwmgr->smumgr,
2287 PPSMC_MSG_Voltage_Cntl_Enable)),
2288 "Failed to enable voltage DPM during DPM Start Function!",
2289 return -1);
2290
2291 if (ellesmere_enable_sclk_mclk_dpm(hwmgr)) {
2292 printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!");
2293 return -1;
2294 }
2295
2296 /* enable PCIE dpm */
2297 if (0 == data->pcie_dpm_key_disabled) {
2298 PP_ASSERT_WITH_CODE(
2299 (0 == smum_send_msg_to_smc(hwmgr->smumgr,
2300 PPSMC_MSG_PCIeDPM_Enable)),
2301 "Failed to enable pcie DPM during DPM Start Function!",
2302 return -1);
2303 }
2304
2305 PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr,
2306 PPSMC_MSG_EnableACDCGPIOInterrupt)),
2307 "Failed to enable AC DC GPIO Interrupt!",
2308 );
2309
2310 return 0;
2311}
2312
2313static void ellesmere_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
2314{
2315 bool protection;
2316 enum DPM_EVENT_SRC src;
2317
2318 switch (sources) {
2319 default:
2320 printk(KERN_ERR "Unknown throttling event sources.");
2321 /* fall through */
2322 case 0:
2323 protection = false;
2324 /* src is unused */
2325 break;
2326 case (1 << PHM_AutoThrottleSource_Thermal):
2327 protection = true;
2328 src = DPM_EVENT_SRC_DIGITAL;
2329 break;
2330 case (1 << PHM_AutoThrottleSource_External):
2331 protection = true;
2332 src = DPM_EVENT_SRC_EXTERNAL;
2333 break;
2334 case (1 << PHM_AutoThrottleSource_External) |
2335 (1 << PHM_AutoThrottleSource_Thermal):
2336 protection = true;
2337 src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
2338 break;
2339 }
2340 /* Order matters - don't enable thermal protection for the wrong source. */
2341 if (protection) {
2342 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
2343 DPM_EVENT_SRC, src);
2344 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
2345 THERMAL_PROTECTION_DIS,
2346 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2347 PHM_PlatformCaps_ThermalController));
2348 } else
2349 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
2350 THERMAL_PROTECTION_DIS, 1);
2351}
2352
2353static int ellesmere_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
2354 PHM_AutoThrottleSource source)
2355{
2356 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
2357
2358 if (!(data->active_auto_throttle_sources & (1 << source))) {
2359 data->active_auto_throttle_sources |= 1 << source;
2360 ellesmere_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
2361 }
2362 return 0;
2363}
2364
2365static int ellesmere_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
2366{
2367 return ellesmere_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
2368}
2369
2370int ellesmere_pcie_performance_request(struct pp_hwmgr *hwmgr)
2371{
2372 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
2373 data->pcie_performance_request = true;
2374
2375 return 0;
2376}
2377
2378int ellesmere_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
2379{
2380 int tmp_result, result = 0;
2381 tmp_result = (!ellesmere_is_dpm_running(hwmgr)) ? 0 : -1;
2382	PP_ASSERT_WITH_CODE(tmp_result == 0,
2383 "DPM is already running right now, no need to enable DPM!",
2384 return 0);
2385
2386 if (ellesmere_voltage_control(hwmgr)) {
2387 tmp_result = ellesmere_enable_voltage_control(hwmgr);
2388 PP_ASSERT_WITH_CODE(tmp_result == 0,
2389 "Failed to enable voltage control!",
2390 result = tmp_result);
2391
2392 tmp_result = ellesmere_construct_voltage_tables(hwmgr);
2393 PP_ASSERT_WITH_CODE((0 == tmp_result),
2394				"Failed to construct voltage tables!",
2395 result = tmp_result);
2396 }
2397
2398 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2399 PHM_PlatformCaps_EngineSpreadSpectrumSupport))
2400 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
2401 GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
2402
2403 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2404 PHM_PlatformCaps_ThermalController))
2405 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
2406 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
2407
2408 tmp_result = ellesmere_program_static_screen_threshold_parameters(hwmgr);
2409 PP_ASSERT_WITH_CODE((0 == tmp_result),
2410 "Failed to program static screen threshold parameters!",
2411 result = tmp_result);
2412
2413 tmp_result = ellesmere_enable_display_gap(hwmgr);
2414 PP_ASSERT_WITH_CODE((0 == tmp_result),
2415 "Failed to enable display gap!", result = tmp_result);
2416
2417 tmp_result = ellesmere_program_voting_clients(hwmgr);
2418 PP_ASSERT_WITH_CODE((0 == tmp_result),
2419 "Failed to program voting clients!", result = tmp_result);
2420
2421 tmp_result = ellesmere_process_firmware_header(hwmgr);
2422 PP_ASSERT_WITH_CODE((0 == tmp_result),
2423 "Failed to process firmware header!", result = tmp_result);
2424
2425 tmp_result = ellesmere_initial_switch_from_arbf0_to_f1(hwmgr);
2426 PP_ASSERT_WITH_CODE((0 == tmp_result),
2427 "Failed to initialize switch from ArbF0 to F1!",
2428 result = tmp_result);
2429
2430 tmp_result = ellesmere_init_smc_table(hwmgr);
2431 PP_ASSERT_WITH_CODE((0 == tmp_result),
2432 "Failed to initialize SMC table!", result = tmp_result);
2433
2434 tmp_result = ellesmere_init_arb_table_index(hwmgr);
2435 PP_ASSERT_WITH_CODE((0 == tmp_result),
2436 "Failed to initialize ARB table index!", result = tmp_result);
2437
2438 tmp_result = ellesmere_populate_pm_fuses(hwmgr);
2439 PP_ASSERT_WITH_CODE((0 == tmp_result),
2440 "Failed to populate PM fuses!", result = tmp_result);
2441
2442 tmp_result = ellesmere_enable_vrhot_gpio_interrupt(hwmgr);
2443 PP_ASSERT_WITH_CODE((0 == tmp_result),
2444 "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
2445
2446 tmp_result = ellesmere_enable_sclk_control(hwmgr);
2447 PP_ASSERT_WITH_CODE((0 == tmp_result),
2448 "Failed to enable SCLK control!", result = tmp_result);
2449
2450 tmp_result = ellesmere_enable_ulv(hwmgr);
2451 PP_ASSERT_WITH_CODE((0 == tmp_result),
2452 "Failed to enable ULV!", result = tmp_result);
2453
2454 tmp_result = ellesmere_enable_deep_sleep_master_switch(hwmgr);
2455 PP_ASSERT_WITH_CODE((0 == tmp_result),
2456 "Failed to enable deep sleep master switch!", result = tmp_result);
2457
2458 tmp_result = ellesmere_start_dpm(hwmgr);
2459 PP_ASSERT_WITH_CODE((0 == tmp_result),
2460 "Failed to start DPM!", result = tmp_result);
2461
2462 tmp_result = ellesmere_enable_smc_cac(hwmgr);
2463 PP_ASSERT_WITH_CODE((0 == tmp_result),
2464 "Failed to enable SMC CAC!", result = tmp_result);
2465
2466 tmp_result = ellesmere_enable_power_containment(hwmgr);
2467 PP_ASSERT_WITH_CODE((0 == tmp_result),
2468 "Failed to enable power containment!", result = tmp_result);
2469
2470 tmp_result = ellesmere_power_control_set_level(hwmgr);
2471 PP_ASSERT_WITH_CODE((0 == tmp_result),
2472 "Failed to power control set level!", result = tmp_result);
2473
2474 tmp_result = ellesmere_enable_thermal_auto_throttle(hwmgr);
2475 PP_ASSERT_WITH_CODE((0 == tmp_result),
2476 "Failed to enable thermal auto throttle!", result = tmp_result);
2477
2478 tmp_result = ellesmere_pcie_performance_request(hwmgr);
2479 PP_ASSERT_WITH_CODE((0 == tmp_result),
2480			"PCIe performance request failed!", result = tmp_result);
2481
2482 return result;
2483}
2484
2485int ellesmere_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
2486{
2487
2488 return 0;
2489}
2490
2491int ellesmere_reset_asic_tasks(struct pp_hwmgr *hwmgr)
2492{
2493
2494 return 0;
2495}
2496
2497int ellesmere_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
2498{
2499 return phm_hwmgr_backend_fini(hwmgr);
2500}
2501
2502int ellesmere_set_features_platform_caps(struct pp_hwmgr *hwmgr)
2503{
2504 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
2505
2506 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2507 PHM_PlatformCaps_SclkDeepSleep);
2508
2509 if (data->mvdd_control == ELLESMERE_VOLTAGE_CONTROL_NONE)
2510 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2511 PHM_PlatformCaps_EnableMVDDControl);
2512
2513 if (data->vddci_control == ELLESMERE_VOLTAGE_CONTROL_NONE)
2514 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2515 PHM_PlatformCaps_ControlVDDCI);
2516
2517 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2518 PHM_PlatformCaps_TablelessHardwareInterface);
2519
2520 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2521 PHM_PlatformCaps_EnableSMU7ThermalManagement);
2522
2523 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2524 PHM_PlatformCaps_DynamicPowerManagement);
2528
2529 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2530 PHM_PlatformCaps_SMC);
2531
2532 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2533 PHM_PlatformCaps_NonABMSupportInPPLib);
2534
2535 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2536 PHM_PlatformCaps_DynamicUVDState);
2537
2538 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2539 PHM_PlatformCaps_SclkThrottleLowNotification);
2540
2541	/* power tune caps: assume disabled by default */
2542 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2543 PHM_PlatformCaps_PowerContainment);
2544 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2545 PHM_PlatformCaps_CAC);
2546 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2547 PHM_PlatformCaps_SQRamping);
2548 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2549 PHM_PlatformCaps_DBRamping);
2550 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2551 PHM_PlatformCaps_TDRamping);
2552 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2553 PHM_PlatformCaps_TCPRamping);
2554
2555 return 0;
2556}
2557
2558static void ellesmere_init_dpm_defaults(struct pp_hwmgr *hwmgr)
2559{
2560 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
2561
2562 ellesmere_initialize_power_tune_defaults(hwmgr);
2563
2564 data->pcie_gen_performance.max = PP_PCIEGen1;
2565 data->pcie_gen_performance.min = PP_PCIEGen3;
2566 data->pcie_gen_power_saving.max = PP_PCIEGen1;
2567 data->pcie_gen_power_saving.min = PP_PCIEGen3;
2568 data->pcie_lane_performance.max = 0;
2569 data->pcie_lane_performance.min = 16;
2570 data->pcie_lane_power_saving.max = 0;
2571 data->pcie_lane_power_saving.min = 16;
2572}
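/* Note (assumption): the PCIe gen/lane ranges above are deliberately
 * initialized inverted (max = Gen1 / 0 lanes, min = Gen3 / 16 lanes) so
 * that later per-state max()/min() accumulation can only tighten them
 * toward the real hardware capabilities.
 */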
2573
2574/**
2575* Get Leakage VDDC based on leakage ID.
2576*
2577* @param hwmgr the address of the powerplay hardware manager.
2578* @return always 0
2579*/
2580static int ellesmere_get_evv_voltages(struct pp_hwmgr *hwmgr)
2581{
2582 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
2583 uint16_t vv_id;
2584 uint16_t vddc = 0;
2585 uint16_t i, j;
2586 uint32_t sclk = 0;
2587 struct phm_ppt_v1_information *table_info =
2588 (struct phm_ppt_v1_information *)hwmgr->pptable;
2589 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
2590 table_info->vdd_dep_on_sclk;
2591 int result;
2592
2593 for (i = 0; i < ELLESMERE_MAX_LEAKAGE_COUNT; i++) {
2594 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
2595 if (!phm_get_sclk_for_voltage_evv(hwmgr,
2596 table_info->vddc_lookup_table, vv_id, &sclk)) {
2597 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2598 PHM_PlatformCaps_ClockStretcher)) {
2599 for (j = 1; j < sclk_table->count; j++) {
2600 if (sclk_table->entries[j].clk == sclk &&
2601 sclk_table->entries[j].cks_enable == 0) {
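					/* Presumably bump the clock by 50 MHz
					 * (5000 in 10 kHz units) when clock
					 * stretching is disabled at this sclk,
					 * before the EVV lookup below.
					 */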
2602 sclk += 5000;
2603 break;
2604 }
2605 }
2606 }
2607
2608
2609 PP_ASSERT_WITH_CODE(0 == atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
2610 VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
2611 "Error retrieving EVV voltage value!",
2612 continue);
2613
2614
2615			/* Make sure VDDC is less than 2 V, or else it could burn the ASIC. */
2616 PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
2617 "Invalid VDDC value", result = -EINVAL;);
2618
2619 /* the voltage should not be zero nor equal to leakage ID */
2620 if (vddc != 0 && vddc != vv_id) {
2621 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
2622 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
2623 data->vddc_leakage.count++;
2624 }
2625 }
2626 }
2627
2628 return 0;
2629}
2630
2631/**
2632 * Change virtual leakage voltage to actual value.
2633 *
2634 * @param hwmgr the address of the powerplay hardware manager.
2635 * @param voltage pointer to the voltage to be patched
2636 * @param leakage_table pointer to the leakage table
2637 */
2638static void ellesmere_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
2639 uint16_t *voltage, struct ellesmere_leakage_voltage *leakage_table)
2640{
2641 uint32_t index;
2642
2643 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
2644 for (index = 0; index < leakage_table->count; index++) {
2645 /* if this voltage matches a leakage voltage ID */
2646 /* patch with actual leakage voltage */
2647 if (leakage_table->leakage_id[index] == *voltage) {
2648 *voltage = leakage_table->actual_voltage[index];
2649 break;
2650 }
2651 }
2652
2653 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
2654		printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched\n");
2655}
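/* Illustrative example (hypothetical values): with leakage_id[0] = 0xff01
 * and actual_voltage[0] = 1150, a *voltage of 0xff01 is patched to 1150;
 * any value left above ATOM_VIRTUAL_VOLTAGE_ID0 triggers the error
 * message above.
 */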
2656
2657/**
2658* Patch voltage lookup table by EVV leakages.
2659*
2660* @param hwmgr the address of the powerplay hardware manager.
2661* @param lookup_table pointer to the voltage lookup table
2662* @param leakage_table pointer to the leakage table
2663* @return always 0
2664*/
2665static int ellesmere_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
2666 phm_ppt_v1_voltage_lookup_table *lookup_table,
2667 struct ellesmere_leakage_voltage *leakage_table)
2668{
2669 uint32_t i;
2670
2671 for (i = 0; i < lookup_table->count; i++)
2672 ellesmere_patch_with_vdd_leakage(hwmgr,
2673 &lookup_table->entries[i].us_vdd, leakage_table);
2674
2675 return 0;
2676}
2677
2678static int ellesmere_patch_clock_voltage_limits_with_vddc_leakage(
2679 struct pp_hwmgr *hwmgr, struct ellesmere_leakage_voltage *leakage_table,
2680 uint16_t *vddc)
2681{
2682 struct phm_ppt_v1_information *table_info =
2683 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2684 ellesmere_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
2685 hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
2686 table_info->max_clock_voltage_on_dc.vddc;
2687 return 0;
2688}
2689
2690static int ellesmere_patch_voltage_dependency_tables_with_lookup_table(
2691 struct pp_hwmgr *hwmgr)
2692{
2693 uint8_t entryId;
2694 uint8_t voltageId;
2695 struct phm_ppt_v1_information *table_info =
2696 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2697
2698 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
2699 table_info->vdd_dep_on_sclk;
2700 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
2701 table_info->vdd_dep_on_mclk;
2702 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
2703 table_info->mm_dep_table;
2704
2705 for (entryId = 0; entryId < sclk_table->count; ++entryId) {
2706 voltageId = sclk_table->entries[entryId].vddInd;
2707 sclk_table->entries[entryId].vddc =
2708 table_info->vddc_lookup_table->entries[voltageId].us_vdd;
2709 }
2710
2711 for (entryId = 0; entryId < mclk_table->count; ++entryId) {
2712 voltageId = mclk_table->entries[entryId].vddInd;
2713 mclk_table->entries[entryId].vddc =
2714 table_info->vddc_lookup_table->entries[voltageId].us_vdd;
2715 }
2716
2717 for (entryId = 0; entryId < mm_table->count; ++entryId) {
2718 voltageId = mm_table->entries[entryId].vddcInd;
2719 mm_table->entries[entryId].vddc =
2720 table_info->vddc_lookup_table->entries[voltageId].us_vdd;
2721 }
2722
2723 return 0;
2724
2725}
2726
2727static int ellesmere_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
2728{
2729 /* Need to determine if we need calculated voltage. */
2730 return 0;
2731}
2732
2733static int ellesmere_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
2734{
2735 /* Need to determine if we need calculated voltage from mm table. */
2736 return 0;
2737}
2738
2739static int ellesmere_sort_lookup_table(struct pp_hwmgr *hwmgr,
2740 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
2741{
2742 uint32_t table_size, i, j;
2743 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
2744 table_size = lookup_table->count;
2745
2746 PP_ASSERT_WITH_CODE(0 != lookup_table->count,
2747 "Lookup table is empty", return -EINVAL);
2748
2749	/* Sort entries by ascending voltage (simple insertion sort; tables are small) */
2750 for (i = 0; i < table_size - 1; i++) {
2751 for (j = i + 1; j > 0; j--) {
2752 if (lookup_table->entries[j].us_vdd <
2753 lookup_table->entries[j - 1].us_vdd) {
2754 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
2755 lookup_table->entries[j - 1] = lookup_table->entries[j];
2756 lookup_table->entries[j] = tmp_voltage_lookup_record;
2757 }
2758 }
2759 }
2760
2761 return 0;
2762}
2763
2764static int ellesmere_complete_dependency_tables(struct pp_hwmgr *hwmgr)
2765{
2766 int result = 0;
2767 int tmp_result;
2768 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
2769 struct phm_ppt_v1_information *table_info =
2770 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2771
2772 tmp_result = ellesmere_patch_lookup_table_with_leakage(hwmgr,
2773 table_info->vddc_lookup_table, &(data->vddc_leakage));
2774 if (tmp_result)
2775 result = tmp_result;
2776
2777 tmp_result = ellesmere_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
2778 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
2779 if (tmp_result)
2780 result = tmp_result;
2781
2782 tmp_result = ellesmere_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
2783 if (tmp_result)
2784 result = tmp_result;
2785
2786 tmp_result = ellesmere_calc_voltage_dependency_tables(hwmgr);
2787 if (tmp_result)
2788 result = tmp_result;
2789
2790 tmp_result = ellesmere_calc_mm_voltage_dependency_table(hwmgr);
2791 if (tmp_result)
2792 result = tmp_result;
2793
2794 tmp_result = ellesmere_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
2795 if (tmp_result)
2796 result = tmp_result;
2797
2798 return result;
2799}
2800
2801static int ellesmere_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
2802{
2803 struct phm_ppt_v1_information *table_info =
2804 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2805
2806 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
2807 table_info->vdd_dep_on_sclk;
2808 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
2809 table_info->vdd_dep_on_mclk;
2810
2811 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
2812 "VDD dependency on SCLK table is missing. \
2813 This table is mandatory", return -EINVAL);
2814 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
2815		"VDD dependency on SCLK table is empty. \
2816 This table is mandatory", return -EINVAL);
2817
2818 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
2819 "VDD dependency on MCLK table is missing. \
2820 This table is mandatory", return -EINVAL);
2821 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
2822		"VDD dependency on MCLK table is empty. \
2823 This table is mandatory", return -EINVAL);
2824
2825 table_info->max_clock_voltage_on_ac.sclk =
2826 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
2827 table_info->max_clock_voltage_on_ac.mclk =
2828 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
2829 table_info->max_clock_voltage_on_ac.vddc =
2830 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
2831 table_info->max_clock_voltage_on_ac.vddci =
2832 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
2833
2834 return 0;
2835}
2836
2837int ellesmere_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
2838{
2839 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
2840 struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
2841 uint32_t temp_reg;
2842 int result;
2843
2844 data->dll_default_on = false;
2845 data->sram_end = SMC_RAM_END;
2846
2847 data->disable_dpm_mask = 0xFF;
2848 data->static_screen_threshold = PPELLESMERE_STATICSCREENTHRESHOLD_DFLT;
2849 data->static_screen_threshold_unit = PPELLESMERE_STATICSCREENTHRESHOLD_DFLT;
2850 data->activity_target[0] = PPELLESMERE_TARGETACTIVITY_DFLT;
2851 data->activity_target[1] = PPELLESMERE_TARGETACTIVITY_DFLT;
2852 data->activity_target[2] = PPELLESMERE_TARGETACTIVITY_DFLT;
2853 data->activity_target[3] = PPELLESMERE_TARGETACTIVITY_DFLT;
2854 data->activity_target[4] = PPELLESMERE_TARGETACTIVITY_DFLT;
2855 data->activity_target[5] = PPELLESMERE_TARGETACTIVITY_DFLT;
2856 data->activity_target[6] = PPELLESMERE_TARGETACTIVITY_DFLT;
2857 data->activity_target[7] = PPELLESMERE_TARGETACTIVITY_DFLT;
2858
2859 data->voting_rights_clients0 = PPELLESMERE_VOTINGRIGHTSCLIENTS_DFLT0;
2860 data->voting_rights_clients1 = PPELLESMERE_VOTINGRIGHTSCLIENTS_DFLT1;
2861 data->voting_rights_clients2 = PPELLESMERE_VOTINGRIGHTSCLIENTS_DFLT2;
2862 data->voting_rights_clients3 = PPELLESMERE_VOTINGRIGHTSCLIENTS_DFLT3;
2863 data->voting_rights_clients4 = PPELLESMERE_VOTINGRIGHTSCLIENTS_DFLT4;
2864 data->voting_rights_clients5 = PPELLESMERE_VOTINGRIGHTSCLIENTS_DFLT5;
2865 data->voting_rights_clients6 = PPELLESMERE_VOTINGRIGHTSCLIENTS_DFLT6;
2866 data->voting_rights_clients7 = PPELLESMERE_VOTINGRIGHTSCLIENTS_DFLT7;
2867
2868 data->vddc_vddci_delta = VDDC_VDDCI_DELTA;
2869
2870 data->mclk_activity_target = PPELLESMERE_MCLK_TARGETACTIVITY_DFLT;
2871
2872 /* need to set voltage control types before EVV patching */
2873 data->voltage_control = ELLESMERE_VOLTAGE_CONTROL_NONE;
2874 data->vddci_control = ELLESMERE_VOLTAGE_CONTROL_NONE;
2875 data->mvdd_control = ELLESMERE_VOLTAGE_CONTROL_NONE;
2876
2877 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
2878 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
2879 data->voltage_control = ELLESMERE_VOLTAGE_CONTROL_BY_SVID2;
2880
2881 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2882 PHM_PlatformCaps_DynamicPatchPowerState);
2883
2884 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2885 PHM_PlatformCaps_EnableMVDDControl)) {
2886 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
2887 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
2888 data->mvdd_control = ELLESMERE_VOLTAGE_CONTROL_BY_GPIO;
2889 else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
2890 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
2891 data->mvdd_control = ELLESMERE_VOLTAGE_CONTROL_BY_SVID2;
2892 }
2893
2894 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2895 PHM_PlatformCaps_ControlVDDCI)) {
2896 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
2897 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
2898 data->vddci_control = ELLESMERE_VOLTAGE_CONTROL_BY_GPIO;
2899 else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
2900 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
2901 data->vddci_control = ELLESMERE_VOLTAGE_CONTROL_BY_SVID2;
2902 }
2903
2904 ellesmere_set_features_platform_caps(hwmgr);
2905
2906 ellesmere_init_dpm_defaults(hwmgr);
2907
2908 /* Get leakage voltage based on leakage ID. */
2909 result = ellesmere_get_evv_voltages(hwmgr);
2910
2911 if (result) {
2912		printk(KERN_ERR "Get EVV Voltage Failed. Abort Driver loading!\n");
2913 return -1;
2914 }
2915
2916 ellesmere_complete_dependency_tables(hwmgr);
2917 ellesmere_set_private_data_based_on_pptable(hwmgr);
2918
2919	/* Initialize Dynamic State Adjustment Rule Settings */
2920 result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
2921
2922 if (0 == result) {
2923 struct cgs_system_info sys_info = {0};
2924
2925 data->is_tlu_enabled = 0;
2926
2927 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
2928 ELLESMERE_MAX_HARDWARE_POWERLEVELS;
2929 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
2930 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
2931 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
2932		/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5 MHz (a value of 500 in 10 kHz units). */
2933 hwmgr->platform_descriptor.clockStep.engineClock = 500;
2934 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
2935
2936 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
2937 temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
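			/* Route the VDDC PCC GPIO to the matching CNB_PWRMGT_CNTL
			 * control field, selected by the pin's bit position.
			 */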
2938 switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
2939 case 0:
2940 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
2941 break;
2942 case 1:
2943 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
2944 break;
2945 case 2:
2946 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
2947 break;
2948 case 3:
2949 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
2950 break;
2951 case 4:
2952 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
2953 break;
2954 default:
2955 PP_ASSERT_WITH_CODE(0,
2956 "Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!",
2957 );
2958 break;
2959 }
2960 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
2961 }
2962
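		/* Query the PCIe generation and lane-width capabilities from
		 * CGS; on failure fall back to hard-coded capability masks.
		 */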
2963 sys_info.size = sizeof(struct cgs_system_info);
2964 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
2965 result = cgs_query_system_info(hwmgr->device, &sys_info);
2966 if (result)
2967 data->pcie_gen_cap = 0x30007;
2968 else
2969 data->pcie_gen_cap = (uint32_t)sys_info.value;
2970 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
2971 data->pcie_spc_cap = 20;
2972 sys_info.size = sizeof(struct cgs_system_info);
2973 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
2974 result = cgs_query_system_info(hwmgr->device, &sys_info);
2975 if (result)
2976 data->pcie_lane_cap = 0x2f0000;
2977 else
2978 data->pcie_lane_cap = (uint32_t)sys_info.value;
2979 } else {
2980 /* Ignore return value in here, we are cleaning up a mess. */
2981 ellesmere_hwmgr_backend_fini(hwmgr);
2982 }
2983
2984 return 0;
2985}
2986
2987static int ellesmere_force_dpm_highest(struct pp_hwmgr *hwmgr)
2988{
2989 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
2990 uint32_t level, tmp;
2991
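	/* For each DPM type below, the highest enabled level is derived from
	 * its enable bitmask: shifting the mask right until it reaches zero
	 * counts the index of the top set bit (e.g. a mask of 0x7 yields
	 * level 2), which is then forced or set as the only enabled level.
	 */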
2992 if (!data->pcie_dpm_key_disabled) {
2993 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2994 level = 0;
2995 tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
2996 while (tmp >>= 1)
2997 level++;
2998
2999 if (level)
3000 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3001 PPSMC_MSG_PCIeDPM_ForceLevel, level);
3002 }
3003 }
3004
3005 if (!data->sclk_dpm_key_disabled) {
3006 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3007 level = 0;
3008 tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
3009 while (tmp >>= 1)
3010 level++;
3011
3012 if (level)
3013 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3014 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3015 (1 << level));
3016 }
3017 }
3018
3019 if (!data->mclk_dpm_key_disabled) {
3020 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3021 level = 0;
3022 tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
3023 while (tmp >>= 1)
3024 level++;
3025
3026 if (level)
3027 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3028 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3029 (1 << level));
3030 }
3031 }
3032
3033 return 0;
3034}
3035
3036static int ellesmere_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
3037{
3038 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
3039
3040 phm_apply_dal_min_voltage_request(hwmgr);
3041
3042 if (!data->sclk_dpm_key_disabled) {
3043 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
3044 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3045 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3046 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
3047 }
3048
3049 if (!data->mclk_dpm_key_disabled) {
3050 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
3051 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3052 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3053 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
3054 }
3055
3056 return 0;
3057}
3058
3059static int ellesmere_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
3060{
3061 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
3062
3063 if (!ellesmere_is_dpm_running(hwmgr))
3064 return -EINVAL;
3065
3066 if (!data->pcie_dpm_key_disabled) {
3067 smum_send_msg_to_smc(hwmgr->smumgr,
3068 PPSMC_MSG_PCIeDPM_UnForceLevel);
3069 }
3070
3071 return ellesmere_upload_dpm_level_enable_mask(hwmgr);
3072}
3073
3074static int ellesmere_force_dpm_lowest(struct pp_hwmgr *hwmgr)
3075{
3076 struct ellesmere_hwmgr *data =
3077 (struct ellesmere_hwmgr *)(hwmgr->backend);
3078 uint32_t level;
3079
3080 if (!data->sclk_dpm_key_disabled)
3081 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3082 level = phm_get_lowest_enabled_level(hwmgr,
3083 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
3084 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3085 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3086 (1 << level));
3087
3088 }
3089/* uvd is enabled, can't set mclk low right now
3090 if (!data->mclk_dpm_key_disabled) {
3091 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3092 level = phm_get_lowest_enabled_level(hwmgr,
3093 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
3094 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3095 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3096 (1 << level));
3097 }
3098 }
3099*/
3100 if (!data->pcie_dpm_key_disabled) {
3101 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3102 level = phm_get_lowest_enabled_level(hwmgr,
3103 data->dpm_level_enable_mask.pcie_dpm_enable_mask);
3104 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3105 PPSMC_MSG_PCIeDPM_ForceLevel,
3106 (level));
3107 }
3108 }
3109
3110 return 0;
3111}
3112
3113static int ellesmere_force_dpm_level(struct pp_hwmgr *hwmgr,
3114 enum amd_dpm_forced_level level)
3115{
3116 int ret = 0;
3117
3118 switch (level) {
3119 case AMD_DPM_FORCED_LEVEL_HIGH:
3120 ret = ellesmere_force_dpm_highest(hwmgr);
3121 if (ret)
3122 return ret;
3123 break;
3124 case AMD_DPM_FORCED_LEVEL_LOW:
3125 ret = ellesmere_force_dpm_lowest(hwmgr);
3126 if (ret)
3127 return ret;
3128 break;
3129 case AMD_DPM_FORCED_LEVEL_AUTO:
3130 ret = ellesmere_unforce_dpm_levels(hwmgr);
3131 if (ret)
3132 return ret;
3133 break;
3134 default:
3135 break;
3136 }
3137
3138 hwmgr->dpm_level = level;
3139
3140 return ret;
3141}
3142
3143static int ellesmere_get_power_state_size(struct pp_hwmgr *hwmgr)
3144{
3145 return sizeof(struct ellesmere_power_state);
3146}
3147
3148
3149static int ellesmere_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
3150 struct pp_power_state *request_ps,
3151 const struct pp_power_state *current_ps)
3152{
3153
3154 struct ellesmere_power_state *ellesmere_ps =
3155 cast_phw_ellesmere_power_state(&request_ps->hardware);
3156 uint32_t sclk;
3157 uint32_t mclk;
3158 struct PP_Clocks minimum_clocks = {0};
3159 bool disable_mclk_switching;
3160 bool disable_mclk_switching_for_frame_lock;
3161 struct cgs_display_info info = {0};
3162 const struct phm_clock_and_voltage_limits *max_limits;
3163 uint32_t i;
3164 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
3165 struct phm_ppt_v1_information *table_info =
3166 (struct phm_ppt_v1_information *)(hwmgr->pptable);
3167 int32_t count;
3168 int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
3169
3170 data->battery_state = (PP_StateUILabel_Battery ==
3171 request_ps->classification.ui_label);
3172
3173 PP_ASSERT_WITH_CODE(ellesmere_ps->performance_level_count == 2,
3174 "VI should always have 2 performance levels",
3175 );
3176
3177 max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
3178 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
3179 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
3180
3181 /* Cap clock DPM tables at DC MAX if it is in DC. */
3182 if (PP_PowerSource_DC == hwmgr->power_source) {
3183 for (i = 0; i < ellesmere_ps->performance_level_count; i++) {
3184 if (ellesmere_ps->performance_levels[i].memory_clock > max_limits->mclk)
3185 ellesmere_ps->performance_levels[i].memory_clock = max_limits->mclk;
3186 if (ellesmere_ps->performance_levels[i].engine_clock > max_limits->sclk)
3187 ellesmere_ps->performance_levels[i].engine_clock = max_limits->sclk;
3188 }
3189 }
3190
3191 ellesmere_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
3192 ellesmere_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
3193
3194 cgs_get_active_displays_info(hwmgr->device, &info);
3195
3196 /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
3197
3198 /* TO DO GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */
3199
3200 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3201 PHM_PlatformCaps_StablePState)) {
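		/* A stable p-state pins the engine clock at 75% of the AC sclk
		 * limit, rounded down to the nearest entry of the sclk/voltage
		 * dependency table, and pins the memory clock at the AC limit.
		 */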
3202 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
3203 stable_pstate_sclk = (max_limits->sclk * 75) / 100;
3204
3205 for (count = table_info->vdd_dep_on_sclk->count - 1;
3206 count >= 0; count--) {
3207 if (stable_pstate_sclk >=
3208 table_info->vdd_dep_on_sclk->entries[count].clk) {
3209 stable_pstate_sclk =
3210 table_info->vdd_dep_on_sclk->entries[count].clk;
3211 break;
3212 }
3213 }
3214
3215 if (count < 0)
3216 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
3217
3218 stable_pstate_mclk = max_limits->mclk;
3219
3220 minimum_clocks.engineClock = stable_pstate_sclk;
3221 minimum_clocks.memoryClock = stable_pstate_mclk;
3222 }
3223
3224 if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
3225 minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
3226
3227 if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
3228 minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
3229
3230 ellesmere_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
3231
3232 if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
3233 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
3234 hwmgr->platform_descriptor.overdriveLimit.engineClock),
3235 "Overdrive sclk exceeds limit",
3236 hwmgr->gfx_arbiter.sclk_over_drive =
3237 hwmgr->platform_descriptor.overdriveLimit.engineClock);
3238
3239 if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
3240 ellesmere_ps->performance_levels[1].engine_clock =
3241 hwmgr->gfx_arbiter.sclk_over_drive;
3242 }
3243
3244 if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
3245 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
3246 hwmgr->platform_descriptor.overdriveLimit.memoryClock),
3247 "Overdrive mclk exceeds limit",
3248 hwmgr->gfx_arbiter.mclk_over_drive =
3249 hwmgr->platform_descriptor.overdriveLimit.memoryClock);
3250
3251 if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
3252 ellesmere_ps->performance_levels[1].memory_clock =
3253 hwmgr->gfx_arbiter.mclk_over_drive;
3254 }
3255
3256 disable_mclk_switching_for_frame_lock = phm_cap_enabled(
3257 hwmgr->platform_descriptor.platformCaps,
3258 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
3259
3260 disable_mclk_switching = (1 < info.display_count) ||
3261 disable_mclk_switching_for_frame_lock;
3262
3263 sclk = ellesmere_ps->performance_levels[0].engine_clock;
3264 mclk = ellesmere_ps->performance_levels[0].memory_clock;
3265
3266 if (disable_mclk_switching)
3267 mclk = ellesmere_ps->performance_levels
3268 [ellesmere_ps->performance_level_count - 1].memory_clock;
3269
3270 if (sclk < minimum_clocks.engineClock)
3271 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
3272 max_limits->sclk : minimum_clocks.engineClock;
3273
3274 if (mclk < minimum_clocks.memoryClock)
3275 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
3276 max_limits->mclk : minimum_clocks.memoryClock;
3277
3278 ellesmere_ps->performance_levels[0].engine_clock = sclk;
3279 ellesmere_ps->performance_levels[0].memory_clock = mclk;
3280
3281 ellesmere_ps->performance_levels[1].engine_clock =
3282 (ellesmere_ps->performance_levels[1].engine_clock >=
3283 ellesmere_ps->performance_levels[0].engine_clock) ?
3284 ellesmere_ps->performance_levels[1].engine_clock :
3285 ellesmere_ps->performance_levels[0].engine_clock;
3286
3287 if (disable_mclk_switching) {
3288 if (mclk < ellesmere_ps->performance_levels[1].memory_clock)
3289 mclk = ellesmere_ps->performance_levels[1].memory_clock;
3290
3291 ellesmere_ps->performance_levels[0].memory_clock = mclk;
3292 ellesmere_ps->performance_levels[1].memory_clock = mclk;
3293 } else {
3294 if (ellesmere_ps->performance_levels[1].memory_clock <
3295 ellesmere_ps->performance_levels[0].memory_clock)
3296 ellesmere_ps->performance_levels[1].memory_clock =
3297 ellesmere_ps->performance_levels[0].memory_clock;
3298 }
3299
3300 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3301 PHM_PlatformCaps_StablePState)) {
3302 for (i = 0; i < ellesmere_ps->performance_level_count; i++) {
3303 ellesmere_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
3304 ellesmere_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
3305 ellesmere_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
3306			ellesmere_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max;
3307 }
3308 }
3309 return 0;
3310}
3311
3312
3313static int ellesmere_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3314{
3315 struct pp_power_state *ps;
3316 struct ellesmere_power_state *ellesmere_ps;
3317
3318 if (hwmgr == NULL)
3319 return -EINVAL;
3320
3321 ps = hwmgr->request_ps;
3322
3323 if (ps == NULL)
3324 return -EINVAL;
3325
3326 ellesmere_ps = cast_phw_ellesmere_power_state(&ps->hardware);
3327
3328 if (low)
3329 return ellesmere_ps->performance_levels[0].memory_clock;
3330 else
3331 return ellesmere_ps->performance_levels
3332 [ellesmere_ps->performance_level_count-1].memory_clock;
3333}
3334
3335static int ellesmere_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3336{
3337 struct pp_power_state *ps;
3338 struct ellesmere_power_state *ellesmere_ps;
3339
3340 if (hwmgr == NULL)
3341 return -EINVAL;
3342
3343 ps = hwmgr->request_ps;
3344
3345 if (ps == NULL)
3346 return -EINVAL;
3347
3348 ellesmere_ps = cast_phw_ellesmere_power_state(&ps->hardware);
3349
3350 if (low)
3351 return ellesmere_ps->performance_levels[0].engine_clock;
3352 else
3353 return ellesmere_ps->performance_levels
3354 [ellesmere_ps->performance_level_count-1].engine_clock;
3355}
3356
3357static int ellesmere_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
3358 struct pp_hw_power_state *hw_ps)
3359{
3360 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
3361 struct ellesmere_power_state *ps = (struct ellesmere_power_state *)hw_ps;
3362 ATOM_FIRMWARE_INFO_V2_2 *fw_info;
3363 uint16_t size;
3364 uint8_t frev, crev;
3365 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
3366
3367 /* First retrieve the Boot clocks and VDDC from the firmware info table.
3368 * We assume here that fw_info is unchanged if this call fails.
3369 */
3370 fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
3371 hwmgr->device, index,
3372 &size, &frev, &crev);
3373 if (!fw_info)
3374 /* During a test, there is no firmware info table. */
3375 return 0;
3376
3377 /* Patch the state. */
3378 data->vbios_boot_state.sclk_bootup_value =
3379 le32_to_cpu(fw_info->ulDefaultEngineClock);
3380 data->vbios_boot_state.mclk_bootup_value =
3381 le32_to_cpu(fw_info->ulDefaultMemoryClock);
3382 data->vbios_boot_state.mvdd_bootup_value =
3383 le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
3384 data->vbios_boot_state.vddc_bootup_value =
3385 le16_to_cpu(fw_info->usBootUpVDDCVoltage);
3386 data->vbios_boot_state.vddci_bootup_value =
3387 le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
3388 data->vbios_boot_state.pcie_gen_bootup_value =
3389 phm_get_current_pcie_speed(hwmgr);
3390
3391 data->vbios_boot_state.pcie_lane_bootup_value =
3392 (uint16_t)phm_get_current_pcie_lane_number(hwmgr);
3393
3394 /* set boot power state */
3395 ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
3396 ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
3397 ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
3398 ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
3399
3400 return 0;
3401}
3402
3403static int ellesmere_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
3404 void *state, struct pp_power_state *power_state,
3405 void *pp_table, uint32_t classification_flag)
3406{
3407 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
3408 struct ellesmere_power_state *ellesmere_power_state =
3409 (struct ellesmere_power_state *)(&(power_state->hardware));
3410 struct ellesmere_performance_level *performance_level;
3411 ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
3412 ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
3413 (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
3414 ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
3415 (ATOM_Tonga_SCLK_Dependency_Table *)
3416 (((unsigned long)powerplay_table) +
3417 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
3418 ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
3419 (ATOM_Tonga_MCLK_Dependency_Table *)
3420 (((unsigned long)powerplay_table) +
3421 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
3422
3423	/* The following fields are not initialized here: id, orderedList, allStatesList. */
3424 power_state->classification.ui_label =
3425 (le16_to_cpu(state_entry->usClassification) &
3426 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
3427 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
3428 power_state->classification.flags = classification_flag;
3429 /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
3430
3431 power_state->classification.temporary_state = false;
3432 power_state->classification.to_be_deleted = false;
3433
3434 power_state->validation.disallowOnDC =
3435 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3436 ATOM_Tonga_DISALLOW_ON_DC));
3437
3438 power_state->pcie.lanes = 0;
3439
3440 power_state->display.disableFrameModulation = false;
3441 power_state->display.limitRefreshrate = false;
3442 power_state->display.enableVariBright =
3443 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3444 ATOM_Tonga_ENABLE_VARIBRIGHT));
3445
3446 power_state->validation.supportedPowerLevels = 0;
3447 power_state->uvd_clocks.VCLK = 0;
3448 power_state->uvd_clocks.DCLK = 0;
3449 power_state->temperatures.min = 0;
3450 power_state->temperatures.max = 0;
3451
3452 performance_level = &(ellesmere_power_state->performance_levels
3453 [ellesmere_power_state->performance_level_count++]);
3454
3455 PP_ASSERT_WITH_CODE(
3456 (ellesmere_power_state->performance_level_count < SMU74_MAX_LEVELS_GRAPHICS),
3457		"Performance level count exceeds SMC limit!",
3458 return -1);
3459
3460 PP_ASSERT_WITH_CODE(
3461 (ellesmere_power_state->performance_level_count <=
3462 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
3463		"Performance level count exceeds driver limit!",
3464 return -1);
3465
3466 /* Performance levels are arranged from low to high. */
3467 performance_level->memory_clock = mclk_dep_table->entries
3468 [state_entry->ucMemoryClockIndexLow].ulMclk;
3469 performance_level->engine_clock = sclk_dep_table->entries
3470 [state_entry->ucEngineClockIndexLow].ulSclk;
3471 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3472 state_entry->ucPCIEGenLow);
3473 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3474			state_entry->ucPCIELaneLow);
3475
3476 performance_level = &(ellesmere_power_state->performance_levels
3477 [ellesmere_power_state->performance_level_count++]);
3478 performance_level->memory_clock = mclk_dep_table->entries
3479 [state_entry->ucMemoryClockIndexHigh].ulMclk;
3480 performance_level->engine_clock = sclk_dep_table->entries
3481 [state_entry->ucEngineClockIndexHigh].ulSclk;
3482 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3483 state_entry->ucPCIEGenHigh);
3484 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3485 state_entry->ucPCIELaneHigh);
3486
3487 return 0;
3488}
3489
3490static int ellesmere_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3491 unsigned long entry_index, struct pp_power_state *state)
3492{
3493 int result;
3494 struct ellesmere_power_state *ps;
3495 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
3496 struct phm_ppt_v1_information *table_info =
3497 (struct phm_ppt_v1_information *)(hwmgr->pptable);
3498 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
3499 table_info->vdd_dep_on_mclk;
3500
3501 state->hardware.magic = PHM_VIslands_Magic;
3502
3503 ps = (struct ellesmere_power_state *)(&state->hardware);
3504
3505 result = tonga_get_powerplay_table_entry(hwmgr, entry_index, state,
3506 ellesmere_get_pp_table_entry_callback_func);
3507
3508	/* This is the earliest point at which we have both the dependency tables and
3509	 * the VBIOS boot state, as PP_Tables_GetPowerPlayTableEntry retrieves the
3510	 * VBIOS boot state. If there is only one VDDCI/MCLK level, check that it
3511	 * matches the VBIOS boot state. */
3512 if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
3513 if (dep_mclk_table->entries[0].clk !=
3514 data->vbios_boot_state.mclk_bootup_value)
3515 printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
3516				"does not match VBIOS boot MCLK level\n");
3517 if (dep_mclk_table->entries[0].vddci !=
3518 data->vbios_boot_state.vddci_bootup_value)
3519 printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
3520				"does not match VBIOS boot VDDCI level\n");
3521 }
3522
3523 /* set DC compatible flag if this state supports DC */
3524 if (!state->validation.disallowOnDC)
3525 ps->dc_compatible = true;
3526
3527 if (state->classification.flags & PP_StateClassificationFlag_ACPI)
3528 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
3529
3530 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3531 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3532
3533 if (!result) {
3534 uint32_t i;
3535
3536 switch (state->classification.ui_label) {
3537 case PP_StateUILabel_Performance:
3538 data->use_pcie_performance_levels = true;
3539
3540 for (i = 0; i < ps->performance_level_count; i++) {
3541 if (data->pcie_gen_performance.max <
3542 ps->performance_levels[i].pcie_gen)
3543 data->pcie_gen_performance.max =
3544 ps->performance_levels[i].pcie_gen;
3545
3546 if (data->pcie_gen_performance.min >
3547 ps->performance_levels[i].pcie_gen)
3548 data->pcie_gen_performance.min =
3549 ps->performance_levels[i].pcie_gen;
3550
3551 if (data->pcie_lane_performance.max <
3552 ps->performance_levels[i].pcie_lane)
3553 data->pcie_lane_performance.max =
3554 ps->performance_levels[i].pcie_lane;
3555
3556 if (data->pcie_lane_performance.min >
3557 ps->performance_levels[i].pcie_lane)
3558 data->pcie_lane_performance.min =
3559 ps->performance_levels[i].pcie_lane;
3560 }
3561 break;
3562 case PP_StateUILabel_Battery:
3563 data->use_pcie_power_saving_levels = true;
3564
3565 for (i = 0; i < ps->performance_level_count; i++) {
3566 if (data->pcie_gen_power_saving.max <
3567 ps->performance_levels[i].pcie_gen)
3568 data->pcie_gen_power_saving.max =
3569 ps->performance_levels[i].pcie_gen;
3570
3571 if (data->pcie_gen_power_saving.min >
3572 ps->performance_levels[i].pcie_gen)
3573 data->pcie_gen_power_saving.min =
3574 ps->performance_levels[i].pcie_gen;
3575
3576 if (data->pcie_lane_power_saving.max <
3577 ps->performance_levels[i].pcie_lane)
3578 data->pcie_lane_power_saving.max =
3579 ps->performance_levels[i].pcie_lane;
3580
3581 if (data->pcie_lane_power_saving.min >
3582 ps->performance_levels[i].pcie_lane)
3583 data->pcie_lane_power_saving.min =
3584 ps->performance_levels[i].pcie_lane;
3585 }
3586 break;
3587 default:
3588 break;
3589 }
3590 }
3591 return 0;
3592}
3593
3594static void
3595ellesmere_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
3596{
3597 uint32_t sclk, mclk;
3598
3599 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
3600
3601 sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
3602
3603 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
3604
3605 mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
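	/* Clocks are reported by the SMC in 10 kHz units; divide by 100 for MHz. */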
3606 seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n",
3607 mclk / 100, sclk / 100);
3608}
3609
3610static int ellesmere_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
3611{
3612 const struct phm_set_power_state_input *states =
3613 (const struct phm_set_power_state_input *)input;
3614 const struct ellesmere_power_state *ellesmere_ps =
3615 cast_const_phw_ellesmere_power_state(states->pnew_state);
3616 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
3617 struct ellesmere_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
3618 uint32_t sclk = ellesmere_ps->performance_levels
3619 [ellesmere_ps->performance_level_count - 1].engine_clock;
3620 struct ellesmere_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
3621 uint32_t mclk = ellesmere_ps->performance_levels
3622 [ellesmere_ps->performance_level_count - 1].memory_clock;
3623 struct PP_Clocks min_clocks = {0};
3624 uint32_t i;
3625 struct cgs_display_info info = {0};
3626
3627 data->need_update_smu7_dpm_table = 0;
3628
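	/* If the requested clock is not an existing DPM level, the table was
	 * overdriven and must be rebuilt (DPMTABLE_OD_UPDATE_*); otherwise only
	 * a refresh (DPMTABLE_UPDATE_*) may be needed, e.g. for a DeepSleep
	 * divider or display-count change.
	 */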
3629 for (i = 0; i < sclk_table->count; i++) {
3630 if (sclk == sclk_table->dpm_levels[i].value)
3631 break;
3632 }
3633
3634 if (i >= sclk_table->count)
3635 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3636 else {
3637 /* TODO: Check SCLK in DAL's minimum clocks
3638 * in case DeepSleep divider update is required.
3639 */
3640 if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR)
3641 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3642 }
3643
3644 for (i = 0; i < mclk_table->count; i++) {
3645 if (mclk == mclk_table->dpm_levels[i].value)
3646 break;
3647 }
3648
3649 if (i >= mclk_table->count)
3650 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3651
3652 cgs_get_active_displays_info(hwmgr->device, &info);
3653
3654 if (data->display_timing.num_existing_displays != info.display_count)
3655 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3656
3657 return 0;
3658}
3659
3660static uint16_t ellesmere_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
3661 const struct ellesmere_power_state *ellesmere_ps)
3662{
3663 uint32_t i;
3664 uint32_t sclk, max_sclk = 0;
3665 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
3666 struct ellesmere_dpm_table *dpm_table = &data->dpm_table;
3667
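	/* A PCIe speed level shares its index with the matching sclk DPM level;
	 * if the sclk index is beyond the end of the PCIe speed table, the
	 * fastest available PCIe level is used instead.
	 */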
3668 for (i = 0; i < ellesmere_ps->performance_level_count; i++) {
3669 sclk = ellesmere_ps->performance_levels[i].engine_clock;
3670 if (max_sclk < sclk)
3671 max_sclk = sclk;
3672 }
3673
3674 for (i = 0; i < dpm_table->sclk_table.count; i++) {
3675 if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
3676 return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
3677 dpm_table->pcie_speed_table.dpm_levels
3678 [dpm_table->pcie_speed_table.count - 1].value :
3679 dpm_table->pcie_speed_table.dpm_levels[i].value);
3680 }
3681
3682 return 0;
3683}
3684
3685static int ellesmere_request_link_speed_change_before_state_change(
3686 struct pp_hwmgr *hwmgr, const void *input)
3687{
3688 const struct phm_set_power_state_input *states =
3689 (const struct phm_set_power_state_input *)input;
3690 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
3691 const struct ellesmere_power_state *ellesmere_nps =
3692 cast_const_phw_ellesmere_power_state(states->pnew_state);
3693 const struct ellesmere_power_state *ellesmere_cps =
3694 cast_const_phw_ellesmere_power_state(states->pcurrent_state);
3695
3696 uint16_t target_link_speed = ellesmere_get_maximum_link_speed(hwmgr, ellesmere_nps);
3697 uint16_t current_link_speed;
3698
3699 if (data->force_pcie_gen == PP_PCIEGenInvalid)
3700 current_link_speed = ellesmere_get_maximum_link_speed(hwmgr, ellesmere_cps);
3701 else
3702 current_link_speed = data->force_pcie_gen;
3703
3704 data->force_pcie_gen = PP_PCIEGenInvalid;
3705 data->pspp_notify_required = false;
3706
3707 if (target_link_speed > current_link_speed) {
3708 switch (target_link_speed) {
3709 case PP_PCIEGen3:
3710 if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
3711 break;
3712 data->force_pcie_gen = PP_PCIEGen2;
3713 if (current_link_speed == PP_PCIEGen2)
3714 break;
3715 case PP_PCIEGen2:
3716 if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
3717 break;
3718 default:
3719 data->force_pcie_gen = phm_get_current_pcie_speed(hwmgr);
3720 break;
3721 }
3722 } else {
3723 if (target_link_speed < current_link_speed)
3724 data->pspp_notify_required = true;
3725 }
3726
3727 return 0;
3728}
3729
3730static int ellesmere_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
3731{
3732 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
3733
3734 if (0 == data->need_update_smu7_dpm_table)
3735 return 0;
3736
3737 if ((0 == data->sclk_dpm_key_disabled) &&
3738 (data->need_update_smu7_dpm_table &
3739 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
3740 PP_ASSERT_WITH_CODE(true == ellesmere_is_dpm_running(hwmgr),
3741 "Trying to freeze SCLK DPM when DPM is disabled",
3742 );
3743 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
3744 PPSMC_MSG_SCLKDPM_FreezeLevel),
3745 "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
3746 return -1);
3747 }
3748
3749 if ((0 == data->mclk_dpm_key_disabled) &&
3750 (data->need_update_smu7_dpm_table &
3751 DPMTABLE_OD_UPDATE_MCLK)) {
3752 PP_ASSERT_WITH_CODE(true == ellesmere_is_dpm_running(hwmgr),
3753 "Trying to freeze MCLK DPM when DPM is disabled",
3754 );
3755 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
3756 PPSMC_MSG_MCLKDPM_FreezeLevel),
3757 "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
3758 return -1);
3759 }
3760
3761 return 0;
3762}
3763
3764static int ellesmere_populate_and_upload_sclk_mclk_dpm_levels(
3765 struct pp_hwmgr *hwmgr, const void *input)
3766{
3767 int result = 0;
3768 const struct phm_set_power_state_input *states =
3769 (const struct phm_set_power_state_input *)input;
3770 const struct ellesmere_power_state *ellesmere_ps =
3771 cast_const_phw_ellesmere_power_state(states->pnew_state);
3772 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
3773 uint32_t sclk = ellesmere_ps->performance_levels
3774 [ellesmere_ps->performance_level_count - 1].engine_clock;
3775 uint32_t mclk = ellesmere_ps->performance_levels
3776 [ellesmere_ps->performance_level_count - 1].memory_clock;
3777 struct ellesmere_dpm_table *dpm_table = &data->dpm_table;
3778
3779 struct ellesmere_dpm_table *golden_dpm_table = &data->golden_dpm_table;
3780 uint32_t dpm_count, clock_percent;
3781 uint32_t i;
3782
3783 if (0 == data->need_update_smu7_dpm_table)
3784 return 0;
3785
3786 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
3787 dpm_table->sclk_table.dpm_levels
3788 [dpm_table->sclk_table.count - 1].value = sclk;
3789
3790 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
3791 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
3792 /* Need to do calculation based on the golden DPM table
3793 * as the Heatmap GPU Clock axis is also based on the default values
3794 */
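			/* Intermediate levels are rescaled by the same
			 * percentage the top level moved from its golden
			 * (default) value: e.g. a top sclk raised 10% above
			 * the golden top turns each level i into
			 * golden[i] + golden[i] * 10 / 100.
			 */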
3795 PP_ASSERT_WITH_CODE(
3796 (golden_dpm_table->sclk_table.dpm_levels
3797 [golden_dpm_table->sclk_table.count - 1].value != 0),
3798 "Divide by 0!",
3799 return -1);
3800 dpm_count = dpm_table->sclk_table.count < 2 ? 0 : dpm_table->sclk_table.count - 2;
3801
3802 for (i = dpm_count; i > 1; i--) {
3803 if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) {
3804 clock_percent =
3805 ((sclk
3806 - golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value
3807 ) * 100)
3808 / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
3809
3810 dpm_table->sclk_table.dpm_levels[i].value =
3811 golden_dpm_table->sclk_table.dpm_levels[i].value +
3812 (golden_dpm_table->sclk_table.dpm_levels[i].value *
3813 clock_percent)/100;
3814
3815				} else if (golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value > sclk) {
3816 clock_percent =
3817 ((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value
3818 - sclk) * 100)
3819 / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
3820
3821 dpm_table->sclk_table.dpm_levels[i].value =
3822 golden_dpm_table->sclk_table.dpm_levels[i].value -
3823 (golden_dpm_table->sclk_table.dpm_levels[i].value *
3824 clock_percent) / 100;
3825 } else
3826 dpm_table->sclk_table.dpm_levels[i].value =
3827 golden_dpm_table->sclk_table.dpm_levels[i].value;
3828 }
3829 }
3830 }
3831
3832 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
3833 dpm_table->mclk_table.dpm_levels
3834 [dpm_table->mclk_table.count - 1].value = mclk;
3835
3836 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
3837 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
3838
3839 PP_ASSERT_WITH_CODE(
3840 (golden_dpm_table->mclk_table.dpm_levels
3841 [golden_dpm_table->mclk_table.count-1].value != 0),
3842 "Divide by 0!",
3843 return -1);
3844 dpm_count = dpm_table->mclk_table.count < 2 ? 0 : dpm_table->mclk_table.count - 2;
3845 for (i = dpm_count; i > 1; i--) {
3846 if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) {
3847 clock_percent = ((mclk -
3848 golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100)
3849 / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
3850
3851 dpm_table->mclk_table.dpm_levels[i].value =
3852 golden_dpm_table->mclk_table.dpm_levels[i].value +
3853 (golden_dpm_table->mclk_table.dpm_levels[i].value *
3854 clock_percent) / 100;
3855
3856				} else if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value > mclk) {
3857 clock_percent = (
3858 (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk)
3859 * 100)
3860 / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
3861
3862 dpm_table->mclk_table.dpm_levels[i].value =
3863 golden_dpm_table->mclk_table.dpm_levels[i].value -
3864 (golden_dpm_table->mclk_table.dpm_levels[i].value *
3865 clock_percent) / 100;
3866 } else
3867 dpm_table->mclk_table.dpm_levels[i].value =
3868 golden_dpm_table->mclk_table.dpm_levels[i].value;
3869 }
3870 }
3871 }
3872
3873 if (data->need_update_smu7_dpm_table &
3874 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
3875 result = ellesmere_populate_all_graphic_levels(hwmgr);
3876 PP_ASSERT_WITH_CODE((0 == result),
3877 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
3878 return result);
3879 }
3880
3881 if (data->need_update_smu7_dpm_table &
3882 (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
3883 /*populate MCLK dpm table to SMU7 */
3884 result = ellesmere_populate_all_memory_levels(hwmgr);
3885 PP_ASSERT_WITH_CODE((0 == result),
3886 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
3887 return result);
3888 }
3889
3890 return result;
3891}
3892
3893static int ellesmere_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3894 struct ellesmere_single_dpm_table *dpm_table,
3895 uint32_t low_limit, uint32_t high_limit)
3896{
3897 uint32_t i;
3898 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
3899
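	/* A level stays enabled only if it lies within [low_limit, high_limit]
	 * and its bit is set in disable_dpm_mask (0xFF by default, i.e. no
	 * level is masked off).
	 */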
3900 for (i = 0; i < dpm_table->count; i++) {
3901 if ((dpm_table->dpm_levels[i].value < low_limit)
3902 || (dpm_table->dpm_levels[i].value > high_limit))
3903 dpm_table->dpm_levels[i].enabled = false;
3904 else if (((1 << i) & data->disable_dpm_mask) == 0)
3905 dpm_table->dpm_levels[i].enabled = false;
3906 else
3907 dpm_table->dpm_levels[i].enabled = true;
3908 }
3909
3910 return 0;
3911}
3912
3913static int ellesmere_trim_dpm_states(struct pp_hwmgr *hwmgr,
3914 const struct ellesmere_power_state *ellesmere_ps)
3915{
3916 int result = 0;
3917 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
3918 uint32_t high_limit_count;
3919
3920 PP_ASSERT_WITH_CODE((ellesmere_ps->performance_level_count >= 1),
3921 "power state did not have any performance level",
3922 return -1);
3923
3924 high_limit_count = (1 == ellesmere_ps->performance_level_count) ? 0 : 1;
3925
3926 ellesmere_trim_single_dpm_states(hwmgr,
3927 &(data->dpm_table.sclk_table),
3928 ellesmere_ps->performance_levels[0].engine_clock,
3929 ellesmere_ps->performance_levels[high_limit_count].engine_clock);
3930
3931 ellesmere_trim_single_dpm_states(hwmgr,
3932 &(data->dpm_table.mclk_table),
3933 ellesmere_ps->performance_levels[0].memory_clock,
3934 ellesmere_ps->performance_levels[high_limit_count].memory_clock);
3935
3936 return result;
3937}
3938
3939static int ellesmere_generate_dpm_level_enable_mask(
3940 struct pp_hwmgr *hwmgr, const void *input)
3941{
3942 int result;
3943 const struct phm_set_power_state_input *states =
3944 (const struct phm_set_power_state_input *)input;
3945 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
3946 const struct ellesmere_power_state *ellesmere_ps =
3947 cast_const_phw_ellesmere_power_state(states->pnew_state);
3948
3949 result = ellesmere_trim_dpm_states(hwmgr, ellesmere_ps);
3950 if (result)
3951 return result;
3952
3953 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
3954 phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
3955 data->dpm_level_enable_mask.mclk_dpm_enable_mask =
3956 phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
3957 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
3958 phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
3959
3960 return 0;
3961}
3962
3963static int ellesmere_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
3964{
3965 return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
3966 PPSMC_MSG_VCEDPM_Enable :
3967 PPSMC_MSG_VCEDPM_Disable);
3968}
3969
3970static int ellesmere_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
3971{
3972 const struct phm_set_power_state_input *states =
3973 (const struct phm_set_power_state_input *)input;
3974 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
3975 const struct ellesmere_power_state *ellesmere_nps =
3976 cast_const_phw_ellesmere_power_state(states->pnew_state);
3977 const struct ellesmere_power_state *ellesmere_cps =
3978 cast_const_phw_ellesmere_power_state(states->pcurrent_state);
3979
3980 uint32_t mm_boot_level_offset, mm_boot_level_value;
3981 struct phm_ppt_v1_information *table_info =
3982 (struct phm_ppt_v1_information *)(hwmgr->pptable);
3983
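	/* On a VCE on-transition, program the VCE boot level (the highest MM
	 * dependency entry) into the SMU DPM table: the byte offset is rounded
	 * down to a dword boundary, then only bits 23:16 of that dword are
	 * rewritten via a read-modify-write.
	 */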
3984	if (ellesmere_nps->vce_clks.evclk > 0 &&
3985	    (ellesmere_cps == NULL || ellesmere_cps->vce_clks.evclk == 0)) {
3986
3987		data->smc_state_table.VceBootLevel =
3988			(uint8_t) (table_info->mm_dep_table->count - 1);
3989
3990		mm_boot_level_offset = data->dpm_table_start +
3991				offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
3992		mm_boot_level_offset /= 4;
3993		mm_boot_level_offset *= 4;
3994		mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
3995				CGS_IND_REG__SMC, mm_boot_level_offset);
3996		mm_boot_level_value &= 0xFF00FFFF;
3997		mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16;
3998		cgs_write_ind_register(hwmgr->device,
3999				CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
4000
4001		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
4002			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4003				PPSMC_MSG_VCEDPM_SetEnabledMask,
4004				(uint32_t)1 << data->smc_state_table.VceBootLevel);
4005
4006		ellesmere_enable_disable_vce_dpm(hwmgr, true);
4007	} else if (ellesmere_nps->vce_clks.evclk == 0 &&
4008			ellesmere_cps != NULL &&
4009			ellesmere_cps->vce_clks.evclk > 0) {
4010		ellesmere_enable_disable_vce_dpm(hwmgr, false);
4011	}
4012
4013 return 0;
4014}
4015
4016static int ellesmere_update_sclk_threshold(struct pp_hwmgr *hwmgr)
4017{
4018 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
4019
4020 int result = 0;
4021 uint32_t low_sclk_interrupt_threshold = 0;
4022
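	/* Rewrite the SMC's LowSclkInterruptThreshold only when the arbiter
	 * value actually changed; the value is converted to the SMC's
	 * big-endian byte order before the copy.
	 */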
4023 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4024 PHM_PlatformCaps_SclkThrottleLowNotification)
4025 && (hwmgr->gfx_arbiter.sclk_threshold !=
4026 data->low_sclk_interrupt_threshold)) {
4027 data->low_sclk_interrupt_threshold =
4028 hwmgr->gfx_arbiter.sclk_threshold;
4029 low_sclk_interrupt_threshold =
4030 data->low_sclk_interrupt_threshold;
4031
4032 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
4033
4034 result = ellesmere_copy_bytes_to_smc(
4035 hwmgr->smumgr,
4036 data->dpm_table_start +
4037 offsetof(SMU74_Discrete_DpmTable,
4038 LowSclkInterruptThreshold),
4039 (uint8_t *)&low_sclk_interrupt_threshold,
4040 sizeof(uint32_t),
4041 data->sram_end);
4042 }
4043
4044 return result;
4045}
4046
4047static int ellesmere_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
4048{
4049 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
4050
4051 if (data->need_update_smu7_dpm_table &
4052 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
4053 return ellesmere_program_memory_timing_parameters(hwmgr);
4054
4055 return 0;
4056}
4057
4058static int ellesmere_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
4059{
4060 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
4061
4062 if (0 == data->need_update_smu7_dpm_table)
4063 return 0;
4064
4065 if ((0 == data->sclk_dpm_key_disabled) &&
4066 (data->need_update_smu7_dpm_table &
4067 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
4068
4069 PP_ASSERT_WITH_CODE(true == ellesmere_is_dpm_running(hwmgr),
4070 "Trying to Unfreeze SCLK DPM when DPM is disabled",
4071 );
4072 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
4073 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
4074 "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
4075 return -1);
4076 }
4077
4078 if ((0 == data->mclk_dpm_key_disabled) &&
4079 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
4080
4081 PP_ASSERT_WITH_CODE(true == ellesmere_is_dpm_running(hwmgr),
4082 "Trying to Unfreeze MCLK DPM when DPM is disabled",
4083 );
4084 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
4085			PPSMC_MSG_MCLKDPM_UnfreezeLevel),
4086 "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
4087 return -1);
4088 }
4089
4090 data->need_update_smu7_dpm_table = 0;
4091
4092 return 0;
4093}
4094
4095static int ellesmere_notify_link_speed_change_after_state_change(
4096 struct pp_hwmgr *hwmgr, const void *input)
4097{
4098 const struct phm_set_power_state_input *states =
4099 (const struct phm_set_power_state_input *)input;
4100 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
4101 const struct ellesmere_power_state *ellesmere_ps =
4102 cast_const_phw_ellesmere_power_state(states->pnew_state);
4103 uint16_t target_link_speed = ellesmere_get_maximum_link_speed(hwmgr, ellesmere_ps);
4104 uint8_t request;
4105
4106 if (data->pspp_notify_required) {
4107 if (target_link_speed == PP_PCIEGen3)
4108 request = PCIE_PERF_REQ_GEN3;
4109 else if (target_link_speed == PP_PCIEGen2)
4110 request = PCIE_PERF_REQ_GEN2;
4111 else
4112 request = PCIE_PERF_REQ_GEN1;
4113
4114 if (request == PCIE_PERF_REQ_GEN1 &&
4115 phm_get_current_pcie_speed(hwmgr) > 0)
4116 return 0;
4117
4118 if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
4119 if (PP_PCIEGen2 == target_link_speed)
4120				printk(KERN_ERR "PSPP request to switch to Gen2 from Gen3 Failed!\n");
4121 else
4122				printk(KERN_ERR "PSPP request to switch to Gen1 from Gen2 Failed!\n");
4123 }
4124 }
4125
4126 return 0;
4127}
4128
4129static int ellesmere_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
4130{
4131 int tmp_result, result = 0;
4132 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
4133
4134 tmp_result = ellesmere_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
4135 PP_ASSERT_WITH_CODE((0 == tmp_result),
4136 "Failed to find DPM states clocks in DPM table!",
4137 result = tmp_result);
4138
4139 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4140 PHM_PlatformCaps_PCIEPerformanceRequest)) {
4141 tmp_result =
4142 ellesmere_request_link_speed_change_before_state_change(hwmgr, input);
4143 PP_ASSERT_WITH_CODE((0 == tmp_result),
4144 "Failed to request link speed change before state change!",
4145 result = tmp_result);
4146 }
4147
4148 tmp_result = ellesmere_freeze_sclk_mclk_dpm(hwmgr);
4149 PP_ASSERT_WITH_CODE((0 == tmp_result),
4150 "Failed to freeze SCLK MCLK DPM!", result = tmp_result);
4151
4152 tmp_result = ellesmere_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
4153 PP_ASSERT_WITH_CODE((0 == tmp_result),
4154 "Failed to populate and upload SCLK MCLK DPM levels!",
4155 result = tmp_result);
4156
4157 tmp_result = ellesmere_generate_dpm_level_enable_mask(hwmgr, input);
4158 PP_ASSERT_WITH_CODE((0 == tmp_result),
4159 "Failed to generate DPM level enabled mask!",
4160 result = tmp_result);
4161
4162 tmp_result = ellesmere_update_vce_dpm(hwmgr, input);
4163 PP_ASSERT_WITH_CODE((0 == tmp_result),
4164 "Failed to update VCE DPM!",
4165 result = tmp_result);
4166
4167 tmp_result = ellesmere_update_sclk_threshold(hwmgr);
4168 PP_ASSERT_WITH_CODE((0 == tmp_result),
4169 "Failed to update SCLK threshold!",
4170 result = tmp_result);
4171
4172 tmp_result = ellesmere_program_mem_timing_parameters(hwmgr);
4173 PP_ASSERT_WITH_CODE((0 == tmp_result),
4174 "Failed to program memory timing parameters!",
4175 result = tmp_result);
4176
4177 tmp_result = ellesmere_unfreeze_sclk_mclk_dpm(hwmgr);
4178 PP_ASSERT_WITH_CODE((0 == tmp_result),
4179 "Failed to unfreeze SCLK MCLK DPM!",
4180 result = tmp_result);
4181
4182 tmp_result = ellesmere_upload_dpm_level_enable_mask(hwmgr);
4183 PP_ASSERT_WITH_CODE((0 == tmp_result),
4184 "Failed to upload DPM level enabled mask!",
4185 result = tmp_result);
4186
4187 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4188 PHM_PlatformCaps_PCIEPerformanceRequest)) {
4189 tmp_result =
4190 ellesmere_notify_link_speed_change_after_state_change(hwmgr, input);
4191 PP_ASSERT_WITH_CODE((0 == tmp_result),
4192 "Failed to notify link speed change after state change!",
4193 result = tmp_result);
4194 }
4195 data->apply_optimized_settings = false;
4196 return result;
4197}
4198
4199static int ellesmere_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
4200{
4201
4202 return 0;
4203}
4204
4205int ellesmere_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
4206{
4207 PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
4208
4209 return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
4210}
4211
4212int ellesmere_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
4213{
4214 uint32_t num_active_displays = 0;
4215 struct cgs_display_info info = {0};
4216 info.mode_info = NULL;
4217
4218 cgs_get_active_displays_info(hwmgr->device, &info);
4219
4220 num_active_displays = info.display_count;
4221
4222 if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
4223 ellesmere_notify_smc_display_change(hwmgr, false);
4224 else
4225 ellesmere_notify_smc_display_change(hwmgr, true);
4226
4227 return 0;
4228}
4229
4230/**
4231* Programs the display gap
4232*
4233* @param hwmgr the address of the powerplay hardware manager.
4234* @return always OK
4235*/
4236int ellesmere_program_display_gap(struct pp_hwmgr *hwmgr)
4237{
4238 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
4239 uint32_t num_active_displays = 0;
4240 uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
4241 uint32_t display_gap2;
4242 uint32_t pre_vbi_time_in_us;
4243 uint32_t frame_time_in_us;
4244 uint32_t ref_clock;
4245 uint32_t refresh_rate = 0;
4246 struct cgs_display_info info = {0};
4247 struct cgs_mode_info mode_info;
4248
4249 info.mode_info = &mode_info;
4250
4251 cgs_get_active_displays_info(hwmgr->device, &info);
4252 num_active_displays = info.display_count;
4253
4254 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
4255 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
4256
4257 ref_clock = mode_info.ref_clock;
4258 refresh_rate = mode_info.refresh_rate;
4259
4260 if (0 == refresh_rate)
4261 refresh_rate = 60;
4262
4263 frame_time_in_us = 1000000 / refresh_rate;
4264
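	/* The pre-VBI window is the frame time minus a 200 us margin and the
	 * vblank interval; it is converted below from microseconds to
	 * reference-clock ticks (assuming ref_clock is in 10 kHz units,
	 * ref_clock / 100 is ticks per microsecond).
	 */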
4265 pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
4266 display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
4267
4268 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
4269
4270 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, PreVBlankGap), 0x64);
4271
4272 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
4273
4274 if (num_active_displays == 1)
4275 ellesmere_notify_smc_display_change(hwmgr, true);
4276
4277 return 0;
4278}
4279
4280
4281int ellesmere_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
4282{
4283 return ellesmere_program_display_gap(hwmgr);
4284}
4285
4286/**
4287* Set maximum target operating fan output RPM
4288*
4289* @param hwmgr: the address of the powerplay hardware manager.
4290* @param usMaxFanRpm: max operating fan RPM value.
4291* @return The response that came from the SMC.
4292*/
4293static int ellesmere_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
4294{
4295 return 0;
4296}
4297
4298int ellesmere_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
4299 const void *thermal_interrupt_info)
4300{
4301 return 0;
4302}
4303
4304bool ellesmere_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4305{
4306 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
4307 bool is_update_required = false;
4308 struct cgs_display_info info = {0, 0, NULL};
4309
4310 cgs_get_active_displays_info(hwmgr->device, &info);
4311
4312 if (data->display_timing.num_existing_displays != info.display_count)
4313 is_update_required = true;
4314/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
4315 if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
4316 cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
4317 if (min_clocks.engineClockInSR != data->display_timing.minClockInSR)
4318 is_update_required = true;
4319*/
4320 return is_update_required;
4321}
4322
4323static inline bool ellesmere_are_power_levels_equal(const struct ellesmere_performance_level *pl1,
4324 const struct ellesmere_performance_level *pl2)
4325{
4326 return ((pl1->memory_clock == pl2->memory_clock) &&
4327 (pl1->engine_clock == pl2->engine_clock) &&
4328 (pl1->pcie_gen == pl2->pcie_gen) &&
4329 (pl1->pcie_lane == pl2->pcie_lane));
4330}
4331
4332int ellesmere_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
4333{
4334 const struct ellesmere_power_state *psa = cast_const_phw_ellesmere_power_state(pstate1);
4335 const struct ellesmere_power_state *psb = cast_const_phw_ellesmere_power_state(pstate2);
4336 int i;
4337
4338 if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4339 return -EINVAL;
4340
4341 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
4342 if (psa->performance_level_count != psb->performance_level_count) {
4343 *equal = false;
4344 return 0;
4345 }
4346
4347 for (i = 0; i < psa->performance_level_count; i++) {
4348 if (!ellesmere_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4349 /* If we have found even one performance level pair that is different the states are different. */
4350 *equal = false;
4351 return 0;
4352 }
4353 }
4354
4355 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
4356 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4357 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4358 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
4359
4360 return 0;
4361}
4362
4363int ellesmere_upload_mc_firmware(struct pp_hwmgr *hwmgr)
4364{
4365 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
4366
4367 uint32_t vbios_version;
4368
4369 /* Read MC indirect register offset 0x9F bits [3:0] to see if VBIOS has already loaded a full version of MC ucode or not.*/
4370
4371 phm_get_mc_microcode_version(hwmgr);
4372 vbios_version = hwmgr->microcode_version_info.MC & 0xf;
4373 /* Full version of MC ucode has already been loaded. */
4374 if (vbios_version == 0) {
4375 data->need_long_memory_training = false;
4376 return 0;
4377 }
4378
4379 data->need_long_memory_training = true;
4380
4381/*
4382 * PPMCME_FirmwareDescriptorEntry *pfd = NULL;
4383 pfd = &tonga_mcmeFirmware;
4384 if (0 == PHM_READ_FIELD(hwmgr->device, MC_SEQ_SUP_CNTL, RUN))
4385 ellesmere_load_mc_microcode(hwmgr, pfd->dpmThreshold,
4386 pfd->cfgArray, pfd->cfgSize, pfd->ioDebugArray,
4387 pfd->ioDebugSize, pfd->ucodeArray, pfd->ucodeSize);
4388*/
4389 return 0;
4390}
4391
4392/**
4393 * Read clock related registers.
4394 *
4395 * @param hwmgr the address of the powerplay hardware manager.
4396 * @return always 0
4397 */
4398static int ellesmere_read_clock_registers(struct pp_hwmgr *hwmgr)
4399{
4400 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
4401
4402 data->clock_registers.vCG_SPLL_FUNC_CNTL = cgs_read_ind_register(hwmgr->device,
4403 CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL)
4404 & CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK;
4405
4406 data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = cgs_read_ind_register(hwmgr->device,
4407 CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2)
4408 & CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
4409
4410 data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = cgs_read_ind_register(hwmgr->device,
4411 CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4)
4412 & CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_MASK;
4413
4414 return 0;
4415}
4416
4417/**
4418 * Find out if memory is GDDR5.
4419 *
4420 * @param hwmgr the address of the powerplay hardware manager.
4421 * @return always 0
4422 */
4423static int ellesmere_get_memory_type(struct pp_hwmgr *hwmgr)
4424{
4425 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
4426 uint32_t temp;
4427
4428 temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
4429
4430 data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
4431 ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
4432 MC_SEQ_MISC0_GDDR5_SHIFT));
4433
4434 return 0;
4435}
4436
4437/**
4438 * Enables Dynamic Power Management by SMC
4439 *
4440 * @param hwmgr the address of the powerplay hardware manager.
4441 * @return always 0
4442 */
4443static int ellesmere_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
4444{
4445 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
4446 GENERAL_PWRMGT, STATIC_PM_EN, 1);
4447
4448 return 0;
4449}
4450
4451/**
4452 * Initialize PowerGating States for different engines
4453 *
4454 * @param hwmgr the address of the powerplay hardware manager.
4455 * @return always 0
4456 */
4457static int ellesmere_init_power_gate_state(struct pp_hwmgr *hwmgr)
4458{
4459 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
4460
4461 data->uvd_power_gated = false;
4462 data->vce_power_gated = false;
4463 data->samu_power_gated = false;
4464
4465 return 0;
4466}
4467
4468static int ellesmere_init_sclk_threshold(struct pp_hwmgr *hwmgr)
4469{
4470 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
4471 data->low_sclk_interrupt_threshold = 0;
4472
4473 return 0;
4474}
4475
4476int ellesmere_setup_asic_task(struct pp_hwmgr *hwmgr)
4477{
4478 int tmp_result, result = 0;
4479
4480 ellesmere_upload_mc_firmware(hwmgr);
4481
4482 tmp_result = ellesmere_read_clock_registers(hwmgr);
4483 PP_ASSERT_WITH_CODE((0 == tmp_result),
4484 "Failed to read clock registers!", result = tmp_result);
4485
4486 tmp_result = ellesmere_get_memory_type(hwmgr);
4487 PP_ASSERT_WITH_CODE((0 == tmp_result),
4488 "Failed to get memory type!", result = tmp_result);
4489
4490 tmp_result = ellesmere_enable_acpi_power_management(hwmgr);
4491 PP_ASSERT_WITH_CODE((0 == tmp_result),
4492 "Failed to enable ACPI power management!", result = tmp_result);
4493
4494 tmp_result = ellesmere_init_power_gate_state(hwmgr);
4495 PP_ASSERT_WITH_CODE((0 == tmp_result),
4496 "Failed to init power gate state!", result = tmp_result);
4497
4498 tmp_result = phm_get_mc_microcode_version(hwmgr);
4499 PP_ASSERT_WITH_CODE((0 == tmp_result),
4500 "Failed to get MC microcode version!", result = tmp_result);
4501
4502 tmp_result = ellesmere_init_sclk_threshold(hwmgr);
4503 PP_ASSERT_WITH_CODE((0 == tmp_result),
4504 "Failed to init sclk threshold!", result = tmp_result);
4505
4506 return result;
4507}
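
Note the error-aggregation pattern above: every step runs even if an earlier one failed, and the code of the last failing step is what gets returned. PP_ASSERT_WITH_CODE (pp_debug.h) is, in rough sketch, a log-and-execute macro:

	#define PP_ASSERT_WITH_CODE(cond, msg, code)		\
		do {						\
			if (!(cond)) {				\
				printk("%s\n", (msg));		\
				code;				\
			}					\
		} while (0)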
4508
4509static const struct pp_hwmgr_func ellesmere_hwmgr_funcs = {
4510	.backend_init = ellesmere_hwmgr_backend_init,
4511	.backend_fini = ellesmere_hwmgr_backend_fini,
4512	.asic_setup = ellesmere_setup_asic_task,
4513	.dynamic_state_management_enable = ellesmere_enable_dpm_tasks,
4514	.apply_state_adjust_rules = ellesmere_apply_state_adjust_rules,
4515	.force_dpm_level = ellesmere_force_dpm_level,
4516 .power_state_set = ellesmere_set_power_state_tasks,
4517 .get_power_state_size = ellesmere_get_power_state_size,
4518 .get_mclk = ellesmere_dpm_get_mclk,
4519 .get_sclk = ellesmere_dpm_get_sclk,
4520 .patch_boot_state = ellesmere_dpm_patch_boot_state,
4521 .get_pp_table_entry = ellesmere_get_pp_table_entry,
4522 .get_num_of_pp_table_entries = tonga_get_number_of_powerplay_table_entries,
4523 .print_current_perforce_level = ellesmere_print_current_perforce_level,
4524 .powerdown_uvd = NULL,
4525 .powergate_uvd = NULL,
4526 .powergate_vce = NULL,
4527 .disable_clock_power_gating = NULL,
4528 .notify_smc_display_config_after_ps_adjustment = ellesmere_notify_smc_display_config_after_ps_adjustment,
4529 .display_config_changed = ellesmere_display_configuration_changed_task,
4530 .set_max_fan_pwm_output = ellesmere_set_max_fan_pwm_output,
4531 .set_max_fan_rpm_output = ellesmere_set_max_fan_rpm_output,
4532 .get_temperature = NULL,
4533 .stop_thermal_controller = NULL,
4534 .get_fan_speed_info = NULL,
4535 .get_fan_speed_percent = NULL,
4536 .set_fan_speed_percent = NULL,
4537 .reset_fan_speed_to_default = NULL,
4538 .get_fan_speed_rpm = NULL,
4539 .set_fan_speed_rpm = NULL,
4540 .uninitialize_thermal_controller = NULL,
4541 .register_internal_thermal_interrupt = ellesmere_register_internal_thermal_interrupt,
4542 .check_smc_update_required_for_display_configuration = ellesmere_check_smc_update_required_for_display_configuration,
4543 .check_states_equal = ellesmere_check_states_equal,
4544};
4545
4546int ellesemere_hwmgr_init(struct pp_hwmgr *hwmgr)
4547{
4548 struct ellesmere_hwmgr *data;
4549
4550	data = kzalloc(sizeof(struct ellesmere_hwmgr), GFP_KERNEL);
4551 if (data == NULL)
4552 return -ENOMEM;
4553
4554 hwmgr->backend = data;
4555 hwmgr->hwmgr_func = &ellesmere_hwmgr_funcs;
4556 hwmgr->pptable_func = &tonga_pptable_funcs;
4557
4558
4559 return 0;
4560}
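
Note that ellesemere_hwmgr_init spells the ASIC name with a transposed 'e'; callers must use the same identifier (the header below declares it identically). A hypothetical sketch of how common code might route to it by chip ID (names illustrative only):

	int hwmgr_route_backend_init(struct pp_hwmgr *hwmgr)
	{
		switch (hwmgr->chip_id) {
		case CHIP_ELLESMERE:	/* placeholder chip ID */
			return ellesemere_hwmgr_init(hwmgr);
		default:
			return -EINVAL;
		}
	}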
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_hwmgr.h
new file mode 100644
index 000000000000..4d576984928d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_hwmgr.h
@@ -0,0 +1,349 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef ELLESMERE_HWMGR_H
25#define ELLESMERE_HWMGR_H
26
27#include "hwmgr.h"
28#include "smu74.h"
29#include "smu74_discrete.h"
30#include "ppatomctrl.h"
31#include "ellesmere_ppsmc.h"
32#include "ellesmere_powertune.h"
33
34#define ELLESMERE_MAX_HARDWARE_POWERLEVELS 2
35
36#define ELLESMERE_VOLTAGE_CONTROL_NONE 0x0
37#define ELLESMERE_VOLTAGE_CONTROL_BY_GPIO 0x1
38#define ELLESMERE_VOLTAGE_CONTROL_BY_SVID2 0x2
39#define ELLESMERE_VOLTAGE_CONTROL_MERGED 0x3
40
41#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
42#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
43#define DPMTABLE_UPDATE_SCLK 0x00000004
44#define DPMTABLE_UPDATE_MCLK 0x00000008
45
46struct ellesmere_performance_level {
47 uint32_t memory_clock;
48 uint32_t engine_clock;
49 uint16_t pcie_gen;
50 uint16_t pcie_lane;
51};
52
53struct ellesmere_uvd_clocks {
54 uint32_t vclk;
55 uint32_t dclk;
56};
57
58struct ellesmere_vce_clocks {
59 uint32_t evclk;
60 uint32_t ecclk;
61};
62
63struct ellesmere_power_state {
64 uint32_t magic;
65 struct ellesmere_uvd_clocks uvd_clks;
66 struct ellesmere_vce_clocks vce_clks;
67 uint32_t sam_clk;
68 uint16_t performance_level_count;
69 bool dc_compatible;
70 uint32_t sclk_threshold;
71 struct ellesmere_performance_level performance_levels[ELLESMERE_MAX_HARDWARE_POWERLEVELS];
72};
73
74struct ellesmere_dpm_level {
75 bool enabled;
76 uint32_t value;
77 uint32_t param1;
78};
79
80#define ELLESMERE_MAX_DEEPSLEEP_DIVIDER_ID 5
81#define MAX_REGULAR_DPM_NUMBER 8
82#define ELLESMERE_MINIMUM_ENGINE_CLOCK 2500
83
84struct ellesmere_single_dpm_table {
85 uint32_t count;
86 struct ellesmere_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
87};
88
89struct ellesmere_dpm_table {
90 struct ellesmere_single_dpm_table sclk_table;
91 struct ellesmere_single_dpm_table mclk_table;
92 struct ellesmere_single_dpm_table pcie_speed_table;
93 struct ellesmere_single_dpm_table vddc_table;
94 struct ellesmere_single_dpm_table vddci_table;
95 struct ellesmere_single_dpm_table mvdd_table;
96};
97
98struct ellesmere_clock_registers {
99 uint32_t vCG_SPLL_FUNC_CNTL;
100 uint32_t vCG_SPLL_FUNC_CNTL_2;
101 uint32_t vCG_SPLL_FUNC_CNTL_3;
102 uint32_t vCG_SPLL_FUNC_CNTL_4;
103 uint32_t vCG_SPLL_SPREAD_SPECTRUM;
104 uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
105 uint32_t vDLL_CNTL;
106 uint32_t vMCLK_PWRMGT_CNTL;
107 uint32_t vMPLL_AD_FUNC_CNTL;
108 uint32_t vMPLL_DQ_FUNC_CNTL;
109 uint32_t vMPLL_FUNC_CNTL;
110 uint32_t vMPLL_FUNC_CNTL_1;
111 uint32_t vMPLL_FUNC_CNTL_2;
112 uint32_t vMPLL_SS1;
113 uint32_t vMPLL_SS2;
114};
115
116#define DISABLE_MC_LOADMICROCODE 1
117#define DISABLE_MC_CFGPROGRAMMING 2
118
119struct ellesmere_voltage_smio_registers {
120 uint32_t vS0_VID_LOWER_SMIO_CNTL;
121};
122
123#define ELLESMERE_MAX_LEAKAGE_COUNT 8
124
125struct ellesmere_leakage_voltage {
126 uint16_t count;
127 uint16_t leakage_id[ELLESMERE_MAX_LEAKAGE_COUNT];
128 uint16_t actual_voltage[ELLESMERE_MAX_LEAKAGE_COUNT];
129};
130
131struct ellesmere_vbios_boot_state {
132 uint16_t mvdd_bootup_value;
133 uint16_t vddc_bootup_value;
134 uint16_t vddci_bootup_value;
135 uint32_t sclk_bootup_value;
136 uint32_t mclk_bootup_value;
137 uint16_t pcie_gen_bootup_value;
138 uint16_t pcie_lane_bootup_value;
139};
140
141/* Ultra Low Voltage parameter structure */
142struct ellesmere_ulv_parm {
143 bool ulv_supported;
144 uint32_t cg_ulv_parameter;
145 uint32_t ulv_volt_change_delay;
146 struct ellesmere_performance_level ulv_power_level;
147};
148
149struct ellesmere_display_timing {
150 uint32_t min_clock_in_sr;
151 uint32_t num_existing_displays;
152};
153
154struct ellesmere_dpmlevel_enable_mask {
155 uint32_t uvd_dpm_enable_mask;
156 uint32_t vce_dpm_enable_mask;
157 uint32_t acp_dpm_enable_mask;
158 uint32_t samu_dpm_enable_mask;
159 uint32_t sclk_dpm_enable_mask;
160 uint32_t mclk_dpm_enable_mask;
161 uint32_t pcie_dpm_enable_mask;
162};
163
164struct ellesmere_pcie_perf_range {
165 uint16_t max;
166 uint16_t min;
167};
168struct ellesmere_range_table {
169 uint32_t trans_lower_frequency; /* in 10khz */
170 uint32_t trans_upper_frequency;
171};
172
173struct ellesmere_hwmgr {
174 struct ellesmere_dpm_table dpm_table;
175 struct ellesmere_dpm_table golden_dpm_table;
176 SMU74_Discrete_DpmTable smc_state_table;
177 struct SMU74_Discrete_Ulv ulv_setting;
178
179 struct ellesmere_range_table range_table[NUM_SCLK_RANGE];
180 uint32_t voting_rights_clients0;
181 uint32_t voting_rights_clients1;
182 uint32_t voting_rights_clients2;
183 uint32_t voting_rights_clients3;
184 uint32_t voting_rights_clients4;
185 uint32_t voting_rights_clients5;
186 uint32_t voting_rights_clients6;
187 uint32_t voting_rights_clients7;
188 uint32_t static_screen_threshold_unit;
189 uint32_t static_screen_threshold;
190 uint32_t voltage_control;
191 uint32_t vddc_vddci_delta;
192
193 uint32_t active_auto_throttle_sources;
194
195 struct ellesmere_clock_registers clock_registers;
196 struct ellesmere_voltage_smio_registers voltage_smio_registers;
197
198 bool is_memory_gddr5;
199 uint16_t acpi_vddc;
200 bool pspp_notify_required;
201 uint16_t force_pcie_gen;
202 uint16_t acpi_pcie_gen;
203 uint32_t pcie_gen_cap;
204 uint32_t pcie_lane_cap;
205 uint32_t pcie_spc_cap;
206 struct ellesmere_leakage_voltage vddc_leakage;
207 struct ellesmere_leakage_voltage Vddci_leakage;
208
209 uint32_t mvdd_control;
210 uint32_t vddc_mask_low;
211 uint32_t mvdd_mask_low;
212 uint16_t max_vddc_in_pptable;
213 uint16_t min_vddc_in_pptable;
214 uint16_t max_vddci_in_pptable;
215 uint16_t min_vddci_in_pptable;
216 uint32_t mclk_strobe_mode_threshold;
217 uint32_t mclk_stutter_mode_threshold;
218 uint32_t mclk_edc_enable_threshold;
219 uint32_t mclk_edcwr_enable_threshold;
220 bool is_uvd_enabled;
221 struct ellesmere_vbios_boot_state vbios_boot_state;
222
223 bool pcie_performance_request;
224 bool battery_state;
225 bool is_tlu_enabled;
226
227 /* ---- SMC SRAM Address of firmware header tables ---- */
228 uint32_t sram_end;
229 uint32_t dpm_table_start;
230 uint32_t soft_regs_start;
231 uint32_t mc_reg_table_start;
232 uint32_t fan_table_start;
233 uint32_t arb_table_start;
234
235 /* ---- Stuff originally coming from Evergreen ---- */
236 uint32_t vddci_control;
237 struct pp_atomctrl_voltage_table vddc_voltage_table;
238 struct pp_atomctrl_voltage_table vddci_voltage_table;
239 struct pp_atomctrl_voltage_table mvdd_voltage_table;
240
241 uint32_t mgcg_cgtt_local2;
242 uint32_t mgcg_cgtt_local3;
243 uint32_t gpio_debug;
244 uint32_t mc_micro_code_feature;
245 uint32_t highest_mclk;
246 uint16_t acpi_vddci;
247 uint8_t mvdd_high_index;
248 uint8_t mvdd_low_index;
249 bool dll_default_on;
250 bool performance_request_registered;
251
252 /* ---- Low Power Features ---- */
253 struct ellesmere_ulv_parm ulv;
254
255 /* ---- CAC Stuff ---- */
256 uint32_t cac_table_start;
257 bool cac_configuration_required;
258 bool driver_calculate_cac_leakage;
259 bool cac_enabled;
260
261 /* ---- DPM2 Parameters ---- */
262 uint32_t power_containment_features;
263 bool enable_dte_feature;
264 bool enable_tdc_limit_feature;
265 bool enable_pkg_pwr_tracking_feature;
266 bool disable_uvd_power_tune_feature;
267 struct ellesmere_pt_defaults *power_tune_defaults;
268 struct SMU74_Discrete_PmFuses power_tune_table;
269 uint32_t dte_tj_offset;
270 uint32_t fast_watermark_threshold;
271
272 /* ---- Phase Shedding ---- */
273 bool vddc_phase_shed_control;
274
275 /* ---- DI/DT ---- */
276 struct ellesmere_display_timing display_timing;
277
278 /* ---- Thermal Temperature Setting ---- */
279 struct ellesmere_dpmlevel_enable_mask dpm_level_enable_mask;
280 uint32_t need_update_smu7_dpm_table;
281 uint32_t sclk_dpm_key_disabled;
282 uint32_t mclk_dpm_key_disabled;
283 uint32_t pcie_dpm_key_disabled;
284 uint32_t min_engine_clocks;
285 struct ellesmere_pcie_perf_range pcie_gen_performance;
286 struct ellesmere_pcie_perf_range pcie_lane_performance;
287 struct ellesmere_pcie_perf_range pcie_gen_power_saving;
288 struct ellesmere_pcie_perf_range pcie_lane_power_saving;
289 bool use_pcie_performance_levels;
290 bool use_pcie_power_saving_levels;
291 uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS];
292 uint32_t mclk_activity_target;
293 uint32_t mclk_dpm0_activity_target;
294 uint32_t low_sclk_interrupt_threshold;
295 uint32_t last_mclk_dpm_enable_mask;
296 bool uvd_enabled;
297
298 /* ---- Power Gating States ---- */
299 bool uvd_power_gated;
300 bool vce_power_gated;
301 bool samu_power_gated;
302 bool need_long_memory_training;
303
304 /* Application power optimization parameters */
305 bool update_up_hyst;
306 bool update_down_hyst;
307 uint32_t down_hyst;
308 uint32_t up_hyst;
309 uint32_t disable_dpm_mask;
310 bool apply_optimized_settings;
311};
312
313/* To convert to Q8.8 format for firmware */
314#define ELLESMERE_Q88_FORMAT_CONVERSION_UNIT 256
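/* Illustrative example (not from the original patch): a value of 37.5
 * becomes (uint16_t)(37.5 * ELLESMERE_Q88_FORMAT_CONVERSION_UNIT) == 0x2580,
 * i.e. integer part 37 (0x25) in the high byte and fraction 0.5 (0x80)
 * in the low byte. */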
315
316enum Ellesmere_I2CLineID {
317 Ellesmere_I2CLineID_DDC1 = 0x90,
318 Ellesmere_I2CLineID_DDC2 = 0x91,
319 Ellesmere_I2CLineID_DDC3 = 0x92,
320 Ellesmere_I2CLineID_DDC4 = 0x93,
321 Ellesmere_I2CLineID_DDC5 = 0x94,
322 Ellesmere_I2CLineID_DDC6 = 0x95,
323 Ellesmere_I2CLineID_SCLSDA = 0x96,
324 Ellesmere_I2CLineID_DDCVGA = 0x97
325};
326
327#define ELLESMERE_I2C_DDC1DATA 0
328#define ELLESMERE_I2C_DDC1CLK 1
329#define ELLESMERE_I2C_DDC2DATA 2
330#define ELLESMERE_I2C_DDC2CLK 3
331#define ELLESMERE_I2C_DDC3DATA 4
332#define ELLESMERE_I2C_DDC3CLK 5
333#define ELLESMERE_I2C_SDA 40
334#define ELLESMERE_I2C_SCL 41
335#define ELLESMERE_I2C_DDC4DATA 65
336#define ELLESMERE_I2C_DDC4CLK 66
337#define ELLESMERE_I2C_DDC5DATA 0x48
338#define ELLESMERE_I2C_DDC5CLK 0x49
339#define ELLESMERE_I2C_DDC6DATA 0x4a
340#define ELLESMERE_I2C_DDC6CLK 0x4b
341#define ELLESMERE_I2C_DDCVGADATA 0x4c
342#define ELLESMERE_I2C_DDCVGACLK 0x4d
343
344#define ELLESMERE_UNUSED_GPIO_PIN 0x7F
345
346int ellesemere_hwmgr_init(struct pp_hwmgr *hwmgr);
347
348#endif
349
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_powertune.c
new file mode 100644
index 000000000000..ff41c41b0b0f
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_powertune.c
@@ -0,0 +1,396 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "hwmgr.h"
25#include "smumgr.h"
26#include "ellesmere_hwmgr.h"
27#include "ellesmere_powertune.h"
28#include "ellesmere_smumgr.h"
29#include "smu74_discrete.h"
30#include "pp_debug.h"
31
32#define VOLTAGE_SCALE 4
33#define POWERTUNE_DEFAULT_SET_MAX 1
34
35struct ellesmere_pt_defaults ellesmere_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
 36	/* SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
37 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
38 { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
39 { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
40 { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
41};
42
43void ellesmere_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
44{
45 struct ellesmere_hwmgr *ellesmere_hwmgr = (struct ellesmere_hwmgr *)(hwmgr->backend);
46 struct phm_ppt_v1_information *table_info =
47 (struct phm_ppt_v1_information *)(hwmgr->pptable);
48
49 if (table_info &&
50 table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
51 table_info->cac_dtp_table->usPowerTuneDataSetID)
52 ellesmere_hwmgr->power_tune_defaults =
53 &ellesmere_power_tune_data_set_array
54 [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
55 else
56 ellesmere_hwmgr->power_tune_defaults = &ellesmere_power_tune_data_set_array[0];
57
58}
59
60int ellesmere_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
61{
62 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
63 struct ellesmere_pt_defaults *defaults = data->power_tune_defaults;
64 SMU74_Discrete_DpmTable *dpm_table = &(data->smc_state_table);
65 struct phm_ppt_v1_information *table_info =
66 (struct phm_ppt_v1_information *)(hwmgr->pptable);
67 struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
68 int i, j, k;
69 uint16_t *pdef1;
70 uint16_t *pdef2;
71
72 dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
73 dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
74
75 PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
76 "Target Operating Temp is out of Range!",
77 );
 78/* This is the same value as TemperatureLimitHigh except it is an integer with no fraction bits. */
79 dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
80
 81/* The HW team requested this value be hard-coded to 8, which corresponds to 0.5C. */
82 dpm_table->GpuTjHyst = 8;
83
84 dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase;
85 dpm_table->DTETjOffset = (uint8_t)(data->dte_tj_offset);
86 dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->BAPM_TEMP_GRADIENT);
87 pdef1 = defaults->BAPMTI_R;
88 pdef2 = defaults->BAPMTI_RC;
89
90 for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
91 for (j = 0; j < SMU74_DTE_SOURCES; j++) {
92 for (k = 0; k < SMU74_DTE_SINKS; k++) {
93 dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
94 dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
95 pdef1++;
96 pdef2++;
97 }
98 }
99 }
100
101 return 0;
102}
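
The *128 scaling above encodes watts with a 7-bit fraction, matching the DPM-table format noted later in ellesmere_power_control_set_level(); an illustrative conversion with a hypothetical table value:

	/* Illustrative only: usTDP == 150 W from the powerplay table. */
	uint16_t tdp_q7 = (uint16_t)(150 * 128);	/* == 19200, 7-bit fraction */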
103
104static int ellesmere_populate_svi_load_line(struct pp_hwmgr *hwmgr)
105{
106 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
107 struct ellesmere_pt_defaults *defaults = data->power_tune_defaults;
108
109 data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
110 data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
111 data->power_tune_table.SviLoadLineTrimVddC = 3;
112 data->power_tune_table.SviLoadLineOffsetVddC = 0;
113
114 return 0;
115}
116
117static int ellesmere_populate_tdc_limit(struct pp_hwmgr *hwmgr)
118{
119 uint16_t tdc_limit;
120 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
121 struct phm_ppt_v1_information *table_info =
122 (struct phm_ppt_v1_information *)(hwmgr->pptable);
123 struct ellesmere_pt_defaults *defaults = data->power_tune_defaults;
124
125 tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
126 data->power_tune_table.TDC_VDDC_PkgLimit =
127 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
128 data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
129 defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
130 data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
131
132 return 0;
133}
134
135static int ellesmere_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
136{
137 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
138 struct ellesmere_pt_defaults *defaults = data->power_tune_defaults;
139 uint32_t temp;
140
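	/* The dword read below starts at PmFuses.TdcWaterfallCtl; judging
	 * from the shifts, its bytes map as [31:24] TdcWaterfallCtl
	 * (replaced with the driver default below), [23:16]
	 * LPMLTemperatureMin, [15:8] LPMLTemperatureMax, [7:0] Reserved. */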
 141	if (ellesmere_read_smc_sram_dword(hwmgr->smumgr,
 142			fuse_table_offset +
 143			offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
 144			&temp, data->sram_end))
 145		PP_ASSERT_WITH_CODE(false,
 146				"Attempt to read PmFuses.DW8 (TdcWaterfallCtl) from SMC Failed!",
 147				return -EINVAL);
148 else {
149 data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
150 data->power_tune_table.LPMLTemperatureMin =
151 (uint8_t)((temp >> 16) & 0xff);
152 data->power_tune_table.LPMLTemperatureMax =
153 (uint8_t)((temp >> 8) & 0xff);
154 data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
155 }
156 return 0;
157}
158
159static int ellesmere_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
160{
161 int i;
162 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
163
164 /* Currently not used. Set all to zero. */
165 for (i = 0; i < 16; i++)
166 data->power_tune_table.LPMLTemperatureScaler[i] = 0;
167
168 return 0;
169}
170
171static int ellesmere_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
172{
173 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
174
175 if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
176 || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
177 hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
178 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
179
180 data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
181 hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
182 return 0;
183}
184
185static int ellesmere_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
186{
187 int i;
188 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
189
190 /* Currently not used. Set all to zero. */
191 for (i = 0; i < 16; i++)
192 data->power_tune_table.GnbLPML[i] = 0;
193
194 return 0;
195}
196
197static int ellesmere_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
198{
199 return 0;
200}
201
202static int ellesmere_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
203{
204 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
205 struct phm_ppt_v1_information *table_info =
206 (struct phm_ppt_v1_information *)(hwmgr->pptable);
 207	uint16_t hi_sidd;
 208	uint16_t lo_sidd;
209 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
210
211 hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
212 lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
213
214 data->power_tune_table.BapmVddCBaseLeakageHiSidd =
215 CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
216 data->power_tune_table.BapmVddCBaseLeakageLoSidd =
217 CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
218
219 return 0;
220}
221
222int ellesmere_populate_pm_fuses(struct pp_hwmgr *hwmgr)
223{
224 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
225 uint32_t pm_fuse_table_offset;
226
227 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
228 PHM_PlatformCaps_PowerContainment)) {
229 if (ellesmere_read_smc_sram_dword(hwmgr->smumgr,
230 SMU7_FIRMWARE_HEADER_LOCATION +
231 offsetof(SMU74_Firmware_Header, PmFuseTable),
232 &pm_fuse_table_offset, data->sram_end))
233 PP_ASSERT_WITH_CODE(false,
234 "Attempt to get pm_fuse_table_offset Failed!",
235 return -EINVAL);
236
237 if (ellesmere_populate_svi_load_line(hwmgr))
238 PP_ASSERT_WITH_CODE(false,
239 "Attempt to populate SviLoadLine Failed!",
240 return -EINVAL);
241
242 if (ellesmere_populate_tdc_limit(hwmgr))
243 PP_ASSERT_WITH_CODE(false,
244 "Attempt to populate TDCLimit Failed!", return -EINVAL);
245
246 if (ellesmere_populate_dw8(hwmgr, pm_fuse_table_offset))
247 PP_ASSERT_WITH_CODE(false,
248 "Attempt to populate TdcWaterfallCtl, "
249 "LPMLTemperature Min and Max Failed!",
250 return -EINVAL);
251
252 if (0 != ellesmere_populate_temperature_scaler(hwmgr))
253 PP_ASSERT_WITH_CODE(false,
254 "Attempt to populate LPMLTemperatureScaler Failed!",
255 return -EINVAL);
256
257 if (ellesmere_populate_fuzzy_fan(hwmgr))
258 PP_ASSERT_WITH_CODE(false,
259 "Attempt to populate Fuzzy Fan Control parameters Failed!",
260 return -EINVAL);
261
262 if (ellesmere_populate_gnb_lpml(hwmgr))
263 PP_ASSERT_WITH_CODE(false,
264 "Attempt to populate GnbLPML Failed!",
265 return -EINVAL);
266
267 if (ellesmere_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
268 PP_ASSERT_WITH_CODE(false,
269 "Attempt to populate GnbLPML Min and Max Vid Failed!",
270 return -EINVAL);
271
272 if (ellesmere_populate_bapm_vddc_base_leakage_sidd(hwmgr))
273 PP_ASSERT_WITH_CODE(false,
274 "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
275 "Sidd Failed!", return -EINVAL);
276
277 if (ellesmere_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
278 (uint8_t *)&data->power_tune_table,
279 sizeof(struct SMU74_Discrete_PmFuses), data->sram_end))
280 PP_ASSERT_WITH_CODE(false,
281 "Attempt to download PmFuseTable Failed!",
282 return -EINVAL);
283 }
284 return 0;
285}
286
287int ellesmere_enable_smc_cac(struct pp_hwmgr *hwmgr)
288{
289 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
290 int result = 0;
291
292 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
293 PHM_PlatformCaps_CAC)) {
294 int smc_result;
295 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
296 (uint16_t)(PPSMC_MSG_EnableCac));
297 PP_ASSERT_WITH_CODE((0 == smc_result),
298 "Failed to enable CAC in SMC.", result = -1);
299
 300		data->cac_enabled = (0 == smc_result);
301 }
302 return result;
303}
304
305int ellesmere_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
306{
307 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
308
309 if (data->power_containment_features &
310 POWERCONTAINMENT_FEATURE_PkgPwrLimit)
311 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
312 PPSMC_MSG_PkgPwrSetLimit, n);
313 return 0;
314}
315
 316static int ellesmere_set_overdrive_target_tdp(struct pp_hwmgr *hwmgr, uint32_t target_tdp)
 317{
 318	return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
 319			PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
 320}
321
322int ellesmere_enable_power_containment(struct pp_hwmgr *hwmgr)
323{
324 struct ellesmere_hwmgr *data = (struct ellesmere_hwmgr *)(hwmgr->backend);
325 struct phm_ppt_v1_information *table_info =
326 (struct phm_ppt_v1_information *)(hwmgr->pptable);
327 int smc_result;
328 int result = 0;
329
330 data->power_containment_features = 0;
331 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
332 PHM_PlatformCaps_PowerContainment)) {
333 if (data->enable_dte_feature) {
334 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
335 (uint16_t)(PPSMC_MSG_EnableDTE));
336 PP_ASSERT_WITH_CODE((0 == smc_result),
337 "Failed to enable DTE in SMC.", result = -1;);
338 if (0 == smc_result)
339 data->power_containment_features |= POWERCONTAINMENT_FEATURE_DTE;
340 }
341
342 if (data->enable_tdc_limit_feature) {
343 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
344 (uint16_t)(PPSMC_MSG_TDCLimitEnable));
345 PP_ASSERT_WITH_CODE((0 == smc_result),
346 "Failed to enable TDCLimit in SMC.", result = -1;);
347 if (0 == smc_result)
348 data->power_containment_features |=
349 POWERCONTAINMENT_FEATURE_TDCLimit;
350 }
351
352 if (data->enable_pkg_pwr_tracking_feature) {
353 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
354 (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
355 PP_ASSERT_WITH_CODE((0 == smc_result),
356 "Failed to enable PkgPwrTracking in SMC.", result = -1;);
357 if (0 == smc_result) {
358 struct phm_cac_tdp_table *cac_table =
359 table_info->cac_dtp_table;
360 uint32_t default_limit =
361 (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
362
363 data->power_containment_features |=
364 POWERCONTAINMENT_FEATURE_PkgPwrLimit;
365
366 if (ellesmere_set_power_limit(hwmgr, default_limit))
367 printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
368 }
369 }
370 }
371 return result;
372}
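
The default limit above is watts in 8.8 fixed point (hence the *256). An illustrative check with a hypothetical table value:

	/* usMaximumPowerDeliveryLimit == 208 W  ->  208 * 256 == 53248,
	 * which is what PPSMC_MSG_PkgPwrSetLimit receives via
	 * ellesmere_set_power_limit(). */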
373
374int ellesmere_power_control_set_level(struct pp_hwmgr *hwmgr)
375{
376 struct phm_ppt_v1_information *table_info =
377 (struct phm_ppt_v1_information *)(hwmgr->pptable);
378 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
379 int adjust_percent, target_tdp;
380 int result = 0;
381
382 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
383 PHM_PlatformCaps_PowerContainment)) {
384 /* adjustment percentage has already been validated */
385 adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
386 hwmgr->platform_descriptor.TDPAdjustment :
387 (-1 * hwmgr->platform_descriptor.TDPAdjustment);
 388	/* The SMC expects target_tdp with a 7-bit fraction in the DPM table,
 389	 * but with an 8-bit fraction in messages.
 390	 */
391 target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
 392		result = ellesmere_set_overdrive_target_tdp(hwmgr, (uint32_t)target_tdp);
393 }
394
395 return result;
396}
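
Worked example of the conversion above, with hypothetical values usTDP = 120 W and a validated +10% adjustment:

	/* target_tdp = ((100 + 10) * (120 * 256)) / 100
	 *            = (110 * 30720) / 100
	 *            = 33792  ->  132 W in the SMC's 8.8 message format. */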
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_powertune.h
new file mode 100644
index 000000000000..5772bf929eaf
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ellesmere_powertune.h
@@ -0,0 +1,70 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef ELLESMERE_POWERTUNE_H
24#define ELLESMERE_POWERTUNE_H
25
26enum ellesmere_pt_config_reg_type {
27 ELLESMERE_CONFIGREG_MMR = 0,
28 ELLESMERE_CONFIGREG_SMC_IND,
29 ELLESMERE_CONFIGREG_DIDT_IND,
30 ELLESMERE_CONFIGREG_CACHE,
31 ELLESMERE_CONFIGREG_MAX
32};
33
34/* PowerContainment Features */
35#define POWERCONTAINMENT_FEATURE_DTE 0x00000001
36#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
37#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
38
39struct ellesmere_pt_config_reg {
40 uint32_t offset;
41 uint32_t mask;
42 uint32_t shift;
43 uint32_t value;
44 enum ellesmere_pt_config_reg_type type;
45};
46
47struct ellesmere_pt_defaults {
48 uint8_t SviLoadLineEn;
49 uint8_t SviLoadLineVddC;
50 uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
51 uint8_t TDC_MAWt;
52 uint8_t TdcWaterfallCtl;
53 uint8_t DTEAmbientTempBase;
54
55 uint32_t DisplayCac;
56 uint32_t BAPM_TEMP_GRADIENT;
57 uint16_t BAPMTI_R[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
58 uint16_t BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
59};
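/* The two flattened arrays above are consumed in row-major order by
 * ellesmere_populate_bapm_parameters_in_dpm_table(), which copies them
 * into the SMC's [SMU74_DTE_ITERATIONS][SMU74_DTE_SOURCES][SMU74_DTE_SINKS]
 * tables; the 15-entry initializers in ellesmere_powertune.c assume the
 * product of those dimensions is 15. */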
60
61void ellesmere_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
62int ellesmere_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
63int ellesmere_populate_pm_fuses(struct pp_hwmgr *hwmgr);
64int ellesmere_enable_smc_cac(struct pp_hwmgr *hwmgr);
65int ellesmere_enable_power_containment(struct pp_hwmgr *hwmgr);
66int ellesmere_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
67int ellesmere_power_control_set_level(struct pp_hwmgr *hwmgr);
68
69#endif /* ELLESMERE_POWERTUNE_H */
70
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index 2a83a4af2904..8ba3ad5e7111 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -373,6 +373,37 @@ int atomctrl_get_engine_pll_dividers_vi(
373 return result; 373 return result;
374} 374}
375 375
376int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr,
377		uint32_t clock_value,
378		pp_atomctrl_clock_dividers_ai *dividers)
379{
380	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_parameters;
381	int result;
382
383	pll_parameters.ulClock.ulClock = clock_value;
384	pll_parameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;
385
386	result = cgs_atom_exec_cmd_table
387		(hwmgr->device,
388		GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
389		&pll_parameters);
390
391	if (0 == result) {
392		dividers->usSclk_fcw_frac = le16_to_cpu(pll_parameters.usSclk_fcw_frac);
393		dividers->usSclk_fcw_int = le16_to_cpu(pll_parameters.usSclk_fcw_int);
394		dividers->ucSclkPostDiv = pll_parameters.ucSclkPostDiv;
395		dividers->ucSclkVcoMode = pll_parameters.ucSclkVcoMode;
396		dividers->ucSclkPllRange = pll_parameters.ucSclkPllRange;
397		dividers->ucSscEnable = pll_parameters.ucSscEnable;
398		dividers->usSsc_fcw1_frac = le16_to_cpu(pll_parameters.usSsc_fcw1_frac);
399		dividers->usSsc_fcw1_int = le16_to_cpu(pll_parameters.usSsc_fcw1_int);
400		dividers->usPcc_fcw_int = le16_to_cpu(pll_parameters.usPcc_fcw_int);
401		dividers->usSsc_fcw_slew_frac = le16_to_cpu(pll_parameters.usSsc_fcw_slew_frac);
402		dividers->usPcc_fcw_slew_frac = le16_to_cpu(pll_parameters.usPcc_fcw_slew_frac);
403	}
404	return result;
405}
406
376int atomctrl_get_dfs_pll_dividers_vi( 407int atomctrl_get_dfs_pll_dividers_vi(
377 struct pp_hwmgr *hwmgr, 408 struct pp_hwmgr *hwmgr,
378 uint32_t clock_value, 409 uint32_t clock_value,
@@ -618,7 +649,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
618 if (!getASICProfilingInfo) 649 if (!getASICProfilingInfo)
619 return -1; 650 return -1;
620 651
621 if(getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 || 652 if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 ||
622 (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 && 653 (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 &&
623 getASICProfilingInfo->asHeader.ucTableContentRevision < 4)) 654 getASICProfilingInfo->asHeader.ucTableContentRevision < 4))
624 return -1; 655 return -1;
@@ -891,18 +922,18 @@ int atomctrl_calculate_voltage_evv_on_sclk(
891 *----------------------- 922 *-----------------------
892 */ 923 */
893 924
894 fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4,fSclk), fSM_A5)); 925 fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5));
895 fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b); 926 fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b);
896 fC_Term = fAdd(fMargin_RO_c, 927 fC_Term = fAdd(fMargin_RO_c,
897 fAdd(fMultiply(fSM_A0,fLkg_FT), 928 fAdd(fMultiply(fSM_A0,fLkg_FT),
898 fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT,fSclk)), 929 fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT, fSclk)),
899 fAdd(fMultiply(fSM_A3, fSclk), 930 fAdd(fMultiply(fSM_A3, fSclk),
900 fSubtract(fSM_A7,fRO_fused))))); 931 fSubtract(fSM_A7, fRO_fused)))));
901 932
902 fVDDC_base = fSubtract(fRO_fused, 933 fVDDC_base = fSubtract(fRO_fused,
903 fSubtract(fMargin_RO_c, 934 fSubtract(fMargin_RO_c,
904 fSubtract(fSM_A3, fMultiply(fSM_A1, fSclk)))); 935 fSubtract(fSM_A3, fMultiply(fSM_A1, fSclk))));
905 fVDDC_base = fDivide(fVDDC_base, fAdd(fMultiply(fSM_A0,fSclk), fSM_A2)); 936 fVDDC_base = fDivide(fVDDC_base, fAdd(fMultiply(fSM_A0, fSclk), fSM_A2));
906 937
907 repeat = fSubtract(fVDDC_base, 938 repeat = fSubtract(fVDDC_base,
908 fDivide(fMargin_DC_sigma, ConvertToFraction(1000))); 939 fDivide(fMargin_DC_sigma, ConvertToFraction(1000)));
@@ -916,7 +947,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
916 fSubtract(fRO_DC_margin, 947 fSubtract(fRO_DC_margin,
917 fSubtract(fSM_A3, 948 fSubtract(fSM_A3,
918 fMultiply(fSM_A2, repeat)))); 949 fMultiply(fSM_A2, repeat))));
919 fDC_SCLK = fDivide(fDC_SCLK, fAdd(fMultiply(fSM_A0,repeat), fSM_A1)); 950 fDC_SCLK = fDivide(fDC_SCLK, fAdd(fMultiply(fSM_A0, repeat), fSM_A1));
920 951
921 fSigma_DC = fSubtract(fSclk, fDC_SCLK); 952 fSigma_DC = fSubtract(fSclk, fDC_SCLK);
922 953
@@ -996,7 +1027,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
996 fV_NL = fRoundUpByStepSize(fV_NL, fStepSize, 0); 1027 fV_NL = fRoundUpByStepSize(fV_NL, fStepSize, 0);
997 1028
998 if (GreaterThan(fV_max, fV_NL) && 1029 if (GreaterThan(fV_max, fV_NL) &&
999 (GreaterThan(fV_NL,fEVV_V) || 1030 (GreaterThan(fV_NL, fEVV_V) ||
1000 Equal(fV_NL, fEVV_V))) { 1031 Equal(fV_NL, fEVV_V))) {
1001 fV_NL = fMultiply(fV_NL, ConvertToFraction(1000)); 1032 fV_NL = fMultiply(fV_NL, ConvertToFraction(1000));
1002 1033
@@ -1205,3 +1236,69 @@ int atomctrl_read_efuse(void *device, uint16_t start_index,
1205 1236
1206 return result; 1237 return result;
1207} 1238}
1239
1240int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
1241 uint8_t level)
1242{
1243 DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 memory_clock_parameters;
1244 int result;
1245
1246 memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq = memory_clock & SET_CLOCK_FREQ_MASK;
1247 memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag = ADJUST_MC_SETTING_PARAM;
1248 memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level;
1249
1250 result = cgs_atom_exec_cmd_table
1251 (hwmgr->device,
1252 GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
1253 &memory_clock_parameters);
1254
1255 return result;
1256}
1257
1258int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
1259 uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage)
1260{
1262 int result;
1263 GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3 get_voltage_info_param_space;
1264
1265 get_voltage_info_param_space.ucVoltageType = voltage_type;
1266 get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
1267 get_voltage_info_param_space.usVoltageLevel = virtual_voltage_Id;
1268 get_voltage_info_param_space.ulSCLKFreq = sclk;
1269
1270 result = cgs_atom_exec_cmd_table(hwmgr->device,
1271 GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
1272 &get_voltage_info_param_space);
1273
1274 if (0 != result)
1275 return result;
1276
1277 *voltage = get_voltage_info_param_space.usVoltageLevel;
1278
1279 return result;
1280}
1281
1282int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table)
1283{
1284	int i;
1285	u8 frev, crev;
1286	u16 size;
1287
1288	ATOM_SMU_INFO_V2_1 *psmu_info =
1289		(ATOM_SMU_INFO_V2_1 *)cgs_atom_get_data_table(hwmgr->device,
1290			GetIndexIntoMasterTable(DATA, SMU_Info),
1291			&size, &frev, &crev);
1292
1293	if (!psmu_info)
1294		return -1;
1295
1296	for (i = 0; i < psmu_info->ucSclkEntryNum; i++) {
1297		table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting;
1298		table->entry[i].ucPostdiv = psmu_info->asSclkFcwRangeEntry[i].ucPostdiv;
1299		table->entry[i].usFcw_pcc = le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc);
1300		table->entry[i].usFcw_trans_upper = le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper);
1301		table->entry[i].usRcw_trans_lower = le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower);
1302	}
1303
1304	return 0;
1305}
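
A minimal sketch of a consumer for the new range table (hypothetical helper, not part of this patch); converting the raw FCW words into the 10 kHz units used by ellesmere_hwmgr::range_table is left to the real hwmgr code:

	static int ellesmere_fetch_sclk_ranges(struct pp_hwmgr *hwmgr)
	{
		struct pp_atom_ctrl_sclk_range_table table;
		int result;

		result = atomctrl_get_smc_sclk_range_table(hwmgr, &table);
		if (result)
			return result;

		/* table.entry[0..MAX_SCLK_RANGE-1] now holds the per-range
		 * VCO setting, post divider and FCW transition points. */
		return 0;
	}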
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
index 627420b80a5f..d24ebb566905 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
@@ -101,6 +101,23 @@ struct pp_atomctrl_clock_dividers_vi {
101}; 101};
102typedef struct pp_atomctrl_clock_dividers_vi pp_atomctrl_clock_dividers_vi; 102typedef struct pp_atomctrl_clock_dividers_vi pp_atomctrl_clock_dividers_vi;
103 103
104struct pp_atomctrl_clock_dividers_ai {
105 u16 usSclk_fcw_frac;
106 u16 usSclk_fcw_int;
107 u8 ucSclkPostDiv;
108 u8 ucSclkVcoMode;
109 u8 ucSclkPllRange;
110 u8 ucSscEnable;
111 u16 usSsc_fcw1_frac;
112 u16 usSsc_fcw1_int;
113 u16 usReserved;
114 u16 usPcc_fcw_int;
115 u16 usSsc_fcw_slew_frac;
116 u16 usPcc_fcw_slew_frac;
117};
118typedef struct pp_atomctrl_clock_dividers_ai pp_atomctrl_clock_dividers_ai;
119
120
104union pp_atomctrl_s_mpll_fb_divider { 121union pp_atomctrl_s_mpll_fb_divider {
105 struct { 122 struct {
106 uint32_t cl_kf : 12; 123 uint32_t cl_kf : 12;
@@ -204,6 +221,21 @@ struct pp_atomctrl_mc_register_address {
204 221
205typedef struct pp_atomctrl_mc_register_address pp_atomctrl_mc_register_address; 222typedef struct pp_atomctrl_mc_register_address pp_atomctrl_mc_register_address;
206 223
224#define MAX_SCLK_RANGE 8
225
226struct pp_atom_ctrl_sclk_range_table_entry {
227 uint8_t ucVco_setting;
228 uint8_t ucPostdiv;
229 uint16_t usFcw_pcc;
230 uint16_t usFcw_trans_upper;
231 uint16_t usRcw_trans_lower;
232};
233
234
235struct pp_atom_ctrl_sclk_range_table {
236 struct pp_atom_ctrl_sclk_range_table_entry entry[MAX_SCLK_RANGE];
237};
238
207struct pp_atomctrl_mc_reg_table { 239struct pp_atomctrl_mc_reg_table {
208 uint8_t last; /* number of registers */ 240 uint8_t last; /* number of registers */
209 uint8_t num_entries; /* number of AC timing entries */ 241 uint8_t num_entries; /* number of AC timing entries */
@@ -240,7 +272,11 @@ extern int atomctrl_read_efuse(void *device, uint16_t start_index,
240 uint16_t end_index, uint32_t mask, uint32_t *efuse); 272 uint16_t end_index, uint32_t mask, uint32_t *efuse);
241extern int atomctrl_calculate_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, 273extern int atomctrl_calculate_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
242 uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage, uint16_t dpm_level, bool debug); 274 uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage, uint16_t dpm_level, bool debug);
243 275extern int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_ai *dividers);
244 276extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
277 uint8_t level);
278extern int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
279 uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
280extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table);
245#endif 281#endif
246 282