aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/radeon
diff options
context:
space:
mode:
authorAlex Deucher <alexander.deucher@amd.com>2013-08-14 01:03:41 -0400
committerAlex Deucher <alexander.deucher@amd.com>2013-08-30 16:30:29 -0400
commitcc8dbbb4f62aa53e604e7c61dedc03ee4e8dfed4 (patch)
treee346ae250ed7c00644b883cbe024695fe8c40524 /drivers/gpu/drm/radeon
parent41a524abff2630dce0f9c38eb7340fbf2dc5bf27 (diff)
drm/radeon: add dpm support for CI dGPUs (v2)
This adds dpm support for CI asics. This includes: - dynamic engine clock scaling - dynamic memory clock scaling - dynamic voltage scaling - dynamic pcie gen switching Set radeon.dpm=1 to enable. v2: remove unused radeon_atombios.c changes, make missing smc ucode non-fatal Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/radeon')
-rw-r--r--drivers/gpu/drm/radeon/Makefile3
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c5006
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.h331
-rw-r--r--drivers/gpu/drm/radeon/ci_smc.c262
-rw-r--r--drivers/gpu/drm/radeon/cik.c41
-rw-r--r--drivers/gpu/drm/radeon/cikd.h259
-rw-r--r--drivers/gpu/drm/radeon/ppsmc.h24
-rw-r--r--drivers/gpu/drm/radeon/radeon.h6
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h14
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_ucode.h3
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c10
-rw-r--r--drivers/gpu/drm/radeon/smu7_discrete.h486
15 files changed, 6447 insertions, 15 deletions
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index d3265b5d4661..ea913cc681b4 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -79,7 +79,8 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
79 si_blit_shaders.o radeon_prime.o radeon_uvd.o cik.o cik_blit_shaders.o \ 79 si_blit_shaders.o radeon_prime.o radeon_uvd.o cik.o cik_blit_shaders.o \
80 r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \ 80 r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
81 rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \ 81 rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
82 trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o 82 trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
83 ci_dpm.o
83 84
84radeon-$(CONFIG_COMPAT) += radeon_ioc32.o 85radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
85radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o 86radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
new file mode 100644
index 000000000000..72ab92b60e6e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -0,0 +1,5006 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "drmP.h"
25#include "radeon.h"
26#include "cikd.h"
27#include "r600_dpm.h"
28#include "ci_dpm.h"
29#include "atom.h"
30
31#define MC_CG_ARB_FREQ_F0 0x0a
32#define MC_CG_ARB_FREQ_F1 0x0b
33#define MC_CG_ARB_FREQ_F2 0x0c
34#define MC_CG_ARB_FREQ_F3 0x0d
35
36#define SMC_RAM_END 0x40000
37
38#define VOLTAGE_SCALE 4
39#define VOLTAGE_VID_OFFSET_SCALE1 625
40#define VOLTAGE_VID_OFFSET_SCALE2 100
41
/*
 * Per-SKU PowerTune parameter sets, selected by PCI device id in
 * ci_initialize_powertune_defaults().  Field meanings follow
 * struct ci_pt_defaults (ci_dpm.h); the two 15-entry arrays are
 * consumed as the bapmti_r/bapmti_rc tables in
 * ci_populate_bapm_parameters_in_dpm_table().
 */
static const struct ci_pt_defaults defaults_bonaire_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

static const struct ci_pt_defaults defaults_bonaire_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
	{ 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};

static const struct ci_pt_defaults defaults_saturn_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_saturn_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
	{ 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};
69
/*
 * DIDT (di/dt current-ramping limiter) register initialization values,
 * applied by ci_program_pt_config_registers() when didt is enabled.
 * Offsets are raw DIDT indirect register indices; the four groups at
 * bases 0x00/0x20/0x40/0x60 appear to correspond to the four didt
 * blocks toggled in ci_do_enable_didt() (SQ/DB/TD/TCP — TODO confirm
 * the base-to-block mapping).  The list is terminated by the
 * 0xFFFFFFFF offset sentinel.
 */
static const struct ci_pt_config_reg didt_config_ci[] =
{
	/* bank at base 0x00 */
	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	/* bank at base 0x20 */
	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	/* bank at base 0x40 */
	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	/* bank at base 0x60 */
	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF } /* terminator, checked by ci_program_pt_config_registers() */
};
146
147extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
148extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
149 u32 arb_freq_src, u32 arb_freq_dest);
150extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
151extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
152extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
153 u32 max_voltage_steps,
154 struct atom_voltage_table *voltage_table);
155extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
156extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
157
158static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
159 struct atom_voltage_table_entry *voltage_table,
160 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
161static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
162static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
163 u32 target_tdp);
164static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
165
166static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
167{
168 struct ci_power_info *pi = rdev->pm.dpm.priv;
169
170 return pi;
171}
172
173static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
174{
175 struct ci_ps *ps = rps->ps_priv;
176
177 return ps;
178}
179
180static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
181{
182 struct ci_power_info *pi = ci_get_pi(rdev);
183
184 switch (rdev->pdev->device) {
185 case 0x6650:
186 case 0x6658:
187 case 0x665C:
188 default:
189 pi->powertune_defaults = &defaults_bonaire_xt;
190 break;
191 case 0x6651:
192 case 0x665D:
193 pi->powertune_defaults = &defaults_bonaire_pro;
194 break;
195 case 0x6640:
196 pi->powertune_defaults = &defaults_saturn_xt;
197 break;
198 case 0x6641:
199 pi->powertune_defaults = &defaults_saturn_pro;
200 break;
201 }
202
203 pi->dte_tj_offset = 0;
204
205 pi->caps_power_containment = true;
206 pi->caps_cac = false;
207 pi->caps_sq_ramping = false;
208 pi->caps_db_ramping = false;
209 pi->caps_td_ramping = false;
210 pi->caps_tcp_ramping = false;
211
212 if (pi->caps_power_containment) {
213 pi->caps_cac = true;
214 pi->enable_bapm_feature = true;
215 pi->enable_tdc_limit_feature = true;
216 pi->enable_pkg_pwr_tracking_feature = true;
217 }
218}
219
220static u8 ci_convert_to_vid(u16 vddc)
221{
222 return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
223}
224
225static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
226{
227 struct ci_power_info *pi = ci_get_pi(rdev);
228 u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
229 u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
230 u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
231 u32 i;
232
233 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
234 return -EINVAL;
235 if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
236 return -EINVAL;
237 if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
238 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
239 return -EINVAL;
240
241 for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
242 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
243 lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
244 hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
245 hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
246 } else {
247 lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
248 hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
249 }
250 }
251 return 0;
252}
253
254static int ci_populate_vddc_vid(struct radeon_device *rdev)
255{
256 struct ci_power_info *pi = ci_get_pi(rdev);
257 u8 *vid = pi->smc_powertune_table.VddCVid;
258 u32 i;
259
260 if (pi->vddc_voltage_table.count > 8)
261 return -EINVAL;
262
263 for (i = 0; i < pi->vddc_voltage_table.count; i++)
264 vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
265
266 return 0;
267}
268
269static int ci_populate_svi_load_line(struct radeon_device *rdev)
270{
271 struct ci_power_info *pi = ci_get_pi(rdev);
272 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
273
274 pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
275 pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
276 pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
277 pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
278
279 return 0;
280}
281
282static int ci_populate_tdc_limit(struct radeon_device *rdev)
283{
284 struct ci_power_info *pi = ci_get_pi(rdev);
285 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
286 u16 tdc_limit;
287
288 tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
289 pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
290 pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
291 pt_defaults->tdc_vddc_throttle_release_limit_perc;
292 pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
293
294 return 0;
295}
296
/*
 * Program the TdcWaterfallCtl PM fuse ("dw8" — the 8th dword of the
 * fuse table).
 *
 * NOTE(review): on success the value just read into TdcWaterfallCtl is
 * immediately overwritten with the per-SKU default, so the read mainly
 * acts as an accessibility check of the fuse table — confirm intended.
 * NOTE(review): the read goes through a (u32 *) cast; if
 * TdcWaterfallCtl is narrower than 4 bytes this also writes the
 * following fuse bytes — verify against SMU7_Discrete_PmFuses layout.
 */
static int ci_populate_dw8(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
				     pi->sram_end);
	if (ret)
		return -EINVAL;
	else
		/* replace whatever was read with the driver default */
		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}
316
/*
 * Derive the GnbLPML min/max VID fuses from the already-populated
 * BapmVddC hi/lo SIDD VID arrays.  All 8 fuse slots are scanned and
 * zero entries are treated as unused.
 *
 * NOTE(review): min/max are seeded with hi_vid[0] before the
 * zero-filtering loop; if slot 0 is zero, min stays pinned at 0 and
 * the function returns -EINVAL even when other slots hold valid VIDs —
 * confirm this is the intended "table not populated" check.
 */
static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	/* a zero min or max means the VID tables were never filled in */
	if ((min == 0) || (max == 0))
		return -EINVAL;
	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}
348
349static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
350{
351 struct ci_power_info *pi = ci_get_pi(rdev);
352 u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
353 u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
354 struct radeon_cac_tdp_table *cac_tdp_table =
355 rdev->pm.dpm.dyn_state.cac_tdp_table;
356
357 hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
358 lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
359
360 pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
361 pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
362
363 return 0;
364}
365
/*
 * Fill the BAPM/DTE fields of the SMC dpm table from the vbios cac/tdp
 * table, the platform ppm table and the per-SKU powertune defaults.
 * Multi-byte fields are stored big-endian for the SMC; the *256
 * scaling on the tdp values matches the other SMC power fields
 * (presumably 8.8 fixed point — TODO confirm against the SMU7 spec).
 */
static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	/* thermal trip point in degrees C (stored in millidegrees) */
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	/* platform power management table is optional */
	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	/* copy the flat per-SKU arrays into the [iterations][sources][sinks]
	 * BAPMTI tables, byte-swapping each entry for the SMC
	 */
	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}
413
414static int ci_populate_pm_base(struct radeon_device *rdev)
415{
416 struct ci_power_info *pi = ci_get_pi(rdev);
417 u32 pm_fuse_table_offset;
418 int ret;
419
420 if (pi->caps_power_containment) {
421 ret = ci_read_smc_sram_dword(rdev,
422 SMU7_FIRMWARE_HEADER_LOCATION +
423 offsetof(SMU7_Firmware_Header, PmFuseTable),
424 &pm_fuse_table_offset, pi->sram_end);
425 if (ret)
426 return ret;
427 ret = ci_populate_bapm_vddc_vid_sidd(rdev);
428 if (ret)
429 return ret;
430 ret = ci_populate_vddc_vid(rdev);
431 if (ret)
432 return ret;
433 ret = ci_populate_svi_load_line(rdev);
434 if (ret)
435 return ret;
436 ret = ci_populate_tdc_limit(rdev);
437 if (ret)
438 return ret;
439 ret = ci_populate_dw8(rdev);
440 if (ret)
441 return ret;
442 ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
443 if (ret)
444 return ret;
445 ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
446 if (ret)
447 return ret;
448 ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
449 (u8 *)&pi->smc_powertune_table,
450 sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
451 if (ret)
452 return ret;
453 }
454
455 return 0;
456}
457
458static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
459{
460 struct ci_power_info *pi = ci_get_pi(rdev);
461 u32 data;
462
463 if (pi->caps_sq_ramping) {
464 data = RREG32_DIDT(DIDT_SQ_CTRL0);
465 if (enable)
466 data |= DIDT_CTRL_EN;
467 else
468 data &= ~DIDT_CTRL_EN;
469 WREG32_DIDT(DIDT_SQ_CTRL0, data);
470 }
471
472 if (pi->caps_db_ramping) {
473 data = RREG32_DIDT(DIDT_DB_CTRL0);
474 if (enable)
475 data |= DIDT_CTRL_EN;
476 else
477 data &= ~DIDT_CTRL_EN;
478 WREG32_DIDT(DIDT_DB_CTRL0, data);
479 }
480
481 if (pi->caps_td_ramping) {
482 data = RREG32_DIDT(DIDT_TD_CTRL0);
483 if (enable)
484 data |= DIDT_CTRL_EN;
485 else
486 data &= ~DIDT_CTRL_EN;
487 WREG32_DIDT(DIDT_TD_CTRL0, data);
488 }
489
490 if (pi->caps_tcp_ramping) {
491 data = RREG32_DIDT(DIDT_TCP_CTRL0);
492 if (enable)
493 data |= DIDT_CTRL_EN;
494 else
495 data &= ~DIDT_CTRL_EN;
496 WREG32_DIDT(DIDT_TCP_CTRL0, data);
497 }
498}
499
/*
 * Apply a 0xFFFFFFFF-terminated list of register settings.
 *
 * Entries of type CISLANDS_CONFIGREG_CACHE write nothing by
 * themselves: their shifted values accumulate in 'cache' and are OR'd
 * into the next non-cache entry's write, so one register value can be
 * assembled from several table rows.  For the other types the entry's
 * type selects the address space: SMC indirect, DIDT indirect, or
 * plain MMIO (where the stored offset is a dword index, hence << 2).
 */
static int ci_program_pt_config_registers(struct radeon_device *rdev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			/* accumulate only; flushed by the next non-cache entry */
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset << 2);
				break;
			}

			/* read-modify-write the masked field, plus any cached bits */
			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}
547
548static int ci_enable_didt(struct radeon_device *rdev, bool enable)
549{
550 struct ci_power_info *pi = ci_get_pi(rdev);
551 int ret;
552
553 if (pi->caps_sq_ramping || pi->caps_db_ramping ||
554 pi->caps_td_ramping || pi->caps_tcp_ramping) {
555 cik_enter_rlc_safe_mode(rdev);
556
557 if (enable) {
558 ret = ci_program_pt_config_registers(rdev, didt_config_ci);
559 if (ret) {
560 cik_exit_rlc_safe_mode(rdev);
561 return ret;
562 }
563 }
564
565 ci_do_enable_didt(rdev, enable);
566
567 cik_exit_rlc_safe_mode(rdev);
568 }
569
570 return 0;
571}
572
/*
 * Enable/disable the SMC power-containment features (BAPM/DTE, TDC
 * limit, package power limit).
 *
 * On enable, each feature selected by the pi->enable_* flags is turned
 * on with its SMC message; a refused message makes the function return
 * -EINVAL but the remaining features are still attempted.  Features
 * that did enable are recorded in pi->power_containment_features so
 * the disable path only tears down what was actually turned on.
 */
static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					struct radeon_cac_tdp_table *cac_tdp_table =
						rdev->pm.dpm.dyn_state.cac_tdp_table;
					/* scaled by 256 like the other SMC power values */
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(rdev, default_pwr_limit);
				}
			}
		}
	} else {
		/* only disable features we previously enabled successfully */
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}
630
631static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
632{
633 struct ci_power_info *pi = ci_get_pi(rdev);
634 PPSMC_Result smc_result;
635 int ret = 0;
636
637 if (pi->caps_cac) {
638 if (enable) {
639 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
640 if (smc_result != PPSMC_Result_OK) {
641 ret = -EINVAL;
642 pi->cac_enabled = false;
643 } else {
644 pi->cac_enabled = true;
645 }
646 } else if (pi->cac_enabled) {
647 ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
648 pi->cac_enabled = false;
649 }
650 }
651
652 return ret;
653}
654
655static int ci_power_control_set_level(struct radeon_device *rdev)
656{
657 struct ci_power_info *pi = ci_get_pi(rdev);
658 struct radeon_cac_tdp_table *cac_tdp_table =
659 rdev->pm.dpm.dyn_state.cac_tdp_table;
660 s32 adjust_percent;
661 s32 target_tdp;
662 int ret = 0;
663 bool adjust_polarity = false; /* ??? */
664
665 if (pi->caps_power_containment &&
666 (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) {
667 adjust_percent = adjust_polarity ?
668 rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
669 target_tdp = ((100 + adjust_percent) *
670 (s32)cac_tdp_table->configurable_tdp) / 100;
671 target_tdp *= 256;
672
673 ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
674 }
675
676 return ret;
677}
678
/* dpm callback: forward the UVD gate/ungate request to the dpm helper */
static void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	ci_update_uvd_dpm(rdev, gate);
}
683
/*
 * Clamp and patch the requested power state before it is programmed:
 * - mclk switching is disallowed with more than one active crtc
 *   (presumably because mclk changes disturb scanout — TODO confirm);
 * - on DC power all levels are clamped to the DC clock limits;
 * - level 0's clocks are then rewritten and the higher levels forced
 *   monotonically non-decreasing against it.
 */
static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	int i;

	if (rdev->pm.dpm.new_active_crtc_count > 1)
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	/* remember whether this state was requested for battery operation */
	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	/* on DC power, cap every level at the DC limits */
	if (rdev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	/* with mclk switching disabled, pin mclk at the state's highest level */
	if (disable_mclk_switching) {
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	/* keep level 1 sclk >= level 0 sclk */
	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		/* level 0 already carries the max mclk; raise it if level 1 is higher */
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		/* keep level 1 mclk >= level 0 mclk */
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}
742
743static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
744 int min_temp, int max_temp)
745{
746 int low_temp = 0 * 1000;
747 int high_temp = 255 * 1000;
748 u32 tmp;
749
750 if (low_temp < min_temp)
751 low_temp = min_temp;
752 if (high_temp > max_temp)
753 high_temp = max_temp;
754 if (high_temp < low_temp) {
755 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
756 return -EINVAL;
757 }
758
759 tmp = RREG32_SMC(CG_THERMAL_INT);
760 tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
761 tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
762 CI_DIG_THERM_INTL(low_temp / 1000);
763 WREG32_SMC(CG_THERMAL_INT, tmp);
764
765#if 0
766 /* XXX: need to figure out how to handle this properly */
767 tmp = RREG32_SMC(CG_THERMAL_CTRL);
768 tmp &= DIG_THERM_DPM_MASK;
769 tmp |= DIG_THERM_DPM(high_temp / 1000);
770 WREG32_SMC(CG_THERMAL_CTRL, tmp);
771#endif
772
773 return 0;
774}
775
#if 0
/* Compiled out: read back a driver<->SMC "soft register" dword at
 * soft_regs_start + reg_offset (counterpart of
 * ci_write_smc_soft_register below).
 */
static int ci_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_read_smc_sram_dword(rdev,
				      pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
#endif
787
788static int ci_write_smc_soft_register(struct radeon_device *rdev,
789 u16 reg_offset, u32 value)
790{
791 struct ci_power_info *pi = ci_get_pi(rdev);
792
793 return ci_write_smc_sram_dword(rdev,
794 pi->soft_regs_start + reg_offset,
795 value, pi->sram_end);
796}
797
798static void ci_init_fps_limits(struct radeon_device *rdev)
799{
800 struct ci_power_info *pi = ci_get_pi(rdev);
801 SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
802
803 if (pi->caps_fps) {
804 u16 tmp;
805
806 tmp = 45;
807 table->FpsHighT = cpu_to_be16(tmp);
808
809 tmp = 30;
810 table->FpsLowT = cpu_to_be16(tmp);
811 }
812}
813
814static int ci_update_sclk_t(struct radeon_device *rdev)
815{
816 struct ci_power_info *pi = ci_get_pi(rdev);
817 int ret = 0;
818 u32 low_sclk_interrupt_t = 0;
819
820 if (pi->caps_sclk_throttle_low_notification) {
821 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
822
823 ret = ci_copy_bytes_to_smc(rdev,
824 pi->dpm_table_start +
825 offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
826 (u8 *)&low_sclk_interrupt_t,
827 sizeof(u32), pi->sram_end);
828
829 }
830
831 return ret;
832}
833
/*
 * Ask the vbios for the real voltages behind the "virtual" leakage
 * voltage ids (ATOM_VIRTUAL_VOLTAGE_ID0 + i) and cache the
 * virtual-id -> actual-voltage pairs in pi->vddc_leakage /
 * pi->vddci_leakage for later translation.  Entries whose reported
 * voltage is 0, or merely echoes the virtual id, are skipped.
 */
static void ci_get_leakage_voltages(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 leakage_id, virtual_voltage_id;
	u16 vddc, vddci;
	int i;

	pi->vddc_leakage.count = 0;
	pi->vddci_leakage.count = 0;

	if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
										 virtual_voltage_id,
										 leakage_id) == 0) {
				if (vddc != 0 && vddc != virtual_voltage_id) {
					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
					pi->vddc_leakage.count++;
				}
				if (vddci != 0 && vddci != virtual_voltage_id) {
					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
					pi->vddci_leakage.count++;
				}
			}
		}
	}
}
864
/*
 * Translate the active auto-throttle source mask into thermal
 * protection settings: with any source active, thermal protection is
 * left enabled (unless pi->thermal_protection is clear); with none,
 * it is force-disabled via THERMAL_PROTECTION_DIS.
 *
 * NOTE(review): dpm_event_src is computed but only consumed by the
 * #if 0'd CG_THERMAL_CTRL programming below, so it is currently
 * unused (and may trigger a set-but-unused warning) — confirm.
 */
static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	bool want_thermal_protection;
	enum radeon_dpm_event_src dpm_event_src;
	u32 tmp;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
		break;
	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
#if 0
		/* XXX: need to figure out how to handle this properly */
		tmp = RREG32_SMC(CG_THERMAL_CTRL);
		tmp &= DPM_EVENT_SRC_MASK;
		tmp |= DPM_EVENT_SRC(dpm_event_src);
		WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

		tmp = RREG32_SMC(GENERAL_PWRMGT);
		if (pi->thermal_protection)
			tmp &= ~THERMAL_PROTECTION_DIS;
		else
			tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	} else {
		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}
913
914static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
915 enum radeon_dpm_auto_throttle_src source,
916 bool enable)
917{
918 struct ci_power_info *pi = ci_get_pi(rdev);
919
920 if (enable) {
921 if (!(pi->active_auto_throttle_sources & (1 << source))) {
922 pi->active_auto_throttle_sources |= 1 << source;
923 ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
924 }
925 } else {
926 if (pi->active_auto_throttle_sources & (1 << source)) {
927 pi->active_auto_throttle_sources &= ~(1 << source);
928 ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
929 }
930 }
931}
932
933static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
934{
935 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
936 ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
937}
938
939static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
940{
941 struct ci_power_info *pi = ci_get_pi(rdev);
942 PPSMC_Result smc_result;
943
944 if (!pi->need_update_smu7_dpm_table)
945 return 0;
946
947 if ((!pi->sclk_dpm_key_disabled) &&
948 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
949 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
950 if (smc_result != PPSMC_Result_OK)
951 return -EINVAL;
952 }
953
954 if ((!pi->mclk_dpm_key_disabled) &&
955 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
956 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
957 if (smc_result != PPSMC_Result_OK)
958 return -EINVAL;
959 }
960
961 pi->need_update_smu7_dpm_table = 0;
962 return 0;
963}
964
/*
 * Start or stop the SMC sclk and mclk dpm engines.  When enabling mclk
 * dpm this also turns on the memory CAC enable bit and programs the
 * LCAC counter registers.  Returns 0 on success, -EINVAL if the SMC
 * rejects a message.
 */
static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (enable) {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;

			WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);

			/* LCAC setup values and the 10us settle delay are a
			 * fixed reference sequence; the individual bit
			 * meanings are not documented in this file */
			WREG32_SMC(LCAC_MC0_CNTL, 0x05);
			WREG32_SMC(LCAC_MC1_CNTL, 0x05);
			WREG32_SMC(LCAC_CPL_CNTL, 0x100005);

			udelay(10);

			WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
			WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
			WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
		}
	} else {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}
1010
/*
 * Enable dynamic power management: set the global/dynamic PM enable
 * bits, then ask the SMC to start voltage control, sclk/mclk dpm and
 * (unless disabled) pcie dpm.  Returns 0 on success, negative errno on
 * failure.
 */
static int ci_start_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	/* give the SMC a voltage-change timeout before starting dpm */
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);

	/* clear the BIF link counter reset enable */
	WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	ret = ci_enable_sclk_mclk_dpm(rdev, true);
	if (ret)
		return ret;

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}
1046
1047static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
1048{
1049 struct ci_power_info *pi = ci_get_pi(rdev);
1050 PPSMC_Result smc_result;
1051
1052 if (!pi->need_update_smu7_dpm_table)
1053 return 0;
1054
1055 if ((!pi->sclk_dpm_key_disabled) &&
1056 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1057 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
1058 if (smc_result != PPSMC_Result_OK)
1059 return -EINVAL;
1060 }
1061
1062 if ((!pi->mclk_dpm_key_disabled) &&
1063 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1064 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
1065 if (smc_result != PPSMC_Result_OK)
1066 return -EINVAL;
1067 }
1068
1069 return 0;
1070}
1071
1072static int ci_stop_dpm(struct radeon_device *rdev)
1073{
1074 struct ci_power_info *pi = ci_get_pi(rdev);
1075 PPSMC_Result smc_result;
1076 int ret;
1077 u32 tmp;
1078
1079 tmp = RREG32_SMC(GENERAL_PWRMGT);
1080 tmp &= ~GLOBAL_PWRMGT_EN;
1081 WREG32_SMC(GENERAL_PWRMGT, tmp);
1082
1083 tmp = RREG32(SCLK_PWRMGT_CNTL);
1084 tmp &= ~DYNAMIC_PM_EN;
1085 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1086
1087 if (!pi->pcie_dpm_key_disabled) {
1088 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
1089 if (smc_result != PPSMC_Result_OK)
1090 return -EINVAL;
1091 }
1092
1093 ret = ci_enable_sclk_mclk_dpm(rdev, false);
1094 if (ret)
1095 return ret;
1096
1097 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
1098 if (smc_result != PPSMC_Result_OK)
1099 return -EINVAL;
1100
1101 return 0;
1102}
1103
1104static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
1105{
1106 u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1107
1108 if (enable)
1109 tmp &= ~SCLK_PWRMGT_OFF;
1110 else
1111 tmp |= SCLK_PWRMGT_OFF;
1112 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1113}
1114
#if 0
/*
 * Currently unused: push the AC or battery power limit to the SMC and,
 * when automatic DC transitions are supported, tell the SMC which power
 * source we are running on.
 */
static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
					bool ac_power)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	u32 power_limit;

	/* limits are scaled by 256 for the SMC (presumably 8.8 fixed
	 * point - confirm against smu7 firmware interface) */
	if (ac_power)
		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
	else
		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);

	ci_set_power_limit(rdev, power_limit);

	if (pi->caps_automatic_dc_transition) {
		if (ac_power)
			ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
		else
			ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
	}

	return 0;
}
#endif
1141
1142static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
1143 PPSMC_Msg msg, u32 parameter)
1144{
1145 WREG32(SMC_MSG_ARG_0, parameter);
1146 return ci_send_msg_to_smc(rdev, msg);
1147}
1148
1149static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
1150 PPSMC_Msg msg, u32 *parameter)
1151{
1152 PPSMC_Result smc_result;
1153
1154 smc_result = ci_send_msg_to_smc(rdev, msg);
1155
1156 if ((smc_result == PPSMC_Result_OK) && parameter)
1157 *parameter = RREG32(SMC_MSG_ARG_0);
1158
1159 return smc_result;
1160}
1161
1162static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
1163{
1164 struct ci_power_info *pi = ci_get_pi(rdev);
1165
1166 if (!pi->sclk_dpm_key_disabled) {
1167 PPSMC_Result smc_result =
1168 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n);
1169 if (smc_result != PPSMC_Result_OK)
1170 return -EINVAL;
1171 }
1172
1173 return 0;
1174}
1175
1176static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
1177{
1178 struct ci_power_info *pi = ci_get_pi(rdev);
1179
1180 if (!pi->mclk_dpm_key_disabled) {
1181 PPSMC_Result smc_result =
1182 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n);
1183 if (smc_result != PPSMC_Result_OK)
1184 return -EINVAL;
1185 }
1186
1187 return 0;
1188}
1189
1190static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
1191{
1192 struct ci_power_info *pi = ci_get_pi(rdev);
1193
1194 if (!pi->pcie_dpm_key_disabled) {
1195 PPSMC_Result smc_result =
1196 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1197 if (smc_result != PPSMC_Result_OK)
1198 return -EINVAL;
1199 }
1200
1201 return 0;
1202}
1203
1204static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
1205{
1206 struct ci_power_info *pi = ci_get_pi(rdev);
1207
1208 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1209 PPSMC_Result smc_result =
1210 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
1211 if (smc_result != PPSMC_Result_OK)
1212 return -EINVAL;
1213 }
1214
1215 return 0;
1216}
1217
1218static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
1219 u32 target_tdp)
1220{
1221 PPSMC_Result smc_result =
1222 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1223 if (smc_result != PPSMC_Result_OK)
1224 return -EINVAL;
1225 return 0;
1226}
1227
/* Drop back to the boot power state by disabling sclk/mclk dpm. */
static int ci_set_boot_state(struct radeon_device *rdev)
{
	return ci_enable_sclk_mclk_dpm(rdev, false);
}
1232
1233static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
1234{
1235 u32 sclk_freq;
1236 PPSMC_Result smc_result =
1237 ci_send_msg_to_smc_return_parameter(rdev,
1238 PPSMC_MSG_API_GetSclkFrequency,
1239 &sclk_freq);
1240 if (smc_result != PPSMC_Result_OK)
1241 sclk_freq = 0;
1242
1243 return sclk_freq;
1244}
1245
1246static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
1247{
1248 u32 mclk_freq;
1249 PPSMC_Result smc_result =
1250 ci_send_msg_to_smc_return_parameter(rdev,
1251 PPSMC_MSG_API_GetMclkFrequency,
1252 &mclk_freq);
1253 if (smc_result != PPSMC_Result_OK)
1254 mclk_freq = 0;
1255
1256 return mclk_freq;
1257}
1258
/*
 * Start the SMC firmware and wait (bounded by rdev->usec_timeout) for
 * it to report that its interrupts are enabled.  A timeout here is
 * silent - callers get no error indication.
 */
static void ci_dpm_start_smc(struct radeon_device *rdev)
{
	int i;

	/* presumably sets the SMC reset/jump vector - see
	 * ci_program_jump_on_start() in ci_smc.c */
	ci_program_jump_on_start(rdev);
	ci_start_smc_clock(rdev);
	ci_start_smc(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
			break;
	}
}
1271
/* Halt the SMC: put it in reset first, then gate its clock. */
static void ci_dpm_stop_smc(struct radeon_device *rdev)
{
	ci_reset_smc(rdev);
	ci_stop_smc_clock(rdev);
}
1277
1278static int ci_process_firmware_header(struct radeon_device *rdev)
1279{
1280 struct ci_power_info *pi = ci_get_pi(rdev);
1281 u32 tmp;
1282 int ret;
1283
1284 ret = ci_read_smc_sram_dword(rdev,
1285 SMU7_FIRMWARE_HEADER_LOCATION +
1286 offsetof(SMU7_Firmware_Header, DpmTable),
1287 &tmp, pi->sram_end);
1288 if (ret)
1289 return ret;
1290
1291 pi->dpm_table_start = tmp;
1292
1293 ret = ci_read_smc_sram_dword(rdev,
1294 SMU7_FIRMWARE_HEADER_LOCATION +
1295 offsetof(SMU7_Firmware_Header, SoftRegisters),
1296 &tmp, pi->sram_end);
1297 if (ret)
1298 return ret;
1299
1300 pi->soft_regs_start = tmp;
1301
1302 ret = ci_read_smc_sram_dword(rdev,
1303 SMU7_FIRMWARE_HEADER_LOCATION +
1304 offsetof(SMU7_Firmware_Header, mcRegisterTable),
1305 &tmp, pi->sram_end);
1306 if (ret)
1307 return ret;
1308
1309 pi->mc_reg_table_start = tmp;
1310
1311 ret = ci_read_smc_sram_dword(rdev,
1312 SMU7_FIRMWARE_HEADER_LOCATION +
1313 offsetof(SMU7_Firmware_Header, FanTable),
1314 &tmp, pi->sram_end);
1315 if (ret)
1316 return ret;
1317
1318 pi->fan_table_start = tmp;
1319
1320 ret = ci_read_smc_sram_dword(rdev,
1321 SMU7_FIRMWARE_HEADER_LOCATION +
1322 offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1323 &tmp, pi->sram_end);
1324 if (ret)
1325 return ret;
1326
1327 pi->arb_table_start = tmp;
1328
1329 return 0;
1330}
1331
/*
 * Capture the current spll/mpll related register values into the driver
 * state for later use by the dpm code.
 */
static void ci_read_clock_registers(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	/* spll registers are reached through the SMC indirect space */
	pi->clock_registers.cg_spll_func_cntl =
		RREG32_SMC(CG_SPLL_FUNC_CNTL);
	pi->clock_registers.cg_spll_func_cntl_2 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
	pi->clock_registers.cg_spll_func_cntl_3 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
	pi->clock_registers.cg_spll_func_cntl_4 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
	pi->clock_registers.cg_spll_spread_spectrum =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
	pi->clock_registers.cg_spll_spread_spectrum_2 =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
	/* mpll/memory clock registers are plain MMIO */
	pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
	pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
	pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
	pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
	pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
	pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
}
1358
/* Reset the cached low-sclk interrupt threshold; it is pushed to the
 * SMC later by ci_update_sclk_t(). */
static void ci_init_sclk_t(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->low_sclk_interrupt_t = 0;
}
1365
1366static void ci_enable_thermal_protection(struct radeon_device *rdev,
1367 bool enable)
1368{
1369 u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1370
1371 if (enable)
1372 tmp &= ~THERMAL_PROTECTION_DIS;
1373 else
1374 tmp |= THERMAL_PROTECTION_DIS;
1375 WREG32_SMC(GENERAL_PWRMGT, tmp);
1376}
1377
1378static void ci_enable_acpi_power_management(struct radeon_device *rdev)
1379{
1380 u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1381
1382 tmp |= STATIC_PM_EN;
1383
1384 WREG32_SMC(GENERAL_PWRMGT, tmp);
1385}
1386
#if 0
/* Currently unused: ask the SMC to switch to its minimum power state. */
static int ci_enter_ulp_state(struct radeon_device *rdev)
{

	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);

	udelay(25000);

	return 0;
}

/* Currently unused: wake the SMC from minimum power and wait for its
 * acknowledgement. */
static int ci_exit_ulp_state(struct radeon_device *rdev)
{
	int i;

	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);

	udelay(7000);

	/* poll for the SMC response, bounded by rdev->usec_timeout; a
	 * timeout is silent - 0 is returned either way */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(SMC_RESP_0) == 1)
			break;
		udelay(1000);
	}

	return 0;
}
#endif
1415
1416static int ci_notify_smc_display_change(struct radeon_device *rdev,
1417 bool has_display)
1418{
1419 PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
1420
1421 return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
1422}
1423
1424static int ci_enable_ds_master_switch(struct radeon_device *rdev,
1425 bool enable)
1426{
1427 struct ci_power_info *pi = ci_get_pi(rdev);
1428
1429 if (enable) {
1430 if (pi->caps_sclk_ds) {
1431 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
1432 return -EINVAL;
1433 } else {
1434 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1435 return -EINVAL;
1436 }
1437 } else {
1438 if (pi->caps_sclk_ds) {
1439 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1440 return -EINVAL;
1441 }
1442 }
1443
1444 return 0;
1445}
1446
/*
 * Program the display-gap logic and tell the SMC how long before vblank
 * it can act.  Times are derived from the current refresh rate and
 * vblank time, with fallbacks of 60 Hz / 500 us when unknown.
 */
static void ci_program_display_gap(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
	u32 pre_vbi_time_in_us;
	u32 frame_time_in_us;
	u32 ref_clock = rdev->clock.spll.reference_freq;
	u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);

	/* with no active crtcs the gap logic can be ignored entirely */
	tmp &= ~DISP_GAP_MASK;
	if (rdev->pm.dpm.new_active_crtc_count > 0)
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
	else
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);

	if (refresh_rate == 0)
		refresh_rate = 60;
	if (vblank_time == 0xffffffff)
		vblank_time = 500;
	frame_time_in_us = 1000000 / refresh_rate;
	/* NOTE(review): this subtraction underflows (u32 wrap) when
	 * vblank_time + 200 > frame_time_in_us - confirm inputs are
	 * bounded by the callers */
	pre_vbi_time_in_us =
		frame_time_in_us - 200 - vblank_time;
	/* convert us to reference-clock ticks (ref_clock is in 10 kHz) */
	tmp = pre_vbi_time_in_us * (ref_clock / 100);

	WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));


	ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));

}
1480
1481static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
1482{
1483 struct ci_power_info *pi = ci_get_pi(rdev);
1484 u32 tmp;
1485
1486 if (enable) {
1487 if (pi->caps_sclk_ss_support) {
1488 tmp = RREG32_SMC(GENERAL_PWRMGT);
1489 tmp |= DYN_SPREAD_SPECTRUM_EN;
1490 WREG32_SMC(GENERAL_PWRMGT, tmp);
1491 }
1492 } else {
1493 tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
1494 tmp &= ~SSEN;
1495 WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);
1496
1497 tmp = RREG32_SMC(GENERAL_PWRMGT);
1498 tmp &= ~DYN_SPREAD_SPECTRUM_EN;
1499 WREG32_SMC(GENERAL_PWRMGT, tmp);
1500 }
1501}
1502
/* Program CG_SSP with the r600 default SST/SSTU values. */
static void ci_program_sstp(struct radeon_device *rdev)
{
	WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
}
1507
1508static void ci_enable_display_gap(struct radeon_device *rdev)
1509{
1510 u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
1511
1512 tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
1513 tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
1514 DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
1515
1516 WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
1517}
1518
/*
 * Take the sclk/busy counters out of reset and load the default
 * CISLANDS_VRC_DFLT* values into the CG_FTV_0..7 registers.
 */
static void ci_program_vc(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
	WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
	WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
	WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
	WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
	WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
	WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
	WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
}
1536
/*
 * Undo ci_program_vc(): hold the sclk/busy counters in reset and zero
 * the CG_FTV_0..7 registers.
 */
static void ci_clear_vc(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, 0);
	WREG32_SMC(CG_FTV_1, 0);
	WREG32_SMC(CG_FTV_2, 0);
	WREG32_SMC(CG_FTV_3, 0);
	WREG32_SMC(CG_FTV_4, 0);
	WREG32_SMC(CG_FTV_5, 0);
	WREG32_SMC(CG_FTV_6, 0);
	WREG32_SMC(CG_FTV_7, 0);
}
1554
1555static int ci_upload_firmware(struct radeon_device *rdev)
1556{
1557 struct ci_power_info *pi = ci_get_pi(rdev);
1558 int i, ret;
1559
1560 for (i = 0; i < rdev->usec_timeout; i++) {
1561 if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
1562 break;
1563 }
1564 WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);
1565
1566 ci_stop_smc_clock(rdev);
1567 ci_reset_smc(rdev);
1568
1569 ret = ci_load_smc_ucode(rdev, pi->sram_end);
1570
1571 return ret;
1572
1573}
1574
1575static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
1576 struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
1577 struct atom_voltage_table *voltage_table)
1578{
1579 u32 i;
1580
1581 if (voltage_dependency_table == NULL)
1582 return -EINVAL;
1583
1584 voltage_table->mask_low = 0;
1585 voltage_table->phase_delay = 0;
1586
1587 voltage_table->count = voltage_dependency_table->count;
1588 for (i = 0; i < voltage_table->count; i++) {
1589 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
1590 voltage_table->entries[i].smio_low = 0;
1591 }
1592
1593 return 0;
1594}
1595
1596static int ci_construct_voltage_tables(struct radeon_device *rdev)
1597{
1598 struct ci_power_info *pi = ci_get_pi(rdev);
1599 int ret;
1600
1601 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1602 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
1603 VOLTAGE_OBJ_GPIO_LUT,
1604 &pi->vddc_voltage_table);
1605 if (ret)
1606 return ret;
1607 } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1608 ret = ci_get_svi2_voltage_table(rdev,
1609 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
1610 &pi->vddc_voltage_table);
1611 if (ret)
1612 return ret;
1613 }
1614
1615 if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
1616 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
1617 &pi->vddc_voltage_table);
1618
1619 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1620 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
1621 VOLTAGE_OBJ_GPIO_LUT,
1622 &pi->vddci_voltage_table);
1623 if (ret)
1624 return ret;
1625 } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1626 ret = ci_get_svi2_voltage_table(rdev,
1627 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
1628 &pi->vddci_voltage_table);
1629 if (ret)
1630 return ret;
1631 }
1632
1633 if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
1634 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
1635 &pi->vddci_voltage_table);
1636
1637 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1638 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
1639 VOLTAGE_OBJ_GPIO_LUT,
1640 &pi->mvdd_voltage_table);
1641 if (ret)
1642 return ret;
1643 } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1644 ret = ci_get_svi2_voltage_table(rdev,
1645 &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
1646 &pi->mvdd_voltage_table);
1647 if (ret)
1648 return ret;
1649 }
1650
1651 if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
1652 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
1653 &pi->mvdd_voltage_table);
1654
1655 return 0;
1656}
1657
1658static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
1659 struct atom_voltage_table_entry *voltage_table,
1660 SMU7_Discrete_VoltageLevel *smc_voltage_table)
1661{
1662 int ret;
1663
1664 ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
1665 &smc_voltage_table->StdVoltageHiSidd,
1666 &smc_voltage_table->StdVoltageLoSidd);
1667
1668 if (ret) {
1669 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
1670 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
1671 }
1672
1673 smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
1674 smc_voltage_table->StdVoltageHiSidd =
1675 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
1676 smc_voltage_table->StdVoltageLoSidd =
1677 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
1678}
1679
1680static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
1681 SMU7_Discrete_DpmTable *table)
1682{
1683 struct ci_power_info *pi = ci_get_pi(rdev);
1684 unsigned int count;
1685
1686 table->VddcLevelCount = pi->vddc_voltage_table.count;
1687 for (count = 0; count < table->VddcLevelCount; count++) {
1688 ci_populate_smc_voltage_table(rdev,
1689 &pi->vddc_voltage_table.entries[count],
1690 &table->VddcLevel[count]);
1691
1692 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1693 table->VddcLevel[count].Smio |=
1694 pi->vddc_voltage_table.entries[count].smio_low;
1695 else
1696 table->VddcLevel[count].Smio = 0;
1697 }
1698 table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
1699
1700 return 0;
1701}
1702
1703static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
1704 SMU7_Discrete_DpmTable *table)
1705{
1706 unsigned int count;
1707 struct ci_power_info *pi = ci_get_pi(rdev);
1708
1709 table->VddciLevelCount = pi->vddci_voltage_table.count;
1710 for (count = 0; count < table->VddciLevelCount; count++) {
1711 ci_populate_smc_voltage_table(rdev,
1712 &pi->vddci_voltage_table.entries[count],
1713 &table->VddciLevel[count]);
1714
1715 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1716 table->VddciLevel[count].Smio |=
1717 pi->vddci_voltage_table.entries[count].smio_low;
1718 else
1719 table->VddciLevel[count].Smio = 0;
1720 }
1721 table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
1722
1723 return 0;
1724}
1725
1726static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
1727 SMU7_Discrete_DpmTable *table)
1728{
1729 struct ci_power_info *pi = ci_get_pi(rdev);
1730 unsigned int count;
1731
1732 table->MvddLevelCount = pi->mvdd_voltage_table.count;
1733 for (count = 0; count < table->MvddLevelCount; count++) {
1734 ci_populate_smc_voltage_table(rdev,
1735 &pi->mvdd_voltage_table.entries[count],
1736 &table->MvddLevel[count]);
1737
1738 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1739 table->MvddLevel[count].Smio |=
1740 pi->mvdd_voltage_table.entries[count].smio_low;
1741 else
1742 table->MvddLevel[count].Smio = 0;
1743 }
1744 table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
1745
1746 return 0;
1747}
1748
1749static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
1750 SMU7_Discrete_DpmTable *table)
1751{
1752 int ret;
1753
1754 ret = ci_populate_smc_vddc_table(rdev, table);
1755 if (ret)
1756 return ret;
1757
1758 ret = ci_populate_smc_vddci_table(rdev, table);
1759 if (ret)
1760 return ret;
1761
1762 ret = ci_populate_smc_mvdd_table(rdev, table);
1763 if (ret)
1764 return ret;
1765
1766 return 0;
1767}
1768
/*
 * Look up the mvdd voltage level for @mclk: the first dependency-table
 * entry whose clock is >= @mclk selects the voltage.
 *
 * NOTE(review): every path returns -EINVAL, including the one where a
 * match was found and voltage->Voltage was written - callers may rely
 * on the side effect rather than the return code, so confirm before
 * changing the final return value.
 */
static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
				  SMU7_Discrete_VoltageLevel *voltage)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 i = 0;

	if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
			if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
				voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
				break;
			}
		}

		/* mclk above every table entry */
		if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
			return -EINVAL;
	}

	return -EINVAL;
}
1789
/*
 * Derive the "standard" hi/lo sidd voltages for @voltage_table->value
 * from the cac leakage table.  Both outputs default to the scaled
 * nominal voltage and are overridden when a matching (or next higher)
 * vddc-vs-sclk entry is found.  Returns -EINVAL only when the
 * vddc-vs-sclk table is missing.
 */
static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
{
	u16 v_index, idx;
	bool voltage_found = false;
	*std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
	*std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;

	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
		return -EINVAL;

	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
		/* first pass: exact voltage match */
		for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
			if (voltage_table->value ==
			    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
				voltage_found = true;
				/* clamp the index to the leakage table size */
				if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
					idx = v_index;
				else
					idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
				*std_voltage_lo_sidd =
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
				*std_voltage_hi_sidd =
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
				break;
			}
		}

		/* second pass: first entry with v >= the requested value */
		if (!voltage_found) {
			for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
				if (voltage_table->value <=
				    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
					voltage_found = true;
					if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
						idx = v_index;
					else
						idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
					*std_voltage_lo_sidd =
						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
					*std_voltage_hi_sidd =
						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
					break;
				}
			}
		}
	}

	return 0;
}
1840
1841static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
1842 const struct radeon_phase_shedding_limits_table *limits,
1843 u32 sclk,
1844 u32 *phase_shedding)
1845{
1846 unsigned int i;
1847
1848 *phase_shedding = 1;
1849
1850 for (i = 0; i < limits->count; i++) {
1851 if (sclk < limits->entries[i].sclk) {
1852 *phase_shedding = i;
1853 break;
1854 }
1855 }
1856}
1857
1858static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
1859 const struct radeon_phase_shedding_limits_table *limits,
1860 u32 mclk,
1861 u32 *phase_shedding)
1862{
1863 unsigned int i;
1864
1865 *phase_shedding = 1;
1866
1867 for (i = 0; i < limits->count; i++) {
1868 if (mclk < limits->entries[i].mclk) {
1869 *phase_shedding = i;
1870 break;
1871 }
1872 }
1873}
1874
1875static int ci_init_arb_table_index(struct radeon_device *rdev)
1876{
1877 struct ci_power_info *pi = ci_get_pi(rdev);
1878 u32 tmp;
1879 int ret;
1880
1881 ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
1882 &tmp, pi->sram_end);
1883 if (ret)
1884 return ret;
1885
1886 tmp &= 0x00FFFFFF;
1887 tmp |= MC_CG_ARB_FREQ_F1 << 24;
1888
1889 return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
1890 tmp, pi->sram_end);
1891}
1892
1893static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
1894 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
1895 u32 clock, u32 *voltage)
1896{
1897 u32 i = 0;
1898
1899 if (allowed_clock_voltage_table->count == 0)
1900 return -EINVAL;
1901
1902 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
1903 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
1904 *voltage = allowed_clock_voltage_table->entries[i].v;
1905 return 0;
1906 }
1907 }
1908
1909 *voltage = allowed_clock_voltage_table->entries[i-1].v;
1910
1911 return 0;
1912}
1913
1914static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
1915 u32 sclk, u32 min_sclk_in_sr)
1916{
1917 u32 i;
1918 u32 tmp;
1919 u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
1920 min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
1921
1922 if (sclk < min)
1923 return 0;
1924
1925 for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
1926 tmp = sclk / (1 << i);
1927 if (tmp >= min || i == 0)
1928 break;
1929 }
1930
1931 return (u8)i;
1932}
1933
/* Switch the mc arbiter from set F0 to F1 (see ni_copy_and_switch_arb_sets). */
static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
{
	return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
1938
1939static int ci_reset_to_default(struct radeon_device *rdev)
1940{
1941 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
1942 0 : -EINVAL;
1943}
1944
1945static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
1946{
1947 u32 tmp;
1948
1949 tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
1950
1951 if (tmp == MC_CG_ARB_FREQ_F0)
1952 return 0;
1953
1954 return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
1955}
1956
/* Program the given sclk/mclk pair through atombios, then read the
 * resulting MC arbitration DRAM timing registers back into one SMC
 * arb table entry.  The two timing words are stored big-endian for
 * the SMC; the burst time keeps only the STATE0 field.
 * Always returns 0.
 */
static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
						u32 sclk,
						u32 mclk,
						SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
{
	u32 dram_timing;
	u32 dram_timing2;
	u32 burst_time;

	/* have the vbios configure dram timings for this clock pair */
	radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);

	dram_timing = RREG32(MC_ARB_DRAM_TIMING);
	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
	burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;

	arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
	arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
	arb_regs->McArbBurstTime = (u8)burst_time;

	return 0;
}
1978
1979static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
1980{
1981 struct ci_power_info *pi = ci_get_pi(rdev);
1982 SMU7_Discrete_MCArbDramTimingTable arb_regs;
1983 u32 i, j;
1984 int ret = 0;
1985
1986 memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
1987
1988 for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
1989 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
1990 ret = ci_populate_memory_timing_parameters(rdev,
1991 pi->dpm_table.sclk_table.dpm_levels[i].value,
1992 pi->dpm_table.mclk_table.dpm_levels[j].value,
1993 &arb_regs.entries[i][j]);
1994 if (ret)
1995 break;
1996 }
1997 }
1998
1999 if (ret == 0)
2000 ret = ci_copy_bytes_to_smc(rdev,
2001 pi->arb_table_start,
2002 (u8 *)&arb_regs,
2003 sizeof(SMU7_Discrete_MCArbDramTimingTable),
2004 pi->sram_end);
2005
2006 return ret;
2007}
2008
2009static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
2010{
2011 struct ci_power_info *pi = ci_get_pi(rdev);
2012
2013 if (pi->need_update_smu7_dpm_table == 0)
2014 return 0;
2015
2016 return ci_do_program_memory_timing_parameters(rdev);
2017}
2018
2019static void ci_populate_smc_initial_state(struct radeon_device *rdev,
2020 struct radeon_ps *radeon_boot_state)
2021{
2022 struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
2023 struct ci_power_info *pi = ci_get_pi(rdev);
2024 u32 level = 0;
2025
2026 for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2027 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2028 boot_state->performance_levels[0].sclk) {
2029 pi->smc_state_table.GraphicsBootLevel = level;
2030 break;
2031 }
2032 }
2033
2034 for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2035 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2036 boot_state->performance_levels[0].mclk) {
2037 pi->smc_state_table.MemoryBootLevel = level;
2038 break;
2039 }
2040 }
2041}
2042
2043static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2044{
2045 u32 i;
2046 u32 mask_value = 0;
2047
2048 for (i = dpm_table->count; i > 0; i--) {
2049 mask_value = mask_value << 1;
2050 if (dpm_table->dpm_levels[i-1].enabled)
2051 mask_value |= 0x1;
2052 else
2053 mask_value &= 0xFFFFFFFE;
2054 }
2055
2056 return mask_value;
2057}
2058
2059static void ci_populate_smc_link_level(struct radeon_device *rdev,
2060 SMU7_Discrete_DpmTable *table)
2061{
2062 struct ci_power_info *pi = ci_get_pi(rdev);
2063 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2064 u32 i;
2065
2066 for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2067 table->LinkLevel[i].PcieGenSpeed =
2068 (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2069 table->LinkLevel[i].PcieLaneCount =
2070 r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2071 table->LinkLevel[i].EnabledForActivity = 1;
2072 table->LinkLevel[i].DownT = cpu_to_be32(5);
2073 table->LinkLevel[i].UpT = cpu_to_be32(30);
2074 }
2075
2076 pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2077 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2078 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2079}
2080
2081static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
2082 SMU7_Discrete_DpmTable *table)
2083{
2084 u32 count;
2085 struct atom_clock_dividers dividers;
2086 int ret = -EINVAL;
2087
2088 table->UvdLevelCount =
2089 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2090
2091 for (count = 0; count < table->UvdLevelCount; count++) {
2092 table->UvdLevel[count].VclkFrequency =
2093 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2094 table->UvdLevel[count].DclkFrequency =
2095 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2096 table->UvdLevel[count].MinVddc =
2097 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2098 table->UvdLevel[count].MinVddcPhases = 1;
2099
2100 ret = radeon_atom_get_clock_dividers(rdev,
2101 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2102 table->UvdLevel[count].VclkFrequency, false, &dividers);
2103 if (ret)
2104 return ret;
2105
2106 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2107
2108 ret = radeon_atom_get_clock_dividers(rdev,
2109 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2110 table->UvdLevel[count].DclkFrequency, false, &dividers);
2111 if (ret)
2112 return ret;
2113
2114 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2115
2116 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2117 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2118 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2119 }
2120
2121 return ret;
2122}
2123
2124static int ci_populate_smc_vce_level(struct radeon_device *rdev,
2125 SMU7_Discrete_DpmTable *table)
2126{
2127 u32 count;
2128 struct atom_clock_dividers dividers;
2129 int ret = -EINVAL;
2130
2131 table->VceLevelCount =
2132 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2133
2134 for (count = 0; count < table->VceLevelCount; count++) {
2135 table->VceLevel[count].Frequency =
2136 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2137 table->VceLevel[count].MinVoltage =
2138 (u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2139 table->VceLevel[count].MinPhases = 1;
2140
2141 ret = radeon_atom_get_clock_dividers(rdev,
2142 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2143 table->VceLevel[count].Frequency, false, &dividers);
2144 if (ret)
2145 return ret;
2146
2147 table->VceLevel[count].Divider = (u8)dividers.post_divider;
2148
2149 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2150 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2151 }
2152
2153 return ret;
2154
2155}
2156
2157static int ci_populate_smc_acp_level(struct radeon_device *rdev,
2158 SMU7_Discrete_DpmTable *table)
2159{
2160 u32 count;
2161 struct atom_clock_dividers dividers;
2162 int ret = -EINVAL;
2163
2164 table->AcpLevelCount = (u8)
2165 (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2166
2167 for (count = 0; count < table->AcpLevelCount; count++) {
2168 table->AcpLevel[count].Frequency =
2169 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2170 table->AcpLevel[count].MinVoltage =
2171 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2172 table->AcpLevel[count].MinPhases = 1;
2173
2174 ret = radeon_atom_get_clock_dividers(rdev,
2175 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2176 table->AcpLevel[count].Frequency, false, &dividers);
2177 if (ret)
2178 return ret;
2179
2180 table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2181
2182 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2183 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2184 }
2185
2186 return ret;
2187}
2188
2189static int ci_populate_smc_samu_level(struct radeon_device *rdev,
2190 SMU7_Discrete_DpmTable *table)
2191{
2192 u32 count;
2193 struct atom_clock_dividers dividers;
2194 int ret = -EINVAL;
2195
2196 table->SamuLevelCount =
2197 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2198
2199 for (count = 0; count < table->SamuLevelCount; count++) {
2200 table->SamuLevel[count].Frequency =
2201 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2202 table->SamuLevel[count].MinVoltage =
2203 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2204 table->SamuLevel[count].MinPhases = 1;
2205
2206 ret = radeon_atom_get_clock_dividers(rdev,
2207 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2208 table->SamuLevel[count].Frequency, false, &dividers);
2209 if (ret)
2210 return ret;
2211
2212 table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2213
2214 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2215 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2216 }
2217
2218 return ret;
2219}
2220
/* Compute the MPLL register values for a target memory clock and
 * store them in an SMC memory level entry (host byte order; the
 * caller byteswaps).  Starts from the boot-time register snapshot in
 * pi->clock_registers and patches in the dividers returned by
 * atombios, optional memory spread spectrum, the dll speed and the
 * MRDCK power-down bits.  Returns 0 on success or the error from
 * radeon_atom_get_memory_pll_dividers().
 */
static int ci_calculate_mclk_params(struct radeon_device *rdev,
				    u32 memory_clock,
				    SMU7_Discrete_MemoryLevel *mclk,
				    bool strobe_mode,
				    bool dll_state_on)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 dll_cntl = pi->clock_registers.dll_cntl;
	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
	u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
	u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
	u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
	u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
	u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
	u32 mpll_ss1 = pi->clock_registers.mpll_ss1;
	u32 mpll_ss2 = pi->clock_registers.mpll_ss2;
	struct atom_mpll_param mpll_param;
	int ret;

	ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
	if (ret)
		return ret;

	mpll_func_cntl &= ~BWCTRL_MASK;
	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);

	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);

	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);

	/* the DQ post divider/yclk select only applies to GDDR5 */
	if (pi->mem_gddr5) {
		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
			YCLK_POST_DIV(mpll_param.post_div);
	}

	/* optional memory spread spectrum: derive CLKS/CLKV from the
	 * vbios SS info for the nominal frequency (4x for GDDR5, 2x
	 * otherwise)
	 */
	if (pi->caps_mclk_ss_support) {
		struct radeon_atom_ss ss;
		u32 freq_nom;
		u32 tmp;
		u32 reference_clock = rdev->clock.mpll.reference_freq;

		if (pi->mem_gddr5)
			freq_nom = memory_clock * 4;
		else
			freq_nom = memory_clock * 2;

		tmp = (freq_nom / reference_clock);
		tmp = tmp * tmp;
		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
			u32 clks = reference_clock * 5 / ss.rate;
			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);

			mpll_ss1 &= ~CLKV_MASK;
			mpll_ss1 |= CLKV(clkv);

			mpll_ss2 &= ~CLKS_MASK;
			mpll_ss2 |= CLKS(clks);
		}
	}

	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);

	/* MRDCK*_PDNB set keeps the dlls powered */
	if (dll_state_on)
		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
	else
		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

	mclk->MclkFrequency = memory_clock;
	mclk->MpllFuncCntl = mpll_func_cntl;
	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
	mclk->DllCntl = dll_cntl;
	mclk->MpllSs1 = mpll_ss1;
	mclk->MpllSs2 = mpll_ss2;

	return 0;
}
2307
/* Fill one SMC memory level for the given memory clock: look up the
 * minimum vddc/vddci/mvdd from the dependency tables, decide the
 * stutter/strobe/EDC/dll settings, compute the MPLL registers, and
 * finally byteswap all multi-byte fields for the SMC.
 * Returns 0 on success or a negative error code.
 */
static int ci_populate_single_memory_level(struct radeon_device *rdev,
					   u32 memory_clock,
					   SMU7_Discrete_MemoryLevel *memory_level)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;
	bool dll_state_on;

	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(rdev,
						    &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						    memory_clock, &memory_level->MinVddc);
		if (ret)
			return ret;
	}

	if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(rdev,
						    &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						    memory_clock, &memory_level->MinVddci);
		if (ret)
			return ret;
	}

	if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(rdev,
						    &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
						    memory_clock, &memory_level->MinMvdd);
		if (ret)
			return ret;
	}

	memory_level->MinVddcPhases = 1;

	if (pi->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_mclk(rdev,
						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
						      memory_clock,
						      &memory_level->MinVddcPhases);

	memory_level->EnabledForThrottle = 1;
	memory_level->EnabledForActivity = 1;
	memory_level->UpH = 0;
	memory_level->DownH = 100;
	memory_level->VoltageDownH = 0;
	memory_level->ActivityLevel = (u16)pi->mclk_activity_target;

	memory_level->StutterEnable = false;
	memory_level->StrobeEnable = false;
	memory_level->EdcReadEnable = false;
	memory_level->EdcWriteEnable = false;
	memory_level->RttEnable = false;

	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	/* stutter mode: only below the threshold, with uvd idle,
	 * display stutter enabled and at most two active crtcs
	 */
	if (pi->mclk_stutter_mode_threshold &&
	    (memory_clock <= pi->mclk_stutter_mode_threshold) &&
	    (pi->uvd_enabled == false) &&
	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
	    (rdev->pm.dpm.new_active_crtc_count <= 2))
		memory_level->StutterEnable = true;

	if (pi->mclk_strobe_mode_threshold &&
	    (memory_clock <= pi->mclk_strobe_mode_threshold))
		memory_level->StrobeEnable = 1;

	if (pi->mem_gddr5) {
		memory_level->StrobeRatio =
			si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
		if (pi->mclk_edc_enable_threshold &&
		    (memory_clock > pi->mclk_edc_enable_threshold))
			memory_level->EdcReadEnable = true;

		if (pi->mclk_edc_wr_enable_threshold &&
		    (memory_clock > pi->mclk_edc_wr_enable_threshold))
			memory_level->EdcWriteEnable = true;

		/* pick the dll state from the MC_SEQ fuses; which
		 * register to consult depends on the strobe ratio
		 */
		if (memory_level->StrobeEnable) {
			if (si_get_mclk_frequency_ratio(memory_clock, true) >=
			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
			else
				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
		} else {
			dll_state_on = pi->dll_default_on;
		}
	} else {
		memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
		dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
	}

	ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
	if (ret)
		return ret;

	/* convert everything multi-byte to big-endian for the SMC */
	memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
	memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
	memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
	memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);

	memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
	memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
	memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
	memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
	memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
	memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
	memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
	memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
	memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
	memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
	memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);

	return 0;
}
2422
/* Fill the SMC ACPI (lowest power) graphics and memory levels: sclk
 * forced to the reference clock with the SPLL powered down/reset,
 * acpi voltages (falling back to the pp table minimums), memory dlls
 * reset and MRDCK power-down/bypass cleared.  All multi-byte fields
 * are stored big-endian.  Returns 0 on success or the error from
 * radeon_atom_get_clock_dividers().
 */
static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
				      SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct atom_clock_dividers dividers;
	SMU7_Discrete_VoltageLevel voltage_level;
	u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
	u32 dll_cntl = pi->clock_registers.dll_cntl;
	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
	int ret;

	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (pi->acpi_vddc)
		table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
	else
		table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);

	table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;

	/* run the engine at the bare reference clock in ACPI state */
	table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;

	ret = radeon_atom_get_clock_dividers(rdev,
					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
					     table->ACPILevel.SclkFrequency, false, &dividers);
	if (ret)
		return ret;

	table->ACPILevel.SclkDid = (u8)dividers.post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	/* power down and hold the spll in reset */
	spll_func_cntl &= ~SPLL_PWRON;
	spll_func_cntl |= SPLL_RESET;

	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(4);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
	table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
	table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
	table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;

	/* byteswap the graphics ACPI level for the SMC */
	table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
	table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
	table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
	table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
	table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
	table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
	table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
	table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
	table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
	table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
	table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);

	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;

	if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		if (pi->acpi_vddci)
			table->MemoryACPILevel.MinVddci =
				cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
		else
			table->MemoryACPILevel.MinVddci =
				cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
	}

	if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
		table->MemoryACPILevel.MinMvdd = 0;
	else
		table->MemoryACPILevel.MinMvdd =
			cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);

	/* reset the memory dlls and power them down */
	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);

	table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
	table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
	table->MemoryACPILevel.MpllAdFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
	table->MemoryACPILevel.MpllDqFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
	table->MemoryACPILevel.MpllFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl);
	table->MemoryACPILevel.MpllFuncCntl_1 =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
	table->MemoryACPILevel.MpllFuncCntl_2 =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
	table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
	table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpH = 0;
	table->MemoryACPILevel.DownH = 100;
	table->MemoryACPILevel.VoltageDownH = 0;
	table->MemoryACPILevel.ActivityLevel =
		cpu_to_be16((u16)pi->mclk_activity_target);

	table->MemoryACPILevel.StutterEnable = false;
	table->MemoryACPILevel.StrobeEnable = false;
	table->MemoryACPILevel.EdcReadEnable = false;
	table->MemoryACPILevel.EdcWriteEnable = false;
	table->MemoryACPILevel.RttEnable = false;

	return 0;
}
2537
2538
2539static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
2540{
2541 struct ci_power_info *pi = ci_get_pi(rdev);
2542 struct ci_ulv_parm *ulv = &pi->ulv;
2543
2544 if (ulv->supported) {
2545 if (enable)
2546 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
2547 0 : -EINVAL;
2548 else
2549 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
2550 0 : -EINVAL;
2551 }
2552
2553 return 0;
2554}
2555
/* Fill the SMC ULV state: compute the vddc offset (raw or VID-scaled
 * depending on the voltage controller) relative to the lowest sclk
 * dependency entry.  A zero ULV voltage marks ULV unsupported.
 * Multi-byte fields are byteswapped for the SMC.  Always returns 0.
 */
static int ci_populate_ulv_level(struct radeon_device *rdev,
				 SMU7_Discrete_Ulv *state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	/* NOTE(review): the ULV voltage appears to be stashed in the
	 * backbias_response_time field — presumably repurposed by the
	 * atombios parser; verify against the table parsing code.
	 */
	u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;

	state->CcPwrDynRm = 0;
	state->CcPwrDynRm1 = 0;

	if (ulv_voltage == 0) {
		pi->ulv.supported = false;
		return 0;
	}

	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		/* offset below the lowest dependency entry; zero if the
		 * ulv voltage is already above it
		 */
		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
			state->VddcOffset = 0;
		else
			state->VddcOffset =
				rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
	} else {
		/* SVID2: express the offset in VID steps */
		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
			state->VddcOffsetVid = 0;
		else
			state->VddcOffsetVid = (u8)
				((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
				 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
	}
	state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;

	state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
	state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
	state->VddcOffset = cpu_to_be16(state->VddcOffset);

	return 0;
}
2592
/* Compute the SPLL register values for a target engine clock and
 * store them in an SMC graphics level entry (host byte order; the
 * caller byteswaps).  Uses the dividers returned by atombios and,
 * when supported, engine spread spectrum derived from the vbios SS
 * info.  Returns 0 on success or the error from
 * radeon_atom_get_clock_dividers().
 */
static int ci_calculate_sclk_params(struct radeon_device *rdev,
				    u32 engine_clock,
				    SMU7_Discrete_GraphicsLevel *sclk)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct atom_clock_dividers dividers;
	u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
	u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
	u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
	u32 reference_clock = rdev->clock.spll.reference_freq;
	u32 reference_divider;
	u32 fbdiv;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev,
					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
					     engine_clock, false, &dividers);
	if (ret)
		return ret;

	reference_divider = 1 + dividers.ref_div;
	fbdiv = dividers.fb_div & 0x3FFFFFF;

	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
	spll_func_cntl_3 |= SPLL_DITHEN;

	/* optional engine spread spectrum for the resulting vco freq */
	if (pi->caps_sclk_ss_support) {
		struct radeon_atom_ss ss;
		u32 vco_freq = engine_clock * dividers.post_div;

		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum &= ~CLK_S_MASK;
			cg_spll_spread_spectrum |= CLK_S(clk_s);
			cg_spll_spread_spectrum |= SSEN;

			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
		}
	}

	sclk->SclkFrequency = engine_clock;
	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
	sclk->SclkDid = (u8)dividers.post_divider;

	return 0;
}
2648
/* Fill one SMC graphics level for the given engine clock: SPLL
 * registers, minimum vddc from the sclk dependency table, activity
 * target, optional phase shedding and deep-sleep divider, then
 * byteswap all multi-byte fields for the SMC.
 * Returns 0 on success or a negative error code.
 */
static int ci_populate_single_graphic_level(struct radeon_device *rdev,
					    u32 engine_clock,
					    u16 sclk_activity_level_t,
					    SMU7_Discrete_GraphicsLevel *graphic_level)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
	if (ret)
		return ret;

	ret = ci_get_dependency_volt_by_clk(rdev,
					    &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
					    engine_clock, &graphic_level->MinVddc);
	if (ret)
		return ret;

	graphic_level->SclkFrequency = engine_clock;

	graphic_level->Flags = 0;
	graphic_level->MinVddcPhases = 1;

	if (pi->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_sclk(rdev,
						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
						      engine_clock,
						      &graphic_level->MinVddcPhases);

	graphic_level->ActivityLevel = sclk_activity_level_t;

	graphic_level->CcPwrDynRm = 0;
	graphic_level->CcPwrDynRm1 = 0;
	graphic_level->EnabledForActivity = 1;
	graphic_level->EnabledForThrottle = 1;
	graphic_level->UpH = 0;
	graphic_level->DownH = 0;
	graphic_level->VoltageDownH = 0;
	graphic_level->PowerThrottle = 0;

	if (pi->caps_sclk_ds)
		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
										   engine_clock,
										   CISLAND_MINIMUM_ENGINE_CLOCK);

	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	/* convert everything multi-byte to big-endian for the SMC */
	graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
	graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
	graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
	graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
	graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
	graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
	graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
	graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
	graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
	graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
	graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);

	return 0;
}
2710
2711static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
2712{
2713 struct ci_power_info *pi = ci_get_pi(rdev);
2714 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2715 u32 level_array_address = pi->dpm_table_start +
2716 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
2717 u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
2718 SMU7_MAX_LEVELS_GRAPHICS;
2719 SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
2720 u32 i, ret;
2721
2722 memset(levels, 0, level_array_size);
2723
2724 for (i = 0; i < dpm_table->sclk_table.count; i++) {
2725 ret = ci_populate_single_graphic_level(rdev,
2726 dpm_table->sclk_table.dpm_levels[i].value,
2727 (u16)pi->activity_target[i],
2728 &pi->smc_state_table.GraphicsLevel[i]);
2729 if (ret)
2730 return ret;
2731 if (i == (dpm_table->sclk_table.count - 1))
2732 pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
2733 PPSMC_DISPLAY_WATERMARK_HIGH;
2734 }
2735
2736 pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
2737 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
2738 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
2739
2740 ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2741 (u8 *)levels, level_array_size,
2742 pi->sram_end);
2743 if (ret)
2744 return ret;
2745
2746 return 0;
2747}
2748
/* Thin wrapper around ci_populate_ulv_level() for the ULV state. */
static int ci_populate_ulv_state(struct radeon_device *rdev,
				 SMU7_Discrete_Ulv *ulv_level)
{
	return ci_populate_ulv_level(rdev, ulv_level);
}
2754
2755static int ci_populate_all_memory_levels(struct radeon_device *rdev)
2756{
2757 struct ci_power_info *pi = ci_get_pi(rdev);
2758 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2759 u32 level_array_address = pi->dpm_table_start +
2760 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
2761 u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
2762 SMU7_MAX_LEVELS_MEMORY;
2763 SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
2764 u32 i, ret;
2765
2766 memset(levels, 0, level_array_size);
2767
2768 for (i = 0; i < dpm_table->mclk_table.count; i++) {
2769 if (dpm_table->mclk_table.dpm_levels[i].value == 0)
2770 return -EINVAL;
2771 ret = ci_populate_single_memory_level(rdev,
2772 dpm_table->mclk_table.dpm_levels[i].value,
2773 &pi->smc_state_table.MemoryLevel[i]);
2774 if (ret)
2775 return ret;
2776 }
2777
2778 pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
2779
2780 pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
2781 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
2782 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
2783
2784 pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
2785 PPSMC_DISPLAY_WATERMARK_HIGH;
2786
2787 ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2788 (u8 *)levels, level_array_size,
2789 pi->sram_end);
2790 if (ret)
2791 return ret;
2792
2793 return 0;
2794}
2795
2796static void ci_reset_single_dpm_table(struct radeon_device *rdev,
2797 struct ci_single_dpm_table* dpm_table,
2798 u32 count)
2799{
2800 u32 i;
2801
2802 dpm_table->count = count;
2803 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
2804 dpm_table->dpm_levels[i].enabled = false;
2805}
2806
2807static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
2808 u32 index, u32 pcie_gen, u32 pcie_lanes)
2809{
2810 dpm_table->dpm_levels[index].value = pcie_gen;
2811 dpm_table->dpm_levels[index].param1 = pcie_lanes;
2812 dpm_table->dpm_levels[index].enabled = true;
2813}
2814
2815static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
2816{
2817 struct ci_power_info *pi = ci_get_pi(rdev);
2818
2819 if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
2820 return -EINVAL;
2821
2822 if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
2823 pi->pcie_gen_powersaving = pi->pcie_gen_performance;
2824 pi->pcie_lane_powersaving = pi->pcie_lane_performance;
2825 } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
2826 pi->pcie_gen_performance = pi->pcie_gen_powersaving;
2827 pi->pcie_lane_performance = pi->pcie_lane_powersaving;
2828 }
2829
2830 ci_reset_single_dpm_table(rdev,
2831 &pi->dpm_table.pcie_speed_table,
2832 SMU7_MAX_LEVELS_LINK);
2833
2834 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
2835 pi->pcie_gen_powersaving.min,
2836 pi->pcie_lane_powersaving.min);
2837 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
2838 pi->pcie_gen_performance.min,
2839 pi->pcie_lane_performance.min);
2840 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
2841 pi->pcie_gen_powersaving.min,
2842 pi->pcie_lane_powersaving.max);
2843 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
2844 pi->pcie_gen_performance.min,
2845 pi->pcie_lane_performance.max);
2846 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
2847 pi->pcie_gen_powersaving.max,
2848 pi->pcie_lane_powersaving.max);
2849 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
2850 pi->pcie_gen_performance.max,
2851 pi->pcie_lane_performance.max);
2852
2853 pi->dpm_table.pcie_speed_table.count = 6;
2854
2855 return 0;
2856}
2857
/*
 * Build the driver-side default dpm tables (sclk, mclk, vddc, vddci,
 * mvdd and pcie) from the vbios clock/voltage dependency tables.
 * Duplicate consecutive clock values are collapsed so each sclk/mclk
 * level carries a unique clock.  Returns 0 on success, -EINVAL when
 * the required sclk/mclk dependency tables are missing or empty.
 */
static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
	struct radeon_cac_leakage_table *std_voltage_table =
		&rdev->pm.dpm.dyn_state.cac_leakage_table;
	u32 i;

	if (allowed_sclk_vddc_table == NULL)
		return -EINVAL;
	if (allowed_sclk_vddc_table->count < 1)
		return -EINVAL;
	if (allowed_mclk_table == NULL)
		return -EINVAL;
	if (allowed_mclk_table->count < 1)
		return -EINVAL;

	memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));

	/* clear all level slots before refilling each table */
	ci_reset_single_dpm_table(rdev,
				  &pi->dpm_table.sclk_table,
				  SMU7_MAX_LEVELS_GRAPHICS);
	ci_reset_single_dpm_table(rdev,
				  &pi->dpm_table.mclk_table,
				  SMU7_MAX_LEVELS_MEMORY);
	ci_reset_single_dpm_table(rdev,
				  &pi->dpm_table.vddc_table,
				  SMU7_MAX_LEVELS_VDDC);
	ci_reset_single_dpm_table(rdev,
				  &pi->dpm_table.vddci_table,
				  SMU7_MAX_LEVELS_VDDCI);
	ci_reset_single_dpm_table(rdev,
				  &pi->dpm_table.mvdd_table,
				  SMU7_MAX_LEVELS_MVDD);

	/* sclk levels: one per unique clock in the vddc-on-sclk table */
	pi->dpm_table.sclk_table.count = 0;
	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
		if ((i == 0) ||
		    (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
		     allowed_sclk_vddc_table->entries[i].clk)) {
			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
				allowed_sclk_vddc_table->entries[i].clk;
			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true;
			pi->dpm_table.sclk_table.count++;
		}
	}

	/* mclk levels: one per unique clock in the vddc-on-mclk table */
	pi->dpm_table.mclk_table.count = 0;
	for (i = 0; i < allowed_mclk_table->count; i++) {
		if ((i==0) ||
		    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
		     allowed_mclk_table->entries[i].clk)) {
			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
				allowed_mclk_table->entries[i].clk;
			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true;
			pi->dpm_table.mclk_table.count++;
		}
	}

	/* vddc levels mirror the sclk table; param1 caches the leakage value.
	 * NOTE(review): assumes cac_leakage_table has at least as many entries
	 * as the sclk/vddc dependency table — TODO confirm against the vbios
	 * parsing code. */
	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
		pi->dpm_table.vddc_table.dpm_levels[i].value =
			allowed_sclk_vddc_table->entries[i].v;
		pi->dpm_table.vddc_table.dpm_levels[i].param1 =
			std_voltage_table->entries[i].leakage;
		pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
	}
	pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;

	/* vddci levels from the vddci-on-mclk table (pointer reused;
	 * the address of a struct member is never NULL, so this check
	 * is always true) */
	allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
	if (allowed_mclk_table) {
		for (i = 0; i < allowed_mclk_table->count; i++) {
			pi->dpm_table.vddci_table.dpm_levels[i].value =
				allowed_mclk_table->entries[i].v;
			pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
		}
		pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
	}

	/* mvdd levels from the mvdd-on-mclk table */
	allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
	if (allowed_mclk_table) {
		for (i = 0; i < allowed_mclk_table->count; i++) {
			pi->dpm_table.mvdd_table.dpm_levels[i].value =
				allowed_mclk_table->entries[i].v;
			pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
		}
		pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
	}

	/* return value intentionally not propagated; a missing pcie table
	 * does not invalidate the clock/voltage tables above */
	ci_setup_default_pcie_tables(rdev);

	return 0;
}
2953
2954static int ci_find_boot_level(struct ci_single_dpm_table *table,
2955 u32 value, u32 *boot_level)
2956{
2957 u32 i;
2958 int ret = -EINVAL;
2959
2960 for(i = 0; i < table->count; i++) {
2961 if (value == table->dpm_levels[i].value) {
2962 *boot_level = i;
2963 ret = 0;
2964 }
2965 }
2966
2967 return ret;
2968}
2969
/*
 * Populate the SMU7 discrete dpm table and upload it to the SMC.
 *
 * Builds the default dpm tables, fills in voltage/ULV/graphics/memory/
 * link/ACPI/VCE/ACP/SAMU/UVD levels, resolves the vbios boot levels,
 * sets the global dpm knobs, byte-swaps the multi-byte fields to the
 * big-endian layout the cpu_to_be*() conversions below produce, and
 * finally copies the table (minus the trailing PID controllers) into
 * SMC RAM.  Returns 0 on success or a negative error code.
 */
static int ci_init_smc_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ulv_parm *ulv = &pi->ulv;
	struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
	int ret;

	ret = ci_setup_default_dpm_tables(rdev);
	if (ret)
		return ret;

	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
		ci_populate_smc_voltage_tables(rdev, table);

	ci_init_fps_limits(rdev);

	/* platform capability flags */
	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (pi->mem_gddr5)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	if (ulv->supported) {
		ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
		if (ret)
			return ret;
		WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
	}

	/* per-block dpm levels */
	ret = ci_populate_all_graphic_levels(rdev);
	if (ret)
		return ret;

	ret = ci_populate_all_memory_levels(rdev);
	if (ret)
		return ret;

	ci_populate_smc_link_level(rdev, table);

	ret = ci_populate_smc_acpi_level(rdev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_vce_level(rdev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_acp_level(rdev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_samu_level(rdev, table);
	if (ret)
		return ret;

	ret = ci_do_program_memory_timing_parameters(rdev);
	if (ret)
		return ret;

	ret = ci_populate_smc_uvd_level(rdev, table);
	if (ret)
		return ret;

	table->UvdBootLevel = 0;
	table->VceBootLevel = 0;
	table->AcpBootLevel = 0;
	table->SamuBootLevel = 0;
	table->GraphicsBootLevel = 0;
	table->MemoryBootLevel = 0;

	/* errors here are deliberately ignored: if the vbios boot clock is
	 * not found in the table the boot level simply stays 0 */
	ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
				 pi->vbios_boot_state.sclk_bootup_value,
				 (u32 *)&pi->smc_state_table.GraphicsBootLevel);

	ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
				 pi->vbios_boot_state.mclk_bootup_value,
				 (u32 *)&pi->smc_state_table.MemoryBootLevel);

	table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
	table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
	table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;

	ci_populate_smc_initial_state(rdev, radeon_boot_state);

	ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
	if (ret)
		return ret;

	/* global dpm knobs */
	table->UVDInterval = 1;
	table->VCEInterval = 1;
	table->ACPInterval = 1;
	table->SAMUInterval = 1;
	table->GraphicsVoltageChangeEnable = 1;
	table->GraphicsThermThrottleEnable = 1;
	table->GraphicsInterval = 1;
	table->VoltageInterval = 1;
	table->ThermalInterval = 1;
	/* thermal limits are converted from millidegrees to Q8.8 format */
	table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
					     CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
	table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
					    CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
	table->MemoryVoltageChangeEnable = 1;
	table->MemoryInterval = 1;
	table->VoltageResponseTime = 0;
	table->VddcVddciDelta = 4000;
	table->PhaseResponseTime = 0;
	table->MemoryThermThrottleEnable = 1;
	table->PCIeBootLinkLevel = 0;
	table->PCIeGenInterval = 1;
	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
		table->SVI2Enable = 1;
	else
		table->SVI2Enable = 0;

	table->ThermGpio = 17;
	table->SclkStepSize = 0x4000;

	/* swap all multi-byte fields to the SMC's big-endian layout */
	table->SystemFlags = cpu_to_be32(table->SystemFlags);
	table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
	table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
	table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
	table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
	table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
	table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
	table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
	table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
	table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
	table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
	table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
	table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
	table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);

	/* upload everything from SystemFlags up to (but excluding) the
	 * three trailing PID controller structs */
	ret = ci_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Discrete_DpmTable, SystemFlags),
				   (u8 *)&table->SystemFlags,
				   sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
				   pi->sram_end);
	if (ret)
		return ret;

	return 0;
}
3117
3118static void ci_trim_single_dpm_states(struct radeon_device *rdev,
3119 struct ci_single_dpm_table *dpm_table,
3120 u32 low_limit, u32 high_limit)
3121{
3122 u32 i;
3123
3124 for (i = 0; i < dpm_table->count; i++) {
3125 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3126 (dpm_table->dpm_levels[i].value > high_limit))
3127 dpm_table->dpm_levels[i].enabled = false;
3128 else
3129 dpm_table->dpm_levels[i].enabled = true;
3130 }
3131}
3132
3133static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
3134 u32 speed_low, u32 lanes_low,
3135 u32 speed_high, u32 lanes_high)
3136{
3137 struct ci_power_info *pi = ci_get_pi(rdev);
3138 struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3139 u32 i, j;
3140
3141 for (i = 0; i < pcie_table->count; i++) {
3142 if ((pcie_table->dpm_levels[i].value < speed_low) ||
3143 (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3144 (pcie_table->dpm_levels[i].value > speed_high) ||
3145 (pcie_table->dpm_levels[i].param1 > lanes_high))
3146 pcie_table->dpm_levels[i].enabled = false;
3147 else
3148 pcie_table->dpm_levels[i].enabled = true;
3149 }
3150
3151 for (i = 0; i < pcie_table->count; i++) {
3152 if (pcie_table->dpm_levels[i].enabled) {
3153 for (j = i + 1; j < pcie_table->count; j++) {
3154 if (pcie_table->dpm_levels[j].enabled) {
3155 if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3156 (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3157 pcie_table->dpm_levels[j].enabled = false;
3158 }
3159 }
3160 }
3161 }
3162}
3163
3164static int ci_trim_dpm_states(struct radeon_device *rdev,
3165 struct radeon_ps *radeon_state)
3166{
3167 struct ci_ps *state = ci_get_ps(radeon_state);
3168 struct ci_power_info *pi = ci_get_pi(rdev);
3169 u32 high_limit_count;
3170
3171 if (state->performance_level_count < 1)
3172 return -EINVAL;
3173
3174 if (state->performance_level_count == 1)
3175 high_limit_count = 0;
3176 else
3177 high_limit_count = 1;
3178
3179 ci_trim_single_dpm_states(rdev,
3180 &pi->dpm_table.sclk_table,
3181 state->performance_levels[0].sclk,
3182 state->performance_levels[high_limit_count].sclk);
3183
3184 ci_trim_single_dpm_states(rdev,
3185 &pi->dpm_table.mclk_table,
3186 state->performance_levels[0].mclk,
3187 state->performance_levels[high_limit_count].mclk);
3188
3189 ci_trim_pcie_dpm_states(rdev,
3190 state->performance_levels[0].pcie_gen,
3191 state->performance_levels[0].pcie_lane,
3192 state->performance_levels[high_limit_count].pcie_gen,
3193 state->performance_levels[high_limit_count].pcie_lane);
3194
3195 return 0;
3196}
3197
3198static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
3199{
3200 struct radeon_clock_voltage_dependency_table *disp_voltage_table =
3201 &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3202 struct radeon_clock_voltage_dependency_table *vddc_table =
3203 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3204 u32 requested_voltage = 0;
3205 u32 i;
3206
3207 if (disp_voltage_table == NULL)
3208 return -EINVAL;
3209 if (!disp_voltage_table->count)
3210 return -EINVAL;
3211
3212 for (i = 0; i < disp_voltage_table->count; i++) {
3213 if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3214 requested_voltage = disp_voltage_table->entries[i].v;
3215 }
3216
3217 for (i = 0; i < vddc_table->count; i++) {
3218 if (requested_voltage <= vddc_table->entries[i].v) {
3219 requested_voltage = vddc_table->entries[i].v;
3220 return (ci_send_msg_to_smc_with_parameter(rdev,
3221 PPSMC_MSG_VddC_Request,
3222 requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3223 0 : -EINVAL;
3224 }
3225 }
3226
3227 return -EINVAL;
3228}
3229
3230static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
3231{
3232 struct ci_power_info *pi = ci_get_pi(rdev);
3233 PPSMC_Result result;
3234
3235 if (!pi->sclk_dpm_key_disabled) {
3236 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3237 result = ci_send_msg_to_smc_with_parameter(rdev,
3238 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3239 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3240 if (result != PPSMC_Result_OK)
3241 return -EINVAL;
3242 }
3243 }
3244
3245 if (!pi->mclk_dpm_key_disabled) {
3246 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3247 result = ci_send_msg_to_smc_with_parameter(rdev,
3248 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3249 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3250 if (result != PPSMC_Result_OK)
3251 return -EINVAL;
3252 }
3253 }
3254
3255 if (!pi->pcie_dpm_key_disabled) {
3256 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3257 result = ci_send_msg_to_smc_with_parameter(rdev,
3258 PPSMC_MSG_PCIeDPM_SetEnabledMask,
3259 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3260 if (result != PPSMC_Result_OK)
3261 return -EINVAL;
3262 }
3263 }
3264
3265 ci_apply_disp_minimum_voltage_request(rdev);
3266
3267 return 0;
3268}
3269
3270static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
3271 struct radeon_ps *radeon_state)
3272{
3273 struct ci_power_info *pi = ci_get_pi(rdev);
3274 struct ci_ps *state = ci_get_ps(radeon_state);
3275 struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3276 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3277 struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3278 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3279 u32 i;
3280
3281 pi->need_update_smu7_dpm_table = 0;
3282
3283 for (i = 0; i < sclk_table->count; i++) {
3284 if (sclk == sclk_table->dpm_levels[i].value)
3285 break;
3286 }
3287
3288 if (i >= sclk_table->count) {
3289 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3290 } else {
3291 /* XXX check display min clock requirements */
3292 if (0 != CISLAND_MINIMUM_ENGINE_CLOCK)
3293 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3294 }
3295
3296 for (i = 0; i < mclk_table->count; i++) {
3297 if (mclk == mclk_table->dpm_levels[i].value)
3298 break;
3299 }
3300
3301 if (i >= mclk_table->count)
3302 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3303
3304 if (rdev->pm.dpm.current_active_crtc_count !=
3305 rdev->pm.dpm.new_active_crtc_count)
3306 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3307}
3308
3309static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
3310 struct radeon_ps *radeon_state)
3311{
3312 struct ci_power_info *pi = ci_get_pi(rdev);
3313 struct ci_ps *state = ci_get_ps(radeon_state);
3314 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3315 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3316 struct ci_dpm_table *dpm_table = &pi->dpm_table;
3317 int ret;
3318
3319 if (!pi->need_update_smu7_dpm_table)
3320 return 0;
3321
3322 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
3323 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
3324
3325 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
3326 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
3327
3328 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
3329 ret = ci_populate_all_graphic_levels(rdev);
3330 if (ret)
3331 return ret;
3332 }
3333
3334 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
3335 ret = ci_populate_all_memory_levels(rdev);
3336 if (ret)
3337 return ret;
3338 }
3339
3340 return 0;
3341}
3342
/*
 * Enable or disable UVD dpm on the SMC.
 *
 * On enable: build the UVD level enable mask from the clock/voltage
 * dependency table (levels whose voltage fits under the current AC/DC
 * vddc limit; only the top level unless full UVD dpm is supported),
 * send it, then clear mclk level 0 from the mclk mask — presumably to
 * avoid memory clock switches while UVD is active; TODO confirm.
 * On disable: restore mclk level 0.  Finally sends the UVDDPM
 * enable/disable message and returns 0 or -EINVAL based on its result.
 */
static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct radeon_clock_and_voltage_limits *max_limits;
	int i;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;

		/* walk top-down; without caps_uvd_dpm only the highest
		 * level that fits the voltage limit is enabled */
		for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;

				if (!pi->caps_uvd_dpm)
					break;
			}
		}

		ci_send_msg_to_smc_with_parameter(rdev,
						  PPSMC_MSG_UVDDPM_SetEnabledMask,
						  pi->dpm_level_enable_mask.uvd_dpm_enable_mask);

		if (pi->last_mclk_dpm_enable_mask & 0x1) {
			pi->uvd_enabled = true;
			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
			ci_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
		}
	} else {
		/* re-enable mclk level 0 if it was part of the last mask */
		if (pi->last_mclk_dpm_enable_mask & 0x1) {
			pi->uvd_enabled = false;
			pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
			ci_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
		}
	}

	return (ci_send_msg_to_smc(rdev, enable ?
				   PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
3391
/* VCE/SAMU/ACP dpm enable helpers: compiled out until those engines'
 * dpm support is wired up.  Each follows the same pattern as
 * ci_enable_uvd_dpm(): build an enable mask from the engine's
 * clock/voltage dependency table against the AC/DC vddc limit, send it,
 * then send the engine's dpm enable/disable message. */
#if 0
static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct radeon_clock_and_voltage_limits *max_limits;
	int i;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
		/* top-down; only the highest fitting level unless caps_vce_dpm */
		for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;

				if (!pi->caps_vce_dpm)
					break;
			}
		}

		ci_send_msg_to_smc_with_parameter(rdev,
						  PPSMC_MSG_VCEDPM_SetEnabledMask,
						  pi->dpm_level_enable_mask.vce_dpm_enable_mask);
	}

	return (ci_send_msg_to_smc(rdev, enable ?
				   PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct radeon_clock_and_voltage_limits *max_limits;
	int i;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
		for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;

				if (!pi->caps_samu_dpm)
					break;
			}
		}

		ci_send_msg_to_smc_with_parameter(rdev,
						  PPSMC_MSG_SAMUDPM_SetEnabledMask,
						  pi->dpm_level_enable_mask.samu_dpm_enable_mask);
	}
	return (ci_send_msg_to_smc(rdev, enable ?
				   PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct radeon_clock_and_voltage_limits *max_limits;
	int i;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
		for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;

				if (!pi->caps_acp_dpm)
					break;
			}
		}

		ci_send_msg_to_smc_with_parameter(rdev,
						  PPSMC_MSG_ACPDPM_SetEnabledMask,
						  pi->dpm_level_enable_mask.acp_dpm_enable_mask);
	}

	return (ci_send_msg_to_smc(rdev, enable ?
				   PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
#endif
3488
3489static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
3490{
3491 struct ci_power_info *pi = ci_get_pi(rdev);
3492 u32 tmp;
3493
3494 if (!gate) {
3495 if (pi->caps_uvd_dpm ||
3496 (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
3497 pi->smc_state_table.UvdBootLevel = 0;
3498 else
3499 pi->smc_state_table.UvdBootLevel =
3500 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
3501
3502 tmp = RREG32_SMC(DPM_TABLE_475);
3503 tmp &= ~UvdBootLevel_MASK;
3504 tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
3505 WREG32_SMC(DPM_TABLE_475, tmp);
3506 }
3507
3508 return ci_enable_uvd_dpm(rdev, !gate);
3509}
3510
/* VCE/SAMU/ACP dpm update helpers: compiled out until those engines'
 * dpm support is wired up. */
#if 0
/* Pick the lowest VCE level whose evclk meets the (hard-coded) minimum;
 * falls back to the top table entry when none does. */
static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
{
	u8 i;
	u32 min_evclk = 30000; /* ??? */
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].evclk >= min_evclk)
			return i;
	}

	return table->count - 1;
}

/* Enable/disable VCE dpm on evclk transitions between zero and non-zero,
 * programming the VCE boot level into DPM_TABLE_475 before enabling. */
static int ci_update_vce_dpm(struct radeon_device *rdev,
			     struct radeon_ps *radeon_new_state,
			     struct radeon_ps *radeon_current_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	bool new_vce_clock_non_zero = (radeon_new_state->evclk != 0);
	bool old_vce_clock_non_zero = (radeon_current_state->evclk != 0);
	int ret = 0;
	u32 tmp;

	if (new_vce_clock_non_zero != old_vce_clock_non_zero) {
		if (new_vce_clock_non_zero) {
			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);

			tmp = RREG32_SMC(DPM_TABLE_475);
			tmp &= ~VceBootLevel_MASK;
			tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
			WREG32_SMC(DPM_TABLE_475, tmp);

			ret = ci_enable_vce_dpm(rdev, true);
		} else {
			ret = ci_enable_vce_dpm(rdev, false);
		}
	}
	return ret;
}

static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
{
	return ci_enable_samu_dpm(rdev, gate);
}

/* Gate/ungate ACP dpm; on ungate the ACP boot level (always 0) is
 * written into DPM_TABLE_475 first. */
static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (!gate) {
		pi->smc_state_table.AcpBootLevel = 0;

		tmp = RREG32_SMC(DPM_TABLE_475);
		tmp &= ~AcpBootLevel_MASK;
		tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
		WREG32_SMC(DPM_TABLE_475, tmp);
	}

	return ci_enable_acp_dpm(rdev, !gate);
}
#endif
3576
3577static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
3578 struct radeon_ps *radeon_state)
3579{
3580 struct ci_power_info *pi = ci_get_pi(rdev);
3581 int ret;
3582
3583 ret = ci_trim_dpm_states(rdev, radeon_state);
3584 if (ret)
3585 return ret;
3586
3587 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3588 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
3589 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3590 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
3591 pi->last_mclk_dpm_enable_mask =
3592 pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3593 if (pi->uvd_enabled) {
3594 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
3595 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3596 }
3597 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
3598 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
3599
3600 return 0;
3601}
3602
3603static int ci_set_mc_special_registers(struct radeon_device *rdev,
3604 struct ci_mc_reg_table *table)
3605{
3606 struct ci_power_info *pi = ci_get_pi(rdev);
3607 u8 i, j, k;
3608 u32 temp_reg;
3609
3610 for (i = 0, j = table->last; i < table->last; i++) {
3611 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3612 return -EINVAL;
3613 switch(table->mc_reg_address[i].s1 << 2) {
3614 case MC_SEQ_MISC1:
3615 temp_reg = RREG32(MC_PMG_CMD_EMRS);
3616 table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
3617 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
3618 for (k = 0; k < table->num_entries; k++) {
3619 table->mc_reg_table_entry[k].mc_data[j] =
3620 ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
3621 }
3622 j++;
3623 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3624 return -EINVAL;
3625
3626 temp_reg = RREG32(MC_PMG_CMD_MRS);
3627 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
3628 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
3629 for (k = 0; k < table->num_entries; k++) {
3630 table->mc_reg_table_entry[k].mc_data[j] =
3631 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3632 if (!pi->mem_gddr5)
3633 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
3634 }
3635 j++;
3636 if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3637 return -EINVAL;
3638
3639 if (!pi->mem_gddr5) {
3640 table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
3641 table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
3642 for (k = 0; k < table->num_entries; k++) {
3643 table->mc_reg_table_entry[k].mc_data[j] =
3644 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
3645 }
3646 j++;
3647 if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3648 return -EINVAL;
3649 }
3650 break;
3651 case MC_SEQ_RESERVE_M:
3652 temp_reg = RREG32(MC_PMG_CMD_MRS1);
3653 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
3654 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
3655 for (k = 0; k < table->num_entries; k++) {
3656 table->mc_reg_table_entry[k].mc_data[j] =
3657 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3658 }
3659 j++;
3660 if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3661 return -EINVAL;
3662 break;
3663 default:
3664 break;
3665 }
3666
3667 }
3668
3669 table->last = j;
3670
3671 return 0;
3672}
3673
3674static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
3675{
3676 bool result = true;
3677
3678 switch(in_reg) {
3679 case MC_SEQ_RAS_TIMING >> 2:
3680 *out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
3681 break;
3682 case MC_SEQ_DLL_STBY >> 2:
3683 *out_reg = MC_SEQ_DLL_STBY_LP >> 2;
3684 break;
3685 case MC_SEQ_G5PDX_CMD0 >> 2:
3686 *out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
3687 break;
3688 case MC_SEQ_G5PDX_CMD1 >> 2:
3689 *out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
3690 break;
3691 case MC_SEQ_G5PDX_CTRL >> 2:
3692 *out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
3693 break;
3694 case MC_SEQ_CAS_TIMING >> 2:
3695 *out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
3696 break;
3697 case MC_SEQ_MISC_TIMING >> 2:
3698 *out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
3699 break;
3700 case MC_SEQ_MISC_TIMING2 >> 2:
3701 *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
3702 break;
3703 case MC_SEQ_PMG_DVS_CMD >> 2:
3704 *out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
3705 break;
3706 case MC_SEQ_PMG_DVS_CTL >> 2:
3707 *out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
3708 break;
3709 case MC_SEQ_RD_CTL_D0 >> 2:
3710 *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
3711 break;
3712 case MC_SEQ_RD_CTL_D1 >> 2:
3713 *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
3714 break;
3715 case MC_SEQ_WR_CTL_D0 >> 2:
3716 *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
3717 break;
3718 case MC_SEQ_WR_CTL_D1 >> 2:
3719 *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
3720 break;
3721 case MC_PMG_CMD_EMRS >> 2:
3722 *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
3723 break;
3724 case MC_PMG_CMD_MRS >> 2:
3725 *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
3726 break;
3727 case MC_PMG_CMD_MRS1 >> 2:
3728 *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
3729 break;
3730 case MC_SEQ_PMG_TIMING >> 2:
3731 *out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
3732 break;
3733 case MC_PMG_CMD_MRS2 >> 2:
3734 *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
3735 break;
3736 case MC_SEQ_WR_CTL_2 >> 2:
3737 *out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
3738 break;
3739 default:
3740 result = false;
3741 break;
3742 }
3743
3744 return result;
3745}
3746
3747static void ci_set_valid_flag(struct ci_mc_reg_table *table)
3748{
3749 u8 i, j;
3750
3751 for (i = 0; i < table->last; i++) {
3752 for (j = 1; j < table->num_entries; j++) {
3753 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
3754 table->mc_reg_table_entry[j].mc_data[i]) {
3755 table->valid_flag |= 1 << i;
3756 break;
3757 }
3758 }
3759 }
3760}
3761
3762static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
3763{
3764 u32 i;
3765 u16 address;
3766
3767 for (i = 0; i < table->last; i++) {
3768 table->mc_reg_address[i].s0 =
3769 ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
3770 address : table->mc_reg_address[i].s1;
3771 }
3772}
3773
3774static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
3775 struct ci_mc_reg_table *ci_table)
3776{
3777 u8 i, j;
3778
3779 if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3780 return -EINVAL;
3781 if (table->num_entries > MAX_AC_TIMING_ENTRIES)
3782 return -EINVAL;
3783
3784 for (i = 0; i < table->last; i++)
3785 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
3786
3787 ci_table->last = table->last;
3788
3789 for (i = 0; i < table->num_entries; i++) {
3790 ci_table->mc_reg_table_entry[i].mclk_max =
3791 table->mc_reg_table_entry[i].mclk_max;
3792 for (j = 0; j < table->last; j++)
3793 ci_table->mc_reg_table_entry[i].mc_data[j] =
3794 table->mc_reg_table_entry[i].mc_data[j];
3795 }
3796 ci_table->num_entries = table->num_entries;
3797
3798 return 0;
3799}
3800
/*
 * Build the driver's mc reg table: seed every low-power shadow register
 * from its live counterpart, fetch the per-module mc reg table from the
 * vbios, copy it into the driver structure, resolve the shadow
 * addresses, append the derived special registers and compute the
 * valid-column mask.  Returns 0 on success or a negative error code.
 */
static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct atom_mc_reg_table *table;
	struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
	u8 module_index = rv770_get_memory_module_index(rdev);
	int ret;

	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* mirror the current register values into the _LP shadows */
	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
	WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
	WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
	WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
	WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
	WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
	WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));

	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
	if (ret)
		goto init_mc_done;

	ret = ci_copy_vbios_mc_reg_table(table, ci_table);
	if (ret)
		goto init_mc_done;

	ci_set_s0_mc_reg_index(ci_table);

	ret = ci_set_mc_special_registers(rdev, ci_table);
	if (ret)
		goto init_mc_done;

	ci_set_valid_flag(ci_table);

init_mc_done:
	/* the temporary atom table is always freed, success or failure */
	kfree(table);

	return ret;
}
3855
3856static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
3857 SMU7_Discrete_MCRegisters *mc_reg_table)
3858{
3859 struct ci_power_info *pi = ci_get_pi(rdev);
3860 u32 i, j;
3861
3862 for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
3863 if (pi->mc_reg_table.valid_flag & (1 << j)) {
3864 if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3865 return -EINVAL;
3866 mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
3867 mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
3868 i++;
3869 }
3870 }
3871
3872 mc_reg_table->last = (u8)i;
3873
3874 return 0;
3875}
3876
3877static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
3878 SMU7_Discrete_MCRegisterSet *data,
3879 u32 num_entries, u32 valid_flag)
3880{
3881 u32 i, j;
3882
3883 for (i = 0, j = 0; j < num_entries; j++) {
3884 if (valid_flag & (1 << j)) {
3885 data->value[i] = cpu_to_be32(entry->mc_data[j]);
3886 i++;
3887 }
3888 }
3889}
3890
3891static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
3892 const u32 memory_clock,
3893 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
3894{
3895 struct ci_power_info *pi = ci_get_pi(rdev);
3896 u32 i = 0;
3897
3898 for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
3899 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
3900 break;
3901 }
3902
3903 if ((i == pi->mc_reg_table.num_entries) && (i > 0))
3904 --i;
3905
3906 ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
3907 mc_reg_table_data, pi->mc_reg_table.last,
3908 pi->mc_reg_table.valid_flag);
3909}
3910
3911static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
3912 SMU7_Discrete_MCRegisters *mc_reg_table)
3913{
3914 struct ci_power_info *pi = ci_get_pi(rdev);
3915 u32 i;
3916
3917 for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
3918 ci_convert_mc_reg_table_entry_to_smc(rdev,
3919 pi->dpm_table.mclk_table.dpm_levels[i].value,
3920 &mc_reg_table->data[i]);
3921}
3922
/* Build the complete MC register table (addresses + per-level data)
 * and upload it to the SMC's SRAM at mc_reg_table_start.
 * Called once during dpm enable; later mclk changes only re-upload the
 * data portion via ci_update_and_upload_mc_reg_table(). */
static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));

	ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
	if (ret)
		return ret;
	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);

	return ci_copy_bytes_to_smc(rdev,
				    pi->mc_reg_table_start,
				    (u8 *)&pi->smc_mc_reg_table,
				    sizeof(SMU7_Discrete_MCRegisters),
				    pi->sram_end);
}
3941
/* Re-upload only the per-level MC register data to the SMC after an
 * overdrive mclk change. The address list is unchanged, so only the
 * data[] portion of the table is copied, at its offset within the
 * SMC-resident structure. No-op unless DPMTABLE_OD_UPDATE_MCLK is set. */
static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
		return 0;

	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));

	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);

	return ci_copy_bytes_to_smc(rdev,
				    pi->mc_reg_table_start +
				    offsetof(SMU7_Discrete_MCRegisters, data[0]),
				    (u8 *)&pi->smc_mc_reg_table.data[0],
				    sizeof(SMU7_Discrete_MCRegisterSet) *
				    pi->dpm_table.mclk_table.count,
				    pi->sram_end);
}
3961
3962static void ci_enable_voltage_control(struct radeon_device *rdev)
3963{
3964 u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
3965
3966 tmp |= VOLT_PWRMGT_EN;
3967 WREG32_SMC(GENERAL_PWRMGT, tmp);
3968}
3969
3970static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
3971 struct radeon_ps *radeon_state)
3972{
3973 struct ci_ps *state = ci_get_ps(radeon_state);
3974 int i;
3975 u16 pcie_speed, max_speed = 0;
3976
3977 for (i = 0; i < state->performance_level_count; i++) {
3978 pcie_speed = state->performance_levels[i].pcie_gen;
3979 if (max_speed < pcie_speed)
3980 max_speed = pcie_speed;
3981 }
3982
3983 return max_speed;
3984}
3985
3986static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
3987{
3988 u32 speed_cntl = 0;
3989
3990 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
3991 speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
3992
3993 return (u16)speed_cntl;
3994}
3995
3996static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
3997{
3998 u32 link_width = 0;
3999
4000 link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
4001 link_width >>= LC_LINK_WIDTH_RD_SHIFT;
4002
4003 switch (link_width) {
4004 case RADEON_PCIE_LC_LINK_WIDTH_X1:
4005 return 1;
4006 case RADEON_PCIE_LC_LINK_WIDTH_X2:
4007 return 2;
4008 case RADEON_PCIE_LC_LINK_WIDTH_X4:
4009 return 4;
4010 case RADEON_PCIE_LC_LINK_WIDTH_X8:
4011 return 8;
4012 case RADEON_PCIE_LC_LINK_WIDTH_X12:
4013 /* not actually supported */
4014 return 12;
4015 case RADEON_PCIE_LC_LINK_WIDTH_X0:
4016 case RADEON_PCIE_LC_LINK_WIDTH_X16:
4017 default:
4018 return 16;
4019 }
4020}
4021
/* Ask the platform (via ACPI) for a higher pcie link speed before
 * switching to a state that needs it. If the request is rejected,
 * force a lower gen in pi->force_pcie_gen so the new state stays within
 * what the platform granted. Downshifts are deferred: we only note
 * that a post-switch notification is needed (pspp_notify_required). */
static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
							     struct radeon_ps *radeon_new_state,
							     struct radeon_ps *radeon_current_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	enum radeon_pcie_gen target_link_speed =
		ci_get_maximum_link_speed(rdev, radeon_new_state);
	enum radeon_pcie_gen current_link_speed;

	if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
		current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
	else
		current_link_speed = pi->force_pcie_gen;

	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
	pi->pspp_notify_required = false;
	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
		case RADEON_PCIE_GEN3:
			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
				break;
			pi->force_pcie_gen = RADEON_PCIE_GEN2;
			if (current_link_speed == RADEON_PCIE_GEN2)
				break;
			/* fall through - gen3 was refused, try gen2 */
		case RADEON_PCIE_GEN2:
			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
				break;
			/* fall through - request refused, keep current speed */
		default:
			pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			pi->pspp_notify_required = true;
	}
}
4058
/* After a state switch that lowered the required pcie speed, tell the
 * platform it may drop the link. Skipped when no downshift was flagged
 * by ci_request_link_speed_change_before_state_change(); a gen1 request
 * is also skipped while the link is still running above gen1. */
static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
							   struct radeon_ps *radeon_new_state,
							   struct radeon_ps *radeon_current_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	enum radeon_pcie_gen target_link_speed =
		ci_get_maximum_link_speed(rdev, radeon_new_state);
	u8 request;

	if (pi->pspp_notify_required) {
		if (target_link_speed == RADEON_PCIE_GEN3)
			request = PCIE_PERF_REQ_PECI_GEN3;
		else if (target_link_speed == RADEON_PCIE_GEN2)
			request = PCIE_PERF_REQ_PECI_GEN2;
		else
			request = PCIE_PERF_REQ_PECI_GEN1;

		/* speed field > 0 means the link is above gen1 right now */
		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
		    (ci_get_current_pcie_speed(rdev) > 0))
			return;

		radeon_acpi_pcie_performance_request(rdev, request, false);
	}
}
4083
4084static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
4085{
4086 struct ci_power_info *pi = ci_get_pi(rdev);
4087 struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
4088 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
4089 struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
4090 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
4091 struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
4092 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
4093
4094 if (allowed_sclk_vddc_table == NULL)
4095 return -EINVAL;
4096 if (allowed_sclk_vddc_table->count < 1)
4097 return -EINVAL;
4098 if (allowed_mclk_vddc_table == NULL)
4099 return -EINVAL;
4100 if (allowed_mclk_vddc_table->count < 1)
4101 return -EINVAL;
4102 if (allowed_mclk_vddci_table == NULL)
4103 return -EINVAL;
4104 if (allowed_mclk_vddci_table->count < 1)
4105 return -EINVAL;
4106
4107 pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
4108 pi->max_vddc_in_pp_table =
4109 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4110
4111 pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
4112 pi->max_vddci_in_pp_table =
4113 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4114
4115 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
4116 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4117 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
4118 allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4119 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
4120 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4121 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
4122 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4123
4124 return 0;
4125}
4126
4127static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
4128{
4129 struct ci_power_info *pi = ci_get_pi(rdev);
4130 struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
4131 u32 leakage_index;
4132
4133 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4134 if (leakage_table->leakage_id[leakage_index] == *vddc) {
4135 *vddc = leakage_table->actual_voltage[leakage_index];
4136 break;
4137 }
4138 }
4139}
4140
4141static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
4142{
4143 struct ci_power_info *pi = ci_get_pi(rdev);
4144 struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
4145 u32 leakage_index;
4146
4147 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4148 if (leakage_table->leakage_id[leakage_index] == *vddci) {
4149 *vddci = leakage_table->actual_voltage[leakage_index];
4150 break;
4151 }
4152 }
4153}
4154
4155static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4156 struct radeon_clock_voltage_dependency_table *table)
4157{
4158 u32 i;
4159
4160 if (table) {
4161 for (i = 0; i < table->count; i++)
4162 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4163 }
4164}
4165
4166static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
4167 struct radeon_clock_voltage_dependency_table *table)
4168{
4169 u32 i;
4170
4171 if (table) {
4172 for (i = 0; i < table->count; i++)
4173 ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
4174 }
4175}
4176
4177static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4178 struct radeon_vce_clock_voltage_dependency_table *table)
4179{
4180 u32 i;
4181
4182 if (table) {
4183 for (i = 0; i < table->count; i++)
4184 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4185 }
4186}
4187
4188static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4189 struct radeon_uvd_clock_voltage_dependency_table *table)
4190{
4191 u32 i;
4192
4193 if (table) {
4194 for (i = 0; i < table->count; i++)
4195 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4196 }
4197}
4198
4199static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
4200 struct radeon_phase_shedding_limits_table *table)
4201{
4202 u32 i;
4203
4204 if (table) {
4205 for (i = 0; i < table->count; i++)
4206 ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
4207 }
4208}
4209
4210static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
4211 struct radeon_clock_and_voltage_limits *table)
4212{
4213 if (table) {
4214 ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
4215 ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
4216 }
4217}
4218
4219static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
4220 struct radeon_cac_leakage_table *table)
4221{
4222 u32 i;
4223
4224 if (table) {
4225 for (i = 0; i < table->count; i++)
4226 ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
4227 }
4228}
4229
/* Walk every powerplay dependency/limit table and replace leakage-ID
 * placeholder voltages with the real measured voltages for this part.
 * Must run before any of these tables are consumed to build dpm levels. */
static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
{

	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
	ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
								   &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
	ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								      &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
	ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								      &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
	ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
							       &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
	ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
						     &rdev->pm.dpm.dyn_state.cac_leakage_table);

}
4259
4260static void ci_get_memory_type(struct radeon_device *rdev)
4261{
4262 struct ci_power_info *pi = ci_get_pi(rdev);
4263 u32 tmp;
4264
4265 tmp = RREG32(MC_SEQ_MISC0);
4266
4267 if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
4268 MC_SEQ_MISC0_GDDR5_VALUE)
4269 pi->mem_gddr5 = true;
4270 else
4271 pi->mem_gddr5 = false;
4272
4273}
4274
4275void ci_update_current_ps(struct radeon_device *rdev,
4276 struct radeon_ps *rps)
4277{
4278 struct ci_ps *new_ps = ci_get_ps(rps);
4279 struct ci_power_info *pi = ci_get_pi(rdev);
4280
4281 pi->current_rps = *rps;
4282 pi->current_ps = *new_ps;
4283 pi->current_rps.ps_priv = &pi->current_ps;
4284}
4285
4286void ci_update_requested_ps(struct radeon_device *rdev,
4287 struct radeon_ps *rps)
4288{
4289 struct ci_ps *new_ps = ci_get_ps(rps);
4290 struct ci_power_info *pi = ci_get_pi(rdev);
4291
4292 pi->requested_rps = *rps;
4293 pi->requested_ps = *new_ps;
4294 pi->requested_rps.ps_priv = &pi->requested_ps;
4295}
4296
/* dpm callback: snapshot the requested state and clamp its clocks,
 * voltages and pcie settings to the current limits before the actual
 * switch in ci_dpm_set_power_state(). */
int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	/* work on a local copy so the adjustments don't touch the table */
	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
	struct radeon_ps *new_ps = &requested_ps;

	ci_update_requested_ps(rdev, new_ps);

	ci_apply_state_adjust_rules(rdev, &pi->requested_rps);

	return 0;
}
4309
4310void ci_dpm_post_set_power_state(struct radeon_device *rdev)
4311{
4312 struct ci_power_info *pi = ci_get_pi(rdev);
4313 struct radeon_ps *new_ps = &pi->requested_rps;
4314
4315 ci_update_current_ps(rdev, new_ps);
4316}
4317
4318
/* One-time asic setup before dpm enable: cache clock registers, detect
 * the memory type, enable ACPI power management and init sclk_t. */
void ci_dpm_setup_asic(struct radeon_device *rdev)
{
	ci_read_clock_registers(rdev);
	ci_get_memory_type(rdev);
	ci_enable_acpi_power_management(rdev);
	ci_init_sclk_t(rdev);
}
4326
/* Bring up dynamic power management: build voltage and MC register
 * tables, upload SMC firmware and tables, start the SMC and dpm, then
 * enable the power/thermal features (ULV, deep sleep, DIDT, CAC, power
 * containment) and the thermal interrupt. The step order matters: the
 * SMC tables must be in place before ci_dpm_start_smc().
 * Returns a negative error code if any step fails; the SMC must not
 * already be running when this is called. */
int ci_dpm_enable(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
	int ret;

	if (ci_is_smc_running(rdev))
		return -EINVAL;
	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		ci_enable_voltage_control(rdev);
		ret = ci_construct_voltage_tables(rdev);
		if (ret) {
			DRM_ERROR("ci_construct_voltage_tables failed\n");
			return ret;
		}
	}
	if (pi->caps_dynamic_ac_timing) {
		/* non-fatal: fall back to static AC timing on failure */
		ret = ci_initialize_mc_reg_table(rdev);
		if (ret)
			pi->caps_dynamic_ac_timing = false;
	}
	if (pi->dynamic_ss)
		ci_enable_spread_spectrum(rdev, true);
	if (pi->thermal_protection)
		ci_enable_thermal_protection(rdev, true);
	ci_program_sstp(rdev);
	ci_enable_display_gap(rdev);
	ci_program_vc(rdev);
	ret = ci_upload_firmware(rdev);
	if (ret) {
		DRM_ERROR("ci_upload_firmware failed\n");
		return ret;
	}
	ret = ci_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("ci_process_firmware_header failed\n");
		return ret;
	}
	ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
	if (ret) {
		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
		return ret;
	}
	ret = ci_init_smc_table(rdev);
	if (ret) {
		DRM_ERROR("ci_init_smc_table failed\n");
		return ret;
	}
	ret = ci_init_arb_table_index(rdev);
	if (ret) {
		DRM_ERROR("ci_init_arb_table_index failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_populate_initial_mc_reg_table(rdev);
		if (ret) {
			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_populate_pm_base(rdev);
	if (ret) {
		DRM_ERROR("ci_populate_pm_base failed\n");
		return ret;
	}
	ci_dpm_start_smc(rdev);
	ci_enable_vr_hot_gpio_interrupt(rdev);
	ret = ci_notify_smc_display_change(rdev, false);
	if (ret) {
		DRM_ERROR("ci_notify_smc_display_change failed\n");
		return ret;
	}
	ci_enable_sclk_control(rdev, true);
	ret = ci_enable_ulv(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ulv failed\n");
		return ret;
	}
	ret = ci_enable_ds_master_switch(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ds_master_switch failed\n");
		return ret;
	}
	ret = ci_start_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_start_dpm failed\n");
		return ret;
	}
	ret = ci_enable_didt(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_didt failed\n");
		return ret;
	}
	ret = ci_enable_smc_cac(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_smc_cac failed\n");
		return ret;
	}
	ret = ci_enable_power_containment(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_power_containment failed\n");
		return ret;
	}
	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
#if 0
		PPSMC_Result result;
#endif
		ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret) {
			DRM_ERROR("ci_set_thermal_temperature_range failed\n");
			return ret;
		}
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
#if 0
		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);

		if (result != PPSMC_Result_OK)
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
#endif
	}

	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

	/* the hardware is currently at the VBIOS boot state */
	ci_update_current_ps(rdev, boot_ps);

	return 0;
}
4456
4457void ci_dpm_disable(struct radeon_device *rdev)
4458{
4459 struct ci_power_info *pi = ci_get_pi(rdev);
4460 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
4461
4462 if (!ci_is_smc_running(rdev))
4463 return;
4464
4465 if (pi->thermal_protection)
4466 ci_enable_thermal_protection(rdev, false);
4467 ci_enable_power_containment(rdev, false);
4468 ci_enable_smc_cac(rdev, false);
4469 ci_enable_didt(rdev, false);
4470 ci_enable_spread_spectrum(rdev, false);
4471 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
4472 ci_stop_dpm(rdev);
4473 ci_enable_ds_master_switch(rdev, true);
4474 ci_enable_ulv(rdev, false);
4475 ci_clear_vc(rdev);
4476 ci_reset_to_default(rdev);
4477 ci_dpm_stop_smc(rdev);
4478 ci_force_switch_to_arb_f0(rdev);
4479
4480 ci_update_current_ps(rdev, boot_ps);
4481}
4482
/* dpm callback: program the hardware for the requested power state.
 * Sequence: mark which dpm tables need updating, raise the pcie link
 * if needed, freeze sclk/mclk dpm while the level tables and MC
 * registers are re-uploaded, then unfreeze and apply the new level
 * enable mask. Link downshifts are notified only after the switch. */
int ci_dpm_set_power_state(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;
	struct radeon_ps *old_ps = &pi->current_rps;
	int ret;

	ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
	if (pi->pcie_performance_request)
		ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
	/* keep the SMC off the tables while we rewrite them */
	ret = ci_freeze_sclk_mclk_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
	if (ret) {
		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
		return ret;
	}
	ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
	if (ret) {
		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
		return ret;
	}
#if 0
	ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
	if (ret) {
		DRM_ERROR("ci_update_vce_dpm failed\n");
		return ret;
	}
#endif
	ret = ci_update_uvd_dpm(rdev, false);
	if (ret) {
		DRM_ERROR("ci_update_uvd_dpm failed\n");
		return ret;
	}
	ret = ci_update_sclk_t(rdev);
	if (ret) {
		DRM_ERROR("ci_update_sclk_t failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_update_and_upload_mc_reg_table(rdev);
		if (ret) {
			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_program_memory_timing_parameters(rdev);
	if (ret) {
		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
		return ret;
	}
	ret = ci_unfreeze_sclk_mclk_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_upload_dpm_level_enable_mask(rdev);
	if (ret) {
		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
		return ret;
	}
	if (pi->pcie_performance_request)
		ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);

	return 0;
}
4552
/* dpm callback: apply the current power containment/tdp level. */
int ci_dpm_power_control_set_level(struct radeon_device *rdev)
{
	return ci_power_control_set_level(rdev);
}
4557
/* dpm callback: force the hardware back to the boot power state. */
void ci_dpm_reset_asic(struct radeon_device *rdev)
{
	ci_set_boot_state(rdev);
}
4562
/* dpm callback: reprogram the display gap after a modeset/crtc change. */
void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
{
	ci_program_display_gap(rdev);
}
4567
/* Overlay for the ATOM PowerPlay data table; which member is valid
 * depends on the table revision reported by the VBIOS. */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};
4576
/* Overlay for per-level clock info entries; the asic family selects
 * the member (CI parsing uses .ci). */
union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
};
4585
/* Overlay for a PPLIB power state entry (v1 or v2 layout). */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
4590
/* Fill the generic radeon_ps fields from a PPLIB non-clock info entry:
 * caps/classification flags and (for rev > 1 tables) the UVD vclk/dclk.
 * Also records the boot and UVD states on the device as they are seen. */
static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
					  struct radeon_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
		rdev->pm.dpm.boot_ps = rps;
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		rdev->pm.dpm.uvd_ps = rps;
}
4613
/* Fill performance level 'index' of a CI power state from a PPLIB
 * clock info entry: engine/memory clocks (split low/high fields in the
 * table), pcie gen/lane counts clamped to what the system supports.
 * Side effects: records the ACPI-state pcie gen, captures the ULV
 * level, overrides the boot state with the VBIOS bootup values, and
 * accumulates the battery/performance pcie min/max ranges. */
static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
				      struct radeon_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl = &ps->performance_levels[index];

	ps->performance_level_count = index + 1;

	/* clocks are stored as a 16-bit low word plus an 8-bit high byte */
	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;

	pl->pcie_gen = r600_get_pcie_gen_support(rdev,
						 pi->sys_pcie_mask,
						 pi->vbios_boot_state.pcie_gen_bootup_value,
						 clock_info->ci.ucPCIEGen);
	pl->pcie_lane = r600_get_pcie_lane_support(rdev,
						   pi->vbios_boot_state.pcie_lane_bootup_value,
						   le16_to_cpu(clock_info->ci.usPCIELane));

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
		pi->acpi_pcie_gen = pl->pcie_gen;
	}

	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
		pi->ulv.supported = true;
		pi->ulv.pl = *pl;
		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
	}

	/* patch up boot state */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
	}

	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		pi->use_pcie_powersaving_levels = true;
		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
			pi->pcie_gen_powersaving.max = pl->pcie_gen;
		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
			pi->pcie_gen_powersaving.min = pl->pcie_gen;
		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
			pi->pcie_lane_powersaving.max = pl->pcie_lane;
		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
			pi->pcie_lane_powersaving.min = pl->pcie_lane;
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		pi->use_pcie_performance_levels = true;
		if (pi->pcie_gen_performance.max < pl->pcie_gen)
			pi->pcie_gen_performance.max = pl->pcie_gen;
		if (pi->pcie_gen_performance.min > pl->pcie_gen)
			pi->pcie_gen_performance.min = pl->pcie_gen;
		if (pi->pcie_lane_performance.max < pl->pcie_lane)
			pi->pcie_lane_performance.max = pl->pcie_lane;
		if (pi->pcie_lane_performance.min > pl->pcie_lane)
			pi->pcie_lane_performance.min = pl->pcie_lane;
		break;
	default:
		break;
	}
}
4682
4683static int ci_parse_power_table(struct radeon_device *rdev)
4684{
4685 struct radeon_mode_info *mode_info = &rdev->mode_info;
4686 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
4687 union pplib_power_state *power_state;
4688 int i, j, k, non_clock_array_index, clock_array_index;
4689 union pplib_clock_info *clock_info;
4690 struct _StateArray *state_array;
4691 struct _ClockInfoArray *clock_info_array;
4692 struct _NonClockInfoArray *non_clock_info_array;
4693 union power_info *power_info;
4694 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
4695 u16 data_offset;
4696 u8 frev, crev;
4697 u8 *power_state_offset;
4698 struct ci_ps *ps;
4699
4700 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
4701 &frev, &crev, &data_offset))
4702 return -EINVAL;
4703 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
4704
4705 state_array = (struct _StateArray *)
4706 (mode_info->atom_context->bios + data_offset +
4707 le16_to_cpu(power_info->pplib.usStateArrayOffset));
4708 clock_info_array = (struct _ClockInfoArray *)
4709 (mode_info->atom_context->bios + data_offset +
4710 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
4711 non_clock_info_array = (struct _NonClockInfoArray *)
4712 (mode_info->atom_context->bios + data_offset +
4713 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
4714
4715 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
4716 state_array->ucNumEntries, GFP_KERNEL);
4717 if (!rdev->pm.dpm.ps)
4718 return -ENOMEM;
4719 power_state_offset = (u8 *)state_array->states;
4720 rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
4721 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
4722 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
4723 for (i = 0; i < state_array->ucNumEntries; i++) {
4724 power_state = (union pplib_power_state *)power_state_offset;
4725 non_clock_array_index = power_state->v2.nonClockInfoIndex;
4726 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
4727 &non_clock_info_array->nonClockInfo[non_clock_array_index];
4728 if (!rdev->pm.power_state[i].clock_info)
4729 return -EINVAL;
4730 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
4731 if (ps == NULL) {
4732 kfree(rdev->pm.dpm.ps);
4733 return -ENOMEM;
4734 }
4735 rdev->pm.dpm.ps[i].ps_priv = ps;
4736 ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
4737 non_clock_info,
4738 non_clock_info_array->ucEntrySize);
4739 k = 0;
4740 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
4741 clock_array_index = power_state->v2.clockInfoIndex[j];
4742 if (clock_array_index >= clock_info_array->ucNumEntries)
4743 continue;
4744 if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
4745 break;
4746 clock_info = (union pplib_clock_info *)
4747 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
4748 ci_parse_pplib_clock_info(rdev,
4749 &rdev->pm.dpm.ps[i], k,
4750 clock_info);
4751 k++;
4752 }
4753 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
4754 }
4755 rdev->pm.dpm.num_ps = state_array->ucNumEntries;
4756 return 0;
4757}
4758
/* Read the VBIOS bootup voltages and clocks from the ATOM FirmwareInfo
 * table, and capture the current pcie speed/lane count as the bootup
 * link configuration. Returns -EINVAL if the table cannot be parsed. */
int ci_get_vbios_boot_values(struct radeon_device *rdev,
			     struct ci_vbios_boot_state *boot_state)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
	u8 frev, crev;
	u16 data_offset;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		firmware_info =
			(ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
						    data_offset);
		boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
		boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
		boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
		/* pcie boot config comes from the live registers, not the table */
		boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
		boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
		boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
		boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);

		return 0;
	}
	return -EINVAL;
}
4785
/* ci_dpm_fini - tear down all CI dpm state.
 * Frees the per-power-state private data, the power state array, the
 * ci_power_info (rdev->pm.dpm.priv) and the synthesized dispclk
 * dependency table, then releases the extended power tables.
 * Safe to call on a partially-initialized setup: kfree(NULL) is a no-op,
 * so pointers that were never allocated are harmless here.
 */
4786void ci_dpm_fini(struct radeon_device *rdev)
4787{
4788	int i;
4789
4790	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
4791		kfree(rdev->pm.dpm.ps[i].ps_priv);
4792	}
4793	kfree(rdev->pm.dpm.ps);
4794	kfree(rdev->pm.dpm.priv);
4795	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
4796	r600_free_extended_power_table(rdev);
4797}
4798
/* ci_dpm_init - one-time dpm setup for CI dGPUs.
 * Allocates the ci_power_info, parses the VBIOS power tables, and fills
 * in driver defaults (activity targets, mclk thresholds, thermal trips,
 * voltage control method, spread spectrum caps).  On any failure after
 * the priv allocation, ci_dpm_fini() unwinds everything allocated so far.
 * Returns 0 on success or a negative errno.
 */
4799int ci_dpm_init(struct radeon_device *rdev)
4800{
4801	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
4802	u16 data_offset, size;
4803	u8 frev, crev;
4804	struct ci_power_info *pi;
4805	int ret;
4806	u32 mask;
4807
4808	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
4809	if (pi == NULL)
4810		return -ENOMEM;
4811	rdev->pm.dpm.priv = pi;
4812
	/* PCIe gen cap of the platform; treat lookup failure as "no caps". */
4813	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
4814	if (ret)
4815		pi->sys_pcie_mask = 0;
4816	else
4817		pi->sys_pcie_mask = mask;
4818	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
4819
	/* NOTE(review): max/min look inverted on purpose — they appear to be
	 * seeded at the extremes and then widened as power states are
	 * examined elsewhere; confirm against the state-parsing code. */
4820	pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
4821	pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
4822	pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
4823	pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;
4824
4825	pi->pcie_lane_performance.max = 0;
4826	pi->pcie_lane_performance.min = 16;
4827	pi->pcie_lane_powersaving.max = 0;
4828	pi->pcie_lane_powersaving.min = 16;
4829
4830	ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
4831	if (ret) {
4832		ci_dpm_fini(rdev);
4833		return ret;
4834	}
4835	ret = ci_parse_power_table(rdev);
4836	if (ret) {
4837		ci_dpm_fini(rdev);
4838		return ret;
4839	}
4840	ret = r600_parse_extended_power_table(rdev);
4841	if (ret) {
4842		ci_dpm_fini(rdev);
4843		return ret;
4844	}
4845
4846	pi->dll_default_on = false;
4847	pi->sram_end = SMC_RAM_END;
4848
	/* Default busy-percentage target for each of the 8 sclk dpm levels. */
4849	pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
4850	pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
4851	pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
4852	pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
4853	pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
4854	pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
4855	pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
4856	pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
4857
4858	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
4859
	/* 0 == dpm enabled for that clock domain. */
4860	pi->sclk_dpm_key_disabled = 0;
4861	pi->mclk_dpm_key_disabled = 0;
4862	pi->pcie_dpm_key_disabled = 0;
4863
4864	pi->caps_sclk_ds = true;
4865
	/* mclk thresholds, in 10 kHz units presumably (matches other radeon
	 * dpm code) — TODO confirm. */
4866	pi->mclk_strobe_mode_threshold = 40000;
4867	pi->mclk_stutter_mode_threshold = 40000;
4868	pi->mclk_edc_enable_threshold = 40000;
4869	pi->mclk_edc_wr_enable_threshold = 40000;
4870
4871	ci_initialize_powertune_defaults(rdev);
4872
4873	pi->caps_fps = false;
4874
4875	pi->caps_sclk_throttle_low_notification = false;
4876
4877	ci_get_leakage_voltages(rdev);
4878	ci_patch_dependency_tables_with_leakage(rdev);
4879	ci_set_private_data_variables_based_on_pptable(rdev);
4880
	/* Synthesize a 4-entry dispclk->vddc dependency table (freed in
	 * ci_dpm_fini()). */
4881	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
4882		kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
4883	if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
4884		ci_dpm_fini(rdev);
4885		return -ENOMEM;
4886	}
4887	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
4888	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
4889	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
4890	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
4891	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
4892	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
4893	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
4894	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
4895	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
4896
4897	rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
4898	rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
4899	rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
4900
4901	rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
4902	rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
4903	rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
4904	rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
4905
	/* Thermal trip points in millidegrees C. */
4906	pi->thermal_temp_setting.temperature_low = 99500;
4907	pi->thermal_temp_setting.temperature_high = 100000;
4908	pi->thermal_temp_setting.temperature_shutdown = 104000;
4909
4910	pi->uvd_enabled = false;
4911
	/* Pick the voltage control method the board supports: GPIO LUT
	 * preferred, then SVID2; otherwise leave it at NONE. */
4912	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
4913	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
4914	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
4915	if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
4916		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
4917	else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
4918		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
4919
4920	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
4921		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
4922			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
4923		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
4924			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
4925		else
			/* No usable control path: drop the platform cap so the
			 * rest of the driver does not try to use it. */
4926			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
4927	}
4928
4929	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
4930		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
4931			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
4932		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
4933			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
4934		else
4935			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
4936	}
4937
4938	pi->vddc_phase_shed_control = true;
4939
4940#if defined(CONFIG_ACPI)
4941	pi->pcie_performance_request =
4942		radeon_acpi_is_pcie_performance_request_supported(rdev);
4943#else
4944	pi->pcie_performance_request = false;
4945#endif
4946
	/* Spread spectrum caps depend on the ASIC_InternalSS_Info table
	 * being present.  Note dynamic_ss is set true in BOTH branches —
	 * only the ss-support caps actually vary with the table. */
4947	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
4948				   &frev, &crev, &data_offset)) {
4949		pi->caps_sclk_ss_support = true;
4950		pi->caps_mclk_ss_support = true;
4951		pi->dynamic_ss = true;
4952	} else {
4953		pi->caps_sclk_ss_support = false;
4954		pi->caps_mclk_ss_support = false;
4955		pi->dynamic_ss = true;
4956	}
4957
4958	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
4959		pi->thermal_protection = true;
4960	else
4961		pi->thermal_protection = false;
4962
4963	pi->caps_dynamic_ac_timing = true;
4964
4965	return 0;
4966}
4967
/* ci_dpm_print_power_state - dump a power state to the kernel log.
 * Prints class/cap info, UVD clocks, and one line per performance level
 * (sclk, mclk, pcie gen/lanes).  pcie_gen is stored zero-based, hence
 * the "+ 1" when printing.
 */
4968void ci_dpm_print_power_state(struct radeon_device *rdev,
4969			      struct radeon_ps *rps)
4970{
4971	struct ci_ps *ps = ci_get_ps(rps);
4972	struct ci_pl *pl;
4973	int i;
4974
4975	r600_dpm_print_class_info(rps->class, rps->class2);
4976	r600_dpm_print_cap_info(rps->caps);
4977	printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
4978	for (i = 0; i < ps->performance_level_count; i++) {
4979		pl = &ps->performance_levels[i];
4980		printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
4981		       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
4982	}
4983	r600_dpm_print_ps_status(rdev, rps);
4984}
4985
/* ci_dpm_get_sclk - report the requested state's engine clock.
 * @low: true returns the lowest performance level's sclk, false the
 * highest.  Reads pi->requested_rps, so it reflects the state being
 * transitioned to, not necessarily the one currently active.
 */
4986u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
4987{
4988	struct ci_power_info *pi = ci_get_pi(rdev);
4989	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
4990
4991	if (low)
4992		return requested_state->performance_levels[0].sclk;
4993	else
4994		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
4995}
4996
/* ci_dpm_get_mclk - report the requested state's memory clock.
 * Mirror of ci_dpm_get_sclk(): @low selects the lowest level's mclk,
 * otherwise the highest level's mclk of the requested power state.
 */
4997u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
4998{
4999	struct ci_power_info *pi = ci_get_pi(rdev);
5000	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5001
5002	if (low)
5003		return requested_state->performance_levels[0].mclk;
5004	else
5005		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
5006}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.h b/drivers/gpu/drm/radeon/ci_dpm.h
new file mode 100644
index 000000000000..de504b5ac33f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ci_dpm.h
@@ -0,0 +1,331 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __CI_DPM_H__
24#define __CI_DPM_H__
25
26#include "ppsmc.h"
27
28#define SMU__NUM_SCLK_DPM_STATE 8
29#define SMU__NUM_MCLK_DPM_LEVELS 6
30#define SMU__NUM_LCLK_DPM_LEVELS 8
31#define SMU__NUM_PCIE_DPM_LEVELS 8
32#include "smu7_discrete.h"
33
34#define CISLANDS_MAX_HARDWARE_POWERLEVELS 2
35
/* ci_pl - a single hardware performance level: memory/engine clocks plus
 * the PCIe gen and lane count to use at that level. */
 36struct ci_pl {
 37	u32 mclk;
 38	u32 sclk;
 39	enum radeon_pcie_gen pcie_gen;
 40	u16 pcie_lane;
 41};
 42
/* ci_ps - driver-private power state: up to
 * CISLANDS_MAX_HARDWARE_POWERLEVELS performance levels, a DC-compatible
 * flag, and the sclk transition threshold sclk_t. */
 43struct ci_ps {
 44	u16 performance_level_count;
 45	bool dc_compatible;
 46	u32 sclk_t;
 47	struct ci_pl performance_levels[CISLANDS_MAX_HARDWARE_POWERLEVELS];
 48};
49
50struct ci_dpm_level {
51 bool enabled;
52 u32 value;
53 u32 param1;
54};
55
56#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
57#define MAX_REGULAR_DPM_NUMBER 8
58#define CISLAND_MINIMUM_ENGINE_CLOCK 800
59
60struct ci_single_dpm_table {
61 u32 count;
62 struct ci_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
63};
64
65struct ci_dpm_table {
66 struct ci_single_dpm_table sclk_table;
67 struct ci_single_dpm_table mclk_table;
68 struct ci_single_dpm_table pcie_speed_table;
69 struct ci_single_dpm_table vddc_table;
70 struct ci_single_dpm_table vddci_table;
71 struct ci_single_dpm_table mvdd_table;
72};
73
74struct ci_mc_reg_entry {
75 u32 mclk_max;
76 u32 mc_data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
77};
78
79struct ci_mc_reg_table {
80 u8 last;
81 u8 num_entries;
82 u16 valid_flag;
83 struct ci_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
84 SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
85};
86
87struct ci_ulv_parm
88{
89 bool supported;
90 u32 cg_ulv_parameter;
91 u32 volt_change_delay;
92 struct ci_pl pl;
93};
94
95#define CISLANDS_MAX_LEAKAGE_COUNT 8
96
97struct ci_leakage_voltage {
98 u16 count;
99 u16 leakage_id[CISLANDS_MAX_LEAKAGE_COUNT];
100 u16 actual_voltage[CISLANDS_MAX_LEAKAGE_COUNT];
101};
102
103struct ci_dpm_level_enable_mask {
104 u32 uvd_dpm_enable_mask;
105 u32 vce_dpm_enable_mask;
106 u32 acp_dpm_enable_mask;
107 u32 samu_dpm_enable_mask;
108 u32 sclk_dpm_enable_mask;
109 u32 mclk_dpm_enable_mask;
110 u32 pcie_dpm_enable_mask;
111};
112
/* ci_vbios_boot_state - snapshot of the VBIOS boot-up configuration,
 * filled by ci_get_vbios_boot_values(): bootup voltages, default
 * engine/memory clocks, and the PCIe gen/lane settings sampled from
 * hardware at init time. */
113struct ci_vbios_boot_state
114{
115	u16 mvdd_bootup_value;
116	u16 vddc_bootup_value;
117	u16 vddci_bootup_value;
118	u32 sclk_bootup_value;
119	u32 mclk_bootup_value;
120	u16 pcie_gen_bootup_value;
121	u16 pcie_lane_bootup_value;
122};
123
124struct ci_clock_registers {
125 u32 cg_spll_func_cntl;
126 u32 cg_spll_func_cntl_2;
127 u32 cg_spll_func_cntl_3;
128 u32 cg_spll_func_cntl_4;
129 u32 cg_spll_spread_spectrum;
130 u32 cg_spll_spread_spectrum_2;
131 u32 dll_cntl;
132 u32 mclk_pwrmgt_cntl;
133 u32 mpll_ad_func_cntl;
134 u32 mpll_dq_func_cntl;
135 u32 mpll_func_cntl;
136 u32 mpll_func_cntl_1;
137 u32 mpll_func_cntl_2;
138 u32 mpll_ss1;
139 u32 mpll_ss2;
140};
141
142struct ci_thermal_temperature_setting {
143 s32 temperature_low;
144 s32 temperature_high;
145 s32 temperature_shutdown;
146};
147
148struct ci_pcie_perf_range {
149 u16 max;
150 u16 min;
151};
152
153enum ci_pt_config_reg_type {
154 CISLANDS_CONFIGREG_MMR = 0,
155 CISLANDS_CONFIGREG_SMC_IND,
156 CISLANDS_CONFIGREG_DIDT_IND,
157 CISLANDS_CONFIGREG_CACHE,
158 CISLANDS_CONFIGREG_MAX
159};
160
161#define POWERCONTAINMENT_FEATURE_BAPM 0x00000001
162#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
163#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
164
165struct ci_pt_config_reg {
166 u32 offset;
167 u32 mask;
168 u32 shift;
169 u32 value;
170 enum ci_pt_config_reg_type type;
171};
172
173struct ci_pt_defaults {
174 u8 svi_load_line_en;
175 u8 svi_load_line_vddc;
176 u8 tdc_vddc_throttle_release_limit_perc;
177 u8 tdc_mawt;
178 u8 tdc_waterfall_ctl;
179 u8 dte_ambient_temp_base;
180 u32 display_cac;
181 u32 bapm_temp_gradient;
182 u16 bapmti_r[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
183 u16 bapmti_rc[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
184};
185
186#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
187#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
188#define DPMTABLE_UPDATE_SCLK 0x00000004
189#define DPMTABLE_UPDATE_MCLK 0x00000008
190
191struct ci_power_info {
192 struct ci_dpm_table dpm_table;
193 u32 voltage_control;
194 u32 mvdd_control;
195 u32 vddci_control;
196 u32 active_auto_throttle_sources;
197 struct ci_clock_registers clock_registers;
198 u16 acpi_vddc;
199 u16 acpi_vddci;
200 enum radeon_pcie_gen force_pcie_gen;
201 enum radeon_pcie_gen acpi_pcie_gen;
202 struct ci_leakage_voltage vddc_leakage;
203 struct ci_leakage_voltage vddci_leakage;
204 u16 max_vddc_in_pp_table;
205 u16 min_vddc_in_pp_table;
206 u16 max_vddci_in_pp_table;
207 u16 min_vddci_in_pp_table;
208 u32 mclk_strobe_mode_threshold;
209 u32 mclk_stutter_mode_threshold;
210 u32 mclk_edc_enable_threshold;
211 u32 mclk_edc_wr_enable_threshold;
212 struct ci_vbios_boot_state vbios_boot_state;
213 /* smc offsets */
214 u32 sram_end;
215 u32 dpm_table_start;
216 u32 soft_regs_start;
217 u32 mc_reg_table_start;
218 u32 fan_table_start;
219 u32 arb_table_start;
220 /* smc tables */
221 SMU7_Discrete_DpmTable smc_state_table;
222 SMU7_Discrete_MCRegisters smc_mc_reg_table;
223 SMU7_Discrete_PmFuses smc_powertune_table;
224 /* other stuff */
225 struct ci_mc_reg_table mc_reg_table;
226 struct atom_voltage_table vddc_voltage_table;
227 struct atom_voltage_table vddci_voltage_table;
228 struct atom_voltage_table mvdd_voltage_table;
229 struct ci_ulv_parm ulv;
230 u32 power_containment_features;
231 const struct ci_pt_defaults *powertune_defaults;
232 u32 dte_tj_offset;
233 bool vddc_phase_shed_control;
234 struct ci_thermal_temperature_setting thermal_temp_setting;
235 struct ci_dpm_level_enable_mask dpm_level_enable_mask;
236 u32 need_update_smu7_dpm_table;
237 u32 sclk_dpm_key_disabled;
238 u32 mclk_dpm_key_disabled;
239 u32 pcie_dpm_key_disabled;
240 struct ci_pcie_perf_range pcie_gen_performance;
241 struct ci_pcie_perf_range pcie_lane_performance;
242 struct ci_pcie_perf_range pcie_gen_powersaving;
243 struct ci_pcie_perf_range pcie_lane_powersaving;
244 u32 activity_target[SMU7_MAX_LEVELS_GRAPHICS];
245 u32 mclk_activity_target;
246 u32 low_sclk_interrupt_t;
247 u32 last_mclk_dpm_enable_mask;
248 u32 sys_pcie_mask;
249 /* caps */
250 bool caps_power_containment;
251 bool caps_cac;
252 bool caps_sq_ramping;
253 bool caps_db_ramping;
254 bool caps_td_ramping;
255 bool caps_tcp_ramping;
256 bool caps_fps;
257 bool caps_sclk_ds;
258 bool caps_sclk_ss_support;
259 bool caps_mclk_ss_support;
260 bool caps_uvd_dpm;
261 bool caps_vce_dpm;
262 bool caps_samu_dpm;
263 bool caps_acp_dpm;
264 bool caps_automatic_dc_transition;
265 bool caps_sclk_throttle_low_notification;
266 bool caps_dynamic_ac_timing;
267 /* flags */
268 bool thermal_protection;
269 bool pcie_performance_request;
270 bool dynamic_ss;
271 bool dll_default_on;
272 bool cac_enabled;
273 bool uvd_enabled;
274 bool battery_state;
275 bool pspp_notify_required;
276 bool mem_gddr5;
277 bool enable_bapm_feature;
278 bool enable_tdc_limit_feature;
279 bool enable_pkg_pwr_tracking_feature;
280 bool use_pcie_performance_levels;
281 bool use_pcie_powersaving_levels;
282 /* driver states */
283 struct radeon_ps current_rps;
284 struct ci_ps current_ps;
285 struct radeon_ps requested_rps;
286 struct ci_ps requested_ps;
287};
288
289#define CISLANDS_VOLTAGE_CONTROL_NONE 0x0
290#define CISLANDS_VOLTAGE_CONTROL_BY_GPIO 0x1
291#define CISLANDS_VOLTAGE_CONTROL_BY_SVID2 0x2
292
293#define CISLANDS_Q88_FORMAT_CONVERSION_UNIT 256
294
295#define CISLANDS_VRC_DFLT0 0x3FFFC000
296#define CISLANDS_VRC_DFLT1 0x000400
297#define CISLANDS_VRC_DFLT2 0xC00080
298#define CISLANDS_VRC_DFLT3 0xC00200
299#define CISLANDS_VRC_DFLT4 0xC01680
300#define CISLANDS_VRC_DFLT5 0xC00033
301#define CISLANDS_VRC_DFLT6 0xC00033
302#define CISLANDS_VRC_DFLT7 0x3FFFC000
303
304#define CISLANDS_CGULVPARAMETER_DFLT 0x00040035
305#define CISLAND_TARGETACTIVITY_DFLT 30
306#define CISLAND_MCLK_TARGETACTIVITY_DFLT 10
307
308#define PCIE_PERF_REQ_REMOVE_REGISTRY 0
309#define PCIE_PERF_REQ_FORCE_LOWPOWER 1
310#define PCIE_PERF_REQ_PECI_GEN1 2
311#define PCIE_PERF_REQ_PECI_GEN2 3
312#define PCIE_PERF_REQ_PECI_GEN3 4
313
314int ci_copy_bytes_to_smc(struct radeon_device *rdev,
315 u32 smc_start_address,
316 const u8 *src, u32 byte_count, u32 limit);
317void ci_start_smc(struct radeon_device *rdev);
318void ci_reset_smc(struct radeon_device *rdev);
319int ci_program_jump_on_start(struct radeon_device *rdev);
320void ci_stop_smc_clock(struct radeon_device *rdev);
321void ci_start_smc_clock(struct radeon_device *rdev);
322bool ci_is_smc_running(struct radeon_device *rdev);
323PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
324PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev);
325int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit);
326int ci_read_smc_sram_dword(struct radeon_device *rdev,
327 u32 smc_address, u32 *value, u32 limit);
328int ci_write_smc_sram_dword(struct radeon_device *rdev,
329 u32 smc_address, u32 value, u32 limit);
330
331#endif
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
new file mode 100644
index 000000000000..53b43dd3cf1e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -0,0 +1,262 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include <linux/firmware.h>
26#include "drmP.h"
27#include "radeon.h"
28#include "cikd.h"
29#include "ppsmc.h"
30#include "radeon_ucode.h"
31
/* ci_set_smc_sram_address - program the SMC indirect-access index.
 * @smc_address must be dword-aligned and the full dword must lie below
 * @limit.  Also clears the auto-increment bit so subsequent
 * SMC_IND_DATA_0 accesses stay at this address.
 * Returns 0 on success, -EINVAL on a bad address.
 */
 32static int ci_set_smc_sram_address(struct radeon_device *rdev,
 33				   u32 smc_address, u32 limit)
 34{
 35	if (smc_address & 3)
 36		return -EINVAL;
 37	if ((smc_address + 3) > limit)
 38		return -EINVAL;
 39
 40	WREG32(SMC_IND_INDEX_0, smc_address);
 41	WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
 42
 43	return 0;
 44}
45
/* ci_copy_bytes_to_smc - copy a buffer into SMC SRAM.
 * Writes @byte_count bytes from @src to @smc_start_address (must be
 * dword-aligned; the whole range must fit below @limit).  Whole dwords
 * are written big-endian, as the SMC address space expects; a trailing
 * partial dword is merged with the existing SRAM contents via
 * read-modify-write so bytes past the buffer are preserved.
 * Returns 0 on success or -EINVAL on a bad address/range.
 */
 46int ci_copy_bytes_to_smc(struct radeon_device *rdev,
 47			 u32 smc_start_address,
 48			 const u8 *src, u32 byte_count, u32 limit)
 49{
 50	u32 data, original_data;
 51	u32 addr;
 52	u32 extra_shift;
 53	int ret;
 54
 55	if (smc_start_address & 3)
 56		return -EINVAL;
 57	if ((smc_start_address + byte_count) > limit)
 58		return -EINVAL;
 59
 60	addr = smc_start_address;
 61
 62	while (byte_count >= 4) {
 63		/* SMC address space is BE */
 64		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
 65
 66		ret = ci_set_smc_sram_address(rdev, addr, limit);
 67		if (ret)
 68			return ret;
 69
 70		WREG32(SMC_IND_DATA_0, data);
 71
 72		src += 4;
 73		byte_count -= 4;
 74		addr += 4;
 75	}
 76
 77	/* RMW for the final bytes */
 78	if (byte_count > 0) {
 79		data = 0;
 80
 81		ret = ci_set_smc_sram_address(rdev, addr, limit);
 82		if (ret)
 83			return ret;
 84
 85		original_data = RREG32(SMC_IND_DATA_0);
 86
 87		extra_shift = 8 * (4 - byte_count);
 88
		/* Pack the 1-3 remaining bytes into the high end of the
		 * dword, then fill the low bytes from the old contents. */
 89		while (byte_count > 0) {
 90			data = (data << 8) + *src++;
 91			byte_count--;
 92		}
 93
 94		data <<= extra_shift;
 95
 96		data |= (original_data & ~((~0UL) << extra_shift));
 97
		/* Re-set the index: the read above consumed it. */
 98		ret = ci_set_smc_sram_address(rdev, addr, limit);
 99		if (ret)
100			return ret;
101
102		WREG32(SMC_IND_DATA_0, data);
103	}
104	return 0;
105}
106
/* ci_start_smc - release the SMC from reset (clears RST_REG). */
107void ci_start_smc(struct radeon_device *rdev)
108{
109	u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
110
111	tmp &= ~RST_REG;
112	WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
113}
114
/* ci_reset_smc - hold the SMC in reset (sets RST_REG). */
115void ci_reset_smc(struct radeon_device *rdev)
116{
117	u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
118
119	tmp |= RST_REG;
120	WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
121}
122
/* ci_program_jump_on_start - write the SMC boot jump at SRAM offset 0.
 * The 4 bytes are presumably a jump instruction into the loaded SMC
 * firmware — TODO confirm against the SMC ISA docs.  NOTE(review): the
 * limit passed is sizeof(data)+1 (= 5); harmless for a 4-byte copy at
 * offset 0, but sizeof(data) would be the natural bound.
 */
123int ci_program_jump_on_start(struct radeon_device *rdev)
124{
125	static u8 data[] = { 0xE0, 0x00, 0x80, 0x40 };
126
127	return ci_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1);
128}
129
/* ci_stop_smc_clock - gate the SMC clock (sets CK_DISABLE). */
130void ci_stop_smc_clock(struct radeon_device *rdev)
131{
132	u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
133
134	tmp |= CK_DISABLE;
135
136	WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
137}
138
/* ci_start_smc_clock - ungate the SMC clock (clears CK_DISABLE). */
139void ci_start_smc_clock(struct radeon_device *rdev)
140{
141	u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
142
143	tmp &= ~CK_DISABLE;
144
145	WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
146}
147
/* ci_is_smc_running - check whether the SMC firmware is executing.
 * True when the SMC clock is not gated and the program counter has
 * advanced to at least 0x20100 (presumably past the boot ROM into the
 * loaded firmware — TODO confirm the threshold's meaning).
 */
148bool ci_is_smc_running(struct radeon_device *rdev)
149{
150	u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
151	u32 pc_c = RREG32_SMC(SMC_PC_C);
152
153	if (!(clk & CK_DISABLE) && (0x20100 <= pc_c))
154		return true;
155
156	return false;
157}
158
/* ci_send_msg_to_smc - post a message to the SMC and wait for a reply.
 * Fails immediately if the SMC is not running.  Polls SMC_RESP_0 for up
 * to rdev->usec_timeout microseconds for a non-zero response.
 * NOTE(review): on timeout the final re-read may still be 0, which is
 * returned as-is (not a defined PPSMC_Result code) — callers comparing
 * against PPSMC_Result_OK will correctly treat it as a failure.
 */
159PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
160{
161	u32 tmp;
162	int i;
163
164	if (!ci_is_smc_running(rdev))
165		return PPSMC_Result_Failed;
166
167	WREG32(SMC_MESSAGE_0, msg);
168
169	for (i = 0; i < rdev->usec_timeout; i++) {
170		tmp = RREG32(SMC_RESP_0);
171		if (tmp != 0)
172			break;
173		udelay(1);
174	}
175	tmp = RREG32(SMC_RESP_0);
176
177	return (PPSMC_Result)tmp;
178}
179
/* ci_wait_for_smc_inactive - wait for the SMC clock to gate (CKEN clear).
 * If the SMC is not running at all, it is already inactive.
 * NOTE(review): PPSMC_Result_OK is returned even if the poll loop times
 * out without CKEN clearing — best-effort wait, no error reported.
 */
180PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev)
181{
182	u32 tmp;
183	int i;
184
185	if (!ci_is_smc_running(rdev))
186		return PPSMC_Result_OK;
187
188	for (i = 0; i < rdev->usec_timeout; i++) {
189		tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
190		if ((tmp & CKEN) == 0)
191			break;
192		udelay(1);
193	}
194
195	return PPSMC_Result_OK;
196}
197
/* ci_load_smc_ucode - upload the SMC firmware image into SMC SRAM.
 * Uses auto-incrementing indirect writes starting at the per-ASIC ucode
 * load address; the image is written as big-endian dwords.  Requires
 * rdev->smc_fw to have been fetched by request_firmware() already.
 * Returns 0 on success, -EINVAL if no firmware is loaded or its size is
 * not dword-aligned.  BUG()s on an ASIC this loader does not know.
 * NOTE(review): @limit is accepted but never checked here — the bounds
 * are implied by the per-ASIC size constants.
 */
198int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
199{
200	u32 ucode_start_address;
201	u32 ucode_size;
202	const u8 *src;
203	u32 data;
204
205	if (!rdev->smc_fw)
206		return -EINVAL;
207
208	switch (rdev->family) {
209	case CHIP_BONAIRE:
210		ucode_start_address = BONAIRE_SMC_UCODE_START;
211		ucode_size = BONAIRE_SMC_UCODE_SIZE;
212		break;
213	default:
214		DRM_ERROR("unknown asic in smc ucode loader\n");
215		BUG();
216	}
217
218	if (ucode_size & 3)
219		return -EINVAL;
220
221	src = (const u8 *)rdev->smc_fw->data;
222	WREG32(SMC_IND_INDEX_0, ucode_start_address);
	/* Enable auto-increment so each data write advances the index. */
223	WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
224	while (ucode_size >= 4) {
225		/* SMC address space is BE */
226		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
227
228		WREG32(SMC_IND_DATA_0, data);
229
230		src += 4;
231		ucode_size -= 4;
232	}
233	WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
234
235	return 0;
236}
237
/* ci_read_smc_sram_dword - read one dword from SMC SRAM.
 * @smc_address must be dword-aligned and below @limit; the value is
 * stored in *@value.  Returns 0 or -EINVAL from the address check.
 */
238int ci_read_smc_sram_dword(struct radeon_device *rdev,
239			   u32 smc_address, u32 *value, u32 limit)
240{
241	int ret;
242
243	ret = ci_set_smc_sram_address(rdev, smc_address, limit);
244	if (ret)
245		return ret;
246
247	*value = RREG32(SMC_IND_DATA_0);
248	return 0;
249}
250
/* ci_write_smc_sram_dword - write one dword to SMC SRAM.
 * Counterpart of ci_read_smc_sram_dword(); same alignment/limit rules.
 * Returns 0 or -EINVAL from the address check.
 */
251int ci_write_smc_sram_dword(struct radeon_device *rdev,
252			    u32 smc_address, u32 value, u32 limit)
253{
254	int ret;
255
256	ret = ci_set_smc_sram_address(rdev, smc_address, limit);
257	if (ret)
258		return ret;
259
260	WREG32(SMC_IND_DATA_0, value);
261	return 0;
262}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 87e5aeed6e88..736a416b51a7 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -40,6 +40,7 @@ MODULE_FIRMWARE("radeon/BONAIRE_mec.bin");
40MODULE_FIRMWARE("radeon/BONAIRE_mc.bin"); 40MODULE_FIRMWARE("radeon/BONAIRE_mc.bin");
41MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin"); 41MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
42MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin"); 42MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
43MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");
43MODULE_FIRMWARE("radeon/KAVERI_pfp.bin"); 44MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
44MODULE_FIRMWARE("radeon/KAVERI_me.bin"); 45MODULE_FIRMWARE("radeon/KAVERI_me.bin");
45MODULE_FIRMWARE("radeon/KAVERI_ce.bin"); 46MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
@@ -1545,7 +1546,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
1545 const char *chip_name; 1546 const char *chip_name;
1546 size_t pfp_req_size, me_req_size, ce_req_size, 1547 size_t pfp_req_size, me_req_size, ce_req_size,
1547 mec_req_size, rlc_req_size, mc_req_size, 1548 mec_req_size, rlc_req_size, mc_req_size,
1548 sdma_req_size; 1549 sdma_req_size, smc_req_size;
1549 char fw_name[30]; 1550 char fw_name[30];
1550 int err; 1551 int err;
1551 1552
@@ -1561,6 +1562,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
1561 rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4; 1562 rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
1562 mc_req_size = CIK_MC_UCODE_SIZE * 4; 1563 mc_req_size = CIK_MC_UCODE_SIZE * 4;
1563 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; 1564 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
1565 smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
1564 break; 1566 break;
1565 case CHIP_KAVERI: 1567 case CHIP_KAVERI:
1566 chip_name = "KAVERI"; 1568 chip_name = "KAVERI";
@@ -1652,7 +1654,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
1652 err = -EINVAL; 1654 err = -EINVAL;
1653 } 1655 }
1654 1656
1655 /* No MC ucode on APUs */ 1657 /* No SMC, MC ucode on APUs */
1656 if (!(rdev->flags & RADEON_IS_IGP)) { 1658 if (!(rdev->flags & RADEON_IS_IGP)) {
1657 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); 1659 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
1658 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); 1660 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
@@ -1664,6 +1666,21 @@ static int cik_init_microcode(struct radeon_device *rdev)
1664 rdev->mc_fw->size, fw_name); 1666 rdev->mc_fw->size, fw_name);
1665 err = -EINVAL; 1667 err = -EINVAL;
1666 } 1668 }
1669
1670 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
1671 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
1672 if (err) {
1673 printk(KERN_ERR
1674 "smc: error loading firmware \"%s\"\n",
1675 fw_name);
1676 release_firmware(rdev->smc_fw);
1677 rdev->smc_fw = NULL;
1678 } else if (rdev->smc_fw->size != smc_req_size) {
1679 printk(KERN_ERR
1680 "cik_smc: Bogus length %zu in firmware \"%s\"\n",
1681 rdev->smc_fw->size, fw_name);
1682 err = -EINVAL;
1683 }
1667 } 1684 }
1668 1685
1669out: 1686out:
@@ -1682,6 +1699,8 @@ out:
1682 rdev->rlc_fw = NULL; 1699 rdev->rlc_fw = NULL;
1683 release_firmware(rdev->mc_fw); 1700 release_firmware(rdev->mc_fw);
1684 rdev->mc_fw = NULL; 1701 rdev->mc_fw = NULL;
1702 release_firmware(rdev->smc_fw);
1703 rdev->smc_fw = NULL;
1685 } 1704 }
1686 return err; 1705 return err;
1687} 1706}
@@ -6626,8 +6645,12 @@ int cik_irq_set(struct radeon_device *rdev)
6626 cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; 6645 cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
6627 cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; 6646 cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
6628 6647
6629 thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) & 6648 if (rdev->flags & RADEON_IS_IGP)
6630 ~(THERM_INTH_MASK | THERM_INTL_MASK); 6649 thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
6650 ~(THERM_INTH_MASK | THERM_INTL_MASK);
6651 else
6652 thermal_int = RREG32_SMC(CG_THERMAL_INT) &
6653 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
6631 6654
6632 /* enable CP interrupts on all rings */ 6655 /* enable CP interrupts on all rings */
6633 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { 6656 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
@@ -6788,7 +6811,10 @@ int cik_irq_set(struct radeon_device *rdev)
6788 6811
6789 if (rdev->irq.dpm_thermal) { 6812 if (rdev->irq.dpm_thermal) {
6790 DRM_DEBUG("dpm thermal\n"); 6813 DRM_DEBUG("dpm thermal\n");
6791 thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK; 6814 if (rdev->flags & RADEON_IS_IGP)
6815 thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
6816 else
6817 thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
6792 } 6818 }
6793 6819
6794 WREG32(CP_INT_CNTL_RING0, cp_int_cntl); 6820 WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
@@ -6825,7 +6851,10 @@ int cik_irq_set(struct radeon_device *rdev)
6825 WREG32(DC_HPD5_INT_CONTROL, hpd5); 6851 WREG32(DC_HPD5_INT_CONTROL, hpd5);
6826 WREG32(DC_HPD6_INT_CONTROL, hpd6); 6852 WREG32(DC_HPD6_INT_CONTROL, hpd6);
6827 6853
6828 WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int); 6854 if (rdev->flags & RADEON_IS_IGP)
6855 WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
6856 else
6857 WREG32_SMC(CG_THERMAL_INT, thermal_int);
6829 6858
6830 return 0; 6859 return 0;
6831} 6860}
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 179ca3625ae4..861fb3ec161c 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -36,6 +36,23 @@
36#define DIDT_TCP_CTRL0 0x60 36#define DIDT_TCP_CTRL0 0x60
37 37
38/* SMC IND registers */ 38/* SMC IND registers */
39#define DPM_TABLE_475 0x3F768
40# define SamuBootLevel(x) ((x) << 0)
41# define SamuBootLevel_MASK 0x000000ff
42# define SamuBootLevel_SHIFT 0
43# define AcpBootLevel(x) ((x) << 8)
44# define AcpBootLevel_MASK 0x0000ff00
45# define AcpBootLevel_SHIFT 8
46# define VceBootLevel(x) ((x) << 16)
47# define VceBootLevel_MASK 0x00ff0000
48# define VceBootLevel_SHIFT 16
49# define UvdBootLevel(x) ((x) << 24)
50# define UvdBootLevel_MASK 0xff000000
51# define UvdBootLevel_SHIFT 24
52
53#define FIRMWARE_FLAGS 0x3F800
54# define INTERRUPTS_ENABLED (1 << 0)
55
39#define NB_DPM_CONFIG_1 0x3F9E8 56#define NB_DPM_CONFIG_1 0x3F9E8
40# define Dpm0PgNbPsLo(x) ((x) << 0) 57# define Dpm0PgNbPsLo(x) ((x) << 0)
41# define Dpm0PgNbPsLo_MASK 0x000000ff 58# define Dpm0PgNbPsLo_MASK 0x000000ff
@@ -50,25 +67,85 @@
50# define DpmXNbPsHi_MASK 0xff000000 67# define DpmXNbPsHi_MASK 0xff000000
51# define DpmXNbPsHi_SHIFT 24 68# define DpmXNbPsHi_SHIFT 24
52 69
70#define SMC_SYSCON_RESET_CNTL 0x80000000
71# define RST_REG (1 << 0)
72#define SMC_SYSCON_CLOCK_CNTL_0 0x80000004
73# define CK_DISABLE (1 << 0)
74# define CKEN (1 << 24)
75
76#define SMC_SYSCON_MISC_CNTL 0x80000010
77
53#define SMC_SYSCON_MSG_ARG_0 0x80000068 78#define SMC_SYSCON_MSG_ARG_0 0x80000068
54 79
80#define SMC_PC_C 0x80000370
81
82#define SMC_SCRATCH9 0x80000424
83
84#define RCU_UC_EVENTS 0xC0000004
85# define BOOT_SEQ_DONE (1 << 7)
86
55#define GENERAL_PWRMGT 0xC0200000 87#define GENERAL_PWRMGT 0xC0200000
56# define GLOBAL_PWRMGT_EN (1 << 0) 88# define GLOBAL_PWRMGT_EN (1 << 0)
89# define STATIC_PM_EN (1 << 1)
90# define THERMAL_PROTECTION_DIS (1 << 2)
91# define THERMAL_PROTECTION_TYPE (1 << 3)
92# define SW_SMIO_INDEX(x) ((x) << 6)
93# define SW_SMIO_INDEX_MASK (1 << 6)
94# define SW_SMIO_INDEX_SHIFT 6
95# define VOLT_PWRMGT_EN (1 << 10)
57# define GPU_COUNTER_CLK (1 << 15) 96# define GPU_COUNTER_CLK (1 << 15)
97# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
98
99#define CNB_PWRMGT_CNTL 0xC0200004
100# define GNB_SLOW_MODE(x) ((x) << 0)
101# define GNB_SLOW_MODE_MASK (3 << 0)
102# define GNB_SLOW_MODE_SHIFT 0
103# define GNB_SLOW (1 << 2)
104# define FORCE_NB_PS1 (1 << 3)
105# define DPM_ENABLED (1 << 4)
58 106
59#define SCLK_PWRMGT_CNTL 0xC0200008 107#define SCLK_PWRMGT_CNTL 0xC0200008
108# define SCLK_PWRMGT_OFF (1 << 0)
60# define RESET_BUSY_CNT (1 << 4) 109# define RESET_BUSY_CNT (1 << 4)
61# define RESET_SCLK_CNT (1 << 5) 110# define RESET_SCLK_CNT (1 << 5)
62# define DYNAMIC_PM_EN (1 << 21) 111# define DYNAMIC_PM_EN (1 << 21)
63 112
113#define CG_SSP 0xC0200044
114# define SST(x) ((x) << 0)
115# define SST_MASK (0xffff << 0)
116# define SSTU(x) ((x) << 16)
117# define SSTU_MASK (0xf << 16)
118
119#define CG_DISPLAY_GAP_CNTL 0xC0200060
120# define DISP_GAP(x) ((x) << 0)
121# define DISP_GAP_MASK (3 << 0)
122# define VBI_TIMER_COUNT(x) ((x) << 4)
123# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
124# define VBI_TIMER_UNIT(x) ((x) << 20)
125# define VBI_TIMER_UNIT_MASK (7 << 20)
126# define DISP_GAP_MCHG(x) ((x) << 24)
127# define DISP_GAP_MCHG_MASK (3 << 24)
128
129#define CG_ULV_PARAMETER 0xC0200158
130
64#define CG_FTV_0 0xC02001A8 131#define CG_FTV_0 0xC02001A8
132#define CG_FTV_1 0xC02001AC
133#define CG_FTV_2 0xC02001B0
134#define CG_FTV_3 0xC02001B4
135#define CG_FTV_4 0xC02001B8
136#define CG_FTV_5 0xC02001BC
137#define CG_FTV_6 0xC02001C0
138#define CG_FTV_7 0xC02001C4
139
140#define CG_DISPLAY_GAP_CNTL2 0xC0200230
65 141
66#define LCAC_SX0_OVR_SEL 0xC0400D04 142#define LCAC_SX0_OVR_SEL 0xC0400D04
67#define LCAC_SX0_OVR_VAL 0xC0400D08 143#define LCAC_SX0_OVR_VAL 0xC0400D08
68 144
145#define LCAC_MC0_CNTL 0xC0400D30
69#define LCAC_MC0_OVR_SEL 0xC0400D34 146#define LCAC_MC0_OVR_SEL 0xC0400D34
70#define LCAC_MC0_OVR_VAL 0xC0400D38 147#define LCAC_MC0_OVR_VAL 0xC0400D38
71 148#define LCAC_MC1_CNTL 0xC0400D3C
72#define LCAC_MC1_OVR_SEL 0xC0400D40 149#define LCAC_MC1_OVR_SEL 0xC0400D40
73#define LCAC_MC1_OVR_VAL 0xC0400D44 150#define LCAC_MC1_OVR_VAL 0xC0400D44
74 151
@@ -78,9 +155,28 @@
78#define LCAC_MC3_OVR_SEL 0xC0400D58 155#define LCAC_MC3_OVR_SEL 0xC0400D58
79#define LCAC_MC3_OVR_VAL 0xC0400D5C 156#define LCAC_MC3_OVR_VAL 0xC0400D5C
80 157
158#define LCAC_CPL_CNTL 0xC0400D80
81#define LCAC_CPL_OVR_SEL 0xC0400D84 159#define LCAC_CPL_OVR_SEL 0xC0400D84
82#define LCAC_CPL_OVR_VAL 0xC0400D88 160#define LCAC_CPL_OVR_VAL 0xC0400D88
83 161
162/* dGPU */
163#define CG_THERMAL_CTRL 0xC0300004
164#define DPM_EVENT_SRC(x) ((x) << 0)
165#define DPM_EVENT_SRC_MASK (7 << 0)
166#define DIG_THERM_DPM(x) ((x) << 14)
167#define DIG_THERM_DPM_MASK 0x003FC000
168#define DIG_THERM_DPM_SHIFT 14
169
170#define CG_THERMAL_INT 0xC030000C
171#define CI_DIG_THERM_INTH(x) ((x) << 8)
172#define CI_DIG_THERM_INTH_MASK 0x0000FF00
173#define CI_DIG_THERM_INTH_SHIFT 8
174#define CI_DIG_THERM_INTL(x) ((x) << 16)
175#define CI_DIG_THERM_INTL_MASK 0x00FF0000
176#define CI_DIG_THERM_INTL_SHIFT 16
177#define THERM_INT_MASK_HIGH (1 << 24)
178#define THERM_INT_MASK_LOW (1 << 25)
179
84#define CG_MULT_THERMAL_STATUS 0xC0300014 180#define CG_MULT_THERMAL_STATUS 0xC0300014
85#define ASIC_MAX_TEMP(x) ((x) << 0) 181#define ASIC_MAX_TEMP(x) ((x) << 0)
86#define ASIC_MAX_TEMP_MASK 0x000001ff 182#define ASIC_MAX_TEMP_MASK 0x000001ff
@@ -89,6 +185,35 @@
89#define CTF_TEMP_MASK 0x0003fe00 185#define CTF_TEMP_MASK 0x0003fe00
90#define CTF_TEMP_SHIFT 9 186#define CTF_TEMP_SHIFT 9
91 187
188#define CG_SPLL_FUNC_CNTL 0xC0500140
189#define SPLL_RESET (1 << 0)
190#define SPLL_PWRON (1 << 1)
191#define SPLL_BYPASS_EN (1 << 3)
192#define SPLL_REF_DIV(x) ((x) << 5)
193#define SPLL_REF_DIV_MASK (0x3f << 5)
194#define SPLL_PDIV_A(x) ((x) << 20)
195#define SPLL_PDIV_A_MASK (0x7f << 20)
196#define SPLL_PDIV_A_SHIFT 20
197#define CG_SPLL_FUNC_CNTL_2 0xC0500144
198#define SCLK_MUX_SEL(x) ((x) << 0)
199#define SCLK_MUX_SEL_MASK (0x1ff << 0)
200#define CG_SPLL_FUNC_CNTL_3 0xC0500148
201#define SPLL_FB_DIV(x) ((x) << 0)
202#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
203#define SPLL_FB_DIV_SHIFT 0
204#define SPLL_DITHEN (1 << 28)
205#define CG_SPLL_FUNC_CNTL_4 0xC050014C
206
207#define CG_SPLL_SPREAD_SPECTRUM 0xC0500164
208#define SSEN (1 << 0)
209#define CLK_S(x) ((x) << 4)
210#define CLK_S_MASK (0xfff << 4)
211#define CLK_S_SHIFT 4
212#define CG_SPLL_SPREAD_SPECTRUM_2 0xC0500168
213#define CLK_V(x) ((x) << 0)
214#define CLK_V_MASK (0x3ffffff << 0)
215#define CLK_V_SHIFT 0
216
92#define MPLL_BYPASSCLK_SEL 0xC050019C 217#define MPLL_BYPASSCLK_SEL 0xC050019C
93# define MPLL_CLKOUT_SEL(x) ((x) << 8) 218# define MPLL_CLKOUT_SEL(x) ((x) << 8)
94# define MPLL_CLKOUT_SEL_MASK 0xFF00 219# define MPLL_CLKOUT_SEL_MASK 0xFF00
@@ -109,6 +234,7 @@
109# define ZCLK_SEL(x) ((x) << 8) 234# define ZCLK_SEL(x) ((x) << 8)
110# define ZCLK_SEL_MASK 0xFF00 235# define ZCLK_SEL_MASK 0xFF00
111 236
237/* KV/KB */
112#define CG_THERMAL_INT_CTRL 0xC2100028 238#define CG_THERMAL_INT_CTRL 0xC2100028
113#define DIG_THERM_INTH(x) ((x) << 0) 239#define DIG_THERM_INTH(x) ((x) << 0)
114#define DIG_THERM_INTH_MASK 0x000000FF 240#define DIG_THERM_INTH_MASK 0x000000FF
@@ -437,9 +563,37 @@
437#define NOOFGROUPS_SHIFT 12 563#define NOOFGROUPS_SHIFT 12
438#define NOOFGROUPS_MASK 0x00001000 564#define NOOFGROUPS_MASK 0x00001000
439 565
566#define MC_ARB_DRAM_TIMING 0x2774
567#define MC_ARB_DRAM_TIMING2 0x2778
568
569#define MC_ARB_BURST_TIME 0x2808
570#define STATE0(x) ((x) << 0)
571#define STATE0_MASK (0x1f << 0)
572#define STATE0_SHIFT 0
573#define STATE1(x) ((x) << 5)
574#define STATE1_MASK (0x1f << 5)
575#define STATE1_SHIFT 5
576#define STATE2(x) ((x) << 10)
577#define STATE2_MASK (0x1f << 10)
578#define STATE2_SHIFT 10
579#define STATE3(x) ((x) << 15)
580#define STATE3_MASK (0x1f << 15)
581#define STATE3_SHIFT 15
582
583#define MC_SEQ_RAS_TIMING 0x28a0
584#define MC_SEQ_CAS_TIMING 0x28a4
585#define MC_SEQ_MISC_TIMING 0x28a8
586#define MC_SEQ_MISC_TIMING2 0x28ac
587#define MC_SEQ_PMG_TIMING 0x28b0
588#define MC_SEQ_RD_CTL_D0 0x28b4
589#define MC_SEQ_RD_CTL_D1 0x28b8
590#define MC_SEQ_WR_CTL_D0 0x28bc
591#define MC_SEQ_WR_CTL_D1 0x28c0
592
440#define MC_SEQ_SUP_CNTL 0x28c8 593#define MC_SEQ_SUP_CNTL 0x28c8
441#define RUN_MASK (1 << 0) 594#define RUN_MASK (1 << 0)
442#define MC_SEQ_SUP_PGM 0x28cc 595#define MC_SEQ_SUP_PGM 0x28cc
596#define MC_PMG_AUTO_CMD 0x28d0
443 597
444#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x28e8 598#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x28e8
445#define TRAIN_DONE_D0 (1 << 30) 599#define TRAIN_DONE_D0 (1 << 30)
@@ -448,9 +602,90 @@
448#define MC_IO_PAD_CNTL_D0 0x29d0 602#define MC_IO_PAD_CNTL_D0 0x29d0
449#define MEM_FALL_OUT_CMD (1 << 8) 603#define MEM_FALL_OUT_CMD (1 << 8)
450 604
605#define MC_SEQ_MISC0 0x2a00
606#define MC_SEQ_MISC0_VEN_ID_SHIFT 8
607#define MC_SEQ_MISC0_VEN_ID_MASK 0x00000f00
608#define MC_SEQ_MISC0_VEN_ID_VALUE 3
609#define MC_SEQ_MISC0_REV_ID_SHIFT 12
610#define MC_SEQ_MISC0_REV_ID_MASK 0x0000f000
611#define MC_SEQ_MISC0_REV_ID_VALUE 1
612#define MC_SEQ_MISC0_GDDR5_SHIFT 28
613#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
614#define MC_SEQ_MISC0_GDDR5_VALUE 5
615#define MC_SEQ_MISC1 0x2a04
616#define MC_SEQ_RESERVE_M 0x2a08
617#define MC_PMG_CMD_EMRS 0x2a0c
618
451#define MC_SEQ_IO_DEBUG_INDEX 0x2a44 619#define MC_SEQ_IO_DEBUG_INDEX 0x2a44
452#define MC_SEQ_IO_DEBUG_DATA 0x2a48 620#define MC_SEQ_IO_DEBUG_DATA 0x2a48
453 621
622#define MC_SEQ_MISC5 0x2a54
623#define MC_SEQ_MISC6 0x2a58
624
625#define MC_SEQ_MISC7 0x2a64
626
627#define MC_SEQ_RAS_TIMING_LP 0x2a6c
628#define MC_SEQ_CAS_TIMING_LP 0x2a70
629#define MC_SEQ_MISC_TIMING_LP 0x2a74
630#define MC_SEQ_MISC_TIMING2_LP 0x2a78
631#define MC_SEQ_WR_CTL_D0_LP 0x2a7c
632#define MC_SEQ_WR_CTL_D1_LP 0x2a80
633#define MC_SEQ_PMG_CMD_EMRS_LP 0x2a84
634#define MC_SEQ_PMG_CMD_MRS_LP 0x2a88
635
636#define MC_PMG_CMD_MRS 0x2aac
637
638#define MC_SEQ_RD_CTL_D0_LP 0x2b1c
639#define MC_SEQ_RD_CTL_D1_LP 0x2b20
640
641#define MC_PMG_CMD_MRS1 0x2b44
642#define MC_SEQ_PMG_CMD_MRS1_LP 0x2b48
643#define MC_SEQ_PMG_TIMING_LP 0x2b4c
644
645#define MC_SEQ_WR_CTL_2 0x2b54
646#define MC_SEQ_WR_CTL_2_LP 0x2b58
647#define MC_PMG_CMD_MRS2 0x2b5c
648#define MC_SEQ_PMG_CMD_MRS2_LP 0x2b60
649
650#define MCLK_PWRMGT_CNTL 0x2ba0
651# define DLL_SPEED(x) ((x) << 0)
652# define DLL_SPEED_MASK (0x1f << 0)
653# define DLL_READY (1 << 6)
654# define MC_INT_CNTL (1 << 7)
655# define MRDCK0_PDNB (1 << 8)
656# define MRDCK1_PDNB (1 << 9)
657# define MRDCK0_RESET (1 << 16)
658# define MRDCK1_RESET (1 << 17)
659# define DLL_READY_READ (1 << 24)
660#define DLL_CNTL 0x2ba4
661# define MRDCK0_BYPASS (1 << 24)
662# define MRDCK1_BYPASS (1 << 25)
663
664#define MPLL_FUNC_CNTL 0x2bb4
665#define BWCTRL(x) ((x) << 20)
666#define BWCTRL_MASK (0xff << 20)
667#define MPLL_FUNC_CNTL_1 0x2bb8
668#define VCO_MODE(x) ((x) << 0)
669#define VCO_MODE_MASK (3 << 0)
670#define CLKFRAC(x) ((x) << 4)
671#define CLKFRAC_MASK (0xfff << 4)
672#define CLKF(x) ((x) << 16)
673#define CLKF_MASK (0xfff << 16)
674#define MPLL_FUNC_CNTL_2 0x2bbc
675#define MPLL_AD_FUNC_CNTL 0x2bc0
676#define YCLK_POST_DIV(x) ((x) << 0)
677#define YCLK_POST_DIV_MASK (7 << 0)
678#define MPLL_DQ_FUNC_CNTL 0x2bc4
679#define YCLK_SEL(x) ((x) << 4)
680#define YCLK_SEL_MASK (1 << 4)
681
682#define MPLL_SS1 0x2bcc
683#define CLKV(x) ((x) << 0)
684#define CLKV_MASK (0x3ffffff << 0)
685#define MPLL_SS2 0x2bd0
686#define CLKS(x) ((x) << 0)
687#define CLKS_MASK (0xfff << 0)
688
454#define HDP_HOST_PATH_CNTL 0x2C00 689#define HDP_HOST_PATH_CNTL 0x2C00
455#define CLOCK_GATING_DIS (1 << 23) 690#define CLOCK_GATING_DIS (1 << 23)
456#define HDP_NONSURFACE_BASE 0x2C04 691#define HDP_NONSURFACE_BASE 0x2C04
@@ -465,6 +700,22 @@
465 700
466#define ATC_MISC_CG 0x3350 701#define ATC_MISC_CG 0x3350
467 702
703#define MC_SEQ_CNTL_3 0x3600
704# define CAC_EN (1 << 31)
705#define MC_SEQ_G5PDX_CTRL 0x3604
706#define MC_SEQ_G5PDX_CTRL_LP 0x3608
707#define MC_SEQ_G5PDX_CMD0 0x360c
708#define MC_SEQ_G5PDX_CMD0_LP 0x3610
709#define MC_SEQ_G5PDX_CMD1 0x3614
710#define MC_SEQ_G5PDX_CMD1_LP 0x3618
711
712#define MC_SEQ_PMG_DVS_CTL 0x3628
713#define MC_SEQ_PMG_DVS_CTL_LP 0x362c
714#define MC_SEQ_PMG_DVS_CMD 0x3630
715#define MC_SEQ_PMG_DVS_CMD_LP 0x3634
716#define MC_SEQ_DLL_STBY 0x3638
717#define MC_SEQ_DLL_STBY_LP 0x363c
718
468#define IH_RB_CNTL 0x3e00 719#define IH_RB_CNTL 0x3e00
469# define IH_RB_ENABLE (1 << 0) 720# define IH_RB_ENABLE (1 << 0)
470# define IH_RB_SIZE(x) ((x) << 1) /* log2 */ 721# define IH_RB_SIZE(x) ((x) << 1) /* log2 */
@@ -492,6 +743,9 @@
492# define MC_WR_CLEAN_CNT(x) ((x) << 20) 743# define MC_WR_CLEAN_CNT(x) ((x) << 20)
493# define MC_VMID(x) ((x) << 25) 744# define MC_VMID(x) ((x) << 25)
494 745
746#define BIF_LNCNT_RESET 0x5220
747# define RESET_LNCNT_EN (1 << 0)
748
495#define CONFIG_MEMSIZE 0x5428 749#define CONFIG_MEMSIZE 0x5428
496 750
497#define INTERRUPT_CNTL 0x5468 751#define INTERRUPT_CNTL 0x5468
@@ -628,6 +882,9 @@
628# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) 882# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
629# define DC_HPDx_EN (1 << 28) 883# define DC_HPDx_EN (1 << 28)
630 884
885#define DPG_PIPE_STUTTER_CONTROL 0x6cd4
886# define STUTTER_ENABLE (1 << 0)
887
631#define GRBM_CNTL 0x8000 888#define GRBM_CNTL 0x8000
632#define GRBM_READ_TIMEOUT(x) ((x) << 0) 889#define GRBM_READ_TIMEOUT(x) ((x) << 0)
633 890
diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h
index 6db6e320bc79..4c1ee6df09a0 100644
--- a/drivers/gpu/drm/radeon/ppsmc.h
+++ b/drivers/gpu/drm/radeon/ppsmc.h
@@ -99,7 +99,7 @@ typedef uint8_t PPSMC_Result;
99#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96) 99#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96)
100#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97) 100#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97)
101 101
102/* KV/KB */ 102/* CI/KV/KB */
103#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D) 103#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D)
104#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E) 104#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E)
105#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F) 105#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F)
@@ -108,6 +108,7 @@ typedef uint8_t PPSMC_Result;
108#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132) 108#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132)
109#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135) 109#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135)
110#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136) 110#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136)
111#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
111#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137) 112#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137)
112#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138) 113#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138)
113#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139) 114#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139)
@@ -116,8 +117,13 @@ typedef uint8_t PPSMC_Result;
116#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140) 117#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140)
117#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141) 118#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141)
118#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145) 119#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145)
120#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146)
121#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147)
122#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a)
119#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e) 123#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e)
120#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f) 124#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f)
125#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t) 0x150)
126#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t) 0x151)
121#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154) 127#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154)
122#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155) 128#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155)
123#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156) 129#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156)
@@ -126,9 +132,25 @@ typedef uint8_t PPSMC_Result;
126#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159) 132#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159)
127#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a) 133#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a)
128#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b) 134#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b)
135#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f)
129#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162) 136#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162)
137#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167)
138#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169)
139#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a)
140#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185)
141#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186)
142#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187)
143#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188)
130#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189) 144#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189)
131#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A) 145#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A)
146#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B)
147#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C)
148#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F)
149#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190)
150#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191)
151
152#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200)
153#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201)
132 154
133/* TN */ 155/* TN */
134#define PPSMC_MSG_DPM_Config ((uint32_t) 0x102) 156#define PPSMC_MSG_DPM_Config ((uint32_t) 0x102)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b6bac497f001..930650ec769c 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -246,6 +246,12 @@ int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
246int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev, 246int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev,
247 u16 *voltage, 247 u16 *voltage,
248 u16 leakage_idx); 248 u16 leakage_idx);
249int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev,
250 u16 *leakage_id);
251int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *rdev,
252 u16 *vddc, u16 *vddci,
253 u16 virtual_voltage_id,
254 u16 vbios_voltage_id);
249int radeon_atom_round_to_true_voltage(struct radeon_device *rdev, 255int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
250 u8 voltage_type, 256 u8 voltage_type,
251 u16 nominal_voltage, 257 u16 nominal_voltage,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 9c83ecfd0eb7..c633fa53def0 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2454,6 +2454,20 @@ static struct radeon_asic ci_asic = {
2454 .set_uvd_clocks = &cik_set_uvd_clocks, 2454 .set_uvd_clocks = &cik_set_uvd_clocks,
2455 .get_temperature = &ci_get_temp, 2455 .get_temperature = &ci_get_temp,
2456 }, 2456 },
2457 .dpm = {
2458 .init = &ci_dpm_init,
2459 .setup_asic = &ci_dpm_setup_asic,
2460 .enable = &ci_dpm_enable,
2461 .disable = &ci_dpm_disable,
2462 .pre_set_power_state = &ci_dpm_pre_set_power_state,
2463 .set_power_state = &ci_dpm_set_power_state,
2464 .post_set_power_state = &ci_dpm_post_set_power_state,
2465 .display_configuration_changed = &ci_dpm_display_configuration_changed,
2466 .fini = &ci_dpm_fini,
2467 .get_sclk = &ci_dpm_get_sclk,
2468 .get_mclk = &ci_dpm_get_mclk,
2469 .print_power_state = &ci_dpm_print_power_state,
2470 },
2457 .pflip = { 2471 .pflip = {
2458 .pre_page_flip = &evergreen_pre_page_flip, 2472 .pre_page_flip = &evergreen_pre_page_flip,
2459 .page_flip = &evergreen_page_flip, 2473 .page_flip = &evergreen_page_flip,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 68a1a1fb371d..350da1704964 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -750,6 +750,20 @@ void cik_compute_ring_set_wptr(struct radeon_device *rdev,
750int ci_get_temp(struct radeon_device *rdev); 750int ci_get_temp(struct radeon_device *rdev);
751int kv_get_temp(struct radeon_device *rdev); 751int kv_get_temp(struct radeon_device *rdev);
752 752
753int ci_dpm_init(struct radeon_device *rdev);
754int ci_dpm_enable(struct radeon_device *rdev);
755void ci_dpm_disable(struct radeon_device *rdev);
756int ci_dpm_pre_set_power_state(struct radeon_device *rdev);
757int ci_dpm_set_power_state(struct radeon_device *rdev);
758void ci_dpm_post_set_power_state(struct radeon_device *rdev);
759void ci_dpm_setup_asic(struct radeon_device *rdev);
760void ci_dpm_display_configuration_changed(struct radeon_device *rdev);
761void ci_dpm_fini(struct radeon_device *rdev);
762u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low);
763u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low);
764void ci_dpm_print_power_state(struct radeon_device *rdev,
765 struct radeon_ps *ps);
766
753int kv_dpm_init(struct radeon_device *rdev); 767int kv_dpm_init(struct radeon_device *rdev);
754int kv_dpm_enable(struct radeon_device *rdev); 768int kv_dpm_enable(struct radeon_device *rdev);
755void kv_dpm_disable(struct radeon_device *rdev); 769void kv_dpm_disable(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 6247b5e2d074..7ba439e9f30f 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -211,7 +211,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
211} 211}
212 212
213static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev, 213static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
214 u8 id) 214 u8 id)
215{ 215{
216 struct atom_context *ctx = rdev->mode_info.atom_context; 216 struct atom_context *ctx = rdev->mode_info.atom_context;
217 struct radeon_gpio_rec gpio; 217 struct radeon_gpio_rec gpio;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 37d3d343f687..66b04af16949 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1202,6 +1202,7 @@ int radeon_pm_init(struct radeon_device *rdev)
1202 case CHIP_VERDE: 1202 case CHIP_VERDE:
1203 case CHIP_OLAND: 1203 case CHIP_OLAND:
1204 case CHIP_HAINAN: 1204 case CHIP_HAINAN:
1205 case CHIP_BONAIRE:
1205 case CHIP_KABINI: 1206 case CHIP_KABINI:
1206 case CHIP_KAVERI: 1207 case CHIP_KAVERI:
1207 /* DPM requires the RLC, RV770+ dGPU requires SMC */ 1208 /* DPM requires the RLC, RV770+ dGPU requires SMC */
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h
index fad27c051bbf..33858364fe89 100644
--- a/drivers/gpu/drm/radeon/radeon_ucode.h
+++ b/drivers/gpu/drm/radeon/radeon_ucode.h
@@ -140,4 +140,7 @@
140#define HAINAN_SMC_UCODE_START 0x10000 140#define HAINAN_SMC_UCODE_START 0x10000
141#define HAINAN_SMC_UCODE_SIZE 0xe67C 141#define HAINAN_SMC_UCODE_SIZE 0xe67C
142 142
143#define BONAIRE_SMC_UCODE_START 0x20000
144#define BONAIRE_SMC_UCODE_SIZE 0x1FDEC
145
143#endif 146#endif
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 0f8be48c2ef4..96d96f5df9e7 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3663,7 +3663,7 @@ static void si_clear_vc(struct radeon_device *rdev)
3663 WREG32(CG_FTV, 0); 3663 WREG32(CG_FTV, 0);
3664} 3664}
3665 3665
3666static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock) 3666u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
3667{ 3667{
3668 u8 mc_para_index; 3668 u8 mc_para_index;
3669 3669
@@ -3676,7 +3676,7 @@ static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
3676 return mc_para_index; 3676 return mc_para_index;
3677} 3677}
3678 3678
3679static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode) 3679u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
3680{ 3680{
3681 u8 mc_para_index; 3681 u8 mc_para_index;
3682 3682
@@ -3758,9 +3758,9 @@ static bool si_validate_phase_shedding_tables(struct radeon_device *rdev,
3758 return true; 3758 return true;
3759} 3759}
3760 3760
3761static void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev, 3761void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
3762 u32 max_voltage_steps, 3762 u32 max_voltage_steps,
3763 struct atom_voltage_table *voltage_table) 3763 struct atom_voltage_table *voltage_table)
3764{ 3764{
3765 unsigned int i, diff; 3765 unsigned int i, diff;
3766 3766
diff --git a/drivers/gpu/drm/radeon/smu7_discrete.h b/drivers/gpu/drm/radeon/smu7_discrete.h
new file mode 100644
index 000000000000..82f70c90a9ee
--- /dev/null
+++ b/drivers/gpu/drm/radeon/smu7_discrete.h
@@ -0,0 +1,486 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef SMU7_DISCRETE_H
25#define SMU7_DISCRETE_H
26
27#include "smu7.h"
28
29#pragma pack(push, 1)
30
31#define SMU7_DTE_ITERATIONS 5
32#define SMU7_DTE_SOURCES 3
33#define SMU7_DTE_SINKS 1
34#define SMU7_NUM_CPU_TES 0
35#define SMU7_NUM_GPU_TES 1
36#define SMU7_NUM_NON_TES 2
37
38struct SMU7_SoftRegisters
39{
40 uint32_t RefClockFrequency;
41 uint32_t PmTimerP;
42 uint32_t FeatureEnables;
43 uint32_t PreVBlankGap;
44 uint32_t VBlankTimeout;
45 uint32_t TrainTimeGap;
46
47 uint32_t MvddSwitchTime;
48 uint32_t LongestAcpiTrainTime;
49 uint32_t AcpiDelay;
50 uint32_t G5TrainTime;
51 uint32_t DelayMpllPwron;
52 uint32_t VoltageChangeTimeout;
53 uint32_t HandshakeDisables;
54
55 uint8_t DisplayPhy1Config;
56 uint8_t DisplayPhy2Config;
57 uint8_t DisplayPhy3Config;
58 uint8_t DisplayPhy4Config;
59
60 uint8_t DisplayPhy5Config;
61 uint8_t DisplayPhy6Config;
62 uint8_t DisplayPhy7Config;
63 uint8_t DisplayPhy8Config;
64
65 uint32_t AverageGraphicsA;
66 uint32_t AverageMemoryA;
67 uint32_t AverageGioA;
68
69 uint8_t SClkDpmEnabledLevels;
70 uint8_t MClkDpmEnabledLevels;
71 uint8_t LClkDpmEnabledLevels;
72 uint8_t PCIeDpmEnabledLevels;
73
74 uint8_t UVDDpmEnabledLevels;
75 uint8_t SAMUDpmEnabledLevels;
76 uint8_t ACPDpmEnabledLevels;
77 uint8_t VCEDpmEnabledLevels;
78
79 uint32_t DRAM_LOG_ADDR_H;
80 uint32_t DRAM_LOG_ADDR_L;
81 uint32_t DRAM_LOG_PHY_ADDR_H;
82 uint32_t DRAM_LOG_PHY_ADDR_L;
83 uint32_t DRAM_LOG_BUFF_SIZE;
84 uint32_t UlvEnterC;
85 uint32_t UlvTime;
86 uint32_t Reserved[3];
87
88};
89
90typedef struct SMU7_SoftRegisters SMU7_SoftRegisters;
91
92struct SMU7_Discrete_VoltageLevel
93{
94 uint16_t Voltage;
95 uint16_t StdVoltageHiSidd;
96 uint16_t StdVoltageLoSidd;
97 uint8_t Smio;
98 uint8_t padding;
99};
100
101typedef struct SMU7_Discrete_VoltageLevel SMU7_Discrete_VoltageLevel;
102
103struct SMU7_Discrete_GraphicsLevel
104{
105 uint32_t Flags;
106 uint32_t MinVddc;
107 uint32_t MinVddcPhases;
108
109 uint32_t SclkFrequency;
110
111 uint8_t padding1[2];
112 uint16_t ActivityLevel;
113
114 uint32_t CgSpllFuncCntl3;
115 uint32_t CgSpllFuncCntl4;
116 uint32_t SpllSpreadSpectrum;
117 uint32_t SpllSpreadSpectrum2;
118 uint32_t CcPwrDynRm;
119 uint32_t CcPwrDynRm1;
120 uint8_t SclkDid;
121 uint8_t DisplayWatermark;
122 uint8_t EnabledForActivity;
123 uint8_t EnabledForThrottle;
124 uint8_t UpH;
125 uint8_t DownH;
126 uint8_t VoltageDownH;
127 uint8_t PowerThrottle;
128 uint8_t DeepSleepDivId;
129 uint8_t padding[3];
130};
131
132typedef struct SMU7_Discrete_GraphicsLevel SMU7_Discrete_GraphicsLevel;
133
134struct SMU7_Discrete_ACPILevel
135{
136 uint32_t Flags;
137 uint32_t MinVddc;
138 uint32_t MinVddcPhases;
139 uint32_t SclkFrequency;
140 uint8_t SclkDid;
141 uint8_t DisplayWatermark;
142 uint8_t DeepSleepDivId;
143 uint8_t padding;
144 uint32_t CgSpllFuncCntl;
145 uint32_t CgSpllFuncCntl2;
146 uint32_t CgSpllFuncCntl3;
147 uint32_t CgSpllFuncCntl4;
148 uint32_t SpllSpreadSpectrum;
149 uint32_t SpllSpreadSpectrum2;
150 uint32_t CcPwrDynRm;
151 uint32_t CcPwrDynRm1;
152};
153
154typedef struct SMU7_Discrete_ACPILevel SMU7_Discrete_ACPILevel;
155
156struct SMU7_Discrete_Ulv
157{
158 uint32_t CcPwrDynRm;
159 uint32_t CcPwrDynRm1;
160 uint16_t VddcOffset;
161 uint8_t VddcOffsetVid;
162 uint8_t VddcPhase;
163 uint32_t Reserved;
164};
165
166typedef struct SMU7_Discrete_Ulv SMU7_Discrete_Ulv;
167
168struct SMU7_Discrete_MemoryLevel
169{
170 uint32_t MinVddc;
171 uint32_t MinVddcPhases;
172 uint32_t MinVddci;
173 uint32_t MinMvdd;
174
175 uint32_t MclkFrequency;
176
177 uint8_t EdcReadEnable;
178 uint8_t EdcWriteEnable;
179 uint8_t RttEnable;
180 uint8_t StutterEnable;
181
182 uint8_t StrobeEnable;
183 uint8_t StrobeRatio;
184 uint8_t EnabledForThrottle;
185 uint8_t EnabledForActivity;
186
187 uint8_t UpH;
188 uint8_t DownH;
189 uint8_t VoltageDownH;
190 uint8_t padding;
191
192 uint16_t ActivityLevel;
193 uint8_t DisplayWatermark;
194 uint8_t padding1;
195
196 uint32_t MpllFuncCntl;
197 uint32_t MpllFuncCntl_1;
198 uint32_t MpllFuncCntl_2;
199 uint32_t MpllAdFuncCntl;
200 uint32_t MpllDqFuncCntl;
201 uint32_t MclkPwrmgtCntl;
202 uint32_t DllCntl;
203 uint32_t MpllSs1;
204 uint32_t MpllSs2;
205};
206
207typedef struct SMU7_Discrete_MemoryLevel SMU7_Discrete_MemoryLevel;
208
209struct SMU7_Discrete_LinkLevel
210{
211 uint8_t PcieGenSpeed;
212 uint8_t PcieLaneCount;
213 uint8_t EnabledForActivity;
214 uint8_t Padding;
215 uint32_t DownT;
216 uint32_t UpT;
217 uint32_t Reserved;
218};
219
220typedef struct SMU7_Discrete_LinkLevel SMU7_Discrete_LinkLevel;
221
222
223struct SMU7_Discrete_MCArbDramTimingTableEntry
224{
225 uint32_t McArbDramTiming;
226 uint32_t McArbDramTiming2;
227 uint8_t McArbBurstTime;
228 uint8_t padding[3];
229};
230
231typedef struct SMU7_Discrete_MCArbDramTimingTableEntry SMU7_Discrete_MCArbDramTimingTableEntry;
232
233struct SMU7_Discrete_MCArbDramTimingTable
234{
235 SMU7_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
236};
237
238typedef struct SMU7_Discrete_MCArbDramTimingTable SMU7_Discrete_MCArbDramTimingTable;
239
240struct SMU7_Discrete_UvdLevel
241{
242 uint32_t VclkFrequency;
243 uint32_t DclkFrequency;
244 uint16_t MinVddc;
245 uint8_t MinVddcPhases;
246 uint8_t VclkDivider;
247 uint8_t DclkDivider;
248 uint8_t padding[3];
249};
250
251typedef struct SMU7_Discrete_UvdLevel SMU7_Discrete_UvdLevel;
252
253struct SMU7_Discrete_ExtClkLevel
254{
255 uint32_t Frequency;
256 uint16_t MinVoltage;
257 uint8_t MinPhases;
258 uint8_t Divider;
259};
260
261typedef struct SMU7_Discrete_ExtClkLevel SMU7_Discrete_ExtClkLevel;
262
263struct SMU7_Discrete_StateInfo
264{
265 uint32_t SclkFrequency;
266 uint32_t MclkFrequency;
267 uint32_t VclkFrequency;
268 uint32_t DclkFrequency;
269 uint32_t SamclkFrequency;
270 uint32_t AclkFrequency;
271 uint32_t EclkFrequency;
272 uint16_t MvddVoltage;
273 uint16_t padding16;
274 uint8_t DisplayWatermark;
275 uint8_t McArbIndex;
276 uint8_t McRegIndex;
277 uint8_t SeqIndex;
278 uint8_t SclkDid;
279 int8_t SclkIndex;
280 int8_t MclkIndex;
281 uint8_t PCIeGen;
282
283};
284
285typedef struct SMU7_Discrete_StateInfo SMU7_Discrete_StateInfo;
286
287
// Master dpm configuration table uploaded to the SMC for discrete SMU7
// parts (CI dGPUs).  Bundles the PID controllers, all voltage and clock
// level arrays, boot levels, sampling intervals, thermal limits and the
// BAPM/DTE power-management parameters.  Firmware ABI: field order, types
// and the commented-out placeholders must stay exactly as-is.
struct SMU7_Discrete_DpmTable
{
    // Closed-loop controllers for graphics/memory/link dpm.
    SMU7_PIDController GraphicsPIDController;
    SMU7_PIDController MemoryPIDController;
    SMU7_PIDController LinkPIDController;

    uint32_t SystemFlags;


    // SMIO pin masks used for GPIO-driven voltage switching.
    uint32_t SmioMaskVddcVid;
    uint32_t SmioMaskVddcPhase;
    uint32_t SmioMaskVddciVid;
    uint32_t SmioMaskMvddVid;

    // Number of valid entries in the voltage level arrays below.
    uint32_t VddcLevelCount;
    uint32_t VddciLevelCount;
    uint32_t MvddLevelCount;

    SMU7_Discrete_VoltageLevel VddcLevel               [SMU7_MAX_LEVELS_VDDC];
//    SMU7_Discrete_VoltageLevel VddcStandardReference   [SMU7_MAX_LEVELS_VDDC];
    SMU7_Discrete_VoltageLevel VddciLevel              [SMU7_MAX_LEVELS_VDDCI];
    SMU7_Discrete_VoltageLevel MvddLevel               [SMU7_MAX_LEVELS_MVDD];

    // Number of valid entries in each dpm level array below.
    uint8_t GraphicsDpmLevelCount;
    uint8_t MemoryDpmLevelCount;
    uint8_t LinkLevelCount;
    uint8_t UvdLevelCount;
    uint8_t VceLevelCount;
    uint8_t AcpLevelCount;
    uint8_t SamuLevelCount;
    uint8_t MasterDeepSleepControl;
    uint32_t Reserved[5];
//    uint32_t SamuDefaultLevel;

    // Per-engine dpm level tables.
    SMU7_Discrete_GraphicsLevel GraphicsLevel          [SMU7_MAX_LEVELS_GRAPHICS];
    SMU7_Discrete_MemoryLevel MemoryACPILevel;
    SMU7_Discrete_MemoryLevel MemoryLevel              [SMU7_MAX_LEVELS_MEMORY];
    SMU7_Discrete_LinkLevel LinkLevel                  [SMU7_MAX_LEVELS_LINK];
    SMU7_Discrete_ACPILevel ACPILevel;
    SMU7_Discrete_UvdLevel UvdLevel                    [SMU7_MAX_LEVELS_UVD];
    SMU7_Discrete_ExtClkLevel VceLevel                 [SMU7_MAX_LEVELS_VCE];
    SMU7_Discrete_ExtClkLevel AcpLevel                 [SMU7_MAX_LEVELS_ACP];
    SMU7_Discrete_ExtClkLevel SamuLevel                [SMU7_MAX_LEVELS_SAMU];
    SMU7_Discrete_Ulv Ulv;                             // ultra-low-voltage state

    uint32_t SclkStepSize;
    uint32_t Smio                                      [SMU7_MAX_ENTRIES_SMIO];

    // Levels each auxiliary engine starts at after boot.
    uint8_t UvdBootLevel;
    uint8_t VceBootLevel;
    uint8_t AcpBootLevel;
    uint8_t SamuBootLevel;

    // Sampling intervals -- units are firmware-defined (TODO: confirm).
    uint8_t UVDInterval;
    uint8_t VCEInterval;
    uint8_t ACPInterval;
    uint8_t SAMUInterval;

    uint8_t GraphicsBootLevel;
    uint8_t GraphicsVoltageChangeEnable;
    uint8_t GraphicsThermThrottleEnable;
    uint8_t GraphicsInterval;

    uint8_t VoltageInterval;
    uint8_t ThermalInterval;
    uint16_t TemperatureLimitHigh;          // thermal throttle thresholds

    uint16_t TemperatureLimitLow;
    uint8_t MemoryBootLevel;
    uint8_t MemoryVoltageChangeEnable;

    uint8_t MemoryInterval;
    uint8_t MemoryThermThrottleEnable;
    uint16_t VddcVddciDelta;                // offset kept between vddc and vddci

    uint16_t VoltageResponseTime;
    uint16_t PhaseResponseTime;

    uint8_t PCIeBootLinkLevel;
    uint8_t PCIeGenInterval;
    uint8_t DTEInterval;                    // digital temperature estimation
    uint8_t DTEMode;

    uint8_t SVI2Enable;                     // serial VID voltage control
    uint8_t VRHotGpio;
    uint8_t AcDcGpio;
    uint8_t ThermGpio;

    // PPM (platform power management) and TDP limits.
    uint16_t PPM_PkgPwrLimit;
    uint16_t PPM_TemperatureLimit;

    uint16_t DefaultTdp;
    uint16_t TargetTdp;

    uint16_t FpsHighT;
    uint16_t FpsLowT;

    // BAPM thermal-coupling coefficient matrices, indexed
    // [iteration][heat source][heat sink].
    uint16_t BAPMTI_R  [SMU7_DTE_ITERATIONS][SMU7_DTE_SOURCES][SMU7_DTE_SINKS];
    uint16_t BAPMTI_RC [SMU7_DTE_ITERATIONS][SMU7_DTE_SOURCES][SMU7_DTE_SINKS];

    uint8_t DTEAmbientTempBase;
    uint8_t DTETjOffset;
    uint8_t GpuTjMax;
    uint8_t GpuTjHyst;

    // Boot-time voltages.
    uint16_t BootVddc;
    uint16_t BootVddci;

    uint16_t BootMVdd;
    uint16_t padding;

    uint32_t BAPM_TEMP_GRADIENT;

    uint32_t LowSclkInterruptT;
};

typedef struct SMU7_Discrete_DpmTable SMU7_Discrete_DpmTable;
405
// Up to 16 MC register slots per set; one register set per memory dpm level.
#define SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE 16
#define SMU7_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT SMU7_MAX_LEVELS_MEMORY

// Address pair for one MC register slot.
// NOTE(review): s0/s1 presumably address the two MC sequencer channels --
// confirm against the ci_dpm.c table-build code.
struct SMU7_Discrete_MCRegisterAddress
{
    uint16_t s0;
    uint16_t s1;
};

typedef struct SMU7_Discrete_MCRegisterAddress SMU7_Discrete_MCRegisterAddress;
416
// Values for one memory dpm level: value[i] is written to the register
// whose address lives in SMU7_Discrete_MCRegisters.address[i].
struct SMU7_Discrete_MCRegisterSet
{
    uint32_t value[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
};

typedef struct SMU7_Discrete_MCRegisterSet SMU7_Discrete_MCRegisterSet;
423
// Complete MC register programming table: a shared list of register
// addresses plus one value set per memory dpm level.
struct SMU7_Discrete_MCRegisters
{
    uint8_t                             last;        // number of valid address/value slots
    uint8_t                             reserved[3]; // explicit pad to a 4-byte multiple
    SMU7_Discrete_MCRegisterAddress     address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
    SMU7_Discrete_MCRegisterSet         data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT];
};

typedef struct SMU7_Discrete_MCRegisters SMU7_Discrete_MCRegisters;
433
// Power-management fuse values uploaded to the SMC: per-VID BAPM leakage
// coefficients, SVI load-line settings, TDC limits and LPML temperature
// bounds.  The dwN comments give each field's dword offset within the
// struct -- keep them in sync if the firmware interface ever changes.
struct SMU7_Discrete_PmFuses {
  // dw0-dw1
  uint8_t BapmVddCVidHiSidd[8];

  // dw2-dw3
  uint8_t BapmVddCVidLoSidd[8];

  // dw4-dw5
  uint8_t VddCVid[8];

  // dw6
  uint8_t SviLoadLineEn;
  uint8_t SviLoadLineVddC;
  uint8_t SviLoadLineTrimVddC;
  uint8_t SviLoadLineOffsetVddC;

  // dw7
  uint16_t TDC_VDDC_PkgLimit;
  uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
  uint8_t TDC_MAWt;

  // dw8
  uint8_t TdcWaterfallCtl;
  uint8_t LPMLTemperatureMin;
  uint8_t LPMLTemperatureMax;
  uint8_t Reserved;

  // dw9-dw10
  uint8_t BapmVddCVidHiSidd2[8];

  // dw11-dw12
  uint32_t Reserved6[2];

  // dw13-dw16
  uint8_t GnbLPML[16];

  // dw17
  uint8_t GnbLPMLMaxVid;
  uint8_t GnbLPMLMinVid;
  uint8_t Reserved1[2];

  // dw18
  uint16_t BapmVddCBaseLeakageHiSidd;
  uint16_t BapmVddCBaseLeakageLoSidd;
};

typedef struct SMU7_Discrete_PmFuses SMU7_Discrete_PmFuses;
481
482
483#pragma pack(pop)
484
485#endif
486