Diffstat (limited to 'drivers/gpu/drm/radeon/sumo_dpm.c')
-rw-r--r-- | drivers/gpu/drm/radeon/sumo_dpm.c | 1876
1 files changed, 1876 insertions, 0 deletions
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
new file mode 100644
index 000000000000..11b6b9924f1b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -0,0 +1,1876 @@
1 | /* | ||
2 | * Copyright 2012 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include "drmP.h" | ||
25 | #include "radeon.h" | ||
26 | #include "sumod.h" | ||
27 | #include "r600_dpm.h" | ||
28 | #include "cypress_dpm.h" | ||
29 | #include "sumo_dpm.h" | ||
30 | #include <linux/seq_file.h> | ||
31 | |||
32 | #define SUMO_MAX_DEEPSLEEP_DIVIDER_ID 5 | ||
33 | #define SUMO_MINIMUM_ENGINE_CLOCK 800 | ||
34 | #define BOOST_DPM_LEVEL 7 | ||
35 | |||
36 | static const u32 sumo_utc[SUMO_PM_NUMBER_OF_TC] = | ||
37 | { | ||
38 | SUMO_UTC_DFLT_00, | ||
39 | SUMO_UTC_DFLT_01, | ||
40 | SUMO_UTC_DFLT_02, | ||
41 | SUMO_UTC_DFLT_03, | ||
42 | SUMO_UTC_DFLT_04, | ||
43 | SUMO_UTC_DFLT_05, | ||
44 | SUMO_UTC_DFLT_06, | ||
45 | SUMO_UTC_DFLT_07, | ||
46 | SUMO_UTC_DFLT_08, | ||
47 | SUMO_UTC_DFLT_09, | ||
48 | SUMO_UTC_DFLT_10, | ||
49 | SUMO_UTC_DFLT_11, | ||
50 | SUMO_UTC_DFLT_12, | ||
51 | SUMO_UTC_DFLT_13, | ||
52 | SUMO_UTC_DFLT_14, | ||
53 | }; | ||
54 | |||
55 | static const u32 sumo_dtc[SUMO_PM_NUMBER_OF_TC] = | ||
56 | { | ||
57 | SUMO_DTC_DFLT_00, | ||
58 | SUMO_DTC_DFLT_01, | ||
59 | SUMO_DTC_DFLT_02, | ||
60 | SUMO_DTC_DFLT_03, | ||
61 | SUMO_DTC_DFLT_04, | ||
62 | SUMO_DTC_DFLT_05, | ||
63 | SUMO_DTC_DFLT_06, | ||
64 | SUMO_DTC_DFLT_07, | ||
65 | SUMO_DTC_DFLT_08, | ||
66 | SUMO_DTC_DFLT_09, | ||
67 | SUMO_DTC_DFLT_10, | ||
68 | SUMO_DTC_DFLT_11, | ||
69 | SUMO_DTC_DFLT_12, | ||
70 | SUMO_DTC_DFLT_13, | ||
71 | SUMO_DTC_DFLT_14, | ||
72 | }; | ||
73 | |||
74 | struct sumo_ps *sumo_get_ps(struct radeon_ps *rps) | ||
75 | { | ||
76 | struct sumo_ps *ps = rps->ps_priv; | ||
77 | |||
78 | return ps; | ||
79 | } | ||
80 | |||
81 | struct sumo_power_info *sumo_get_pi(struct radeon_device *rdev) | ||
82 | { | ||
83 | struct sumo_power_info *pi = rdev->pm.dpm.priv; | ||
84 | |||
85 | return pi; | ||
86 | } | ||
87 | |||
88 | static void sumo_gfx_clockgating_enable(struct radeon_device *rdev, bool enable) | ||
89 | { | ||
90 | if (enable) | ||
91 | WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN); | ||
92 | else { | ||
93 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN); | ||
94 | WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON); | ||
95 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON); | ||
96 | RREG32(GB_ADDR_CONFIG); | ||
97 | } | ||
98 | } | ||
99 | |||
100 | #define CGCG_CGTT_LOCAL0_MASK 0xE5BFFFFF | ||
101 | #define CGCG_CGTT_LOCAL1_MASK 0xEFFF07FF | ||
102 | |||
103 | static void sumo_mg_clockgating_enable(struct radeon_device *rdev, bool enable) | ||
104 | { | ||
105 | u32 local0; | ||
106 | u32 local1; | ||
107 | |||
108 | local0 = RREG32(CG_CGTT_LOCAL_0); | ||
109 | local1 = RREG32(CG_CGTT_LOCAL_1); | ||
110 | |||
111 | if (enable) { | ||
112 | WREG32(CG_CGTT_LOCAL_0, (0 & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK) ); | ||
113 | WREG32(CG_CGTT_LOCAL_1, (0 & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK) ); | ||
114 | } else { | ||
115 | WREG32(CG_CGTT_LOCAL_0, (0xFFFFFFFF & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK) ); | ||
116 | WREG32(CG_CGTT_LOCAL_1, (0xFFFFCFFF & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK) ); | ||
117 | } | ||
118 | } | ||
119 | |||
120 | static void sumo_program_git(struct radeon_device *rdev) | ||
121 | { | ||
122 | u32 p, u; | ||
123 | u32 xclk = radeon_get_xclk(rdev); | ||
124 | |||
125 | r600_calculate_u_and_p(SUMO_GICST_DFLT, | ||
126 | xclk, 16, &p, &u); | ||
127 | |||
128 | WREG32_P(CG_GIT, CG_GICST(p), ~CG_GICST_MASK); | ||
129 | } | ||
130 | |||
131 | static void sumo_program_grsd(struct radeon_device *rdev) | ||
132 | { | ||
133 | u32 p, u; | ||
134 | u32 xclk = radeon_get_xclk(rdev); | ||
135 | u32 grs = 256 * 25 / 100; | ||
136 | |||
137 | r600_calculate_u_and_p(1, xclk, 14, &p, &u); | ||
138 | |||
139 | WREG32(CG_GCOOR, PHC(grs) | SDC(p) | SU(u)); | ||
140 | } | ||
141 | |||
142 | void sumo_gfx_clockgating_initialize(struct radeon_device *rdev) | ||
143 | { | ||
144 | sumo_program_git(rdev); | ||
145 | sumo_program_grsd(rdev); | ||
146 | } | ||
147 | |||
148 | static void sumo_gfx_powergating_initialize(struct radeon_device *rdev) | ||
149 | { | ||
150 | u32 rcu_pwr_gating_cntl; | ||
151 | u32 p, u; | ||
152 | u32 p_c, p_p, d_p; | ||
153 | u32 r_t, i_t; | ||
154 | u32 xclk = radeon_get_xclk(rdev); | ||
155 | |||
156 | if (rdev->family == CHIP_PALM) { | ||
157 | p_c = 4; | ||
158 | d_p = 10; | ||
159 | r_t = 10; | ||
160 | i_t = 4; | ||
161 | p_p = 50 + 1000/200 + 6 * 32; | ||
162 | } else { | ||
163 | p_c = 16; | ||
164 | d_p = 50; | ||
165 | r_t = 50; | ||
166 | i_t = 50; | ||
167 | p_p = 113; | ||
168 | } | ||
169 | |||
170 | WREG32(CG_SCRATCH2, 0x01B60A17); | ||
171 | |||
172 | r600_calculate_u_and_p(SUMO_GFXPOWERGATINGT_DFLT, | ||
173 | xclk, 16, &p, &u); | ||
174 | |||
175 | WREG32_P(CG_PWR_GATING_CNTL, PGP(p) | PGU(u), | ||
176 | ~(PGP_MASK | PGU_MASK)); | ||
177 | |||
178 | r600_calculate_u_and_p(SUMO_VOLTAGEDROPT_DFLT, | ||
179 | xclk, 16, &p, &u); | ||
180 | |||
181 | WREG32_P(CG_CG_VOLTAGE_CNTL, PGP(p) | PGU(u), | ||
182 | ~(PGP_MASK | PGU_MASK)); | ||
183 | |||
184 | if (rdev->family == CHIP_PALM) { | ||
185 | WREG32_RCU(RCU_PWR_GATING_SEQ0, 0x10103210); | ||
186 | WREG32_RCU(RCU_PWR_GATING_SEQ1, 0x10101010); | ||
187 | } else { | ||
188 | WREG32_RCU(RCU_PWR_GATING_SEQ0, 0x76543210); | ||
189 | WREG32_RCU(RCU_PWR_GATING_SEQ1, 0xFEDCBA98); | ||
190 | } | ||
191 | |||
192 | rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL); | ||
193 | rcu_pwr_gating_cntl &= | ||
194 | ~(RSVD_MASK | PCV_MASK | PGS_MASK); | ||
195 | rcu_pwr_gating_cntl |= PCV(p_c) | PGS(1) | PWR_GATING_EN; | ||
196 | if (rdev->family == CHIP_PALM) { | ||
197 | rcu_pwr_gating_cntl &= ~PCP_MASK; | ||
198 | rcu_pwr_gating_cntl |= PCP(0x77); | ||
199 | } | ||
200 | WREG32_RCU(RCU_PWR_GATING_CNTL, rcu_pwr_gating_cntl); | ||
201 | |||
202 | rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_2); | ||
203 | rcu_pwr_gating_cntl &= ~(MPPU_MASK | MPPD_MASK); | ||
204 | rcu_pwr_gating_cntl |= MPPU(p_p) | MPPD(50); | ||
205 | WREG32_RCU(RCU_PWR_GATING_CNTL_2, rcu_pwr_gating_cntl); | ||
206 | |||
207 | rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_3); | ||
208 | rcu_pwr_gating_cntl &= ~(DPPU_MASK | DPPD_MASK); | ||
209 | rcu_pwr_gating_cntl |= DPPU(d_p) | DPPD(50); | ||
210 | WREG32_RCU(RCU_PWR_GATING_CNTL_3, rcu_pwr_gating_cntl); | ||
211 | |||
212 | rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_4); | ||
213 | rcu_pwr_gating_cntl &= ~(RT_MASK | IT_MASK); | ||
214 | rcu_pwr_gating_cntl |= RT(r_t) | IT(i_t); | ||
215 | WREG32_RCU(RCU_PWR_GATING_CNTL_4, rcu_pwr_gating_cntl); | ||
216 | |||
217 | if (rdev->family == CHIP_PALM) | ||
218 | WREG32_RCU(RCU_PWR_GATING_CNTL_5, 0xA02); | ||
219 | |||
220 | sumo_smu_pg_init(rdev); | ||
221 | |||
222 | rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL); | ||
223 | rcu_pwr_gating_cntl &= | ||
224 | ~(RSVD_MASK | PCV_MASK | PGS_MASK); | ||
225 | rcu_pwr_gating_cntl |= PCV(p_c) | PGS(4) | PWR_GATING_EN; | ||
226 | if (rdev->family == CHIP_PALM) { | ||
227 | rcu_pwr_gating_cntl &= ~PCP_MASK; | ||
228 | rcu_pwr_gating_cntl |= PCP(0x77); | ||
229 | } | ||
230 | WREG32_RCU(RCU_PWR_GATING_CNTL, rcu_pwr_gating_cntl); | ||
231 | |||
232 | if (rdev->family == CHIP_PALM) { | ||
233 | rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_2); | ||
234 | rcu_pwr_gating_cntl &= ~(MPPU_MASK | MPPD_MASK); | ||
235 | rcu_pwr_gating_cntl |= MPPU(113) | MPPD(50); | ||
236 | WREG32_RCU(RCU_PWR_GATING_CNTL_2, rcu_pwr_gating_cntl); | ||
237 | |||
238 | rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_3); | ||
239 | rcu_pwr_gating_cntl &= ~(DPPU_MASK | DPPD_MASK); | ||
240 | rcu_pwr_gating_cntl |= DPPU(16) | DPPD(50); | ||
241 | WREG32_RCU(RCU_PWR_GATING_CNTL_3, rcu_pwr_gating_cntl); | ||
242 | } | ||
243 | |||
244 | sumo_smu_pg_init(rdev); | ||
245 | |||
246 | rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL); | ||
247 | rcu_pwr_gating_cntl &= | ||
248 | ~(RSVD_MASK | PCV_MASK | PGS_MASK); | ||
249 | rcu_pwr_gating_cntl |= PGS(5) | PWR_GATING_EN; | ||
250 | |||
251 | if (rdev->family == CHIP_PALM) { | ||
252 | rcu_pwr_gating_cntl |= PCV(4); | ||
253 | rcu_pwr_gating_cntl &= ~PCP_MASK; | ||
254 | rcu_pwr_gating_cntl |= PCP(0x77); | ||
255 | } else | ||
256 | rcu_pwr_gating_cntl |= PCV(11); | ||
257 | WREG32_RCU(RCU_PWR_GATING_CNTL, rcu_pwr_gating_cntl); | ||
258 | |||
259 | if (rdev->family == CHIP_PALM) { | ||
260 | rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_2); | ||
261 | rcu_pwr_gating_cntl &= ~(MPPU_MASK | MPPD_MASK); | ||
262 | rcu_pwr_gating_cntl |= MPPU(113) | MPPD(50); | ||
263 | WREG32_RCU(RCU_PWR_GATING_CNTL_2, rcu_pwr_gating_cntl); | ||
264 | |||
265 | rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_3); | ||
266 | rcu_pwr_gating_cntl &= ~(DPPU_MASK | DPPD_MASK); | ||
267 | rcu_pwr_gating_cntl |= DPPU(22) | DPPD(50); | ||
268 | WREG32_RCU(RCU_PWR_GATING_CNTL_3, rcu_pwr_gating_cntl); | ||
269 | } | ||
270 | |||
271 | sumo_smu_pg_init(rdev); | ||
272 | } | ||
273 | |||
274 | static void sumo_gfx_powergating_enable(struct radeon_device *rdev, bool enable) | ||
275 | { | ||
276 | if (enable) | ||
277 | WREG32_P(CG_PWR_GATING_CNTL, DYN_PWR_DOWN_EN, ~DYN_PWR_DOWN_EN); | ||
278 | else { | ||
279 | WREG32_P(CG_PWR_GATING_CNTL, 0, ~DYN_PWR_DOWN_EN); | ||
280 | RREG32(GB_ADDR_CONFIG); | ||
281 | } | ||
282 | } | ||
283 | |||
284 | static int sumo_enable_clock_power_gating(struct radeon_device *rdev) | ||
285 | { | ||
286 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
287 | |||
288 | if (pi->enable_gfx_clock_gating) | ||
289 | sumo_gfx_clockgating_initialize(rdev); | ||
290 | if (pi->enable_gfx_power_gating) | ||
291 | sumo_gfx_powergating_initialize(rdev); | ||
292 | if (pi->enable_mg_clock_gating) | ||
293 | sumo_mg_clockgating_enable(rdev, true); | ||
294 | if (pi->enable_gfx_clock_gating) | ||
295 | sumo_gfx_clockgating_enable(rdev, true); | ||
296 | if (pi->enable_gfx_power_gating) | ||
297 | sumo_gfx_powergating_enable(rdev, true); | ||
298 | |||
299 | return 0; | ||
300 | } | ||
301 | |||
302 | static void sumo_disable_clock_power_gating(struct radeon_device *rdev) | ||
303 | { | ||
304 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
305 | |||
306 | if (pi->enable_gfx_clock_gating) | ||
307 | sumo_gfx_clockgating_enable(rdev, false); | ||
308 | if (pi->enable_gfx_power_gating) | ||
309 | sumo_gfx_powergating_enable(rdev, false); | ||
310 | if (pi->enable_mg_clock_gating) | ||
311 | sumo_mg_clockgating_enable(rdev, false); | ||
312 | } | ||
313 | |||
314 | static void sumo_calculate_bsp(struct radeon_device *rdev, | ||
315 | u32 high_clk) | ||
316 | { | ||
317 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
318 | u32 xclk = radeon_get_xclk(rdev); | ||
319 | |||
320 | pi->pasi = 65535 * 100 / high_clk; | ||
321 | pi->asi = 65535 * 100 / high_clk; | ||
322 | |||
323 | r600_calculate_u_and_p(pi->asi, | ||
324 | xclk, 16, &pi->bsp, &pi->bsu); | ||
325 | |||
326 | r600_calculate_u_and_p(pi->pasi, | ||
327 | xclk, 16, &pi->pbsp, &pi->pbsu); | ||
328 | |||
329 | pi->dsp = BSP(pi->bsp) | BSU(pi->bsu); | ||
330 | pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu); | ||
331 | } | ||
332 | |||
333 | static void sumo_init_bsp(struct radeon_device *rdev) | ||
334 | { | ||
335 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
336 | |||
337 | WREG32(CG_BSP_0, pi->psp); | ||
338 | } | ||
339 | |||
340 | |||
341 | static void sumo_program_bsp(struct radeon_device *rdev, | ||
342 | struct radeon_ps *rps) | ||
343 | { | ||
344 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
345 | struct sumo_ps *ps = sumo_get_ps(rps); | ||
346 | u32 i; | ||
347 | u32 highest_engine_clock = ps->levels[ps->num_levels - 1].sclk; | ||
348 | |||
349 | if (ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE) | ||
350 | highest_engine_clock = pi->boost_pl.sclk; | ||
351 | |||
352 | sumo_calculate_bsp(rdev, highest_engine_clock); | ||
353 | |||
354 | for (i = 0; i < ps->num_levels - 1; i++) | ||
355 | WREG32(CG_BSP_0 + (i * 4), pi->dsp); | ||
356 | |||
357 | WREG32(CG_BSP_0 + (i * 4), pi->psp); | ||
358 | |||
359 | if (ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE) | ||
360 | WREG32(CG_BSP_0 + (BOOST_DPM_LEVEL * 4), pi->psp); | ||
361 | } | ||
362 | |||
363 | static void sumo_write_at(struct radeon_device *rdev, | ||
364 | u32 index, u32 value) | ||
365 | { | ||
366 | if (index == 0) | ||
367 | WREG32(CG_AT_0, value); | ||
368 | else if (index == 1) | ||
369 | WREG32(CG_AT_1, value); | ||
370 | else if (index == 2) | ||
371 | WREG32(CG_AT_2, value); | ||
372 | else if (index == 3) | ||
373 | WREG32(CG_AT_3, value); | ||
374 | else if (index == 4) | ||
375 | WREG32(CG_AT_4, value); | ||
376 | else if (index == 5) | ||
377 | WREG32(CG_AT_5, value); | ||
378 | else if (index == 6) | ||
379 | WREG32(CG_AT_6, value); | ||
380 | else if (index == 7) | ||
381 | WREG32(CG_AT_7, value); | ||
382 | } | ||
383 | |||
384 | static void sumo_program_at(struct radeon_device *rdev, | ||
385 | struct radeon_ps *rps) | ||
386 | { | ||
387 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
388 | struct sumo_ps *ps = sumo_get_ps(rps); | ||
389 | u32 asi; | ||
390 | u32 i; | ||
391 | u32 m_a; | ||
392 | u32 a_t; | ||
393 | u32 r[SUMO_MAX_HARDWARE_POWERLEVELS]; | ||
394 | u32 l[SUMO_MAX_HARDWARE_POWERLEVELS]; | ||
395 | |||
396 | r[0] = SUMO_R_DFLT0; | ||
397 | r[1] = SUMO_R_DFLT1; | ||
398 | r[2] = SUMO_R_DFLT2; | ||
399 | r[3] = SUMO_R_DFLT3; | ||
400 | r[4] = SUMO_R_DFLT4; | ||
401 | |||
402 | l[0] = SUMO_L_DFLT0; | ||
403 | l[1] = SUMO_L_DFLT1; | ||
404 | l[2] = SUMO_L_DFLT2; | ||
405 | l[3] = SUMO_L_DFLT3; | ||
406 | l[4] = SUMO_L_DFLT4; | ||
407 | |||
408 | for (i = 0; i < ps->num_levels; i++) { | ||
409 | asi = (i == ps->num_levels - 1) ? pi->pasi : pi->asi; | ||
410 | |||
411 | m_a = asi * ps->levels[i].sclk / 100; | ||
412 | |||
413 | a_t = CG_R(m_a * r[i] / 100) | CG_L(m_a * l[i] / 100); | ||
414 | |||
415 | sumo_write_at(rdev, i, a_t); | ||
416 | } | ||
417 | |||
418 | if (ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE) { | ||
419 | asi = pi->pasi; | ||
420 | |||
421 | m_a = asi * pi->boost_pl.sclk / 100; | ||
422 | |||
423 | a_t = CG_R(m_a * r[ps->num_levels - 1] / 100) | | ||
424 | CG_L(m_a * l[ps->num_levels - 1] / 100); | ||
425 | |||
426 | sumo_write_at(rdev, BOOST_DPM_LEVEL, a_t); | ||
427 | } | ||
428 | } | ||
429 | |||
430 | static void sumo_program_tp(struct radeon_device *rdev) | ||
431 | { | ||
432 | int i; | ||
433 | enum r600_td td = R600_TD_DFLT; | ||
434 | |||
435 | for (i = 0; i < SUMO_PM_NUMBER_OF_TC; i++) { | ||
436 | WREG32_P(CG_FFCT_0 + (i * 4), UTC_0(sumo_utc[i]), ~UTC_0_MASK); | ||
437 | WREG32_P(CG_FFCT_0 + (i * 4), DTC_0(sumo_dtc[i]), ~DTC_0_MASK); | ||
438 | } | ||
439 | |||
440 | if (td == R600_TD_AUTO) | ||
441 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL); | ||
442 | else | ||
443 | WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL); | ||
444 | |||
445 | if (td == R600_TD_UP) | ||
446 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE); | ||
447 | |||
448 | if (td == R600_TD_DOWN) | ||
449 | WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE); | ||
450 | } | ||
451 | |||
452 | void sumo_program_vc(struct radeon_device *rdev, u32 vrc) | ||
453 | { | ||
454 | WREG32(CG_FTV, vrc); | ||
455 | } | ||
456 | |||
457 | void sumo_clear_vc(struct radeon_device *rdev) | ||
458 | { | ||
459 | WREG32(CG_FTV, 0); | ||
460 | } | ||
461 | |||
462 | void sumo_program_sstp(struct radeon_device *rdev) | ||
463 | { | ||
464 | u32 p, u; | ||
465 | u32 xclk = radeon_get_xclk(rdev); | ||
466 | |||
467 | r600_calculate_u_and_p(SUMO_SST_DFLT, | ||
468 | xclk, 16, &p, &u); | ||
469 | |||
470 | WREG32(CG_SSP, SSTU(u) | SST(p)); | ||
471 | } | ||
472 | |||
473 | static void sumo_set_divider_value(struct radeon_device *rdev, | ||
474 | u32 index, u32 divider) | ||
475 | { | ||
476 | u32 reg_index = index / 4; | ||
477 | u32 field_index = index % 4; | ||
478 | |||
479 | if (field_index == 0) | ||
480 | WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4), | ||
481 | SCLK_FSTATE_0_DIV(divider), ~SCLK_FSTATE_0_DIV_MASK); | ||
482 | else if (field_index == 1) | ||
483 | WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4), | ||
484 | SCLK_FSTATE_1_DIV(divider), ~SCLK_FSTATE_1_DIV_MASK); | ||
485 | else if (field_index == 2) | ||
486 | WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4), | ||
487 | SCLK_FSTATE_2_DIV(divider), ~SCLK_FSTATE_2_DIV_MASK); | ||
488 | else if (field_index == 3) | ||
489 | WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4), | ||
490 | SCLK_FSTATE_3_DIV(divider), ~SCLK_FSTATE_3_DIV_MASK); | ||
491 | } | ||
492 | |||
493 | static void sumo_set_ds_dividers(struct radeon_device *rdev, | ||
494 | u32 index, u32 divider) | ||
495 | { | ||
496 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
497 | |||
498 | if (pi->enable_sclk_ds) { | ||
499 | u32 dpm_ctrl = RREG32(CG_SCLK_DPM_CTRL_6); | ||
500 | |||
501 | dpm_ctrl &= ~(0x7 << (index * 3)); | ||
502 | dpm_ctrl |= (divider << (index * 3)); | ||
503 | WREG32(CG_SCLK_DPM_CTRL_6, dpm_ctrl); | ||
504 | } | ||
505 | } | ||
506 | |||
507 | static void sumo_set_ss_dividers(struct radeon_device *rdev, | ||
508 | u32 index, u32 divider) | ||
509 | { | ||
510 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
511 | |||
512 | if (pi->enable_sclk_ds) { | ||
513 | u32 dpm_ctrl = RREG32(CG_SCLK_DPM_CTRL_11); | ||
514 | |||
515 | dpm_ctrl &= ~(0x7 << (index * 3)); | ||
516 | dpm_ctrl |= (divider << (index * 3)); | ||
517 | WREG32(CG_SCLK_DPM_CTRL_11, dpm_ctrl); | ||
518 | } | ||
519 | } | ||
520 | |||
521 | static void sumo_set_vid(struct radeon_device *rdev, u32 index, u32 vid) | ||
522 | { | ||
523 | u32 voltage_cntl = RREG32(CG_DPM_VOLTAGE_CNTL); | ||
524 | |||
525 | voltage_cntl &= ~(DPM_STATE0_LEVEL_MASK << (index * 2)); | ||
526 | voltage_cntl |= (vid << (DPM_STATE0_LEVEL_SHIFT + index * 2)); | ||
527 | WREG32(CG_DPM_VOLTAGE_CNTL, voltage_cntl); | ||
528 | } | ||
529 | |||
530 | static void sumo_set_allos_gnb_slow(struct radeon_device *rdev, u32 index, u32 gnb_slow) | ||
531 | { | ||
532 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
533 | u32 temp = gnb_slow; | ||
534 | u32 cg_sclk_dpm_ctrl_3; | ||
535 | |||
536 | if (pi->driver_nbps_policy_disable) | ||
537 | temp = 1; | ||
538 | |||
539 | cg_sclk_dpm_ctrl_3 = RREG32(CG_SCLK_DPM_CTRL_3); | ||
540 | cg_sclk_dpm_ctrl_3 &= ~(GNB_SLOW_FSTATE_0_MASK << index); | ||
541 | cg_sclk_dpm_ctrl_3 |= (temp << (GNB_SLOW_FSTATE_0_SHIFT + index)); | ||
542 | |||
543 | WREG32(CG_SCLK_DPM_CTRL_3, cg_sclk_dpm_ctrl_3); | ||
544 | } | ||
545 | |||
546 | static void sumo_program_power_level(struct radeon_device *rdev, | ||
547 | struct sumo_pl *pl, u32 index) | ||
548 | { | ||
549 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
550 | int ret; | ||
551 | struct atom_clock_dividers dividers; | ||
552 | u32 ds_en = RREG32(DEEP_SLEEP_CNTL) & ENABLE_DS; | ||
553 | |||
554 | ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, | ||
555 | pl->sclk, false, &dividers); | ||
556 | if (ret) | ||
557 | return; | ||
558 | |||
559 | sumo_set_divider_value(rdev, index, dividers.post_div); | ||
560 | |||
561 | sumo_set_vid(rdev, index, pl->vddc_index); | ||
562 | |||
563 | if (pl->ss_divider_index == 0 || pl->ds_divider_index == 0) { | ||
564 | if (ds_en) | ||
565 | WREG32_P(DEEP_SLEEP_CNTL, 0, ~ENABLE_DS); | ||
566 | } else { | ||
567 | sumo_set_ss_dividers(rdev, index, pl->ss_divider_index); | ||
568 | sumo_set_ds_dividers(rdev, index, pl->ds_divider_index); | ||
569 | |||
570 | if (!ds_en) | ||
571 | WREG32_P(DEEP_SLEEP_CNTL, ENABLE_DS, ~ENABLE_DS); | ||
572 | } | ||
573 | |||
574 | sumo_set_allos_gnb_slow(rdev, index, pl->allow_gnb_slow); | ||
575 | |||
576 | if (pi->enable_boost) | ||
577 | sumo_set_tdp_limit(rdev, index, pl->sclk_dpm_tdp_limit); | ||
578 | } | ||
579 | |||
580 | static void sumo_power_level_enable(struct radeon_device *rdev, u32 index, bool enable) | ||
581 | { | ||
582 | u32 reg_index = index / 4; | ||
583 | u32 field_index = index % 4; | ||
584 | |||
585 | if (field_index == 0) | ||
586 | WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4), | ||
587 | enable ? SCLK_FSTATE_0_VLD : 0, ~SCLK_FSTATE_0_VLD); | ||
588 | else if (field_index == 1) | ||
589 | WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4), | ||
590 | enable ? SCLK_FSTATE_1_VLD : 0, ~SCLK_FSTATE_1_VLD); | ||
591 | else if (field_index == 2) | ||
592 | WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4), | ||
593 | enable ? SCLK_FSTATE_2_VLD : 0, ~SCLK_FSTATE_2_VLD); | ||
594 | else if (field_index == 3) | ||
595 | WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4), | ||
596 | enable ? SCLK_FSTATE_3_VLD : 0, ~SCLK_FSTATE_3_VLD); | ||
597 | } | ||
598 | |||
599 | static bool sumo_dpm_enabled(struct radeon_device *rdev) | ||
600 | { | ||
601 | if (RREG32(CG_SCLK_DPM_CTRL_3) & DPM_SCLK_ENABLE) | ||
602 | return true; | ||
603 | else | ||
604 | return false; | ||
605 | } | ||
606 | |||
607 | static void sumo_start_dpm(struct radeon_device *rdev) | ||
608 | { | ||
609 | WREG32_P(CG_SCLK_DPM_CTRL_3, DPM_SCLK_ENABLE, ~DPM_SCLK_ENABLE); | ||
610 | } | ||
611 | |||
612 | static void sumo_stop_dpm(struct radeon_device *rdev) | ||
613 | { | ||
614 | WREG32_P(CG_SCLK_DPM_CTRL_3, 0, ~DPM_SCLK_ENABLE); | ||
615 | } | ||
616 | |||
617 | static void sumo_set_forced_mode(struct radeon_device *rdev, bool enable) | ||
618 | { | ||
619 | if (enable) | ||
620 | WREG32_P(CG_SCLK_DPM_CTRL_3, FORCE_SCLK_STATE_EN, ~FORCE_SCLK_STATE_EN); | ||
621 | else | ||
622 | WREG32_P(CG_SCLK_DPM_CTRL_3, 0, ~FORCE_SCLK_STATE_EN); | ||
623 | } | ||
624 | |||
625 | static void sumo_set_forced_mode_enabled(struct radeon_device *rdev) | ||
626 | { | ||
627 | int i; | ||
628 | |||
629 | sumo_set_forced_mode(rdev, true); | ||
630 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
631 | if (RREG32(CG_SCLK_STATUS) & SCLK_OVERCLK_DETECT) | ||
632 | break; | ||
633 | udelay(1); | ||
634 | } | ||
635 | } | ||
636 | |||
637 | static void sumo_wait_for_level_0(struct radeon_device *rdev) | ||
638 | { | ||
639 | int i; | ||
640 | |||
641 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
642 | if ((RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) == 0) | ||
643 | break; | ||
644 | udelay(1); | ||
645 | } | ||
646 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
647 | if ((RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_INDEX_MASK) == 0) | ||
648 | break; | ||
649 | udelay(1); | ||
650 | } | ||
651 | } | ||
652 | |||
653 | static void sumo_set_forced_mode_disabled(struct radeon_device *rdev) | ||
654 | { | ||
655 | sumo_set_forced_mode(rdev, false); | ||
656 | } | ||
657 | |||
658 | static void sumo_enable_power_level_0(struct radeon_device *rdev) | ||
659 | { | ||
660 | sumo_power_level_enable(rdev, 0, true); | ||
661 | } | ||
662 | |||
663 | static void sumo_patch_boost_state(struct radeon_device *rdev, | ||
664 | struct radeon_ps *rps) | ||
665 | { | ||
666 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
667 | struct sumo_ps *new_ps = sumo_get_ps(rps); | ||
668 | |||
669 | if (new_ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE) { | ||
670 | pi->boost_pl = new_ps->levels[new_ps->num_levels - 1]; | ||
671 | pi->boost_pl.sclk = pi->sys_info.boost_sclk; | ||
672 | pi->boost_pl.vddc_index = pi->sys_info.boost_vid_2bit; | ||
673 | pi->boost_pl.sclk_dpm_tdp_limit = pi->sys_info.sclk_dpm_tdp_limit_boost; | ||
674 | } | ||
675 | } | ||
676 | |||
677 | static void sumo_pre_notify_alt_vddnb_change(struct radeon_device *rdev, | ||
678 | struct radeon_ps *new_rps, | ||
679 | struct radeon_ps *old_rps) | ||
680 | { | ||
681 | struct sumo_ps *new_ps = sumo_get_ps(new_rps); | ||
682 | struct sumo_ps *old_ps = sumo_get_ps(old_rps); | ||
683 | u32 nbps1_old = 0; | ||
684 | u32 nbps1_new = 0; | ||
685 | |||
686 | if (old_ps != NULL) | ||
687 | nbps1_old = (old_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0; | ||
688 | |||
689 | nbps1_new = (new_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0; | ||
690 | |||
691 | if (nbps1_old == 1 && nbps1_new == 0) | ||
692 | sumo_smu_notify_alt_vddnb_change(rdev, 0, 0); | ||
693 | } | ||
694 | |||
695 | static void sumo_post_notify_alt_vddnb_change(struct radeon_device *rdev, | ||
696 | struct radeon_ps *new_rps, | ||
697 | struct radeon_ps *old_rps) | ||
698 | { | ||
699 | struct sumo_ps *new_ps = sumo_get_ps(new_rps); | ||
700 | struct sumo_ps *old_ps = sumo_get_ps(old_rps); | ||
701 | u32 nbps1_old = 0; | ||
702 | u32 nbps1_new = 0; | ||
703 | |||
704 | if (old_ps != NULL) | ||
705 | nbps1_old = (old_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE)? 1 : 0; | ||
706 | |||
707 | nbps1_new = (new_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE)? 1 : 0; | ||
708 | |||
709 | if (nbps1_old == 0 && nbps1_new == 1) | ||
710 | sumo_smu_notify_alt_vddnb_change(rdev, 1, 1); | ||
711 | } | ||
712 | |||
713 | static void sumo_enable_boost(struct radeon_device *rdev, | ||
714 | struct radeon_ps *rps, | ||
715 | bool enable) | ||
716 | { | ||
717 | struct sumo_ps *new_ps = sumo_get_ps(rps); | ||
718 | |||
719 | if (enable) { | ||
720 | if (new_ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE) | ||
721 | sumo_boost_state_enable(rdev, true); | ||
722 | } else | ||
723 | sumo_boost_state_enable(rdev, false); | ||
724 | } | ||
725 | |||
726 | static void sumo_set_forced_level(struct radeon_device *rdev, u32 index) | ||
727 | { | ||
728 | WREG32_P(CG_SCLK_DPM_CTRL_3, FORCE_SCLK_STATE(index), ~FORCE_SCLK_STATE_MASK); | ||
729 | } | ||
730 | |||
731 | static void sumo_set_forced_level_0(struct radeon_device *rdev) | ||
732 | { | ||
733 | sumo_set_forced_level(rdev, 0); | ||
734 | } | ||
735 | |||
736 | static void sumo_program_wl(struct radeon_device *rdev, | ||
737 | struct radeon_ps *rps) | ||
738 | { | ||
739 | struct sumo_ps *new_ps = sumo_get_ps(rps); | ||
740 | u32 dpm_ctrl4 = RREG32(CG_SCLK_DPM_CTRL_4); | ||
741 | |||
742 | dpm_ctrl4 &= 0xFFFFFF00; | ||
743 | dpm_ctrl4 |= (1 << (new_ps->num_levels - 1)); | ||
744 | |||
745 | if (new_ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE) | ||
746 | dpm_ctrl4 |= (1 << BOOST_DPM_LEVEL); | ||
747 | |||
748 | WREG32(CG_SCLK_DPM_CTRL_4, dpm_ctrl4); | ||
749 | } | ||
750 | |||
751 | static void sumo_program_power_levels_0_to_n(struct radeon_device *rdev, | ||
752 | struct radeon_ps *new_rps, | ||
753 | struct radeon_ps *old_rps) | ||
754 | { | ||
755 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
756 | struct sumo_ps *new_ps = sumo_get_ps(new_rps); | ||
757 | struct sumo_ps *old_ps = sumo_get_ps(old_rps); | ||
758 | u32 i; | ||
759 | u32 n_current_state_levels = (old_ps == NULL) ? 1 : old_ps->num_levels; | ||
760 | |||
761 | for (i = 0; i < new_ps->num_levels; i++) { | ||
762 | sumo_program_power_level(rdev, &new_ps->levels[i], i); | ||
763 | sumo_power_level_enable(rdev, i, true); | ||
764 | } | ||
765 | |||
766 | for (i = new_ps->num_levels; i < n_current_state_levels; i++) | ||
767 | sumo_power_level_enable(rdev, i, false); | ||
768 | |||
769 | if (new_ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE) | ||
770 | sumo_program_power_level(rdev, &pi->boost_pl, BOOST_DPM_LEVEL); | ||
771 | } | ||
772 | |||
773 | static void sumo_enable_acpi_pm(struct radeon_device *rdev) | ||
774 | { | ||
775 | WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN); | ||
776 | } | ||
777 | |||
778 | static void sumo_program_power_level_enter_state(struct radeon_device *rdev) | ||
779 | { | ||
780 | WREG32_P(CG_SCLK_DPM_CTRL_5, SCLK_FSTATE_BOOTUP(0), ~SCLK_FSTATE_BOOTUP_MASK); | ||
781 | } | ||
782 | |||
783 | static void sumo_program_acpi_power_level(struct radeon_device *rdev) | ||
784 | { | ||
785 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
786 | struct atom_clock_dividers dividers; | ||
787 | int ret; | ||
788 | |||
789 | ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, | ||
790 | pi->acpi_pl.sclk, | ||
791 | false, &dividers); | ||
792 | if (ret) | ||
793 | return; | ||
794 | |||
795 | WREG32_P(CG_ACPI_CNTL, SCLK_ACPI_DIV(dividers.post_div), ~SCLK_ACPI_DIV_MASK); | ||
796 | WREG32_P(CG_ACPI_VOLTAGE_CNTL, 0, ~ACPI_VOLTAGE_EN); | ||
797 | } | ||
798 | |||
799 | static void sumo_program_bootup_state(struct radeon_device *rdev) | ||
800 | { | ||
801 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
802 | u32 dpm_ctrl4 = RREG32(CG_SCLK_DPM_CTRL_4); | ||
803 | u32 i; | ||
804 | |||
805 | sumo_program_power_level(rdev, &pi->boot_pl, 0); | ||
806 | |||
807 | dpm_ctrl4 &= 0xFFFFFF00; | ||
808 | WREG32(CG_SCLK_DPM_CTRL_4, dpm_ctrl4); | ||
809 | |||
810 | for (i = 1; i < 8; i++) | ||
811 | sumo_power_level_enable(rdev, i, false); | ||
812 | } | ||
813 | |||
814 | static void sumo_setup_uvd_clocks(struct radeon_device *rdev, | ||
815 | struct radeon_ps *new_rps, | ||
816 | struct radeon_ps *old_rps) | ||
817 | { | ||
818 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
819 | |||
820 | if (pi->enable_gfx_power_gating) { | ||
821 | sumo_gfx_powergating_enable(rdev, false); | ||
822 | } | ||
823 | |||
824 | radeon_set_uvd_clocks(rdev, new_rps->vclk, new_rps->dclk); | ||
825 | |||
826 | if (pi->enable_gfx_power_gating) { | ||
827 | if (!pi->disable_gfx_power_gating_in_uvd || | ||
828 | !r600_is_uvd_state(new_rps->class, new_rps->class2)) | ||
829 | sumo_gfx_powergating_enable(rdev, true); | ||
830 | } | ||
831 | } | ||
832 | |||
833 | static void sumo_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev, | ||
834 | struct radeon_ps *new_rps, | ||
835 | struct radeon_ps *old_rps) | ||
836 | { | ||
837 | struct sumo_ps *new_ps = sumo_get_ps(new_rps); | ||
838 | struct sumo_ps *current_ps = sumo_get_ps(old_rps); | ||
839 | |||
840 | if ((new_rps->vclk == old_rps->vclk) && | ||
841 | (new_rps->dclk == old_rps->dclk)) | ||
842 | return; | ||
843 | |||
844 | if (new_ps->levels[new_ps->num_levels - 1].sclk >= | ||
845 | current_ps->levels[current_ps->num_levels - 1].sclk) | ||
846 | return; | ||
847 | |||
848 | sumo_setup_uvd_clocks(rdev, new_rps, old_rps); | ||
849 | } | ||
850 | |||
851 | static void sumo_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev, | ||
852 | struct radeon_ps *new_rps, | ||
853 | struct radeon_ps *old_rps) | ||
854 | { | ||
855 | struct sumo_ps *new_ps = sumo_get_ps(new_rps); | ||
856 | struct sumo_ps *current_ps = sumo_get_ps(old_rps); | ||
857 | |||
858 | if ((new_rps->vclk == old_rps->vclk) && | ||
859 | (new_rps->dclk == old_rps->dclk)) | ||
860 | return; | ||
861 | |||
862 | if (new_ps->levels[new_ps->num_levels - 1].sclk < | ||
863 | current_ps->levels[current_ps->num_levels - 1].sclk) | ||
864 | return; | ||
865 | |||
866 | sumo_setup_uvd_clocks(rdev, new_rps, old_rps); | ||
867 | } | ||
868 | |||
869 | void sumo_take_smu_control(struct radeon_device *rdev, bool enable) | ||
870 | { | ||
871 | /* This bit selects who handles display phy powergating. | ||
872 | * Clear the bit to let atom handle it. | ||
873 | * Set it to let the driver handle it. | ||
874 | * For now we just let atom handle it. | ||
875 | */ | ||
876 | #if 0 | ||
877 | u32 v = RREG32(DOUT_SCRATCH3); | ||
878 | |||
879 | if (enable) | ||
880 | v |= 0x4; | ||
881 | else | ||
882 | v &= 0xFFFFFFFB; | ||
883 | |||
884 | WREG32(DOUT_SCRATCH3, v); | ||
885 | #endif | ||
886 | } | ||
887 | |||
888 | static void sumo_enable_sclk_ds(struct radeon_device *rdev, bool enable) | ||
889 | { | ||
890 | if (enable) { | ||
891 | u32 deep_sleep_cntl = RREG32(DEEP_SLEEP_CNTL); | ||
892 | u32 deep_sleep_cntl2 = RREG32(DEEP_SLEEP_CNTL2); | ||
893 | u32 t = 1; | ||
894 | |||
895 | deep_sleep_cntl &= ~R_DIS; | ||
896 | deep_sleep_cntl &= ~HS_MASK; | ||
897 | deep_sleep_cntl |= HS(t > 4095 ? 4095 : t); | ||
898 | |||
899 | deep_sleep_cntl2 |= LB_UFP_EN; | ||
900 | deep_sleep_cntl2 &= INOUT_C_MASK; | ||
901 | deep_sleep_cntl2 |= INOUT_C(0xf); | ||
902 | |||
903 | WREG32(DEEP_SLEEP_CNTL2, deep_sleep_cntl2); | ||
904 | WREG32(DEEP_SLEEP_CNTL, deep_sleep_cntl); | ||
905 | } else | ||
906 | WREG32_P(DEEP_SLEEP_CNTL, 0, ~ENABLE_DS); | ||
907 | } | ||
908 | |||
909 | static void sumo_program_bootup_at(struct radeon_device *rdev) | ||
910 | { | ||
911 | WREG32_P(CG_AT_0, CG_R(0xffff), ~CG_R_MASK); | ||
912 | WREG32_P(CG_AT_0, CG_L(0), ~CG_L_MASK); | ||
913 | } | ||
914 | |||
915 | static void sumo_reset_am(struct radeon_device *rdev) | ||
916 | { | ||
917 | WREG32_P(SCLK_PWRMGT_CNTL, FIR_RESET, ~FIR_RESET); | ||
918 | } | ||
919 | |||
920 | static void sumo_start_am(struct radeon_device *rdev) | ||
921 | { | ||
922 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_RESET); | ||
923 | } | ||
924 | |||
925 | static void sumo_program_ttp(struct radeon_device *rdev) | ||
926 | { | ||
927 | u32 xclk = radeon_get_xclk(rdev); | ||
928 | u32 p, u; | ||
929 | u32 cg_sclk_dpm_ctrl_5 = RREG32(CG_SCLK_DPM_CTRL_5); | ||
930 | |||
931 | r600_calculate_u_and_p(1000, | ||
932 | xclk, 16, &p, &u); | ||
933 | |||
934 | cg_sclk_dpm_ctrl_5 &= ~(TT_TP_MASK | TT_TU_MASK); | ||
935 | cg_sclk_dpm_ctrl_5 |= TT_TP(p) | TT_TU(u); | ||
936 | |||
937 | WREG32(CG_SCLK_DPM_CTRL_5, cg_sclk_dpm_ctrl_5); | ||
938 | } | ||
939 | |||
940 | static void sumo_program_ttt(struct radeon_device *rdev) | ||
941 | { | ||
942 | u32 cg_sclk_dpm_ctrl_3 = RREG32(CG_SCLK_DPM_CTRL_3); | ||
943 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
944 | |||
945 | cg_sclk_dpm_ctrl_3 &= ~(GNB_TT_MASK | GNB_THERMTHRO_MASK); | ||
946 | cg_sclk_dpm_ctrl_3 |= GNB_TT(pi->thermal_auto_throttling + 49); | ||
947 | |||
948 | WREG32(CG_SCLK_DPM_CTRL_3, cg_sclk_dpm_ctrl_3); | ||
949 | } | ||
950 | |||
951 | |||
952 | static void sumo_enable_voltage_scaling(struct radeon_device *rdev, bool enable) | ||
953 | { | ||
954 | if (enable) { | ||
955 | WREG32_P(CG_DPM_VOLTAGE_CNTL, DPM_VOLTAGE_EN, ~DPM_VOLTAGE_EN); | ||
956 | WREG32_P(CG_CG_VOLTAGE_CNTL, 0, ~CG_VOLTAGE_EN); | ||
957 | } else { | ||
958 | WREG32_P(CG_CG_VOLTAGE_CNTL, CG_VOLTAGE_EN, ~CG_VOLTAGE_EN); | ||
959 | WREG32_P(CG_DPM_VOLTAGE_CNTL, 0, ~DPM_VOLTAGE_EN); | ||
960 | } | ||
961 | } | ||
962 | |||
963 | static void sumo_override_cnb_thermal_events(struct radeon_device *rdev) | ||
964 | { | ||
965 | WREG32_P(CG_SCLK_DPM_CTRL_3, CNB_THERMTHRO_MASK_SCLK, | ||
966 | ~CNB_THERMTHRO_MASK_SCLK); | ||
967 | } | ||
968 | |||
969 | static void sumo_program_dc_hto(struct radeon_device *rdev) | ||
970 | { | ||
971 | u32 cg_sclk_dpm_ctrl_4 = RREG32(CG_SCLK_DPM_CTRL_4); | ||
972 | u32 p, u; | ||
973 | u32 xclk = radeon_get_xclk(rdev); | ||
974 | |||
975 | r600_calculate_u_and_p(100000, | ||
976 | xclk, 14, &p, &u); | ||
977 | |||
978 | cg_sclk_dpm_ctrl_4 &= ~(DC_HDC_MASK | DC_HU_MASK); | ||
979 | cg_sclk_dpm_ctrl_4 |= DC_HDC(p) | DC_HU(u); | ||
980 | |||
981 | WREG32(CG_SCLK_DPM_CTRL_4, cg_sclk_dpm_ctrl_4); | ||
982 | } | ||
983 | |||
984 | static void sumo_force_nbp_state(struct radeon_device *rdev, | ||
985 | struct radeon_ps *rps) | ||
986 | { | ||
987 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
988 | struct sumo_ps *new_ps = sumo_get_ps(rps); | ||
989 | |||
990 | if (!pi->driver_nbps_policy_disable) { | ||
991 | if (new_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) | ||
992 | WREG32_P(CG_SCLK_DPM_CTRL_3, FORCE_NB_PSTATE_1, ~FORCE_NB_PSTATE_1); | ||
993 | else | ||
994 | WREG32_P(CG_SCLK_DPM_CTRL_3, 0, ~FORCE_NB_PSTATE_1); | ||
995 | } | ||
996 | } | ||
997 | |||
998 | u32 sumo_get_sleep_divider_from_id(u32 id) | ||
999 | { | ||
1000 | return 1 << id; | ||
1001 | } | ||
1002 | |||
1003 | u32 sumo_get_sleep_divider_id_from_clock(struct radeon_device *rdev, | ||
1004 | u32 sclk, | ||
1005 | u32 min_sclk_in_sr) | ||
1006 | { | ||
1007 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
1008 | u32 i; | ||
1009 | u32 temp; | ||
1010 | u32 min = (min_sclk_in_sr > SUMO_MINIMUM_ENGINE_CLOCK) ? | ||
1011 | min_sclk_in_sr : SUMO_MINIMUM_ENGINE_CLOCK; | ||
1012 | |||
1013 | if (sclk < min) | ||
1014 | return 0; | ||
1015 | |||
1016 | if (!pi->enable_sclk_ds) | ||
1017 | return 0; | ||
1018 | |||
1019 | for (i = SUMO_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) { | ||
1020 | temp = sclk / sumo_get_sleep_divider_from_id(i); | ||
1021 | |||
1022 | if (temp >= min || i == 0) | ||
1023 | break; | ||
1024 | } | ||
1025 | return i; | ||
1026 | } | ||
1027 | |||
1028 | static u32 sumo_get_valid_engine_clock(struct radeon_device *rdev, | ||
1029 | u32 lower_limit) | ||
1030 | { | ||
1031 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
1032 | u32 i; | ||
1033 | |||
1034 | for (i = 0; i < pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries; i++) { | ||
1035 | if (pi->sys_info.sclk_voltage_mapping_table.entries[i].sclk_frequency >= lower_limit) | ||
1036 | return pi->sys_info.sclk_voltage_mapping_table.entries[i].sclk_frequency; | ||
1037 | } | ||
1038 | |||
1039 | return pi->sys_info.sclk_voltage_mapping_table.entries[pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1].sclk_frequency; | ||
1040 | } | ||
1041 | |||
1042 | static void sumo_patch_thermal_state(struct radeon_device *rdev, | ||
1043 | struct sumo_ps *ps, | ||
1044 | struct sumo_ps *current_ps) | ||
1045 | { | ||
1046 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
1047 | u32 sclk_in_sr = pi->sys_info.min_sclk; /* ??? */ | ||
1048 | u32 current_vddc; | ||
1049 | u32 current_sclk; | ||
1050 | u32 current_index = 0; | ||
1051 | |||
1052 | if (current_ps) { | ||
1053 | current_vddc = current_ps->levels[current_index].vddc_index; | ||
1054 | current_sclk = current_ps->levels[current_index].sclk; | ||
1055 | } else { | ||
1056 | current_vddc = pi->boot_pl.vddc_index; | ||
1057 | current_sclk = pi->boot_pl.sclk; | ||
1058 | } | ||
1059 | |||
1060 | ps->levels[0].vddc_index = current_vddc; | ||
1061 | |||
1062 | if (ps->levels[0].sclk > current_sclk) | ||
1063 | ps->levels[0].sclk = current_sclk; | ||
1064 | |||
1065 | ps->levels[0].ss_divider_index = | ||
1066 | sumo_get_sleep_divider_id_from_clock(rdev, ps->levels[0].sclk, sclk_in_sr); | ||
1067 | |||
1068 | ps->levels[0].ds_divider_index = | ||
1069 | sumo_get_sleep_divider_id_from_clock(rdev, ps->levels[0].sclk, SUMO_MINIMUM_ENGINE_CLOCK); | ||
1070 | |||
1071 | if (ps->levels[0].ds_divider_index > ps->levels[0].ss_divider_index + 1) | ||
1072 | ps->levels[0].ds_divider_index = ps->levels[0].ss_divider_index + 1; | ||
1073 | |||
1074 | if (ps->levels[0].ss_divider_index == ps->levels[0].ds_divider_index) { | ||
1075 | if (ps->levels[0].ss_divider_index > 1) | ||
1076 | ps->levels[0].ss_divider_index = ps->levels[0].ss_divider_index - 1; | ||
1077 | } | ||
1078 | |||
1079 | if (ps->levels[0].ss_divider_index == 0) | ||
1080 | ps->levels[0].ds_divider_index = 0; | ||
1081 | |||
1082 | if (ps->levels[0].ds_divider_index == 0) | ||
1083 | ps->levels[0].ss_divider_index = 0; | ||
1084 | } | ||
1085 | |||
1086 | static void sumo_apply_state_adjust_rules(struct radeon_device *rdev, | ||
1087 | struct radeon_ps *new_rps, | ||
1088 | struct radeon_ps *old_rps) | ||
1089 | { | ||
1090 | struct sumo_ps *ps = sumo_get_ps(new_rps); | ||
1091 | struct sumo_ps *current_ps = sumo_get_ps(old_rps); | ||
1092 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
1093 | u32 min_voltage = 0; /* ??? */ | ||
1094 | u32 min_sclk = pi->sys_info.min_sclk; /* XXX check against disp reqs */ | ||
1095 | u32 sclk_in_sr = pi->sys_info.min_sclk; /* ??? */ | ||
1096 | u32 i; | ||
1097 | |||
1098 | if (new_rps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL) | ||
1099 | return sumo_patch_thermal_state(rdev, ps, current_ps); | ||
1100 | |||
1101 | if (pi->enable_boost) { | ||
1102 | if (new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) | ||
1103 | ps->flags |= SUMO_POWERSTATE_FLAGS_BOOST_STATE; | ||
1104 | } | ||
1105 | |||
1106 | if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) || | ||
1107 | (new_rps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) || | ||
1108 | (new_rps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)) | ||
1109 | ps->flags |= SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE; | ||
1110 | |||
1111 | for (i = 0; i < ps->num_levels; i++) { | ||
1112 | if (ps->levels[i].vddc_index < min_voltage) | ||
1113 | ps->levels[i].vddc_index = min_voltage; | ||
1114 | |||
1115 | if (ps->levels[i].sclk < min_sclk) | ||
1116 | ps->levels[i].sclk = | ||
1117 | sumo_get_valid_engine_clock(rdev, min_sclk); | ||
1118 | |||
1119 | ps->levels[i].ss_divider_index = | ||
1120 | sumo_get_sleep_divider_id_from_clock(rdev, ps->levels[i].sclk, sclk_in_sr); | ||
1121 | |||
1122 | ps->levels[i].ds_divider_index = | ||
1123 | sumo_get_sleep_divider_id_from_clock(rdev, ps->levels[i].sclk, SUMO_MINIMUM_ENGINE_CLOCK); | ||
1124 | |||
1125 | if (ps->levels[i].ds_divider_index > ps->levels[i].ss_divider_index + 1) | ||
1126 | ps->levels[i].ds_divider_index = ps->levels[i].ss_divider_index + 1; | ||
1127 | |||
1128 | if (ps->levels[i].ss_divider_index == ps->levels[i].ds_divider_index) { | ||
1129 | if (ps->levels[i].ss_divider_index > 1) | ||
1130 | ps->levels[i].ss_divider_index = ps->levels[i].ss_divider_index - 1; | ||
1131 | } | ||
1132 | |||
1133 | if (ps->levels[i].ss_divider_index == 0) | ||
1134 | ps->levels[i].ds_divider_index = 0; | ||
1135 | |||
1136 | if (ps->levels[i].ds_divider_index == 0) | ||
1137 | ps->levels[i].ss_divider_index = 0; | ||
1138 | |||
1139 | if (ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) | ||
1140 | ps->levels[i].allow_gnb_slow = 1; | ||
1141 | else if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) || | ||
1142 | (new_rps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)) | ||
1143 | ps->levels[i].allow_gnb_slow = 0; | ||
1144 | else if (i == ps->num_levels - 1) | ||
1145 | ps->levels[i].allow_gnb_slow = 0; | ||
1146 | else | ||
1147 | ps->levels[i].allow_gnb_slow = 1; | ||
1148 | } | ||
1149 | } | ||
1150 | |||
1151 | static void sumo_cleanup_asic(struct radeon_device *rdev) | ||
1152 | { | ||
1153 | sumo_take_smu_control(rdev, false); | ||
1154 | } | ||
1155 | |||
1156 | static int sumo_set_thermal_temperature_range(struct radeon_device *rdev, | ||
1157 | int min_temp, int max_temp) | ||
1158 | { | ||
1159 | int low_temp = 0 * 1000; | ||
1160 | int high_temp = 255 * 1000; | ||
1161 | |||
1162 | if (low_temp < min_temp) | ||
1163 | low_temp = min_temp; | ||
1164 | if (high_temp > max_temp) | ||
1165 | high_temp = max_temp; | ||
1166 | if (high_temp < low_temp) { | ||
1167 | DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); | ||
1168 | return -EINVAL; | ||
1169 | } | ||
1170 | |||
1171 | WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(49 + (high_temp / 1000)), ~DIG_THERM_INTH_MASK); | ||
1172 | WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(49 + (low_temp / 1000)), ~DIG_THERM_INTL_MASK); | ||
1173 | |||
1174 | rdev->pm.dpm.thermal.min_temp = low_temp; | ||
1175 | rdev->pm.dpm.thermal.max_temp = high_temp; | ||
1176 | |||
1177 | return 0; | ||
1178 | } | ||
1179 | |||
1180 | static void sumo_update_current_ps(struct radeon_device *rdev, | ||
1181 | struct radeon_ps *rps) | ||
1182 | { | ||
1183 | struct sumo_ps *new_ps = sumo_get_ps(rps); | ||
1184 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
1185 | |||
1186 | pi->current_rps = *rps; | ||
1187 | pi->current_ps = *new_ps; | ||
1188 | pi->current_rps.ps_priv = &pi->current_ps; | ||
1189 | } | ||
1190 | |||
1191 | static void sumo_update_requested_ps(struct radeon_device *rdev, | ||
1192 | struct radeon_ps *rps) | ||
1193 | { | ||
1194 | struct sumo_ps *new_ps = sumo_get_ps(rps); | ||
1195 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
1196 | |||
1197 | pi->requested_rps = *rps; | ||
1198 | pi->requested_ps = *new_ps; | ||
1199 | pi->requested_rps.ps_priv = &pi->requested_ps; | ||
1200 | } | ||
1201 | |||
1202 | int sumo_dpm_enable(struct radeon_device *rdev) | ||
1203 | { | ||
1204 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
1205 | int ret; | ||
1206 | |||
1207 | if (sumo_dpm_enabled(rdev)) | ||
1208 | return -EINVAL; | ||
1209 | |||
1210 | ret = sumo_enable_clock_power_gating(rdev); | ||
1211 | if (ret) | ||
1212 | return ret; | ||
1213 | sumo_program_bootup_state(rdev); | ||
1214 | sumo_init_bsp(rdev); | ||
1215 | sumo_reset_am(rdev); | ||
1216 | sumo_program_tp(rdev); | ||
1217 | sumo_program_bootup_at(rdev); | ||
1218 | sumo_start_am(rdev); | ||
1219 | if (pi->enable_auto_thermal_throttling) { | ||
1220 | sumo_program_ttp(rdev); | ||
1221 | sumo_program_ttt(rdev); | ||
1222 | } | ||
1223 | sumo_program_dc_hto(rdev); | ||
1224 | sumo_program_power_level_enter_state(rdev); | ||
1225 | sumo_enable_voltage_scaling(rdev, true); | ||
1226 | sumo_program_sstp(rdev); | ||
1227 | sumo_program_vc(rdev, SUMO_VRC_DFLT); | ||
1228 | sumo_override_cnb_thermal_events(rdev); | ||
1229 | sumo_start_dpm(rdev); | ||
1230 | sumo_wait_for_level_0(rdev); | ||
1231 | if (pi->enable_sclk_ds) | ||
1232 | sumo_enable_sclk_ds(rdev, true); | ||
1233 | if (pi->enable_boost) | ||
1234 | sumo_enable_boost_timer(rdev); | ||
1235 | |||
1236 | if (rdev->irq.installed && | ||
1237 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | ||
1238 | ret = sumo_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); | ||
1239 | if (ret) | ||
1240 | return ret; | ||
1241 | rdev->irq.dpm_thermal = true; | ||
1242 | radeon_irq_set(rdev); | ||
1243 | } | ||
1244 | |||
1245 | sumo_update_current_ps(rdev, rdev->pm.dpm.boot_ps); | ||
1246 | |||
1247 | return 0; | ||
1248 | } | ||
1249 | |||
1250 | void sumo_dpm_disable(struct radeon_device *rdev) | ||
1251 | { | ||
1252 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
1253 | |||
1254 | if (!sumo_dpm_enabled(rdev)) | ||
1255 | return; | ||
1256 | sumo_disable_clock_power_gating(rdev); | ||
1257 | if (pi->enable_sclk_ds) | ||
1258 | sumo_enable_sclk_ds(rdev, false); | ||
1259 | sumo_clear_vc(rdev); | ||
1260 | sumo_wait_for_level_0(rdev); | ||
1261 | sumo_stop_dpm(rdev); | ||
1262 | sumo_enable_voltage_scaling(rdev, false); | ||
1263 | |||
1264 | if (rdev->irq.installed && | ||
1265 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | ||
1266 | rdev->irq.dpm_thermal = false; | ||
1267 | radeon_irq_set(rdev); | ||
1268 | } | ||
1269 | |||
1270 | sumo_update_current_ps(rdev, rdev->pm.dpm.boot_ps); | ||
1271 | } | ||
1272 | |||
1273 | int sumo_dpm_pre_set_power_state(struct radeon_device *rdev) | ||
1274 | { | ||
1275 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
1276 | struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps; | ||
1277 | struct radeon_ps *new_ps = &requested_ps; | ||
1278 | |||
1279 | sumo_update_requested_ps(rdev, new_ps); | ||
1280 | |||
1281 | if (pi->enable_dynamic_patch_ps) | ||
1282 | sumo_apply_state_adjust_rules(rdev, | ||
1283 | &pi->requested_rps, | ||
1284 | &pi->current_rps); | ||
1285 | |||
1286 | return 0; | ||
1287 | } | ||
1288 | |||
1289 | int sumo_dpm_set_power_state(struct radeon_device *rdev) | ||
1290 | { | ||
1291 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
1292 | struct radeon_ps *new_ps = &pi->requested_rps; | ||
1293 | struct radeon_ps *old_ps = &pi->current_rps; | ||
1294 | |||
1295 | if (pi->enable_dpm) | ||
1296 | sumo_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); | ||
1297 | if (pi->enable_boost) { | ||
1298 | sumo_enable_boost(rdev, new_ps, false); | ||
1299 | sumo_patch_boost_state(rdev, new_ps); | ||
1300 | } | ||
1301 | if (pi->enable_dpm) { | ||
1302 | sumo_pre_notify_alt_vddnb_change(rdev, new_ps, old_ps); | ||
1303 | sumo_enable_power_level_0(rdev); | ||
1304 | sumo_set_forced_level_0(rdev); | ||
1305 | sumo_set_forced_mode_enabled(rdev); | ||
1306 | sumo_wait_for_level_0(rdev); | ||
1307 | sumo_program_power_levels_0_to_n(rdev, new_ps, old_ps); | ||
1308 | sumo_program_wl(rdev, new_ps); | ||
1309 | sumo_program_bsp(rdev, new_ps); | ||
1310 | sumo_program_at(rdev, new_ps); | ||
1311 | sumo_force_nbp_state(rdev, new_ps); | ||
1312 | sumo_set_forced_mode_disabled(rdev); | ||
1313 | sumo_set_forced_mode_enabled(rdev); | ||
1314 | sumo_set_forced_mode_disabled(rdev); | ||
1315 | sumo_post_notify_alt_vddnb_change(rdev, new_ps, old_ps); | ||
1316 | } | ||
1317 | if (pi->enable_boost) | ||
1318 | sumo_enable_boost(rdev, new_ps, true); | ||
1319 | if (pi->enable_dpm) | ||
1320 | sumo_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); | ||
1321 | |||
1322 | rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO; | ||
1323 | |||
1324 | return 0; | ||
1325 | } | ||
1326 | |||
1327 | void sumo_dpm_post_set_power_state(struct radeon_device *rdev) | ||
1328 | { | ||
1329 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
1330 | struct radeon_ps *new_ps = &pi->requested_rps; | ||
1331 | |||
1332 | sumo_update_current_ps(rdev, new_ps); | ||
1333 | } | ||
1334 | |||
1335 | void sumo_dpm_reset_asic(struct radeon_device *rdev) | ||
1336 | { | ||
1337 | sumo_program_bootup_state(rdev); | ||
1338 | sumo_enable_power_level_0(rdev); | ||
1339 | sumo_set_forced_level_0(rdev); | ||
1340 | sumo_set_forced_mode_enabled(rdev); | ||
1341 | sumo_wait_for_level_0(rdev); | ||
1342 | sumo_set_forced_mode_disabled(rdev); | ||
1343 | sumo_set_forced_mode_enabled(rdev); | ||
1344 | sumo_set_forced_mode_disabled(rdev); | ||
1345 | } | ||
1346 | |||
1347 | void sumo_dpm_setup_asic(struct radeon_device *rdev) | ||
1348 | { | ||
1349 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
1350 | |||
1351 | sumo_initialize_m3_arb(rdev); | ||
1352 | pi->fw_version = sumo_get_running_fw_version(rdev); | ||
1353 | DRM_INFO("Found smc ucode version: 0x%08x\n", pi->fw_version); | ||
1354 | sumo_program_acpi_power_level(rdev); | ||
1355 | sumo_enable_acpi_pm(rdev); | ||
1356 | sumo_take_smu_control(rdev, true); | ||
1357 | } | ||
1358 | |||
1359 | void sumo_dpm_display_configuration_changed(struct radeon_device *rdev) | ||
1360 | { | ||
1361 | |||
1362 | } | ||
1363 | |||
1364 | union power_info { | ||
1365 | struct _ATOM_POWERPLAY_INFO info; | ||
1366 | struct _ATOM_POWERPLAY_INFO_V2 info_2; | ||
1367 | struct _ATOM_POWERPLAY_INFO_V3 info_3; | ||
1368 | struct _ATOM_PPLIB_POWERPLAYTABLE pplib; | ||
1369 | struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; | ||
1370 | struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; | ||
1371 | }; | ||
1372 | |||
1373 | union pplib_clock_info { | ||
1374 | struct _ATOM_PPLIB_R600_CLOCK_INFO r600; | ||
1375 | struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; | ||
1376 | struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; | ||
1377 | struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; | ||
1378 | }; | ||
1379 | |||
1380 | union pplib_power_state { | ||
1381 | struct _ATOM_PPLIB_STATE v1; | ||
1382 | struct _ATOM_PPLIB_STATE_V2 v2; | ||
1383 | }; | ||
1384 | |||
1385 | static void sumo_patch_boot_state(struct radeon_device *rdev, | ||
1386 | struct sumo_ps *ps) | ||
1387 | { | ||
1388 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
1389 | |||
1390 | ps->num_levels = 1; | ||
1391 | ps->flags = 0; | ||
1392 | ps->levels[0] = pi->boot_pl; | ||
1393 | } | ||
1394 | |||
1395 | static void sumo_parse_pplib_non_clock_info(struct radeon_device *rdev, | ||
1396 | struct radeon_ps *rps, | ||
1397 | struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, | ||
1398 | u8 table_rev) | ||
1399 | { | ||
1400 | struct sumo_ps *ps = sumo_get_ps(rps); | ||
1401 | |||
1402 | rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); | ||
1403 | rps->class = le16_to_cpu(non_clock_info->usClassification); | ||
1404 | rps->class2 = le16_to_cpu(non_clock_info->usClassification2); | ||
1405 | |||
1406 | if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { | ||
1407 | rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); | ||
1408 | rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); | ||
1409 | } else { | ||
1410 | rps->vclk = 0; | ||
1411 | rps->dclk = 0; | ||
1412 | } | ||
1413 | |||
1414 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { | ||
1415 | rdev->pm.dpm.boot_ps = rps; | ||
1416 | sumo_patch_boot_state(rdev, ps); | ||
1417 | } | ||
1418 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) | ||
1419 | rdev->pm.dpm.uvd_ps = rps; | ||
1420 | } | ||
1421 | |||
1422 | static void sumo_parse_pplib_clock_info(struct radeon_device *rdev, | ||
1423 | struct radeon_ps *rps, int index, | ||
1424 | union pplib_clock_info *clock_info) | ||
1425 | { | ||
1426 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
1427 | struct sumo_ps *ps = sumo_get_ps(rps); | ||
1428 | struct sumo_pl *pl = &ps->levels[index]; | ||
1429 | u32 sclk; | ||
1430 | |||
1431 | sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); | ||
1432 | sclk |= clock_info->sumo.ucEngineClockHigh << 16; | ||
1433 | pl->sclk = sclk; | ||
1434 | pl->vddc_index = clock_info->sumo.vddcIndex; | ||
1435 | pl->sclk_dpm_tdp_limit = clock_info->sumo.tdpLimit; | ||
1436 | |||
1437 | ps->num_levels = index + 1; | ||
1438 | |||
1439 | if (pi->enable_sclk_ds) { | ||
1440 | pl->ds_divider_index = 5; | ||
1441 | pl->ss_divider_index = 4; | ||
1442 | } | ||
1443 | } | ||
1444 | |||
1445 | static int sumo_parse_power_table(struct radeon_device *rdev) | ||
1446 | { | ||
1447 | struct radeon_mode_info *mode_info = &rdev->mode_info; | ||
1448 | struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; | ||
1449 | union pplib_power_state *power_state; | ||
1450 | int i, j, k, non_clock_array_index, clock_array_index; | ||
1451 | union pplib_clock_info *clock_info; | ||
1452 | struct _StateArray *state_array; | ||
1453 | struct _ClockInfoArray *clock_info_array; | ||
1454 | struct _NonClockInfoArray *non_clock_info_array; | ||
1455 | union power_info *power_info; | ||
1456 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); | ||
1457 | u16 data_offset; | ||
1458 | u8 frev, crev; | ||
1459 | u8 *power_state_offset; | ||
1460 | struct sumo_ps *ps; | ||
1461 | |||
1462 | if (!atom_parse_data_header(mode_info->atom_context, index, NULL, | ||
1463 | &frev, &crev, &data_offset)) | ||
1464 | return -EINVAL; | ||
1465 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); | ||
1466 | |||
1467 | state_array = (struct _StateArray *) | ||
1468 | (mode_info->atom_context->bios + data_offset + | ||
1469 | le16_to_cpu(power_info->pplib.usStateArrayOffset)); | ||
1470 | clock_info_array = (struct _ClockInfoArray *) | ||
1471 | (mode_info->atom_context->bios + data_offset + | ||
1472 | le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); | ||
1473 | non_clock_info_array = (struct _NonClockInfoArray *) | ||
1474 | (mode_info->atom_context->bios + data_offset + | ||
1475 | le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); | ||
1476 | |||
1477 | rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) * | ||
1478 | state_array->ucNumEntries, GFP_KERNEL); | ||
1479 | if (!rdev->pm.dpm.ps) | ||
1480 | return -ENOMEM; | ||
1481 | power_state_offset = (u8 *)state_array->states; | ||
1482 | rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); | ||
1483 | rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); | ||
1484 | rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); | ||
1485 | for (i = 0; i < state_array->ucNumEntries; i++) { | ||
1486 | power_state = (union pplib_power_state *)power_state_offset; | ||
1487 | non_clock_array_index = power_state->v2.nonClockInfoIndex; | ||
1488 | non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) | ||
1489 | &non_clock_info_array->nonClockInfo[non_clock_array_index]; | ||
1490 | if (!rdev->pm.power_state[i].clock_info) | ||
1491 | return -EINVAL; | ||
1492 | ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL); | ||
1493 | if (ps == NULL) { | ||
1494 | kfree(rdev->pm.dpm.ps); | ||
1495 | return -ENOMEM; | ||
1496 | } | ||
1497 | rdev->pm.dpm.ps[i].ps_priv = ps; | ||
1498 | k = 0; | ||
1499 | for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { | ||
1500 | clock_array_index = power_state->v2.clockInfoIndex[j]; | ||
1501 | if (k >= SUMO_MAX_HARDWARE_POWERLEVELS) | ||
1502 | break; | ||
1503 | clock_info = (union pplib_clock_info *) | ||
1504 | &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; | ||
1505 | sumo_parse_pplib_clock_info(rdev, | ||
1506 | &rdev->pm.dpm.ps[i], k, | ||
1507 | clock_info); | ||
1508 | k++; | ||
1509 | } | ||
1510 | sumo_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i], | ||
1511 | non_clock_info, | ||
1512 | non_clock_info_array->ucEntrySize); | ||
1513 | power_state_offset += 2 + power_state->v2.ucNumDPMLevels; | ||
1514 | } | ||
1515 | rdev->pm.dpm.num_ps = state_array->ucNumEntries; | ||
1516 | return 0; | ||
1517 | } | ||
1518 | |||
1519 | u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev, | ||
1520 | struct sumo_vid_mapping_table *vid_mapping_table, | ||
1521 | u32 vid_2bit) | ||
1522 | { | ||
1523 | u32 i; | ||
1524 | |||
1525 | for (i = 0; i < vid_mapping_table->num_entries; i++) { | ||
1526 | if (vid_mapping_table->entries[i].vid_2bit == vid_2bit) | ||
1527 | return vid_mapping_table->entries[i].vid_7bit; | ||
1528 | } | ||
1529 | |||
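/* No exact match found: fall back to the last (highest index) mapping entry. */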
1530 | return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; | ||
1531 | } | ||
1532 | |||
1533 | static u16 sumo_convert_voltage_index_to_value(struct radeon_device *rdev, | ||
1534 | u32 vid_2bit) | ||
1535 | { | ||
1536 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
1537 | u32 vid_7bit = sumo_convert_vid2_to_vid7(rdev, &pi->sys_info.vid_mapping_table, vid_2bit); | ||
1538 | |||
1539 | if (vid_7bit > 0x7C) | ||
1540 | return 0; | ||
1541 | |||
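/* The 7-bit vid encodes the voltage as 1550 mV minus 12.5 mV per step;
 * values above 0x7C would go negative and are rejected above.  The
 * expression below works in tenths of a millivolt with +5 for rounding
 * and returns millivolts, e.g. vid 0x40 -> (15500 - 8000 + 5) / 10 = 750 mV.
 */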
1542 | return (15500 - vid_7bit * 125 + 5) / 10; | ||
1543 | } | ||
1544 | |||
1545 | static void sumo_construct_display_voltage_mapping_table(struct radeon_device *rdev, | ||
1546 | struct sumo_disp_clock_voltage_mapping_table *disp_clk_voltage_mapping_table, | ||
1547 | ATOM_CLK_VOLT_CAPABILITY *table) | ||
1548 | { | ||
1549 | u32 i; | ||
1550 | |||
1551 | for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) { | ||
1552 | if (le32_to_cpu(table[i].ulMaximumSupportedCLK) == 0) | ||
1553 | break; | ||
1554 | |||
1555 | disp_clk_voltage_mapping_table->display_clock_frequency[i] = | ||
1556 | le32_to_cpu(table[i].ulMaximumSupportedCLK); | ||
1557 | } | ||
1558 | |||
1559 | disp_clk_voltage_mapping_table->num_max_voltage_levels = i; | ||
1560 | |||
1561 | if (disp_clk_voltage_mapping_table->num_max_voltage_levels == 0) { | ||
1562 | disp_clk_voltage_mapping_table->display_clock_frequency[0] = 80000; | ||
1563 | disp_clk_voltage_mapping_table->num_max_voltage_levels = 1; | ||
1564 | } | ||
1565 | } | ||
1566 | |||
1567 | void sumo_construct_sclk_voltage_mapping_table(struct radeon_device *rdev, | ||
1568 | struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table, | ||
1569 | ATOM_AVAILABLE_SCLK_LIST *table) | ||
1570 | { | ||
1571 | u32 i; | ||
1572 | u32 n = 0; | ||
1573 | u32 prev_sclk = 0; | ||
1574 | |||
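/* Only keep entries whose sclk is strictly higher than the previous one,
 * so zero or duplicate ATOM entries are skipped and n ends up as the
 * number of valid DPM entries.
 */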
1575 | for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { | ||
1576 | if (le32_to_cpu(table[i].ulSupportedSCLK) > prev_sclk) { | ||
1577 | sclk_voltage_mapping_table->entries[n].sclk_frequency = | ||
1578 | le32_to_cpu(table[i].ulSupportedSCLK); | ||
1579 | sclk_voltage_mapping_table->entries[n].vid_2bit = | ||
1580 | table[i].usVoltageIndex; | ||
1581 | prev_sclk = le32_to_cpu(table[i].ulSupportedSCLK); | ||
1582 | n++; | ||
1583 | } | ||
1584 | } | ||
1585 | |||
1586 | sclk_voltage_mapping_table->num_max_dpm_entries = n; | ||
1587 | } | ||
1588 | |||
1589 | void sumo_construct_vid_mapping_table(struct radeon_device *rdev, | ||
1590 | struct sumo_vid_mapping_table *vid_mapping_table, | ||
1591 | ATOM_AVAILABLE_SCLK_LIST *table) | ||
1592 | { | ||
1593 | u32 i, j; | ||
1594 | |||
1595 | for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { | ||
1596 | if (le32_to_cpu(table[i].ulSupportedSCLK) != 0) { | ||
1597 | vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit = | ||
1598 | table[i].usVoltageID; | ||
1599 | vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit = | ||
1600 | table[i].usVoltageIndex; | ||
1601 | } | ||
1602 | } | ||
1603 | |||
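/* Compact the table: a zero vid_7bit marks an unused slot, so pull the
 * next populated entry forward until the table is contiguous; i then
 * ends up as the number of valid entries.
 */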
1604 | for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) { | ||
1605 | if (vid_mapping_table->entries[i].vid_7bit == 0) { | ||
1606 | for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) { | ||
1607 | if (vid_mapping_table->entries[j].vid_7bit != 0) { | ||
1608 | vid_mapping_table->entries[i] = | ||
1609 | vid_mapping_table->entries[j]; | ||
1610 | vid_mapping_table->entries[j].vid_7bit = 0; | ||
1611 | break; | ||
1612 | } | ||
1613 | } | ||
1614 | |||
1615 | if (j == SUMO_MAX_NUMBER_VOLTAGES) | ||
1616 | break; | ||
1617 | } | ||
1618 | } | ||
1619 | |||
1620 | vid_mapping_table->num_entries = i; | ||
1621 | } | ||
1622 | |||
1623 | union igp_info { | ||
1624 | struct _ATOM_INTEGRATED_SYSTEM_INFO info; | ||
1625 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; | ||
1626 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5; | ||
1627 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; | ||
1628 | }; | ||
1629 | |||
1630 | static int sumo_parse_sys_info_table(struct radeon_device *rdev) | ||
1631 | { | ||
1632 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
1633 | struct radeon_mode_info *mode_info = &rdev->mode_info; | ||
1634 | int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); | ||
1635 | union igp_info *igp_info; | ||
1636 | u8 frev, crev; | ||
1637 | u16 data_offset; | ||
1638 | int i; | ||
1639 | |||
1640 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, | ||
1641 | &frev, &crev, &data_offset)) { | ||
1642 | igp_info = (union igp_info *)(mode_info->atom_context->bios + | ||
1643 | data_offset); | ||
1644 | |||
1645 | if (crev != 6) { | ||
1646 | DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); | ||
1647 | return -EINVAL; | ||
1648 | } | ||
1649 | pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_6.ulBootUpEngineClock); | ||
1650 | pi->sys_info.min_sclk = le32_to_cpu(igp_info->info_6.ulMinEngineClock); | ||
1651 | pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_6.ulBootUpUMAClock); | ||
1652 | pi->sys_info.bootup_nb_voltage_index = | ||
1653 | le16_to_cpu(igp_info->info_6.usBootUpNBVoltage); | ||
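/* Fall back to default thermal throttle limits if the VBIOS leaves the
 * HTC temperature / hysteresis fields at zero.
 */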
1654 | if (igp_info->info_6.ucHtcTmpLmt == 0) | ||
1655 | pi->sys_info.htc_tmp_lmt = 203; | ||
1656 | else | ||
1657 | pi->sys_info.htc_tmp_lmt = igp_info->info_6.ucHtcTmpLmt; | ||
1658 | if (igp_info->info_6.ucHtcHystLmt == 0) | ||
1659 | pi->sys_info.htc_hyst_lmt = 5; | ||
1660 | else | ||
1661 | pi->sys_info.htc_hyst_lmt = igp_info->info_6.ucHtcHystLmt; | ||
1662 | if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) { | ||
1663 | DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n"); | ||
1664 | } | ||
1665 | for (i = 0; i < NUMBER_OF_M3ARB_PARAM_SETS; i++) { | ||
1666 | pi->sys_info.csr_m3_arb_cntl_default[i] = | ||
1667 | le32_to_cpu(igp_info->info_6.ulCSR_M3_ARB_CNTL_DEFAULT[i]); | ||
1668 | pi->sys_info.csr_m3_arb_cntl_uvd[i] = | ||
1669 | le32_to_cpu(igp_info->info_6.ulCSR_M3_ARB_CNTL_UVD[i]); | ||
1670 | pi->sys_info.csr_m3_arb_cntl_fs3d[i] = | ||
1671 | le32_to_cpu(igp_info->info_6.ulCSR_M3_ARB_CNTL_FS3D[i]); | ||
1672 | } | ||
1673 | pi->sys_info.sclk_dpm_boost_margin = | ||
1674 | le32_to_cpu(igp_info->info_6.SclkDpmBoostMargin); | ||
1675 | pi->sys_info.sclk_dpm_throttle_margin = | ||
1676 | le32_to_cpu(igp_info->info_6.SclkDpmThrottleMargin); | ||
1677 | pi->sys_info.sclk_dpm_tdp_limit_pg = | ||
1678 | le16_to_cpu(igp_info->info_6.SclkDpmTdpLimitPG); | ||
1679 | pi->sys_info.gnb_tdp_limit = le16_to_cpu(igp_info->info_6.GnbTdpLimit); | ||
1680 | pi->sys_info.sclk_dpm_tdp_limit_boost = | ||
1681 | le16_to_cpu(igp_info->info_6.SclkDpmTdpLimitBoost); | ||
1682 | pi->sys_info.boost_sclk = le32_to_cpu(igp_info->info_6.ulBoostEngineCLock); | ||
1683 | pi->sys_info.boost_vid_2bit = le32_to_cpu(igp_info->info_6.ulBoostVid_2bit); | ||
1684 | if (igp_info->info_6.EnableBoost) | ||
1685 | pi->sys_info.enable_boost = true; | ||
1686 | else | ||
1687 | pi->sys_info.enable_boost = false; | ||
1688 | sumo_construct_display_voltage_mapping_table(rdev, | ||
1689 | &pi->sys_info.disp_clk_voltage_mapping_table, | ||
1690 | igp_info->info_6.sDISPCLK_Voltage); | ||
1691 | sumo_construct_sclk_voltage_mapping_table(rdev, | ||
1692 | &pi->sys_info.sclk_voltage_mapping_table, | ||
1693 | igp_info->info_6.sAvail_SCLK); | ||
1694 | sumo_construct_vid_mapping_table(rdev, &pi->sys_info.vid_mapping_table, | ||
1695 | igp_info->info_6.sAvail_SCLK); | ||
1696 | |||
1697 | } | ||
1698 | return 0; | ||
1699 | } | ||
1700 | |||
1701 | static void sumo_construct_boot_and_acpi_state(struct radeon_device *rdev) | ||
1702 | { | ||
1703 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
1704 | |||
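/* Derive the boot power level from the VBIOS boot-up sclk and NB voltage
 * index; it doubles as the ACPI level and the initial current state.
 */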
1705 | pi->boot_pl.sclk = pi->sys_info.bootup_sclk; | ||
1706 | pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index; | ||
1707 | pi->boot_pl.ds_divider_index = 0; | ||
1708 | pi->boot_pl.ss_divider_index = 0; | ||
1709 | pi->boot_pl.allow_gnb_slow = 1; | ||
1710 | pi->acpi_pl = pi->boot_pl; | ||
1711 | pi->current_ps.num_levels = 1; | ||
1712 | pi->current_ps.levels[0] = pi->boot_pl; | ||
1713 | } | ||
1714 | |||
1715 | int sumo_dpm_init(struct radeon_device *rdev) | ||
1716 | { | ||
1717 | struct sumo_power_info *pi; | ||
1718 | u32 hw_rev = (RREG32(HW_REV) & ATI_REV_ID_MASK) >> ATI_REV_ID_SHIFT; | ||
1719 | int ret; | ||
1720 | |||
1721 | pi = kzalloc(sizeof(struct sumo_power_info), GFP_KERNEL); | ||
1722 | if (pi == NULL) | ||
1723 | return -ENOMEM; | ||
1724 | rdev->pm.dpm.priv = pi; | ||
1725 | |||
1726 | pi->driver_nbps_policy_disable = false; | ||
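/* Early PALM revisions (hw_rev < 3) keep gfx power gating disabled while
 * UVD is active, presumably to work around a hardware issue.
 */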
1727 | if ((rdev->family == CHIP_PALM) && (hw_rev < 3)) | ||
1728 | pi->disable_gfx_power_gating_in_uvd = true; | ||
1729 | else | ||
1730 | pi->disable_gfx_power_gating_in_uvd = false; | ||
1731 | pi->enable_alt_vddnb = true; | ||
1732 | pi->enable_sclk_ds = true; | ||
1733 | pi->enable_dynamic_m3_arbiter = false; | ||
1734 | pi->enable_dynamic_patch_ps = true; | ||
1735 | pi->enable_gfx_power_gating = true; | ||
1736 | pi->enable_gfx_clock_gating = true; | ||
1737 | pi->enable_mg_clock_gating = true; | ||
1738 | pi->enable_auto_thermal_throttling = true; | ||
1739 | |||
1740 | ret = sumo_parse_sys_info_table(rdev); | ||
1741 | if (ret) | ||
1742 | return ret; | ||
1743 | |||
1744 | sumo_construct_boot_and_acpi_state(rdev); | ||
1745 | |||
1746 | ret = sumo_parse_power_table(rdev); | ||
1747 | if (ret) | ||
1748 | return ret; | ||
1749 | |||
1750 | pi->pasi = CYPRESS_HASI_DFLT; | ||
1751 | pi->asi = RV770_ASI_DFLT; | ||
1752 | pi->thermal_auto_throttling = pi->sys_info.htc_tmp_lmt; | ||
1753 | pi->enable_boost = pi->sys_info.enable_boost; | ||
1754 | pi->enable_dpm = true; | ||
1755 | |||
1756 | return 0; | ||
1757 | } | ||
1758 | |||
1759 | void sumo_dpm_print_power_state(struct radeon_device *rdev, | ||
1760 | struct radeon_ps *rps) | ||
1761 | { | ||
1762 | int i; | ||
1763 | struct sumo_ps *ps = sumo_get_ps(rps); | ||
1764 | |||
1765 | r600_dpm_print_class_info(rps->class, rps->class2); | ||
1766 | r600_dpm_print_cap_info(rps->caps); | ||
1767 | printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); | ||
1768 | for (i = 0; i < ps->num_levels; i++) { | ||
1769 | struct sumo_pl *pl = &ps->levels[i]; | ||
1770 | printk("\t\tpower level %d sclk: %u vddc: %u\n", | ||
1771 | i, pl->sclk, | ||
1772 | sumo_convert_voltage_index_to_value(rdev, pl->vddc_index)); | ||
1773 | } | ||
1774 | r600_dpm_print_ps_status(rdev, rps); | ||
1775 | } | ||
1776 | |||
1777 | void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, | ||
1778 | struct seq_file *m) | ||
1779 | { | ||
1780 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
1781 | struct radeon_ps *rps = rdev->pm.dpm.current_ps; | ||
1782 | struct sumo_ps *ps = sumo_get_ps(rps); | ||
1783 | struct sumo_pl *pl; | ||
1784 | u32 current_index = | ||
1785 | (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_INDEX_MASK) >> | ||
1786 | CURR_INDEX_SHIFT; | ||
1787 | |||
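/* TARGET_AND_CURRENT_PROFILE_INDEX reports the DPM index the hardware is
 * currently running; index 7 (BOOST_DPM_LEVEL) is the boost level, which
 * lives outside the power state's level array.
 */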
1788 | if (current_index == BOOST_DPM_LEVEL) { | ||
1789 | pl = &pi->boost_pl; | ||
1790 | seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); | ||
1791 | seq_printf(m, "power level %d sclk: %u vddc: %u\n", | ||
1792 | current_index, pl->sclk, | ||
1793 | sumo_convert_voltage_index_to_value(rdev, pl->vddc_index)); | ||
1794 | } else if (current_index >= ps->num_levels) { | ||
1795 | seq_printf(m, "invalid dpm profile %d\n", current_index); | ||
1796 | } else { | ||
1797 | pl = &ps->levels[current_index]; | ||
1798 | seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); | ||
1799 | seq_printf(m, "power level %d sclk: %u vddc: %u\n", | ||
1800 | current_index, pl->sclk, | ||
1801 | sumo_convert_voltage_index_to_value(rdev, pl->vddc_index)); | ||
1802 | } | ||
1803 | } | ||
1804 | |||
1805 | void sumo_dpm_fini(struct radeon_device *rdev) | ||
1806 | { | ||
1807 | int i; | ||
1808 | |||
1809 | sumo_cleanup_asic(rdev); /* ??? */ | ||
1810 | |||
1811 | for (i = 0; i < rdev->pm.dpm.num_ps; i++) { | ||
1812 | kfree(rdev->pm.dpm.ps[i].ps_priv); | ||
1813 | } | ||
1814 | kfree(rdev->pm.dpm.ps); | ||
1815 | kfree(rdev->pm.dpm.priv); | ||
1816 | } | ||
1817 | |||
1818 | u32 sumo_dpm_get_sclk(struct radeon_device *rdev, bool low) | ||
1819 | { | ||
1820 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
1821 | struct sumo_ps *requested_state = sumo_get_ps(&pi->requested_rps); | ||
1822 | |||
1823 | if (low) | ||
1824 | return requested_state->levels[0].sclk; | ||
1825 | else | ||
1826 | return requested_state->levels[requested_state->num_levels - 1].sclk; | ||
1827 | } | ||
1828 | |||
1829 | u32 sumo_dpm_get_mclk(struct radeon_device *rdev, bool low) | ||
1830 | { | ||
1831 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
1832 | |||
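/* Sumo parts are IGPs using UMA system memory, so report the boot-up
 * UMA clock for both the low and high mclk queries.
 */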
1833 | return pi->sys_info.bootup_uma_clk; | ||
1834 | } | ||
1835 | |||
1836 | int sumo_dpm_force_performance_level(struct radeon_device *rdev, | ||
1837 | enum radeon_dpm_forced_level level) | ||
1838 | { | ||
1839 | struct sumo_power_info *pi = sumo_get_pi(rdev); | ||
1840 | struct radeon_ps *rps = &pi->current_rps; | ||
1841 | struct sumo_ps *ps = sumo_get_ps(rps); | ||
1842 | int i; | ||
1843 | |||
1844 | if (ps->num_levels <= 1) | ||
1845 | return 0; | ||
1846 | |||
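/* Forcing high keeps only the top level enabled and forces the DPM index
 * to it; forcing low keeps only level 0; anything else re-enables every
 * level for automatic selection.
 */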
1847 | if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { | ||
1848 | sumo_power_level_enable(rdev, ps->num_levels - 1, true); | ||
1849 | sumo_set_forced_level(rdev, ps->num_levels - 1); | ||
1850 | sumo_set_forced_mode_enabled(rdev); | ||
1851 | for (i = 0; i < ps->num_levels - 1; i++) { | ||
1852 | sumo_power_level_enable(rdev, i, false); | ||
1853 | } | ||
1854 | sumo_set_forced_mode(rdev, false); | ||
1855 | sumo_set_forced_mode_enabled(rdev); | ||
1856 | sumo_set_forced_mode(rdev, false); | ||
1857 | } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) { | ||
1858 | sumo_power_level_enable(rdev, 0, true); | ||
1859 | sumo_set_forced_level(rdev, 0); | ||
1860 | sumo_set_forced_mode_enabled(rdev); | ||
1861 | for (i = 1; i < ps->num_levels; i++) { | ||
1862 | sumo_power_level_enable(rdev, i, false); | ||
1863 | } | ||
1864 | sumo_set_forced_mode(rdev, false); | ||
1865 | sumo_set_forced_mode_enabled(rdev); | ||
1866 | sumo_set_forced_mode(rdev, false); | ||
1867 | } else { | ||
1868 | for (i = 0; i < ps->num_levels; i++) { | ||
1869 | sumo_power_level_enable(rdev, i, true); | ||
1870 | } | ||
1871 | } | ||
1872 | |||
1873 | rdev->pm.dpm.forced_level = level; | ||
1874 | |||
1875 | return 0; | ||
1876 | } | ||