diff options
Diffstat (limited to 'drivers/gpu/drm/radeon/r600_dpm.c')
-rw-r--r-- | drivers/gpu/drm/radeon/r600_dpm.c | 1048 |
1 files changed, 1048 insertions, 0 deletions
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c new file mode 100644 index 000000000000..b88f54b134ab --- /dev/null +++ b/drivers/gpu/drm/radeon/r600_dpm.c | |||
@@ -0,0 +1,1048 @@ | |||
1 | /* | ||
2 | * Copyright 2011 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Alex Deucher | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | #include "radeon.h" | ||
27 | #include "r600d.h" | ||
28 | #include "r600_dpm.h" | ||
29 | #include "atom.h" | ||
30 | |||
/* Default up-transition counter (UTC) values, one per thermal controller
 * level.  Values come from r600_dpm.h.  NOTE(review): exact semantics of
 * the counters are defined by the SMC/clock hardware — confirm there.
 */
const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
{
	R600_UTC_DFLT_00,
	R600_UTC_DFLT_01,
	R600_UTC_DFLT_02,
	R600_UTC_DFLT_03,
	R600_UTC_DFLT_04,
	R600_UTC_DFLT_05,
	R600_UTC_DFLT_06,
	R600_UTC_DFLT_07,
	R600_UTC_DFLT_08,
	R600_UTC_DFLT_09,
	R600_UTC_DFLT_10,
	R600_UTC_DFLT_11,
	R600_UTC_DFLT_12,
	R600_UTC_DFLT_13,
	R600_UTC_DFLT_14,
};
49 | |||
/* Default down-transition counter (DTC) values, parallel to r600_utc[]. */
const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
{
	R600_DTC_DFLT_00,
	R600_DTC_DFLT_01,
	R600_DTC_DFLT_02,
	R600_DTC_DFLT_03,
	R600_DTC_DFLT_04,
	R600_DTC_DFLT_05,
	R600_DTC_DFLT_06,
	R600_DTC_DFLT_07,
	R600_DTC_DFLT_08,
	R600_DTC_DFLT_09,
	R600_DTC_DFLT_10,
	R600_DTC_DFLT_11,
	R600_DTC_DFLT_12,
	R600_DTC_DFLT_13,
	R600_DTC_DFLT_14,
};
68 | |||
69 | void r600_dpm_print_class_info(u32 class, u32 class2) | ||
70 | { | ||
71 | printk("\tui class: "); | ||
72 | switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { | ||
73 | case ATOM_PPLIB_CLASSIFICATION_UI_NONE: | ||
74 | default: | ||
75 | printk("none\n"); | ||
76 | break; | ||
77 | case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: | ||
78 | printk("battery\n"); | ||
79 | break; | ||
80 | case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED: | ||
81 | printk("balanced\n"); | ||
82 | break; | ||
83 | case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: | ||
84 | printk("performance\n"); | ||
85 | break; | ||
86 | } | ||
87 | printk("\tinternal class: "); | ||
88 | if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) && | ||
89 | (class2 == 0)) | ||
90 | printk("none"); | ||
91 | else { | ||
92 | if (class & ATOM_PPLIB_CLASSIFICATION_BOOT) | ||
93 | printk("boot "); | ||
94 | if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL) | ||
95 | printk("thermal "); | ||
96 | if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) | ||
97 | printk("limited_pwr "); | ||
98 | if (class & ATOM_PPLIB_CLASSIFICATION_REST) | ||
99 | printk("rest "); | ||
100 | if (class & ATOM_PPLIB_CLASSIFICATION_FORCED) | ||
101 | printk("forced "); | ||
102 | if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) | ||
103 | printk("3d_perf "); | ||
104 | if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) | ||
105 | printk("ovrdrv "); | ||
106 | if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) | ||
107 | printk("uvd "); | ||
108 | if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW) | ||
109 | printk("3d_low "); | ||
110 | if (class & ATOM_PPLIB_CLASSIFICATION_ACPI) | ||
111 | printk("acpi "); | ||
112 | if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) | ||
113 | printk("uvd_hd2 "); | ||
114 | if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) | ||
115 | printk("uvd_hd "); | ||
116 | if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) | ||
117 | printk("uvd_sd "); | ||
118 | if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) | ||
119 | printk("limited_pwr2 "); | ||
120 | if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) | ||
121 | printk("ulv "); | ||
122 | if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) | ||
123 | printk("uvd_mvc "); | ||
124 | } | ||
125 | printk("\n"); | ||
126 | } | ||
127 | |||
128 | void r600_dpm_print_cap_info(u32 caps) | ||
129 | { | ||
130 | printk("\tcaps: "); | ||
131 | if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) | ||
132 | printk("single_disp "); | ||
133 | if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) | ||
134 | printk("video "); | ||
135 | if (caps & ATOM_PPLIB_DISALLOW_ON_DC) | ||
136 | printk("no_dc "); | ||
137 | printk("\n"); | ||
138 | } | ||
139 | |||
140 | void r600_dpm_print_ps_status(struct radeon_device *rdev, | ||
141 | struct radeon_ps *rps) | ||
142 | { | ||
143 | printk("\tstatus: "); | ||
144 | if (rps == rdev->pm.dpm.current_ps) | ||
145 | printk("c "); | ||
146 | if (rps == rdev->pm.dpm.requested_ps) | ||
147 | printk("r "); | ||
148 | if (rps == rdev->pm.dpm.boot_ps) | ||
149 | printk("b "); | ||
150 | printk("\n"); | ||
151 | } | ||
152 | |||
/*
 * Return the vertical blanking interval, in microseconds, of the first
 * enabled CRTC (including vertical borders).  If no CRTC is active,
 * return 0xffffffff so callers treat the vblank window as unlimited.
 */
u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 line_time_us, vblank_lines;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
			/* hw_mode.clock is in kHz, so htotal * 1000 / clock is us */
			line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
				radeon_crtc->hw_mode.clock;
			vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
				radeon_crtc->hw_mode.crtc_vdisplay +
				(radeon_crtc->v_border * 2);
			vblank_time_us = vblank_lines * line_time_us;
			/* only the first active CRTC is considered */
			break;
		}
	}

	return vblank_time_us;
}
176 | |||
/*
 * Derive the (p, u) register pair from a percentage i of reference
 * count r_c, with pre-shift p_b: u is half the bit-width of the shifted
 * value (rounded up), and p is the value scaled down by 4^u.
 */
void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			    u32 *p, u32 *u)
{
	u32 i_c = (i * r_c) / 100;
	u32 v = i_c >> p_b;
	u32 msb = 0;

	/* position of the highest set bit of the shifted value */
	for (; v != 0; v >>= 1)
		msb++;

	*u = (msb + 1) / 2;
	*p = i_c >> (2 * (*u));
}
195 | |||
/*
 * Split the transition period t into asymmetric low/high activity
 * thresholds (*tl, *th) based on hysteresis h and the ratio between the
 * high and low frequencies fh/fl.  Returns -EINVAL for a zero frequency
 * or an inverted range.
 */
int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 ratio, avg, adj_h, adj_l, delta;

	if (fl == 0 || fh == 0 || fl > fh)
		return -EINVAL;

	ratio = (100 * fh) / fl;		/* fh/fl in percent */
	delta = t * (ratio - 100);
	avg = (1000 * (100 * h + delta)) / (10000 + (delta / 100));
	avg = (avg + 5) / 10;			/* round to nearest */
	adj_h = ((avg * t) + 5000) / 10000;
	adj_l = avg - adj_h;

	*th = t - adj_h;
	*tl = t + adj_l;

	return 0;
}
216 | |||
/*
 * Enable or disable dynamic graphics clock gating.  On disable, the RLC
 * is handshaken (request 0x2, wait for response type 1) before graphics
 * power is forced on.  The statement order is hardware-mandated; do not
 * reorder.
 */
void r600_gfx_clockgating_enable(struct radeon_device *rdev, bool enable)
{
	int i;

	if (enable) {
		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
	} else {
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);

		/* request from the RLC */
		WREG32(CG_RLC_REQ_AND_RSP, 0x2);

		/* busy-wait (bounded by usec_timeout) for the RLC ack */
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (((RREG32(CG_RLC_REQ_AND_RSP) & CG_RLC_RSP_TYPE_MASK) >> CG_RLC_RSP_TYPE_SHIFT) == 1)
				break;
			udelay(1);
		}

		WREG32(CG_RLC_REQ_AND_RSP, 0x0);

		WREG32(GRBM_PWR_CNTL, 0x1);
		/* NOTE(review): read-back looks like a posting read to flush
		 * the write — confirm against the register spec.
		 */
		RREG32(GRBM_PWR_CNTL);
	}
}
240 | |||
241 | void r600_dynamicpm_enable(struct radeon_device *rdev, bool enable) | ||
242 | { | ||
243 | if (enable) | ||
244 | WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN); | ||
245 | else | ||
246 | WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); | ||
247 | } | ||
248 | |||
249 | void r600_enable_thermal_protection(struct radeon_device *rdev, bool enable) | ||
250 | { | ||
251 | if (enable) | ||
252 | WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS); | ||
253 | else | ||
254 | WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS); | ||
255 | } | ||
256 | |||
/* Enable static (ACPI-driven) power management. */
void r600_enable_acpi_pm(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
}
261 | |||
262 | void r600_enable_dynamic_pcie_gen2(struct radeon_device *rdev, bool enable) | ||
263 | { | ||
264 | if (enable) | ||
265 | WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE); | ||
266 | else | ||
267 | WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE); | ||
268 | } | ||
269 | |||
270 | bool r600_dynamicpm_enabled(struct radeon_device *rdev) | ||
271 | { | ||
272 | if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN) | ||
273 | return true; | ||
274 | else | ||
275 | return false; | ||
276 | } | ||
277 | |||
278 | void r600_enable_sclk_control(struct radeon_device *rdev, bool enable) | ||
279 | { | ||
280 | if (enable) | ||
281 | WREG32_P(GENERAL_PWRMGT, 0, ~SCLK_PWRMGT_OFF); | ||
282 | else | ||
283 | WREG32_P(GENERAL_PWRMGT, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF); | ||
284 | } | ||
285 | |||
286 | void r600_enable_mclk_control(struct radeon_device *rdev, bool enable) | ||
287 | { | ||
288 | if (enable) | ||
289 | WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF); | ||
290 | else | ||
291 | WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF); | ||
292 | } | ||
293 | |||
294 | void r600_enable_spll_bypass(struct radeon_device *rdev, bool enable) | ||
295 | { | ||
296 | if (enable) | ||
297 | WREG32_P(CG_SPLL_FUNC_CNTL, SPLL_BYPASS_EN, ~SPLL_BYPASS_EN); | ||
298 | else | ||
299 | WREG32_P(CG_SPLL_FUNC_CNTL, 0, ~SPLL_BYPASS_EN); | ||
300 | } | ||
301 | |||
/*
 * Busy-wait (bounded by usec_timeout) until the SPLL reports a completed
 * state change.  A timeout is silently ignored.
 */
void r600_wait_for_spll_change(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(CG_SPLL_FUNC_CNTL) & SPLL_CHG_STATUS)
			break;
		udelay(1);
	}
}
312 | |||
/* Program the bandwidth sample period (p) and its unit (u). */
void r600_set_bsp(struct radeon_device *rdev, u32 u, u32 p)
{
	WREG32(CG_BSP, BSP(p) | BSU(u));
}
317 | |||
/*
 * Program the activity thresholds for power-level transitions:
 * CG_RT holds the rising (low->mid, mid->high) thresholds, CG_LT the
 * falling (high->mid, mid->low) ones.
 */
void r600_set_at(struct radeon_device *rdev,
		 u32 l_to_m, u32 m_to_h,
		 u32 h_to_m, u32 m_to_l)
{
	WREG32(CG_RT, FLS(l_to_m) | FMS(m_to_h));
	WREG32(CG_LT, FHS(h_to_m) | FMS(m_to_l));
}
325 | |||
/* Program up/down transition counts for throttle controller slot 'index'. */
void r600_set_tc(struct radeon_device *rdev,
		 u32 index, u32 u_t, u32 d_t)
{
	WREG32(CG_FFCT_0 + (index * 4), UTC_0(u_t) | DTC_0(d_t));
}
331 | |||
332 | void r600_select_td(struct radeon_device *rdev, | ||
333 | enum r600_td td) | ||
334 | { | ||
335 | if (td == R600_TD_AUTO) | ||
336 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL); | ||
337 | else | ||
338 | WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL); | ||
339 | if (td == R600_TD_UP) | ||
340 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE); | ||
341 | if (td == R600_TD_DOWN) | ||
342 | WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE); | ||
343 | } | ||
344 | |||
/* Program the full trend vector register. */
void r600_set_vrc(struct radeon_device *rdev, u32 vrv)
{
	WREG32(CG_FTV, vrv);
}
349 | |||
/* Program the throttle period unit field. */
void r600_set_tpu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_TPC, TPU(u), ~TPU_MASK);
}
354 | |||
/* Program the throttle period count field. */
void r600_set_tpc(struct radeon_device *rdev, u32 c)
{
	WREG32_P(CG_TPC, TPCC(c), ~TPCC_MASK);
}
359 | |||
/* Program the spread-spectrum step unit field. */
void r600_set_sstu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_SSP, CG_SSTU(u), ~CG_SSTU_MASK);
}
364 | |||
/* Program the spread-spectrum step time field. */
void r600_set_sst(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_SSP, CG_SST(t), ~CG_SST_MASK);
}
369 | |||
/* Program the global interval timer field. */
void r600_set_git(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_GIT, CG_GICST(t), ~CG_GICST_MASK);
}
374 | |||
/* Program the frequency change timeout unit field. */
void r600_set_fctu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_FC_T, FC_TU(u), ~FC_TU_MASK);
}
379 | |||
/* Program the frequency change timeout field. */
void r600_set_fct(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_FC_T, FC_T(t), ~FC_T_MASK);
}
384 | |||
/* Program the 3D context clock-gating pipe hysteresis count. */
void r600_set_ctxcgtt3d_rphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_CTX_CGTT3D_R, PHC(p), ~PHC_MASK);
}
389 | |||
/* Program the 3D context clock-gating shut-down count. */
void r600_set_ctxcgtt3d_rsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_CTX_CGTT3D_R, SDC(s), ~SDC_MASK);
}
394 | |||
/* Program the VDDC 3D out-of-range start-up field. */
void r600_set_vddc3d_oorsu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_VDDC3D_OOR, SU(u), ~SU_MASK);
}
399 | |||
/* Program the VDDC 3D out-of-range pipe hysteresis count. */
void r600_set_vddc3d_oorphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_VDDC3D_OOR, PHC(p), ~PHC_MASK);
}
404 | |||
/* Program the VDDC 3D out-of-range shut-down count. */
void r600_set_vddc3d_oorsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_VDDC3D_OOR, SDC(s), ~SDC_MASK);
}
409 | |||
/* Program the memory PLL lock time. */
void r600_set_mpll_lock_time(struct radeon_device *rdev, u32 lock_time)
{
	WREG32_P(MPLL_TIME, MPLL_LOCK_TIME(lock_time), ~MPLL_LOCK_TIME_MASK);
}
414 | |||
/* Program the memory PLL reset time. */
void r600_set_mpll_reset_time(struct radeon_device *rdev, u32 reset_time)
{
	WREG32_P(MPLL_TIME, MPLL_RESET_TIME(reset_time), ~MPLL_RESET_TIME_MASK);
}
419 | |||
420 | void r600_engine_clock_entry_enable(struct radeon_device *rdev, | ||
421 | u32 index, bool enable) | ||
422 | { | ||
423 | if (enable) | ||
424 | WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2), | ||
425 | STEP_0_SPLL_ENTRY_VALID, ~STEP_0_SPLL_ENTRY_VALID); | ||
426 | else | ||
427 | WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2), | ||
428 | 0, ~STEP_0_SPLL_ENTRY_VALID); | ||
429 | } | ||
430 | |||
431 | void r600_engine_clock_entry_enable_pulse_skipping(struct radeon_device *rdev, | ||
432 | u32 index, bool enable) | ||
433 | { | ||
434 | if (enable) | ||
435 | WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2), | ||
436 | STEP_0_SPLL_STEP_ENABLE, ~STEP_0_SPLL_STEP_ENABLE); | ||
437 | else | ||
438 | WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2), | ||
439 | 0, ~STEP_0_SPLL_STEP_ENABLE); | ||
440 | } | ||
441 | |||
442 | void r600_engine_clock_entry_enable_post_divider(struct radeon_device *rdev, | ||
443 | u32 index, bool enable) | ||
444 | { | ||
445 | if (enable) | ||
446 | WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2), | ||
447 | STEP_0_POST_DIV_EN, ~STEP_0_POST_DIV_EN); | ||
448 | else | ||
449 | WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2), | ||
450 | 0, ~STEP_0_POST_DIV_EN); | ||
451 | } | ||
452 | |||
/* Program the SPLL post divider for frequency table entry 'index'. */
void r600_engine_clock_entry_set_post_divider(struct radeon_device *rdev,
					      u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_POST_DIV(divider), ~STEP_0_SPLL_POST_DIV_MASK);
}
459 | |||
/* Program the SPLL reference divider for frequency table entry 'index'. */
void r600_engine_clock_entry_set_reference_divider(struct radeon_device *rdev,
						   u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_REF_DIV(divider), ~STEP_0_SPLL_REF_DIV_MASK);
}
466 | |||
/* Program the SPLL feedback divider for frequency table entry 'index'. */
void r600_engine_clock_entry_set_feedback_divider(struct radeon_device *rdev,
						  u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_FB_DIV(divider), ~STEP_0_SPLL_FB_DIV_MASK);
}
473 | |||
/* Program the SPLL step time for frequency table entry 'index'. */
void r600_engine_clock_entry_set_step_time(struct radeon_device *rdev,
					   u32 index, u32 step_time)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_STEP_TIME(step_time), ~STEP_0_SPLL_STEP_TIME_MASK);
}
480 | |||
/* Program the voltage-ID ramp sample step unit. */
void r600_vid_rt_set_ssu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, SSTU(u), ~SSTU_MASK);
}
485 | |||
/* Program the voltage-ID change rate unit. */
void r600_vid_rt_set_vru(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, VID_CRTU(u), ~VID_CRTU_MASK);
}
490 | |||
/* Program the voltage-ID change rate time. */
void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt)
{
	WREG32_P(VID_RT, VID_CRT(rt), ~VID_CRT_MASK);
}
495 | |||
/* Enable the GPIO pins in the 64-bit mask used for voltage control. */
void r600_voltage_control_enable_pins(struct radeon_device *rdev,
				      u64 mask)
{
	WREG32(LOWER_GPIO_ENABLE, mask & 0xffffffff);
	WREG32(UPPER_GPIO_ENABLE, upper_32_bits(mask));
}
502 | |||
503 | |||
/*
 * Program the GPIO pin pattern that selects the voltage for power level
 * 'index'.  The hardware numbers levels in the reverse order of the
 * enum, hence ix = 3 - (3 & index).  The low 32 pin bits go to a
 * per-level register; each level owns a 3-bit field in the shared
 * VID_UPPER_GPIO_CNTL for the upper bits.
 */
void r600_voltage_control_program_voltages(struct radeon_device *rdev,
					   enum r600_power_level index, u64 pins)
{
	u32 tmp, mask;
	u32 ix = 3 - (3 & index);

	WREG32(CTXSW_VID_LOWER_GPIO_CNTL + (ix * 4), pins & 0xffffffff);

	/* read-modify-write only this level's 3-bit upper field */
	mask = 7 << (3 * ix);
	tmp = RREG32(VID_UPPER_GPIO_CNTL);
	tmp = (tmp & ~mask) | ((pins >> (32 - (3 * ix))) & mask);
	WREG32(VID_UPPER_GPIO_CNTL, tmp);
}
517 | |||
518 | void r600_voltage_control_deactivate_static_control(struct radeon_device *rdev, | ||
519 | u64 mask) | ||
520 | { | ||
521 | u32 gpio; | ||
522 | |||
523 | gpio = RREG32(GPIOPAD_MASK); | ||
524 | gpio &= ~mask; | ||
525 | WREG32(GPIOPAD_MASK, gpio); | ||
526 | |||
527 | gpio = RREG32(GPIOPAD_EN); | ||
528 | gpio &= ~mask; | ||
529 | WREG32(GPIOPAD_EN, gpio); | ||
530 | |||
531 | gpio = RREG32(GPIOPAD_A); | ||
532 | gpio &= ~mask; | ||
533 | WREG32(GPIOPAD_A, gpio); | ||
534 | } | ||
535 | |||
536 | void r600_power_level_enable(struct radeon_device *rdev, | ||
537 | enum r600_power_level index, bool enable) | ||
538 | { | ||
539 | u32 ix = 3 - (3 & index); | ||
540 | |||
541 | if (enable) | ||
542 | WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), CTXSW_FREQ_STATE_ENABLE, | ||
543 | ~CTXSW_FREQ_STATE_ENABLE); | ||
544 | else | ||
545 | WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), 0, | ||
546 | ~CTXSW_FREQ_STATE_ENABLE); | ||
547 | } | ||
548 | |||
/* Select the voltage table entry used by power level 'index'. */
void r600_power_level_set_voltage_index(struct radeon_device *rdev,
					enum r600_power_level index, u32 voltage_index)
{
	u32 ix = 3 - (3 & index);	/* hw levels are in reverse enum order */

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_VIDS_CFG_INDEX(voltage_index), ~CTXSW_FREQ_VIDS_CFG_INDEX_MASK);
}
557 | |||
/* Select the memory clock table entry used by power level 'index'. */
void r600_power_level_set_mem_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 mem_clock_index)
{
	u32 ix = 3 - (3 & index);	/* hw levels are in reverse enum order */

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_MCLK_CFG_INDEX(mem_clock_index), ~CTXSW_FREQ_MCLK_CFG_INDEX_MASK);
}
566 | |||
/* Select the engine clock table entry used by power level 'index'. */
void r600_power_level_set_eng_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 eng_clock_index)
{
	u32 ix = 3 - (3 & index);	/* hw levels are in reverse enum order */

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_SCLK_CFG_INDEX(eng_clock_index), ~CTXSW_FREQ_SCLK_CFG_INDEX_MASK);
}
575 | |||
576 | void r600_power_level_set_watermark_id(struct radeon_device *rdev, | ||
577 | enum r600_power_level index, | ||
578 | enum r600_display_watermark watermark_id) | ||
579 | { | ||
580 | u32 ix = 3 - (3 & index); | ||
581 | u32 tmp = 0; | ||
582 | |||
583 | if (watermark_id == R600_DISPLAY_WATERMARK_HIGH) | ||
584 | tmp = CTXSW_FREQ_DISPLAY_WATERMARK; | ||
585 | WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_DISPLAY_WATERMARK); | ||
586 | } | ||
587 | |||
588 | void r600_power_level_set_pcie_gen2(struct radeon_device *rdev, | ||
589 | enum r600_power_level index, bool compatible) | ||
590 | { | ||
591 | u32 ix = 3 - (3 & index); | ||
592 | u32 tmp = 0; | ||
593 | |||
594 | if (compatible) | ||
595 | tmp = CTXSW_FREQ_GEN2PCIE_VOLT; | ||
596 | WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_GEN2PCIE_VOLT); | ||
597 | } | ||
598 | |||
599 | enum r600_power_level r600_power_level_get_current_index(struct radeon_device *rdev) | ||
600 | { | ||
601 | u32 tmp; | ||
602 | |||
603 | tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK; | ||
604 | tmp >>= CURRENT_PROFILE_INDEX_SHIFT; | ||
605 | return tmp; | ||
606 | } | ||
607 | |||
608 | enum r600_power_level r600_power_level_get_target_index(struct radeon_device *rdev) | ||
609 | { | ||
610 | u32 tmp; | ||
611 | |||
612 | tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_PROFILE_INDEX_MASK; | ||
613 | tmp >>= TARGET_PROFILE_INDEX_SHIFT; | ||
614 | return tmp; | ||
615 | } | ||
616 | |||
/* Request that dynamic PM enter power level 'index'. */
void r600_power_level_set_enter_index(struct radeon_device *rdev,
				      enum r600_power_level index)
{
	WREG32_P(TARGET_AND_CURRENT_PROFILE_INDEX, DYN_PWR_ENTER_INDEX(index),
		 ~DYN_PWR_ENTER_INDEX_MASK);
}
623 | |||
/*
 * Busy-wait (each phase bounded by usec_timeout) until both the target
 * and then the current power level differ from 'index'.  Timeouts are
 * silently ignored.
 */
void r600_wait_for_power_level_unequal(struct radeon_device *rdev,
				       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) != index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) != index)
			break;
		udelay(1);
	}
}
641 | |||
/*
 * Busy-wait (each phase bounded by usec_timeout) until both the target
 * and then the current power level equal 'index'.  Timeouts are
 * silently ignored.
 */
void r600_wait_for_power_level(struct radeon_device *rdev,
			       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) == index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) == index)
			break;
		udelay(1);
	}
}
659 | |||
/*
 * Bring up dynamic power management: take sclk/mclk out of PM control,
 * enable the global PM engine, wait for vblank on both CRTCs, cycle the
 * SPLL through bypass and back (twice — NOTE(review): presumably a
 * hardware settling requirement, confirm), then hand clock control back
 * to PM.  The statement order is hardware-mandated; do not reorder.
 */
void r600_start_dpm(struct radeon_device *rdev)
{
	r600_enable_sclk_control(rdev, false);
	r600_enable_mclk_control(rdev, false);

	r600_dynamicpm_enable(rdev, true);

	radeon_wait_for_vblank(rdev, 0);
	radeon_wait_for_vblank(rdev, 1);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_sclk_control(rdev, true);
	r600_enable_mclk_control(rdev, true);
}
683 | |||
/* Shut down dynamic power management. */
void r600_stop_dpm(struct radeon_device *rdev)
{
	r600_dynamicpm_enable(rdev, false);
}
688 | |||
/* No pre-set work needed on r600; always succeeds. */
int r600_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	return 0;
}
693 | |||
/* No post-set work needed on r600; intentionally empty. */
void r600_dpm_post_set_power_state(struct radeon_device *rdev)
{

}
698 | |||
699 | bool r600_is_uvd_state(u32 class, u32 class2) | ||
700 | { | ||
701 | if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) | ||
702 | return true; | ||
703 | if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) | ||
704 | return true; | ||
705 | if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) | ||
706 | return true; | ||
707 | if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) | ||
708 | return true; | ||
709 | if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) | ||
710 | return true; | ||
711 | return false; | ||
712 | } | ||
713 | |||
/*
 * Program the thermal interrupt and DPM trip points.
 *
 * min_temp/max_temp are in millidegrees Celsius; the requested range is
 * clamped to the hardware's 0..255 C span and the register fields take
 * whole degrees (hence the / 1000).  Returns -EINVAL if the clamped
 * range is inverted, 0 on success.  Also records the final range in
 * rdev->pm.dpm.thermal.
 */
int r600_set_thermal_temperature_range(struct radeon_device *rdev,
				       int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
	WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}
738 | |||
/*
 * Return true if the given thermal sensor type is an on-die sensor the
 * driver can read directly.  Combo controllers with an internal sensor
 * return false because they need special handling elsewhere.
 */
bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}
759 | |||
/* Overlay of every ATOM powerplay table revision, so one pointer into the
 * BIOS image can be interpreted as whichever revision the table declares.
 */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};
770 | |||
/* Overlay of the two ATOM fan table revisions. */
union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
};
775 | |||
776 | static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table, | ||
777 | ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table) | ||
778 | { | ||
779 | u32 size = atom_table->ucNumEntries * | ||
780 | sizeof(struct radeon_clock_voltage_dependency_entry); | ||
781 | int i; | ||
782 | |||
783 | radeon_table->entries = kzalloc(size, GFP_KERNEL); | ||
784 | if (!radeon_table->entries) | ||
785 | return -ENOMEM; | ||
786 | |||
787 | for (i = 0; i < atom_table->ucNumEntries; i++) { | ||
788 | radeon_table->entries[i].clk = le16_to_cpu(atom_table->entries[i].usClockLow) | | ||
789 | (atom_table->entries[i].ucClockHigh << 16); | ||
790 | radeon_table->entries[i].v = le16_to_cpu(atom_table->entries[i].usVoltage); | ||
791 | } | ||
792 | radeon_table->count = atom_table->ucNumEntries; | ||
793 | |||
794 | return 0; | ||
795 | } | ||
796 | |||
/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) for each table revision; each
 * revision adds one 2-byte field to the header.
 */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
804 | |||
805 | int r600_parse_extended_power_table(struct radeon_device *rdev) | ||
806 | { | ||
807 | struct radeon_mode_info *mode_info = &rdev->mode_info; | ||
808 | union power_info *power_info; | ||
809 | union fan_info *fan_info; | ||
810 | ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table; | ||
811 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); | ||
812 | u16 data_offset; | ||
813 | u8 frev, crev; | ||
814 | int ret, i; | ||
815 | |||
816 | if (!atom_parse_data_header(mode_info->atom_context, index, NULL, | ||
817 | &frev, &crev, &data_offset)) | ||
818 | return -EINVAL; | ||
819 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); | ||
820 | |||
821 | /* fan table */ | ||
822 | if (le16_to_cpu(power_info->pplib.usTableSize) >= | ||
823 | sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { | ||
824 | if (power_info->pplib3.usFanTableOffset) { | ||
825 | fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset + | ||
826 | le16_to_cpu(power_info->pplib3.usFanTableOffset)); | ||
827 | rdev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst; | ||
828 | rdev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin); | ||
829 | rdev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed); | ||
830 | rdev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh); | ||
831 | rdev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin); | ||
832 | rdev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed); | ||
833 | rdev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh); | ||
834 | if (fan_info->fan.ucFanTableFormat >= 2) | ||
835 | rdev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax); | ||
836 | else | ||
837 | rdev->pm.dpm.fan.t_max = 10900; | ||
838 | rdev->pm.dpm.fan.cycle_delay = 100000; | ||
839 | rdev->pm.dpm.fan.ucode_fan_control = true; | ||
840 | } | ||
841 | } | ||
842 | |||
843 | /* clock dependancy tables, shedding tables */ | ||
844 | if (le16_to_cpu(power_info->pplib.usTableSize) >= | ||
845 | sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) { | ||
846 | if (power_info->pplib4.usVddcDependencyOnSCLKOffset) { | ||
847 | dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) | ||
848 | (mode_info->atom_context->bios + data_offset + | ||
849 | le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset)); | ||
850 | ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, | ||
851 | dep_table); | ||
852 | if (ret) | ||
853 | return ret; | ||
854 | } | ||
855 | if (power_info->pplib4.usVddciDependencyOnMCLKOffset) { | ||
856 | dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) | ||
857 | (mode_info->atom_context->bios + data_offset + | ||
858 | le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset)); | ||
859 | ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, | ||
860 | dep_table); | ||
861 | if (ret) { | ||
862 | kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); | ||
863 | return ret; | ||
864 | } | ||
865 | } | ||
866 | if (power_info->pplib4.usVddcDependencyOnMCLKOffset) { | ||
867 | dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) | ||
868 | (mode_info->atom_context->bios + data_offset + | ||
869 | le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset)); | ||
870 | ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, | ||
871 | dep_table); | ||
872 | if (ret) { | ||
873 | kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); | ||
874 | kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); | ||
875 | return ret; | ||
876 | } | ||
877 | } | ||
878 | if (power_info->pplib4.usMaxClockVoltageOnDCOffset) { | ||
879 | ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v = | ||
880 | (ATOM_PPLIB_Clock_Voltage_Limit_Table *) | ||
881 | (mode_info->atom_context->bios + data_offset + | ||
882 | le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset)); | ||
883 | if (clk_v->ucNumEntries) { | ||
884 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk = | ||
885 | le16_to_cpu(clk_v->entries[0].usSclkLow) | | ||
886 | (clk_v->entries[0].ucSclkHigh << 16); | ||
887 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk = | ||
888 | le16_to_cpu(clk_v->entries[0].usMclkLow) | | ||
889 | (clk_v->entries[0].ucMclkHigh << 16); | ||
890 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc = | ||
891 | le16_to_cpu(clk_v->entries[0].usVddc); | ||
892 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci = | ||
893 | le16_to_cpu(clk_v->entries[0].usVddci); | ||
894 | } | ||
895 | } | ||
896 | if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) { | ||
897 | ATOM_PPLIB_PhaseSheddingLimits_Table *psl = | ||
898 | (ATOM_PPLIB_PhaseSheddingLimits_Table *) | ||
899 | (mode_info->atom_context->bios + data_offset + | ||
900 | le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset)); | ||
901 | |||
902 | rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries = | ||
903 | kzalloc(psl->ucNumEntries * | ||
904 | sizeof(struct radeon_phase_shedding_limits_entry), | ||
905 | GFP_KERNEL); | ||
906 | if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) { | ||
907 | kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); | ||
908 | kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); | ||
909 | kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); | ||
910 | return -ENOMEM; | ||
911 | } | ||
912 | |||
913 | for (i = 0; i < psl->ucNumEntries; i++) { | ||
914 | rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk = | ||
915 | le16_to_cpu(psl->entries[i].usSclkLow) | | ||
916 | (psl->entries[i].ucSclkHigh << 16); | ||
917 | rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk = | ||
918 | le16_to_cpu(psl->entries[i].usMclkLow) | | ||
919 | (psl->entries[i].ucMclkHigh << 16); | ||
920 | rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage = | ||
921 | le16_to_cpu(psl->entries[i].usVoltage); | ||
922 | } | ||
923 | rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count = | ||
924 | psl->ucNumEntries; | ||
925 | } | ||
926 | } | ||
927 | |||
928 | /* cac data */ | ||
929 | if (le16_to_cpu(power_info->pplib.usTableSize) >= | ||
930 | sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) { | ||
931 | rdev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit); | ||
932 | rdev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit); | ||
933 | rdev->pm.dpm.near_tdp_limit_adjusted = rdev->pm.dpm.near_tdp_limit; | ||
934 | rdev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit); | ||
935 | if (rdev->pm.dpm.tdp_od_limit) | ||
936 | rdev->pm.dpm.power_control = true; | ||
937 | else | ||
938 | rdev->pm.dpm.power_control = false; | ||
939 | rdev->pm.dpm.tdp_adjustment = 0; | ||
940 | rdev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold); | ||
941 | rdev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage); | ||
942 | rdev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope); | ||
943 | if (power_info->pplib5.usCACLeakageTableOffset) { | ||
944 | ATOM_PPLIB_CAC_Leakage_Table *cac_table = | ||
945 | (ATOM_PPLIB_CAC_Leakage_Table *) | ||
946 | (mode_info->atom_context->bios + data_offset + | ||
947 | le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset)); | ||
948 | u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table); | ||
949 | rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL); | ||
950 | if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) { | ||
951 | kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); | ||
952 | kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); | ||
953 | kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); | ||
954 | return -ENOMEM; | ||
955 | } | ||
956 | for (i = 0; i < cac_table->ucNumEntries; i++) { | ||
957 | rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc = | ||
958 | le16_to_cpu(cac_table->entries[i].usVddc); | ||
959 | rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage = | ||
960 | le32_to_cpu(cac_table->entries[i].ulLeakageValue); | ||
961 | } | ||
962 | rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries; | ||
963 | } | ||
964 | } | ||
965 | |||
966 | /* ppm table */ | ||
967 | if (le16_to_cpu(power_info->pplib.usTableSize) >= | ||
968 | sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { | ||
969 | ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *) | ||
970 | (mode_info->atom_context->bios + data_offset + | ||
971 | le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset)); | ||
972 | if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) && | ||
973 | ext_hdr->usPPMTableOffset) { | ||
974 | ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *) | ||
975 | (mode_info->atom_context->bios + data_offset + | ||
976 | le16_to_cpu(ext_hdr->usPPMTableOffset)); | ||
977 | rdev->pm.dpm.dyn_state.ppm_table = | ||
978 | kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL); | ||
979 | if (!rdev->pm.dpm.dyn_state.ppm_table) { | ||
980 | kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); | ||
981 | kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); | ||
982 | kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); | ||
983 | kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries); | ||
984 | return -ENOMEM; | ||
985 | } | ||
986 | rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign; | ||
987 | rdev->pm.dpm.dyn_state.ppm_table->cpu_core_number = | ||
988 | le16_to_cpu(ppm->usCpuCoreNumber); | ||
989 | rdev->pm.dpm.dyn_state.ppm_table->platform_tdp = | ||
990 | le32_to_cpu(ppm->ulPlatformTDP); | ||
991 | rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp = | ||
992 | le32_to_cpu(ppm->ulSmallACPlatformTDP); | ||
993 | rdev->pm.dpm.dyn_state.ppm_table->platform_tdc = | ||
994 | le32_to_cpu(ppm->ulPlatformTDC); | ||
995 | rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc = | ||
996 | le32_to_cpu(ppm->ulSmallACPlatformTDC); | ||
997 | rdev->pm.dpm.dyn_state.ppm_table->apu_tdp = | ||
998 | le32_to_cpu(ppm->ulApuTDP); | ||
999 | rdev->pm.dpm.dyn_state.ppm_table->dgpu_tdp = | ||
1000 | le32_to_cpu(ppm->ulDGpuTDP); | ||
1001 | rdev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power = | ||
1002 | le32_to_cpu(ppm->ulDGpuUlvPower); | ||
1003 | rdev->pm.dpm.dyn_state.ppm_table->tj_max = | ||
1004 | le32_to_cpu(ppm->ulTjmax); | ||
1005 | } | ||
1006 | } | ||
1007 | |||
1008 | return 0; | ||
1009 | } | ||
1010 | |||
1011 | void r600_free_extended_power_table(struct radeon_device *rdev) | ||
1012 | { | ||
1013 | if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries) | ||
1014 | kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); | ||
1015 | if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) | ||
1016 | kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); | ||
1017 | if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) | ||
1018 | kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); | ||
1019 | if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) | ||
1020 | kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries); | ||
1021 | if (rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) | ||
1022 | kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries); | ||
1023 | if (rdev->pm.dpm.dyn_state.ppm_table) | ||
1024 | kfree(rdev->pm.dpm.dyn_state.ppm_table); | ||
1025 | } | ||
1026 | |||
1027 | enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev, | ||
1028 | u32 sys_mask, | ||
1029 | enum radeon_pcie_gen asic_gen, | ||
1030 | enum radeon_pcie_gen default_gen) | ||
1031 | { | ||
1032 | switch (asic_gen) { | ||
1033 | case RADEON_PCIE_GEN1: | ||
1034 | return RADEON_PCIE_GEN1; | ||
1035 | case RADEON_PCIE_GEN2: | ||
1036 | return RADEON_PCIE_GEN2; | ||
1037 | case RADEON_PCIE_GEN3: | ||
1038 | return RADEON_PCIE_GEN3; | ||
1039 | default: | ||
1040 | if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3)) | ||
1041 | return RADEON_PCIE_GEN3; | ||
1042 | else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2)) | ||
1043 | return RADEON_PCIE_GEN2; | ||
1044 | else | ||
1045 | return RADEON_PCIE_GEN1; | ||
1046 | } | ||
1047 | return RADEON_PCIE_GEN1; | ||
1048 | } | ||