path: root/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
author    Alex Deucher <alexander.deucher@amd.com>  2015-04-20 17:31:14 -0400
committer Alex Deucher <alexander.deucher@amd.com>  2015-06-03 21:03:17 -0400
commit    aaa36a976bbb9b02a54c087ff390c0bad1d18e3e (patch)
tree      105be3c06ef33c39e6934801d386847950d4ebf9 /drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
parent    a2e73f56fa6282481927ec43aa9362c03c2e2104 (diff)
drm/amdgpu: Add initial VI support
This adds initial support for VI asics. This includes Iceland, Tonga,
and Carrizo. Our initial focus has been Carrizo, so there are still gaps
in support for Tonga and Iceland, notably power management.

Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Jammy Zhou <Jammy.Zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/dce_v10_0.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/dce_v10_0.c	3871
1 file changed, 3871 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
new file mode 100644
index 000000000000..d412291ed70e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -0,0 +1,3871 @@
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "vid.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
#include "dce/dce_10_0_enum.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[] =
{
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET,
	CRTC6_REGISTER_OFFSET
};

static const u32 hpd_offsets[] =
{
	HPD0_REGISTER_OFFSET,
	HPD1_REGISTER_OFFSET,
	HPD2_REGISTER_OFFSET,
	HPD3_REGISTER_OFFSET,
	HPD4_REGISTER_OFFSET,
	HPD5_REGISTER_OFFSET
};

static const uint32_t dig_offsets[] = {
	DIG0_REGISTER_OFFSET,
	DIG1_REGISTER_OFFSET,
	DIG2_REGISTER_OFFSET,
	DIG3_REGISTER_OFFSET,
	DIG4_REGISTER_OFFSET,
	DIG5_REGISTER_OFFSET,
	DIG6_REGISTER_OFFSET
};

static const struct {
	uint32_t reg;
	uint32_t vblank;
	uint32_t vline;
	uint32_t hpd;

} interrupt_status_offsets[] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
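
/* one entry per display controller D1-D6: the status register to read plus
 * the vblank/vline/hpd masks the interrupt handlers test against it
 */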
114
115static const u32 golden_settings_tonga_a11[] =
116{
117 mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
118 mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
119 mmFBC_MISC, 0x1f311fff, 0x12300000,
120 mmHDMI_CONTROL, 0x31000111, 0x00000011,
121};
122
123static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev)
124{
125 switch (adev->asic_type) {
126 case CHIP_TONGA:
127 amdgpu_program_register_sequence(adev,
128 golden_settings_tonga_a11,
129 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
130 break;
131 default:
132 break;
133 }
134}
135
136static u32 dce_v10_0_audio_endpt_rreg(struct amdgpu_device *adev,
137 u32 block_offset, u32 reg)
138{
139 unsigned long flags;
140 u32 r;
141
142 spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
143 WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
144 r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
145 spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
146
147 return r;
148}
149
150static void dce_v10_0_audio_endpt_wreg(struct amdgpu_device *adev,
151 u32 block_offset, u32 reg, u32 v)
152{
153 unsigned long flags;
154
155 spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
156 WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
157 WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
158 spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
159}
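
/* the azalia endpoint registers sit behind an INDEX/DATA pair; the spinlock
 * in the accessors above keeps a concurrent caller from changing the index
 * between the index write and the data access
 */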

static bool dce_v10_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
{
	if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
	    CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
		return true;
	else
		return false;
}

static bool dce_v10_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
{
	u32 pos1, pos2;

	pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
	pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	if (pos1 != pos2)
		return true;
	else
		return false;
}

/**
 * dce_v10_0_vblank_wait - vblank wait asic callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (evergreen+).
 */
static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
	unsigned i = 0;

	if (crtc >= adev->mode_info.num_crtc)
		return;

	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
		return;

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (dce_v10_0_is_in_vblank(adev, crtc)) {
		if (i++ % 100 == 0) {
			if (!dce_v10_0_is_counter_moving(adev, crtc))
				break;
		}
	}

	while (!dce_v10_0_is_in_vblank(adev, crtc)) {
		if (i++ % 100 == 0) {
			if (!dce_v10_0_is_counter_moving(adev, crtc))
				break;
		}
	}
}

static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

/**
 * dce_v10_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to flip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (evergreen+).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high, when it does, we release the lock, and allow the
 * double buffered update to take place.
 */
static void dce_v10_0_page_flip(struct amdgpu_device *adev,
				int crtc_id, u64 crtc_base)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset);
	int i;

	/* Lock the graphics update lock */
	tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
	WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       lower_32_bits(crtc_base));

	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       lower_32_bits(crtc_base));

	/* Wait for update_pending to go high. */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset) &
		    GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
	WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);
}

static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					 u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}

/**
 * dce_v10_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v10_0_hpd_sense(struct amdgpu_device *adev,
				enum amdgpu_hpd_id hpd)
{
	int idx;
	bool connected = false;

	switch (hpd) {
	case AMDGPU_HPD_1:
		idx = 0;
		break;
	case AMDGPU_HPD_2:
		idx = 1;
		break;
	case AMDGPU_HPD_3:
		idx = 2;
		break;
	case AMDGPU_HPD_4:
		idx = 3;
		break;
	case AMDGPU_HPD_5:
		idx = 4;
		break;
	case AMDGPU_HPD_6:
		idx = 5;
		break;
	default:
		return connected;
	}

	if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) &
	    DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
		connected = true;

	return connected;
}

/**
 * dce_v10_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev,
				       enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v10_0_hpd_sense(adev, hpd);
	int idx;

	switch (hpd) {
	case AMDGPU_HPD_1:
		idx = 0;
		break;
	case AMDGPU_HPD_2:
		idx = 1;
		break;
	case AMDGPU_HPD_3:
		idx = 2;
		break;
	case AMDGPU_HPD_4:
		idx = 3;
		break;
	case AMDGPU_HPD_5:
		idx = 4;
		break;
	case AMDGPU_HPD_6:
		idx = 5;
		break;
	default:
		return;
	}

	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
	if (connected)
		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
	else
		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
}

/**
 * dce_v10_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;
	int idx;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS to avoid
			 * breaking the aux dp channel on imac and to help
			 * (but not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * also avoid interrupt storms during dpms.
			 */
			continue;
		}

		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			idx = 0;
			break;
		case AMDGPU_HPD_2:
			idx = 1;
			break;
		case AMDGPU_HPD_3:
			idx = 2;
			break;
		case AMDGPU_HPD_4:
			idx = 3;
			break;
		case AMDGPU_HPD_5:
			idx = 4;
			break;
		case AMDGPU_HPD_6:
			idx = 5;
			break;
		default:
			continue;
		}

		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);

		tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
				    DC_HPD_CONNECT_INT_DELAY,
				    AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
				    DC_HPD_DISCONNECT_INT_DELAY,
				    AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
		WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp);

		dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq,
			       amdgpu_connector->hpd.hpd);
	}
}

/**
 * dce_v10_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;
	int idx;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			idx = 0;
			break;
		case AMDGPU_HPD_2:
			idx = 1;
			break;
		case AMDGPU_HPD_3:
			idx = 2;
			break;
		case AMDGPU_HPD_4:
			idx = 3;
			break;
		case AMDGPU_HPD_5:
			idx = 4;
			break;
		case AMDGPU_HPD_6:
			idx = 5;
			break;
		default:
			continue;
		}

		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);

		amdgpu_irq_put(adev, &adev->hpd_irq,
			       amdgpu_connector->hpd.hpd);
	}
}

static u32 dce_v10_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}

static bool dce_v10_0_is_display_hung(struct amdgpu_device *adev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[6];
	u32 i, j, tmp;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
		if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) {
			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}

static void dce_v10_0_stop_mc_access(struct amdgpu_device *adev,
				     struct amdgpu_mode_mc_save *save)
{
	u32 crtc_enabled, tmp;
	int i;

	save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);

	/* disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);

	/* blank the display controllers */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
					     CRTC_CONTROL, CRTC_MASTER_EN);
		if (crtc_enabled) {
#if 0
			u32 frame_count;
			int j;

			save->crtc_enabled[i] = true;
			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
				amdgpu_display_vblank_wait(adev, i);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
			/* wait for the next frame */
			frame_count = amdgpu_display_vblank_get_counter(adev, i);
			for (j = 0; j < adev->usec_timeout; j++) {
				if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
					break;
				udelay(1);
			}
			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
				tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
				WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
#else
			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
			tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
			WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			save->crtc_enabled[i] = false;
			/* ***** */
#endif
		} else {
			save->crtc_enabled[i] = false;
		}
	}
}

static void dce_v10_0_resume_mc_access(struct amdgpu_device *adev,
				       struct amdgpu_mode_mc_save *save)
{
	u32 tmp, frame_count;
	int i, j;

	/* update crtc base addresses */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);
		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);

		if (save->crtc_enabled[i]) {
			tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
				WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
				tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
				WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
			for (j = 0; j < adev->usec_timeout; j++) {
				tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
				if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
					break;
				udelay(1);
			}
			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
			tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			/* wait for the next frame */
			frame_count = amdgpu_display_vblank_get_counter(adev, i);
			for (j = 0; j < adev->usec_timeout; j++) {
				if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
					break;
				udelay(1);
			}
		}
	}

	WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
	WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));

	/* Unlock vga access */
	WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
}

static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev,
					   bool render)
{
	u32 tmp;

	/* Lockout access through VGA aperture */
	tmp = RREG32(mmVGA_HDP_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
	else
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
	WREG32(mmVGA_HDP_CONTROL, tmp);

	/* disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
	else
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);
}

static void dce_v10_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS/eDP FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
			/* XXX sort out optimal dither settings */
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0);
		} else {
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0);
		}
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
			/* XXX sort out optimal dither settings */
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1);
		} else {
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1);
		}
		break;
	case 10:
		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
			/* XXX sort out optimal dither settings */
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2);
		} else {
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2);
		}
		break;
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}


/* display watermark setup */
/**
 * dce_v10_0_line_buffer_adjust - Set up the line buffer
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 *
 * Set up the line buffer allocation for
 * the selected display controller (CIK).
 * Returns the line buffer size in pixels.
 */
static u32 dce_v10_0_line_buffer_adjust(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					struct drm_display_mode *mode)
{
	u32 tmp, buffer_alloc, i, mem_cfg;
	u32 pipe_offset = amdgpu_crtc->crtc_id;
	/*
	 * Line Buffer Setup
	 * There are 6 line buffers, one for each display controller.
	 * There are 3 partitions per LB. Select the number of partitions
	 * to enable based on the display width. For display widths larger
	 * than 4096, you need to use 2 display controllers and combine
	 * them using the stereo blender.
	 */
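	/* illustrative mapping, derived from the code below: a 1366-wide
	 * mode selects mem_cfg=1 (1920*2 pixels of LB), a 1920-wide mode
	 * selects mem_cfg=2 (2560*2 pixels), and a 3840-wide mode selects
	 * mem_cfg=0 (4096*2 pixels)
	 */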
	if (amdgpu_crtc->base.enabled && mode) {
		if (mode->crtc_hdisplay < 1920) {
			mem_cfg = 1;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 2560) {
			mem_cfg = 2;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 4096) {
			mem_cfg = 0;
			buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
		} else {
			DRM_DEBUG_KMS("Mode too big for LB!\n");
			mem_cfg = 0;
			buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
		}
	} else {
		mem_cfg = 1;
		buffer_alloc = 0;
	}

	tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg);
	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp);

	tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
	tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc);
	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
		if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED))
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (mem_cfg) {
		case 0:
		default:
			return 4096 * 2;
		case 1:
			return 1920 * 2;
		case 2:
			return 2560 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}

/**
 * cik_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmMC_SHARED_CHMAP);

	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}

struct dce10_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};

/**
 * dce_v10_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v10_0_dram_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}
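
/* worked example with hypothetical inputs: for 2 dram channels and
 * yclk = 6000000 kHz, bandwidth = (2 * 4 bytes) * 6000 MHz * 0.7
 * = 33600 MBytes/s
 */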

/**
 * dce_v10_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v10_0_dram_bandwidth_for_display(struct dce10_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v10_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v10_0_data_return_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v10_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v10_0_dmif_request_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v10_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v10_0_available_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = dce_v10_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v10_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v10_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

/**
 * dce_v10_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v10_0_average_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v10_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v10_0_latency_watermark(struct dce10_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v10_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(tmp, dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);

}

/**
 * dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm)
{
	if (dce_v10_0_average_bandwidth(wm) <=
	    (dce_v10_0_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v10_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v10_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm)
{
	if (dce_v10_0_average_bandwidth(wm) <=
	    (dce_v10_0_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v10_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v10_0_check_latency_hiding(struct dce10_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce_v10_0_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

/**
 * dce_v10_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
 */
static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
					 struct amdgpu_crtc *amdgpu_crtc,
					 u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce10_wm_params wm_low, wm_high;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 tmp, wm_mask;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_high.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v10_0_latency_watermark(&wm_high), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v10_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}

		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_low.num_heads = num_heads;

		/* set for low clocks */
		latency_watermark_b = min(dce_v10_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v10_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}
	}

	/* select wm A */
	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	/* select wm B */
	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
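	/* note: wm B is programmed with the same latency_watermark_a as wm A;
	 * the low-clock value latency_watermark_b is only saved for DPM below
	 */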
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	/* restore original selection */
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;
	amdgpu_crtc->wm_low = latency_watermark_b;
}

/**
 * dce_v10_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (CIK).
 */
static void dce_v10_0_bandwidth_update(struct amdgpu_device *adev)
{
	struct drm_display_mode *mode = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	amdgpu_update_display_priority(adev);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		mode = &adev->mode_info.crtcs[i]->base.mode;
		lb_size = dce_v10_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
		dce_v10_0_program_watermarks(adev, adev->mode_info.crtcs[i],
					     lb_size, num_heads);
	}
}

static void dce_v10_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	int i;
	u32 offset, tmp;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		offset = adev->mode_info.audio.pin[i].offset;
		tmp = RREG32_AUDIO_ENDPT(offset,
					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
		if (((tmp &
		      AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
		     AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
			adev->mode_info.audio.pin[i].connected = false;
		else
			adev->mode_info.audio.pin[i].connected = true;
	}
}

static struct amdgpu_audio_pin *dce_v10_0_audio_get_pin(struct amdgpu_device *adev)
{
	int i;

	dce_v10_0_audio_get_connected_pins(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		if (adev->mode_info.audio.pin[i].connected)
			return &adev->mode_info.audio.pin[i];
	}
	DRM_ERROR("No connected audio pins found!\n");
	return NULL;
}

static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id);
	WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp);
}

static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
						 struct drm_display_mode *mode)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 tmp;
	int interlace = 0;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		interlace = 1;
	if (connector->latency_present[interlace]) {
		/* build both fields on the same value: the second REG_SET_FIELD
		 * must start from tmp, not 0, or the VIDEO_LIPSYNC field is lost
		 */
		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    VIDEO_LIPSYNC, connector->video_latency[interlace]);
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    AUDIO_LIPSYNC, connector->audio_latency[interlace]);
	} else {
		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    VIDEO_LIPSYNC, 0);
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    AUDIO_LIPSYNC, 0);
	}
	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
			   ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}

static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 tmp;
	u8 *sadb = NULL;
	int sad_count;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
	if (sad_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
		sad_count = 0;
	}

	/* program the speaker allocation */
	tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
				 ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
			    DP_CONNECTION, 0);
	/* set HDMI mode */
	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
			    HDMI_CONNECTION, 1);
	if (sad_count)
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				    SPEAKER_ALLOCATION, sadb[0]);
	else
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				    SPEAKER_ALLOCATION, 5); /* stereo */
	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
			   ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

	kfree(sadb);
}

static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;

	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
	if (sad_count <= 0) {
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
		return;
	}
	BUG_ON(!sads);

	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 tmp = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				if (sad->channels > max_channels) {
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    MAX_CHANNELS, sad->channels);
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    DESCRIPTOR_BYTE_2, sad->byte2);
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    SUPPORTED_FREQUENCIES, sad->freq);
					max_channels = sad->channels;
				}

				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
				    SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
		WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
	}

	kfree(sads);
}

static void dce_v10_0_audio_enable(struct amdgpu_device *adev,
				   struct amdgpu_audio_pin *pin,
				   bool enable)
{
	if (!pin)
		return;

	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
			   enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}

static const u32 pin_offsets[] =
{
	AUD0_REGISTER_OFFSET,
	AUD1_REGISTER_OFFSET,
	AUD2_REGISTER_OFFSET,
	AUD3_REGISTER_OFFSET,
	AUD4_REGISTER_OFFSET,
	AUD5_REGISTER_OFFSET,
	AUD6_REGISTER_OFFSET,
};

static int dce_v10_0_audio_init(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = 7;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
		adev->mode_info.audio.pin[i].id = i;
		/* disable audio. it will be set up later */
		/* XXX remove once we switch to ip funcs */
		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	return 0;
}

static void dce_v10_0_audio_fini(struct amdgpu_device *adev)
{
	int i;

	if (!adev->mode_info.audio.enabled)
		return;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);

	adev->mode_info.audio.enabled = false;
}

/*
 * update the N and CTS parameters for a given pixel clock rate
 */
static void dce_v10_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);

}

/*
 * build a HDMI Video Info Frame
 */
static void dce_v10_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
						void *buffer, size_t size)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint8_t *frame = buffer + 3;
	uint8_t *header = buffer;

	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
	       frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
	       frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
	       frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
	       frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
}

static void dce_v10_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	u32 dto_phase = 24 * 1000;
	u32 dto_modulo = clock;
	u32 tmp;

	if (!dig || !dig->afmt)
		return;

1724 /* XXX two dtos; generally use dto0 for hdmi */
1725	/* Express [24MHz / target pixel clock] as an exact rational
1726	 * number (ratio of two integers); DCCG_AUDIO_DTOx_PHASE
1727	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
1728 */
1729 tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
1730 tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL,
1731 amdgpu_crtc->crtc_id);
1732 WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
1733 WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
1734 WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
1735}
1736
1737/*
1738 * update the info frames with the data from the current display mode
1739 */
1740static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder,
1741 struct drm_display_mode *mode)
1742{
1743 struct drm_device *dev = encoder->dev;
1744 struct amdgpu_device *adev = dev->dev_private;
1745 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1746 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1747 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1748 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1749 struct hdmi_avi_infoframe frame;
1750 ssize_t err;
1751 u32 tmp;
1752 int bpc = 8;
1753
1754 if (!dig || !dig->afmt)
1755 return;
1756
1757 /* Silent, r600_hdmi_enable will raise WARN for us */
1758 if (!dig->afmt->enabled)
1759 return;
1760
1761 /* hdmi deep color mode general control packets setup, if bpc > 8 */
1762 if (encoder->crtc) {
1763 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1764 bpc = amdgpu_crtc->bpc;
1765 }
1766
1767 /* disable audio prior to setting up hw */
1768 dig->afmt->pin = dce_v10_0_audio_get_pin(adev);
1769 dce_v10_0_audio_enable(adev, dig->afmt->pin, false);
1770
1771 dce_v10_0_audio_set_dto(encoder, mode->clock);
1772
1773 tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
1774 tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
1775 WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */
1776
1777 WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000);
1778
1779 tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset);
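	/* HDMI_DEEP_COLOR_DEPTH: 0 = 24bpp (off), 1 = 30bpp, 2 = 36bpp */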
1780 switch (bpc) {
1781 case 0:
1782 case 6:
1783 case 8:
1784 case 16:
1785 default:
1786 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0);
1787 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
1788 DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
1789 connector->name, bpc);
1790 break;
1791 case 10:
1792 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
1793 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1);
1794 DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
1795 connector->name);
1796 break;
1797 case 12:
1798 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
1799 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2);
1800 DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
1801 connector->name);
1802 break;
1803 }
1804 WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp);
1805
1806 tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
1807 tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */
1808 tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */
1809 tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */
1810 WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
1811
1812 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1813 /* enable audio info frames (frames won't be set until audio is enabled) */
1814 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
1815 /* required for audio info values to be updated */
1816 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
1817 WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1818
1819 tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
1820 /* required for audio info values to be updated */
1821 tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
1822 WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1823
1824 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1825 /* anything other than 0 */
1826 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2);
1827 WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1828
1829 WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */
1830
1831 tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1832 /* set the default audio delay */
1833 tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
1834	/* should be sufficient for all audio modes and small enough for all hblanks */
1835 tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
1836 WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1837
1838 tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1839 /* allow 60958 channel status fields to be updated */
1840 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
1841 WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1842
1843 tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
1844 if (bpc > 8)
1845 /* clear SW CTS value */
1846 tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0);
1847 else
1848 /* select SW CTS value */
1849 tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1);
1850	/* allow hw to send ACR packets when required */
1851 tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
1852 WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
1853
1854 dce_v10_0_afmt_update_ACR(encoder, mode->clock);
1855
1856 tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
1857 tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
1858 WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
1859
1860 tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
1861 tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
1862 WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
1863
1864 tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
1865 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
1866 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
1867 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
1868 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
1869 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
1870 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
1871 WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
1872
1873 dce_v10_0_audio_write_speaker_allocation(encoder);
1874
1875 WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset,
1876 (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
1877
1878 dce_v10_0_afmt_audio_select_pin(encoder);
1879 dce_v10_0_audio_write_sad_regs(encoder);
1880 dce_v10_0_audio_write_latency_fields(encoder, mode);
1881
1882 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
1883 if (err < 0) {
1884 DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1885 return;
1886 }
1887
1888 err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1889 if (err < 0) {
1890 DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1891 return;
1892 }
1893
1894 dce_v10_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
1895
1896 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1897 /* enable AVI info frames */
1898 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
1899	/* required for AVI info values to be updated */
1900 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
1901 WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1902
1903 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1904 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
1905 WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1906
1907 tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1908 /* send audio packets */
1909 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1910 WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1911
1912 WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF);
1913 WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF);
1914 WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001);
1915 WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001);
1916
1917	/* enable audio after setting up hw */
1918 dce_v10_0_audio_enable(adev, dig->afmt->pin, true);
1919}
1920
1921static void dce_v10_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1922{
1923 struct drm_device *dev = encoder->dev;
1924 struct amdgpu_device *adev = dev->dev_private;
1925 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1926 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1927
1928 if (!dig || !dig->afmt)
1929 return;
1930
1931 /* Silent, r600_hdmi_enable will raise WARN for us */
1932 if (enable && dig->afmt->enabled)
1933 return;
1934 if (!enable && !dig->afmt->enabled)
1935 return;
1936
1937 if (!enable && dig->afmt->pin) {
1938 dce_v10_0_audio_enable(adev, dig->afmt->pin, false);
1939 dig->afmt->pin = NULL;
1940 }
1941
1942 dig->afmt->enabled = enable;
1943
1944 DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1945 enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1946}
1947
1948static void dce_v10_0_afmt_init(struct amdgpu_device *adev)
1949{
1950 int i;
1951
1952 for (i = 0; i < adev->mode_info.num_dig; i++)
1953 adev->mode_info.afmt[i] = NULL;
1954
1955 /* DCE10 has audio blocks tied to DIG encoders */
1956 for (i = 0; i < adev->mode_info.num_dig; i++) {
1957 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1958 if (adev->mode_info.afmt[i]) {
1959 adev->mode_info.afmt[i]->offset = dig_offsets[i];
1960 adev->mode_info.afmt[i]->id = i;
1961 }
1962 }
1963}
1964
1965static void dce_v10_0_afmt_fini(struct amdgpu_device *adev)
1966{
1967 int i;
1968
1969 for (i = 0; i < adev->mode_info.num_dig; i++) {
1970 kfree(adev->mode_info.afmt[i]);
1971 adev->mode_info.afmt[i] = NULL;
1972 }
1973}
1974
1975static const u32 vga_control_regs[6] =
1976{
1977 mmD1VGA_CONTROL,
1978 mmD2VGA_CONTROL,
1979 mmD3VGA_CONTROL,
1980 mmD4VGA_CONTROL,
1981 mmD5VGA_CONTROL,
1982 mmD6VGA_CONTROL,
1983};
1984
1985static void dce_v10_0_vga_enable(struct drm_crtc *crtc, bool enable)
1986{
1987 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1988 struct drm_device *dev = crtc->dev;
1989 struct amdgpu_device *adev = dev->dev_private;
1990 u32 vga_control;
1991
1992 vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1993 if (enable)
1994 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
1995 else
1996 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
1997}
1998
1999static void dce_v10_0_grph_enable(struct drm_crtc *crtc, bool enable)
2000{
2001 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2002 struct drm_device *dev = crtc->dev;
2003 struct amdgpu_device *adev = dev->dev_private;
2004
2005 if (enable)
2006 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
2007 else
2008 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
2009}
2010
2011static void dce_v10_0_tiling_fields(uint64_t tiling_flags, unsigned *bankw,
2012 unsigned *bankh, unsigned *mtaspect,
2013 unsigned *tile_split)
2014{
2015 *bankw = (tiling_flags >> AMDGPU_TILING_EG_BANKW_SHIFT) & AMDGPU_TILING_EG_BANKW_MASK;
2016 *bankh = (tiling_flags >> AMDGPU_TILING_EG_BANKH_SHIFT) & AMDGPU_TILING_EG_BANKH_MASK;
2017 *mtaspect = (tiling_flags >> AMDGPU_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & AMDGPU_TILING_EG_MACRO_TILE_ASPECT_MASK;
2018 *tile_split = (tiling_flags >> AMDGPU_TILING_EG_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_TILE_SPLIT_MASK;
2019 switch (*bankw) {
2020 default:
2021 case 1:
2022 *bankw = ADDR_SURF_BANK_WIDTH_1;
2023 break;
2024 case 2:
2025 *bankw = ADDR_SURF_BANK_WIDTH_2;
2026 break;
2027 case 4:
2028 *bankw = ADDR_SURF_BANK_WIDTH_4;
2029 break;
2030 case 8:
2031 *bankw = ADDR_SURF_BANK_WIDTH_8;
2032 break;
2033 }
2034 switch (*bankh) {
2035 default:
2036 case 1:
2037 *bankh = ADDR_SURF_BANK_HEIGHT_1;
2038 break;
2039 case 2:
2040 *bankh = ADDR_SURF_BANK_HEIGHT_2;
2041 break;
2042 case 4:
2043 *bankh = ADDR_SURF_BANK_HEIGHT_4;
2044 break;
2045 case 8:
2046 *bankh = ADDR_SURF_BANK_HEIGHT_8;
2047 break;
2048 }
2049 switch (*mtaspect) {
2050 default:
2051 case 1:
2052 *mtaspect = ADDR_SURF_MACRO_ASPECT_1;
2053 break;
2054 case 2:
2055 *mtaspect = ADDR_SURF_MACRO_ASPECT_2;
2056 break;
2057 case 4:
2058 *mtaspect = ADDR_SURF_MACRO_ASPECT_4;
2059 break;
2060 case 8:
2061 *mtaspect = ADDR_SURF_MACRO_ASPECT_8;
2062 break;
2063 }
2064}
2065
2066static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
2067 struct drm_framebuffer *fb,
2068 int x, int y, int atomic)
2069{
2070 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2071 struct drm_device *dev = crtc->dev;
2072 struct amdgpu_device *adev = dev->dev_private;
2073 struct amdgpu_framebuffer *amdgpu_fb;
2074 struct drm_framebuffer *target_fb;
2075 struct drm_gem_object *obj;
2076 struct amdgpu_bo *rbo;
2077 uint64_t fb_location, tiling_flags;
2078 uint32_t fb_format, fb_pitch_pixels;
2079 unsigned bankw, bankh, mtaspect, tile_split;
2080 u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
2081 /* XXX change to VI */
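	/* PIPE_CONFIG is bits [10:6] of the GB_TILE_MODE entry; entry 10
	 * is the 2D tiled scanout mode (see the pipe-config note below)
	 */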
2082 u32 pipe_config = (adev->gfx.config.tile_mode_array[10] >> 6) & 0x1f;
2083 u32 tmp, viewport_w, viewport_h;
2084 int r;
2085 bool bypass_lut = false;
2086
2087 /* no fb bound */
2088 if (!atomic && !crtc->primary->fb) {
2089 DRM_DEBUG_KMS("No FB bound\n");
2090 return 0;
2091 }
2092
2093 if (atomic) {
2094 amdgpu_fb = to_amdgpu_framebuffer(fb);
2095 target_fb = fb;
2096	} else {
2098 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2099 target_fb = crtc->primary->fb;
2100 }
2101
2102 /* If atomic, assume fb object is pinned & idle & fenced and
2103 * just update base pointers
2104 */
2105 obj = amdgpu_fb->obj;
2106 rbo = gem_to_amdgpu_bo(obj);
2107 r = amdgpu_bo_reserve(rbo, false);
2108 if (unlikely(r != 0))
2109 return r;
2110
2111	if (atomic) {
2112 fb_location = amdgpu_bo_gpu_offset(rbo);
2113	} else {
2114 r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
2115 if (unlikely(r != 0)) {
2116 amdgpu_bo_unreserve(rbo);
2117 return -EINVAL;
2118 }
2119 }
2120
2121 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
2122 amdgpu_bo_unreserve(rbo);
2123
2124 switch (target_fb->pixel_format) {
2125 case DRM_FORMAT_C8:
2126 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0);
2127 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
2128 break;
2129 case DRM_FORMAT_XRGB4444:
2130 case DRM_FORMAT_ARGB4444:
2131 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
2132 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2);
2133#ifdef __BIG_ENDIAN
2134 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2135 ENDIAN_8IN16);
2136#endif
2137 break;
2138 case DRM_FORMAT_XRGB1555:
2139 case DRM_FORMAT_ARGB1555:
2140 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
2141 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
2142#ifdef __BIG_ENDIAN
2143 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2144 ENDIAN_8IN16);
2145#endif
2146 break;
2147 case DRM_FORMAT_BGRX5551:
2148 case DRM_FORMAT_BGRA5551:
2149 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
2150 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5);
2151#ifdef __BIG_ENDIAN
2152 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2153 ENDIAN_8IN16);
2154#endif
2155 break;
2156 case DRM_FORMAT_RGB565:
2157 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
2158 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
2159#ifdef __BIG_ENDIAN
2160 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2161 ENDIAN_8IN16);
2162#endif
2163 break;
2164 case DRM_FORMAT_XRGB8888:
2165 case DRM_FORMAT_ARGB8888:
2166 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
2167 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
2168#ifdef __BIG_ENDIAN
2169 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2170 ENDIAN_8IN32);
2171#endif
2172 break;
2173 case DRM_FORMAT_XRGB2101010:
2174 case DRM_FORMAT_ARGB2101010:
2175 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
2176 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
2177#ifdef __BIG_ENDIAN
2178 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2179 ENDIAN_8IN32);
2180#endif
2181	/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
2182 bypass_lut = true;
2183 break;
2184 case DRM_FORMAT_BGRX1010102:
2185 case DRM_FORMAT_BGRA1010102:
2186 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
2187 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4);
2188#ifdef __BIG_ENDIAN
2189 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2190 ENDIAN_8IN32);
2191#endif
2192	/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
2193 bypass_lut = true;
2194 break;
2195 default:
2196 DRM_ERROR("Unsupported screen format %s\n",
2197 drm_get_format_name(target_fb->pixel_format));
2198 return -EINVAL;
2199 }
2200
2201 if (tiling_flags & AMDGPU_TILING_MACRO) {
2202 unsigned tileb, index, num_banks, tile_split_bytes;
2203
2204 dce_v10_0_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
2205 /* Set NUM_BANKS. */
2206 /* Calculate the macrotile mode index. */
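		/* tileb = bytes in one 8x8 micro-tile, clamped to the tile
		 * split; the loop below computes index = log2(tileb) - 6
		 */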
2207 tile_split_bytes = 64 << tile_split;
2208 tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
2209 tileb = min(tile_split_bytes, tileb);
2210
2211		for (index = 0; tileb > 64; index++)
2212			tileb >>= 1;
2214
2215 if (index >= 16) {
2216 DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
2217 target_fb->bits_per_pixel, tile_split);
2218 return -EINVAL;
2219 }
2220
2221 num_banks = (adev->gfx.config.macrotile_mode_array[index] >> 6) & 0x3;
2222 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks);
2223 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
2224 ARRAY_2D_TILED_THIN1);
2225 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT,
2226 tile_split);
2227 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw);
2228 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh);
2229 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT,
2230 mtaspect);
2231 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE,
2232 ADDR_SURF_MICRO_TILING_DISPLAY);
2233 } else if (tiling_flags & AMDGPU_TILING_MICRO) {
2234 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
2235 ARRAY_1D_TILED_THIN1);
2236 }
2237
2238 /* Read the pipe config from the 2D TILED SCANOUT mode.
2239 * It should be the same for the other modes too, but not all
2240 * modes set the pipe config field. */
2241 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG,
2242 pipe_config);
2243
2244 dce_v10_0_vga_enable(crtc, false);
2245
2246 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2247 upper_32_bits(fb_location));
2248 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2249 upper_32_bits(fb_location));
2250 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2251 (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
2252 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2253 (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
2254 WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
2255 WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
2256
2257 /*
2258 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
2259 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
2260 * retain the full precision throughout the pipeline.
2261 */
2262 tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset);
2263 if (bypass_lut)
2264 tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1);
2265 else
2266 tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0);
2267 WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp);
2268
2269 if (bypass_lut)
2270 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
2271
2272 WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
2273 WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
2274 WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
2275 WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
2276 WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
2277 WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
2278
2279 fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
2280 WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
2281
2282 dce_v10_0_grph_enable(crtc, true);
2283
2284 WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
2285 target_fb->height);
2286
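	/* the viewport start is aligned down: x to a multiple of 4 pixels,
	 * y to an even line
	 */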
2287 x &= ~3;
2288 y &= ~1;
2289 WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
2290 (x << 16) | y);
2291 viewport_w = crtc->mode.hdisplay;
2292 viewport_h = (crtc->mode.vdisplay + 1) & ~1;
2293 WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
2294 (viewport_w << 16) | viewport_h);
2295
2296 /* pageflip setup */
2297	/* make sure the flip happens during vblank rather than hblank */
2298 tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
2299 tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
2300 GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
2301 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2302
2303 /* set pageflip to happen only at start of vblank interval (front porch) */
2304 WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
2305
2306 if (!atomic && fb && fb != crtc->primary->fb) {
2307 amdgpu_fb = to_amdgpu_framebuffer(fb);
2308 rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2309 r = amdgpu_bo_reserve(rbo, false);
2310 if (unlikely(r != 0))
2311 return r;
2312 amdgpu_bo_unpin(rbo);
2313 amdgpu_bo_unreserve(rbo);
2314 }
2315
2316 /* Bytes per pixel may have changed */
2317 dce_v10_0_bandwidth_update(adev);
2318
2319 return 0;
2320}
2321
2322static void dce_v10_0_set_interleave(struct drm_crtc *crtc,
2323 struct drm_display_mode *mode)
2324{
2325 struct drm_device *dev = crtc->dev;
2326 struct amdgpu_device *adev = dev->dev_private;
2327 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2328 u32 tmp;
2329
2330 tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset);
2331 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2332 tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1);
2333 else
2334 tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0);
2335 WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp);
2336}
2337
2338static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc)
2339{
2340 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2341 struct drm_device *dev = crtc->dev;
2342 struct amdgpu_device *adev = dev->dev_private;
2343 int i;
2344 u32 tmp;
2345
2346 DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2347
2348 tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
2349 tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0);
2350 tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_OVL_MODE, 0);
2351 WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2352
2353 tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset);
2354 tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1);
2355 WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2356
2357 tmp = RREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset);
2358 tmp = REG_SET_FIELD(tmp, PRESCALE_OVL_CONTROL, OVL_PRESCALE_BYPASS, 1);
2359 WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2360
2361 tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset);
2362 tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0);
2363 tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, OVL_INPUT_GAMMA_MODE, 0);
2364 WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2365
2366 WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2367
2368 WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2369 WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2370 WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2371
2372 WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2373 WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2374 WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2375
2376 WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2377 WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2378
2379 WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
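	/* each DC_LUT_30_COLOR write packs one 10-bit-per-component entry:
	 * red in bits [29:20], green in [19:10], blue in [9:0]
	 */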
2380 for (i = 0; i < 256; i++) {
2381 WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2382 (amdgpu_crtc->lut_r[i] << 20) |
2383 (amdgpu_crtc->lut_g[i] << 10) |
2384 (amdgpu_crtc->lut_b[i] << 0));
2385 }
2386
2387 tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
2388 tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0);
2389 tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, OVL_DEGAMMA_MODE, 0);
2390 tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0);
2391 WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2392
2393 tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset);
2394 tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0);
2395 tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, OVL_GAMUT_REMAP_MODE, 0);
2396 WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2397
2398 tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
2399 tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0);
2400 tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, OVL_REGAMMA_MODE, 0);
2401 WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2402
2403 tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
2404 tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0);
2405 tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_OVL_MODE, 0);
2406 WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2407
2408 /* XXX match this to the depth of the crtc fmt block, move to modeset? */
2409 WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0);
2410 /* XXX this only needs to be programmed once per crtc at startup,
2411 * not sure where the best place for it is
2412 */
2413 tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset);
2414 tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1);
2415 WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2416}
2417
2418static int dce_v10_0_pick_dig_encoder(struct drm_encoder *encoder)
2419{
2420 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2421 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2422
2423 switch (amdgpu_encoder->encoder_id) {
2424 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2425 if (dig->linkb)
2426 return 1;
2427 else
2428 return 0;
2430 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2431 if (dig->linkb)
2432 return 3;
2433 else
2434 return 2;
2436 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2437 if (dig->linkb)
2438 return 5;
2439 else
2440 return 4;
2442 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2443 return 6;
2445 default:
2446 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2447 return 0;
2448 }
2449}
2450
2451/**
2452 * dce_v10_0_pick_pll - Allocate a PPLL for use by the crtc.
2453 *
2454 * @crtc: drm crtc
2455 *
2456 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
2457 * a single PPLL can be used for all DP crtcs/encoders. For non-DP
2458 * monitors a dedicated PPLL must be used. If a particular board has
2459 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2460 * as there is no need to program the PLL itself. If we are not able to
2461 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2462 * avoid messing up an existing monitor.
2463 *
2464 * Asic specific PLL information
2465 *
2466 * DCE 10.x
2467 * Tonga
2468 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
2469 * CI
2470 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
2471 *
2472 */
2473static u32 dce_v10_0_pick_pll(struct drm_crtc *crtc)
2474{
2475 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2476 struct drm_device *dev = crtc->dev;
2477 struct amdgpu_device *adev = dev->dev_private;
2478 u32 pll_in_use;
2479 int pll;
2480
2481 if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2482 if (adev->clock.dp_extclk)
2483 /* skip PPLL programming if using ext clock */
2484 return ATOM_PPLL_INVALID;
2485 else {
2486 /* use the same PPLL for all DP monitors */
2487 pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2488 if (pll != ATOM_PPLL_INVALID)
2489 return pll;
2490 }
2491 } else {
2492 /* use the same PPLL for all monitors with the same clock */
2493 pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2494 if (pll != ATOM_PPLL_INVALID)
2495 return pll;
2496 }
2497
2498 /* DCE10 has PPLL0, PPLL1, and PPLL2 */
2499 pll_in_use = amdgpu_pll_get_use_mask(crtc);
2500 if (!(pll_in_use & (1 << ATOM_PPLL2)))
2501 return ATOM_PPLL2;
2502 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2503 return ATOM_PPLL1;
2504 if (!(pll_in_use & (1 << ATOM_PPLL0)))
2505 return ATOM_PPLL0;
2506 DRM_ERROR("unable to allocate a PPLL\n");
2507 return ATOM_PPLL_INVALID;
2508}
2509
2510static void dce_v10_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2511{
2512 struct amdgpu_device *adev = crtc->dev->dev_private;
2513 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2514 uint32_t cur_lock;
2515
2516 cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2517 if (lock)
2518 cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1);
2519 else
2520 cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0);
2521 WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2522}
2523
2524static void dce_v10_0_hide_cursor(struct drm_crtc *crtc)
2525{
2526 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2527 struct amdgpu_device *adev = crtc->dev->dev_private;
2528 u32 tmp;
2529
2530 tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2531 tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
2532 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2533}
2534
2535static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
2536{
2537 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2538 struct amdgpu_device *adev = crtc->dev->dev_private;
2539 u32 tmp;
2540
2541 tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2542 tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
2543 tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
2544 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2545}
2546
2547static void dce_v10_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
2548 uint64_t gpu_addr)
2549{
2550 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2551 struct amdgpu_device *adev = crtc->dev->dev_private;
2552
2553 WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2554 upper_32_bits(gpu_addr));
2555 WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2556 lower_32_bits(gpu_addr));
2557}
2558
2559static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc,
2560 int x, int y)
2561{
2562 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2563 struct amdgpu_device *adev = crtc->dev->dev_private;
2564 int xorigin = 0, yorigin = 0;
2565
2566	/* avivo cursors are offset into the total surface */
2567 x += crtc->x;
2568 y += crtc->y;
2569 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2570
2571 if (x < 0) {
2572 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2573 x = 0;
2574 }
2575 if (y < 0) {
2576 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2577 y = 0;
2578 }
2579
2580 dce_v10_0_lock_cursor(crtc, true);
2581 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2582 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2583 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2584 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2585 dce_v10_0_lock_cursor(crtc, false);
2586
2587 return 0;
2588}
2589
2590static int dce_v10_0_crtc_cursor_set(struct drm_crtc *crtc,
2591 struct drm_file *file_priv,
2592 uint32_t handle,
2593 uint32_t width,
2594 uint32_t height)
2595{
2596 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2597 struct drm_gem_object *obj;
2598 struct amdgpu_bo *robj;
2599 uint64_t gpu_addr;
2600 int ret;
2601
2602 if (!handle) {
2603 /* turn off cursor */
2604 dce_v10_0_hide_cursor(crtc);
2605 obj = NULL;
2606 goto unpin;
2607 }
2608
2609 if ((width > amdgpu_crtc->max_cursor_width) ||
2610 (height > amdgpu_crtc->max_cursor_height)) {
2611 DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2612 return -EINVAL;
2613 }
2614
2615 obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
2616 if (!obj) {
2617 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2618 return -ENOENT;
2619 }
2620
2621 robj = gem_to_amdgpu_bo(obj);
2622 ret = amdgpu_bo_reserve(robj, false);
2623 if (unlikely(ret != 0))
2624 goto fail;
2625 ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM,
2626 0, &gpu_addr);
2627 amdgpu_bo_unreserve(robj);
2628 if (ret)
2629 goto fail;
2630
2631 amdgpu_crtc->cursor_width = width;
2632 amdgpu_crtc->cursor_height = height;
2633
2634 dce_v10_0_lock_cursor(crtc, true);
2635 dce_v10_0_set_cursor(crtc, obj, gpu_addr);
2636 dce_v10_0_show_cursor(crtc);
2637 dce_v10_0_lock_cursor(crtc, false);
2638
2639unpin:
2640 if (amdgpu_crtc->cursor_bo) {
2641 robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2642 ret = amdgpu_bo_reserve(robj, false);
2643 if (likely(ret == 0)) {
2644 amdgpu_bo_unpin(robj);
2645 amdgpu_bo_unreserve(robj);
2646 }
2647 drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
2648 }
2649
2650 amdgpu_crtc->cursor_bo = obj;
2651 return 0;
2652fail:
2653 drm_gem_object_unreference_unlocked(obj);
2654
2655 return ret;
2656}
2657
2658static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2659 u16 *blue, uint32_t start, uint32_t size)
2660{
2661 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2662 int end = (start + size > 256) ? 256 : start + size, i;
2663
2664 /* userspace palettes are always correct as is */
2665 for (i = start; i < end; i++) {
2666 amdgpu_crtc->lut_r[i] = red[i] >> 6;
2667 amdgpu_crtc->lut_g[i] = green[i] >> 6;
2668 amdgpu_crtc->lut_b[i] = blue[i] >> 6;
2669 }
2670 dce_v10_0_crtc_load_lut(crtc);
2671}
2672
2673static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc)
2674{
2675 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2676
2677 drm_crtc_cleanup(crtc);
2678 destroy_workqueue(amdgpu_crtc->pflip_queue);
2679 kfree(amdgpu_crtc);
2680}
2681
2682static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
2683 .cursor_set = dce_v10_0_crtc_cursor_set,
2684 .cursor_move = dce_v10_0_crtc_cursor_move,
2685 .gamma_set = dce_v10_0_crtc_gamma_set,
2686 .set_config = amdgpu_crtc_set_config,
2687 .destroy = dce_v10_0_crtc_destroy,
2688 .page_flip = amdgpu_crtc_page_flip,
2689};
2690
2691static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2692{
2693 struct drm_device *dev = crtc->dev;
2694 struct amdgpu_device *adev = dev->dev_private;
2695 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2696
2697 switch (mode) {
2698 case DRM_MODE_DPMS_ON:
2699 amdgpu_crtc->enabled = true;
2700 amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2701 dce_v10_0_vga_enable(crtc, true);
2702 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2703 dce_v10_0_vga_enable(crtc, false);
2704 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
2705 dce_v10_0_crtc_load_lut(crtc);
2706 break;
2707 case DRM_MODE_DPMS_STANDBY:
2708 case DRM_MODE_DPMS_SUSPEND:
2709 case DRM_MODE_DPMS_OFF:
2710 drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
2711 if (amdgpu_crtc->enabled) {
2712 dce_v10_0_vga_enable(crtc, true);
2713 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2714 dce_v10_0_vga_enable(crtc, false);
2715 }
2716 amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2717 amdgpu_crtc->enabled = false;
2718 break;
2719 }
2720 /* adjust pm to dpms */
2721 amdgpu_pm_compute_clocks(adev);
2722}
2723
2724static void dce_v10_0_crtc_prepare(struct drm_crtc *crtc)
2725{
2726 /* disable crtc pair power gating before programming */
2727 amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2728 amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2729 dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2730}
2731
2732static void dce_v10_0_crtc_commit(struct drm_crtc *crtc)
2733{
2734 dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2735 amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2736}
2737
2738static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
2739{
2740 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2741 struct drm_device *dev = crtc->dev;
2742 struct amdgpu_device *adev = dev->dev_private;
2743 struct amdgpu_atom_ss ss;
2744 int i;
2745
2746 dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2747 if (crtc->primary->fb) {
2748 int r;
2749 struct amdgpu_framebuffer *amdgpu_fb;
2750 struct amdgpu_bo *rbo;
2751
2752 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2753 rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2754 r = amdgpu_bo_reserve(rbo, false);
2755 if (unlikely(r))
2756 DRM_ERROR("failed to reserve rbo before unpin\n");
2757 else {
2758 amdgpu_bo_unpin(rbo);
2759 amdgpu_bo_unreserve(rbo);
2760 }
2761 }
2762 /* disable the GRPH */
2763 dce_v10_0_grph_enable(crtc, false);
2764
2765 amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2766
2767 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2768 if (adev->mode_info.crtcs[i] &&
2769 adev->mode_info.crtcs[i]->enabled &&
2770 i != amdgpu_crtc->crtc_id &&
2771 amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2772			/* one other crtc is using this pll; don't
2773			 * turn off the pll
2774			 */
2775 goto done;
2776 }
2777 }
2778
2779 switch (amdgpu_crtc->pll_id) {
2780 case ATOM_PPLL0:
2781 case ATOM_PPLL1:
2782 case ATOM_PPLL2:
2783 /* disable the ppll */
2784 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2785 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2786 break;
2787 default:
2788 break;
2789 }
2790done:
2791 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2792 amdgpu_crtc->adjusted_clock = 0;
2793 amdgpu_crtc->encoder = NULL;
2794 amdgpu_crtc->connector = NULL;
2795}
2796
2797static int dce_v10_0_crtc_mode_set(struct drm_crtc *crtc,
2798 struct drm_display_mode *mode,
2799 struct drm_display_mode *adjusted_mode,
2800 int x, int y, struct drm_framebuffer *old_fb)
2801{
2802 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2803
2804 if (!amdgpu_crtc->adjusted_clock)
2805 return -EINVAL;
2806
2807 amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2808 amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2809 dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2810 amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2811 amdgpu_atombios_crtc_scaler_setup(crtc);
2812	/* update the stored hw mode for dpm */
2813 amdgpu_crtc->hw_mode = *adjusted_mode;
2814
2815 return 0;
2816}
2817
2818static bool dce_v10_0_crtc_mode_fixup(struct drm_crtc *crtc,
2819 const struct drm_display_mode *mode,
2820 struct drm_display_mode *adjusted_mode)
2821{
2822 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2823 struct drm_device *dev = crtc->dev;
2824 struct drm_encoder *encoder;
2825
2826 /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2827 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2828 if (encoder->crtc == crtc) {
2829 amdgpu_crtc->encoder = encoder;
2830 amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2831 break;
2832 }
2833 }
2834 if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2835 amdgpu_crtc->encoder = NULL;
2836 amdgpu_crtc->connector = NULL;
2837 return false;
2838 }
2839 if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2840 return false;
2841 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2842 return false;
2843 /* pick pll */
2844 amdgpu_crtc->pll_id = dce_v10_0_pick_pll(crtc);
2845 /* if we can't get a PPLL for a non-DP encoder, fail */
2846 if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2847 !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2848 return false;
2849
2850 return true;
2851}
2852
2853static int dce_v10_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2854 struct drm_framebuffer *old_fb)
2855{
2856 return dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2857}
2858
2859static int dce_v10_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2860 struct drm_framebuffer *fb,
2861 int x, int y, enum mode_set_atomic state)
2862{
2863 return dce_v10_0_crtc_do_set_base(crtc, fb, x, y, 1);
2864}
2865
2866static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = {
2867 .dpms = dce_v10_0_crtc_dpms,
2868 .mode_fixup = dce_v10_0_crtc_mode_fixup,
2869 .mode_set = dce_v10_0_crtc_mode_set,
2870 .mode_set_base = dce_v10_0_crtc_set_base,
2871 .mode_set_base_atomic = dce_v10_0_crtc_set_base_atomic,
2872 .prepare = dce_v10_0_crtc_prepare,
2873 .commit = dce_v10_0_crtc_commit,
2874 .load_lut = dce_v10_0_crtc_load_lut,
2875 .disable = dce_v10_0_crtc_disable,
2876};
2877
2878static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
2879{
2880 struct amdgpu_crtc *amdgpu_crtc;
2881 int i;
2882
2883 amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2884 (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2885 if (amdgpu_crtc == NULL)
2886 return -ENOMEM;
2887
2888 drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v10_0_crtc_funcs);
2889
2890 drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2891 amdgpu_crtc->crtc_id = index;
2892 amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue");
2893 adev->mode_info.crtcs[index] = amdgpu_crtc;
2894
2895 amdgpu_crtc->max_cursor_width = 128;
2896 amdgpu_crtc->max_cursor_height = 128;
2897 adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2898 adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2899
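	/* default gamma: identity ramp, widened from the 8-bit index to
	 * the LUT's 10-bit range
	 */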
2900 for (i = 0; i < 256; i++) {
2901 amdgpu_crtc->lut_r[i] = i << 2;
2902 amdgpu_crtc->lut_g[i] = i << 2;
2903 amdgpu_crtc->lut_b[i] = i << 2;
2904 }
2905
2906 switch (amdgpu_crtc->crtc_id) {
2907 case 0:
2908 default:
2909 amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET;
2910 break;
2911 case 1:
2912 amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET;
2913 break;
2914 case 2:
2915 amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET;
2916 break;
2917 case 3:
2918 amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET;
2919 break;
2920 case 4:
2921 amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET;
2922 break;
2923 case 5:
2924 amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET;
2925 break;
2926 }
2927
2928 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2929 amdgpu_crtc->adjusted_clock = 0;
2930 amdgpu_crtc->encoder = NULL;
2931 amdgpu_crtc->connector = NULL;
2932 drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v10_0_crtc_helper_funcs);
2933
2934 return 0;
2935}
2936
2937static int dce_v10_0_early_init(struct amdgpu_device *adev)
2938{
2939 adev->audio_endpt_rreg = &dce_v10_0_audio_endpt_rreg;
2940 adev->audio_endpt_wreg = &dce_v10_0_audio_endpt_wreg;
2941
2942 dce_v10_0_set_display_funcs(adev);
2943 dce_v10_0_set_irq_funcs(adev);
2944
2945 switch (adev->asic_type) {
2946 case CHIP_TONGA:
2947 adev->mode_info.num_crtc = 6; /* XXX 7??? */
2948 adev->mode_info.num_hpd = 6;
2949 adev->mode_info.num_dig = 7;
2950 break;
2951 default:
2952 /* FIXME: not supported yet */
2953 return -EINVAL;
2954 }
2955
2956 return 0;
2957}
2958
2959static int dce_v10_0_sw_init(struct amdgpu_device *adev)
2960{
2961 int r, i;
2962
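	/* crtc vblank/vline interrupts use interrupt src_ids 1..num_crtc */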
2963 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2964 r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
2965 if (r)
2966 return r;
2967 }
2968
2969 for (i = 8; i < 20; i += 2) {
2970 r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
2971 if (r)
2972 return r;
2973 }
2974
2975 /* HPD hotplug */
2976 r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
2977 if (r)
2978 return r;
2979
2980 adev->mode_info.mode_config_initialized = true;
2981
2982 adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
2983
2984 adev->ddev->mode_config.max_width = 16384;
2985 adev->ddev->mode_config.max_height = 16384;
2986
2987 adev->ddev->mode_config.preferred_depth = 24;
2988 adev->ddev->mode_config.prefer_shadow = 1;
2989
2990 adev->ddev->mode_config.fb_base = adev->mc.aper_base;
2991
2992 r = amdgpu_modeset_create_props(adev);
2993 if (r)
2994 return r;
2995
2999 /* allocate crtcs */
3000 for (i = 0; i < adev->mode_info.num_crtc; i++) {
3001 r = dce_v10_0_crtc_init(adev, i);
3002 if (r)
3003 return r;
3004 }
3005
3006 if (amdgpu_atombios_get_connector_info_from_object_table(adev))
3007 amdgpu_print_display_setup(adev->ddev);
3008 else
3009 return -EINVAL;
3010
3011 /* setup afmt */
3012 dce_v10_0_afmt_init(adev);
3013
3014 r = dce_v10_0_audio_init(adev);
3015 if (r)
3016 return r;
3017
3018 drm_kms_helper_poll_init(adev->ddev);
3019
3020 return r;
3021}
3022
3023static int dce_v10_0_sw_fini(struct amdgpu_device *adev)
3024{
3025 kfree(adev->mode_info.bios_hardcoded_edid);
3026
3027 drm_kms_helper_poll_fini(adev->ddev);
3028
3029 dce_v10_0_audio_fini(adev);
3030
3031 dce_v10_0_afmt_fini(adev);
3032
3033 drm_mode_config_cleanup(adev->ddev);
3034 adev->mode_info.mode_config_initialized = false;
3035
3036 return 0;
3037}
3038
3039static int dce_v10_0_hw_init(struct amdgpu_device *adev)
3040{
3041 int i;
3042
3043 dce_v10_0_init_golden_registers(adev);
3044
3045 /* init dig PHYs, disp eng pll */
3046 amdgpu_atombios_encoder_init_dig(adev);
3047 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
3048
3049 /* initialize hpd */
3050 dce_v10_0_hpd_init(adev);
3051
3052	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
3053		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
3055
3056 return 0;
3057}
3058
3059static int dce_v10_0_hw_fini(struct amdgpu_device *adev)
3060{
3061 int i;
3062
3063 dce_v10_0_hpd_fini(adev);
3064
3065	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
3066		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
3068
3069 return 0;
3070}
3071
3072static int dce_v10_0_suspend(struct amdgpu_device *adev)
3073{
3074 struct drm_connector *connector;
3075
3076 drm_kms_helper_poll_disable(adev->ddev);
3077
3078 /* turn off display hw */
3079 list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) {
3080 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
3081 }
3082
3083 amdgpu_atombios_scratch_regs_save(adev);
3084
3085 dce_v10_0_hpd_fini(adev);
3086
3087 return 0;
3088}
3089
3090static int dce_v10_0_resume(struct amdgpu_device *adev)
3091{
3092 struct drm_connector *connector;
3093
3094 dce_v10_0_init_golden_registers(adev);
3095
3096 amdgpu_atombios_scratch_regs_restore(adev);
3097
3098 /* init dig PHYs, disp eng pll */
3099 amdgpu_atombios_encoder_init_dig(adev);
3100 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
3101 /* turn on the BL */
3102 if (adev->mode_info.bl_encoder) {
3103 u8 bl_level = amdgpu_display_backlight_get_level(adev,
3104 adev->mode_info.bl_encoder);
3105 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
3106 bl_level);
3107 }
3108
3109 /* initialize hpd */
3110 dce_v10_0_hpd_init(adev);
3111
3112 /* blat the mode back in */
3113 drm_helper_resume_force_mode(adev->ddev);
3114 /* turn on display hw */
3115 list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) {
3116 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
3117 }
3118
3119 drm_kms_helper_poll_enable(adev->ddev);
3120
3121 return 0;
3122}
3123
3124static bool dce_v10_0_is_idle(struct amdgpu_device *adev)
3125{
3126 /* XXX todo */
3127 return true;
3128}
3129
3130static int dce_v10_0_wait_for_idle(struct amdgpu_device *adev)
3131{
3132 /* XXX todo */
3133 return 0;
3134}
3135
3136static void dce_v10_0_print_status(struct amdgpu_device *adev)
3137{
3138 dev_info(adev->dev, "DCE 10.x registers\n");
3139 /* XXX todo */
3140}
3141
3142static int dce_v10_0_soft_reset(struct amdgpu_device *adev)
3143{
3144 u32 srbm_soft_reset = 0, tmp;
3145
3146 if (dce_v10_0_is_display_hung(adev))
3147 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
3148
3149 if (srbm_soft_reset) {
3150 dce_v10_0_print_status(adev);
3151
3152 tmp = RREG32(mmSRBM_SOFT_RESET);
3153 tmp |= srbm_soft_reset;
3154 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
3155 WREG32(mmSRBM_SOFT_RESET, tmp);
3156 tmp = RREG32(mmSRBM_SOFT_RESET);
3157
3158 udelay(50);
3159
3160 tmp &= ~srbm_soft_reset;
3161 WREG32(mmSRBM_SOFT_RESET, tmp);
3162 tmp = RREG32(mmSRBM_SOFT_RESET);
3163
3164 /* Wait a little for things to settle down */
3165 udelay(50);
3166 dce_v10_0_print_status(adev);
3167 }
3168 return 0;
3169}
3170
3171static void dce_v10_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
3172 int crtc,
3173 enum amdgpu_interrupt_state state)
3174{
3175 u32 lb_interrupt_mask;
3176
3177 if (crtc >= adev->mode_info.num_crtc) {
3178 DRM_DEBUG("invalid crtc %d\n", crtc);
3179 return;
3180 }
3181
3182 switch (state) {
3183 case AMDGPU_IRQ_STATE_DISABLE:
3184 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3185 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3186 VBLANK_INTERRUPT_MASK, 0);
3187 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3188 break;
3189 case AMDGPU_IRQ_STATE_ENABLE:
3190 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3191 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3192 VBLANK_INTERRUPT_MASK, 1);
3193 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3194 break;
3195 default:
3196 break;
3197 }
3198}
3199
3200static void dce_v10_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
3201 int crtc,
3202 enum amdgpu_interrupt_state state)
3203{
3204 u32 lb_interrupt_mask;
3205
3206 if (crtc >= adev->mode_info.num_crtc) {
3207 DRM_DEBUG("invalid crtc %d\n", crtc);
3208 return;
3209 }
3210
3211 switch (state) {
3212 case AMDGPU_IRQ_STATE_DISABLE:
3213 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3214 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3215 VLINE_INTERRUPT_MASK, 0);
3216 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3217 break;
3218 case AMDGPU_IRQ_STATE_ENABLE:
3219 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3220 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3221 VLINE_INTERRUPT_MASK, 1);
3222 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3223 break;
3224 default:
3225 break;
3226 }
3227}
3228
3229static int dce_v10_0_set_hpd_irq_state(struct amdgpu_device *adev,
3230 struct amdgpu_irq_src *source,
3231 unsigned hpd,
3232 enum amdgpu_interrupt_state state)
3233{
3234 u32 tmp;
3235
3236 if (hpd >= adev->mode_info.num_hpd) {
3237	DRM_DEBUG("invalid hpd %d\n", hpd);
3238 return 0;
3239 }
3240
3241 switch (state) {
3242 case AMDGPU_IRQ_STATE_DISABLE:
3243 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3244 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
3245 WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3246 break;
3247 case AMDGPU_IRQ_STATE_ENABLE:
3248 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3249 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1);
3250 WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3251 break;
3252 default:
3253 break;
3254 }
3255
3256 return 0;
3257}
3258
3259static int dce_v10_0_set_crtc_irq_state(struct amdgpu_device *adev,
3260 struct amdgpu_irq_src *source,
3261 unsigned type,
3262 enum amdgpu_interrupt_state state)
3263{
3264 switch (type) {
3265 case AMDGPU_CRTC_IRQ_VBLANK1:
3266 dce_v10_0_set_crtc_vblank_interrupt_state(adev, 0, state);
3267 break;
3268 case AMDGPU_CRTC_IRQ_VBLANK2:
3269 dce_v10_0_set_crtc_vblank_interrupt_state(adev, 1, state);
3270 break;
3271 case AMDGPU_CRTC_IRQ_VBLANK3:
3272 dce_v10_0_set_crtc_vblank_interrupt_state(adev, 2, state);
3273 break;
3274 case AMDGPU_CRTC_IRQ_VBLANK4:
3275 dce_v10_0_set_crtc_vblank_interrupt_state(adev, 3, state);
3276 break;
3277 case AMDGPU_CRTC_IRQ_VBLANK5:
3278 dce_v10_0_set_crtc_vblank_interrupt_state(adev, 4, state);
3279 break;
3280 case AMDGPU_CRTC_IRQ_VBLANK6:
3281 dce_v10_0_set_crtc_vblank_interrupt_state(adev, 5, state);
3282 break;
3283 case AMDGPU_CRTC_IRQ_VLINE1:
3284 dce_v10_0_set_crtc_vline_interrupt_state(adev, 0, state);
3285 break;
3286 case AMDGPU_CRTC_IRQ_VLINE2:
3287 dce_v10_0_set_crtc_vline_interrupt_state(adev, 1, state);
3288 break;
3289 case AMDGPU_CRTC_IRQ_VLINE3:
3290 dce_v10_0_set_crtc_vline_interrupt_state(adev, 2, state);
3291 break;
3292 case AMDGPU_CRTC_IRQ_VLINE4:
3293 dce_v10_0_set_crtc_vline_interrupt_state(adev, 3, state);
3294 break;
3295 case AMDGPU_CRTC_IRQ_VLINE5:
3296 dce_v10_0_set_crtc_vline_interrupt_state(adev, 4, state);
3297 break;
3298 case AMDGPU_CRTC_IRQ_VLINE6:
3299 dce_v10_0_set_crtc_vline_interrupt_state(adev, 5, state);
3300 break;
3301 default:
3302 break;
3303 }
3304 return 0;
3305}
3306
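/**
 * dce_v10_0_set_pageflip_irq_state - enable/disable pageflip interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: irq source
 * @type: AMDGPU_PAGEFLIP_IRQ_D1..D6
 * @state: interrupt state to program (enable/disable)
 *
 * Masks or unmasks GRPH_PFLIP_INT in the crtc's GRPH_INTERRUPT_CONTROL
 * register.
 */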
3307static int dce_v10_0_set_pageflip_irq_state(struct amdgpu_device *adev,
3308 struct amdgpu_irq_src *src,
3309 unsigned type,
3310 enum amdgpu_interrupt_state state)
3311{
3312 u32 reg, reg_block;
3313 /* select the register block for the crtc this pageflip IRQ targets */
3314 switch (type) {
3315 case AMDGPU_PAGEFLIP_IRQ_D1:
3316 reg_block = CRTC0_REGISTER_OFFSET;
3317 break;
3318 case AMDGPU_PAGEFLIP_IRQ_D2:
3319 reg_block = CRTC1_REGISTER_OFFSET;
3320 break;
3321 case AMDGPU_PAGEFLIP_IRQ_D3:
3322 reg_block = CRTC2_REGISTER_OFFSET;
3323 break;
3324 case AMDGPU_PAGEFLIP_IRQ_D4:
3325 reg_block = CRTC3_REGISTER_OFFSET;
3326 break;
3327 case AMDGPU_PAGEFLIP_IRQ_D5:
3328 reg_block = CRTC4_REGISTER_OFFSET;
3329 break;
3330 case AMDGPU_PAGEFLIP_IRQ_D6:
3331 reg_block = CRTC5_REGISTER_OFFSET;
3332 break;
3333 default:
3334 DRM_ERROR("invalid pageflip crtc %d\n", type);
3335 return -EINVAL;
3336 }
3337
3338 reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block);
3339 if (state == AMDGPU_IRQ_STATE_DISABLE)
3340 WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3341 else
3342 WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3343
3344 return 0;
3345}
3346
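/**
 * dce_v10_0_pageflip_irq - pageflip irq handler
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: interrupt vector entry
 *
 * Acks the pending pageflip interrupt and, if the flip was submitted,
 * completes it: sends the userspace event, drops the vblank and irq
 * references and queues the buffer unpin work.
 */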
3347static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
3348 struct amdgpu_irq_src *source,
3349 struct amdgpu_iv_entry *entry)
3350{
3351 int reg_block;
3352 unsigned long flags;
3353 unsigned crtc_id;
3354 struct amdgpu_crtc *amdgpu_crtc;
3355 struct amdgpu_flip_work *works;
3356
3357 crtc_id = (entry->src_id - 8) >> 1;
3358 amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3359
3360 /* ack the interrupt; crtc_id maps 1:1 onto AMDGPU_PAGEFLIP_IRQ_D1..D6 */
3361 switch (crtc_id) {
3362 case AMDGPU_PAGEFLIP_IRQ_D1:
3363 reg_block = CRTC0_REGISTER_OFFSET;
3364 break;
3365 case AMDGPU_PAGEFLIP_IRQ_D2:
3366 reg_block = CRTC1_REGISTER_OFFSET;
3367 break;
3368 case AMDGPU_PAGEFLIP_IRQ_D3:
3369 reg_block = CRTC2_REGISTER_OFFSET;
3370 break;
3371 case AMDGPU_PAGEFLIP_IRQ_D4:
3372 reg_block = CRTC3_REGISTER_OFFSET;
3373 break;
3374 case AMDGPU_PAGEFLIP_IRQ_D5:
3375 reg_block = CRTC4_REGISTER_OFFSET;
3376 break;
3377 case AMDGPU_PAGEFLIP_IRQ_D6:
3378 reg_block = CRTC5_REGISTER_OFFSET;
3379 break;
3380 default:
3381 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3382 return -EINVAL;
3383 }
3384
3385 if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3386 WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3387
3388 /* IRQ could occur when in initial stage */
3389 if (amdgpu_crtc == NULL)
3390 return 0;
3391
3392 spin_lock_irqsave(&adev->ddev->event_lock, flags);
3393 works = amdgpu_crtc->pflip_works;
3394 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3395 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3396 "AMDGPU_FLIP_SUBMITTED(%d)\n",
3397 amdgpu_crtc->pflip_status,
3398 AMDGPU_FLIP_SUBMITTED);
3399 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3400 return 0;
3401 }
3402
3403 /* page flip completed. clean up */
3404 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3405 amdgpu_crtc->pflip_works = NULL;
3406
3407 /* wake up userspace */
3408 if (works->event)
3409 drm_send_vblank_event(adev->ddev, crtc_id, works->event);
3410
3411 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3412
3413 drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
3414 amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
3415 queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
3416
3417 return 0;
3418}
3419
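/**
 * dce_v10_0_hpd_int_ack - ack an hpd interrupt
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd pin whose latched interrupt to clear
 *
 * Writes DC_HPD_INT_ACK in the pin's DC_HPD_INT_CONTROL register.
 */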
3420static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev,
3421 int hpd)
3422{
3423 u32 tmp;
3424
3425 if (hpd >= adev->mode_info.num_hpd) {
3426 DRM_DEBUG("invalid hpd %d\n", hpd);
3427 return;
3428 }
3429
3430 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3431 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1);
3432 WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3433}
3434
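/**
 * dce_v10_0_crtc_vblank_int_ack - ack a crtc vblank interrupt
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc whose latched vblank interrupt to clear
 *
 * Writes VBLANK_ACK in the crtc's LB_VBLANK_STATUS register.
 */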
3435static void dce_v10_0_crtc_vblank_int_ack(struct amdgpu_device *adev,
3436 int crtc)
3437{
3438 u32 tmp;
3439
3440 if (crtc >= adev->mode_info.num_crtc) {
3441 DRM_DEBUG("invalid crtc %d\n", crtc);
3442 return;
3443 }
3444
3445 tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]);
3446 tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1);
3447 WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp);
3448}
3449
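/**
 * dce_v10_0_crtc_vline_int_ack - ack a crtc vline interrupt
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc whose latched vline interrupt to clear
 *
 * Writes VLINE_ACK in the crtc's LB_VLINE_STATUS register.
 */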
3450static void dce_v10_0_crtc_vline_int_ack(struct amdgpu_device *adev,
3451 int crtc)
3452{
3453 u32 tmp;
3454
3455 if (crtc >= adev->mode_info.num_crtc) {
3456 DRM_DEBUG("invalid crtc %d\n", crtc);
3457 return;
3458 }
3459
3460 tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]);
3461 tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1);
3462 WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp);
3463}
3464
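/**
 * dce_v10_0_crtc_irq - crtc vblank/vline irq handler
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: interrupt vector entry
 *
 * Acks the vblank or vline interrupt for the crtc encoded in the vector
 * (src_id 1..num_crtc) and forwards vblank events to drm.
 */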
3465static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
3466 struct amdgpu_irq_src *source,
3467 struct amdgpu_iv_entry *entry)
3468{
3469 unsigned crtc = entry->src_id - 1;
3470 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3471 unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
3472
3473 switch (entry->src_data) {
3474 case 0: /* vblank */
3475 if (disp_int & interrupt_status_offsets[crtc].vblank) {
3476 dce_v10_0_crtc_vblank_int_ack(adev, crtc);
3477 if (amdgpu_irq_enabled(adev, source, irq_type))
3478 drm_handle_vblank(adev->ddev, crtc);
3480 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3481 }
3482 break;
3483 case 1: /* vline */
3484 if (disp_int & interrupt_status_offsets[crtc].vline) {
3485 dce_v10_0_crtc_vline_int_ack(adev, crtc);
3486 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3487 }
3488 break;
3489 default:
3490 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
3491 break;
3492 }
3493
3494 return 0;
3495}
3496
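/**
 * dce_v10_0_hpd_irq - hotplug irq handler
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: interrupt vector entry
 *
 * Acks the hpd interrupt and schedules the hotplug work so connector
 * state can be re-probed.
 */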
3497static int dce_v10_0_hpd_irq(struct amdgpu_device *adev,
3498 struct amdgpu_irq_src *source,
3499 struct amdgpu_iv_entry *entry)
3500{
3501 uint32_t disp_int, mask;
3502 unsigned hpd;
3503
3504 if (entry->src_data >= adev->mode_info.num_hpd) {
3505 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
3506 return 0;
3507 }
3508
3509 hpd = entry->src_data;
3510 disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3511 mask = interrupt_status_offsets[hpd].hpd;
3512
3513 if (disp_int & mask) {
3514 dce_v10_0_hpd_int_ack(adev, hpd);
3515 schedule_work(&adev->hotplug_work);
3516 DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3517 }
3518
3519 return 0;
3520}
3521
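/*
 * DCE v10 has no display clock/power gating support yet; these hooks
 * are stubs that simply report success.
 */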
3522static int dce_v10_0_set_clockgating_state(struct amdgpu_device *adev,
3523 enum amdgpu_clockgating_state state)
3524{
3525 return 0;
3526}
3527
3528static int dce_v10_0_set_powergating_state(struct amdgpu_device *adev,
3529 enum amdgpu_powergating_state state)
3530{
3531 return 0;
3532}
3533
3534const struct amdgpu_ip_funcs dce_v10_0_ip_funcs = {
3535 .early_init = dce_v10_0_early_init,
3536 .late_init = NULL,
3537 .sw_init = dce_v10_0_sw_init,
3538 .sw_fini = dce_v10_0_sw_fini,
3539 .hw_init = dce_v10_0_hw_init,
3540 .hw_fini = dce_v10_0_hw_fini,
3541 .suspend = dce_v10_0_suspend,
3542 .resume = dce_v10_0_resume,
3543 .is_idle = dce_v10_0_is_idle,
3544 .wait_for_idle = dce_v10_0_wait_for_idle,
3545 .soft_reset = dce_v10_0_soft_reset,
3546 .print_status = dce_v10_0_print_status,
3547 .set_clockgating_state = dce_v10_0_set_clockgating_state,
3548 .set_powergating_state = dce_v10_0_set_powergating_state,
3549};
3550
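/**
 * dce_v10_0_encoder_mode_set - program an encoder for the new mode
 *
 * @encoder: drm encoder
 * @mode: requested mode
 * @adjusted_mode: mode after fixup, used for the actual programming
 *
 * Caches the pixel clock, powers the encoder down for reprogramming,
 * restores the interleave setting and, for HDMI, enables and configures
 * the AFMT (audio/infoframe) block.
 */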
3551static void
3552dce_v10_0_encoder_mode_set(struct drm_encoder *encoder,
3553 struct drm_display_mode *mode,
3554 struct drm_display_mode *adjusted_mode)
3555{
3556 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3557
3558 amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3559
3560 /* need to call this here rather than in prepare() since we need some crtc info */
3561 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3562
3563 /* the scaler setup clears the interleave setting on some chips, so restore it here */
3564 dce_v10_0_set_interleave(encoder->crtc, mode);
3565
3566 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
3567 dce_v10_0_afmt_enable(encoder, true);
3568 dce_v10_0_afmt_setmode(encoder, adjusted_mode);
3569 }
3570}
3571
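/**
 * dce_v10_0_encoder_prepare - get an encoder ready for a mode set
 *
 * @encoder: drm encoder
 *
 * Picks a DIG encoder/AFMT block for digital outputs, selects the i2c
 * router port, powers up eDP panels, sets the crtc source in the atom
 * scratch registers and programs the FMT block.
 */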
3572static void dce_v10_0_encoder_prepare(struct drm_encoder *encoder)
3573{
3574 struct amdgpu_device *adev = encoder->dev->dev_private;
3575 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3576 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3577
3578 if ((amdgpu_encoder->active_device &
3579 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3580 (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3581 ENCODER_OBJECT_ID_NONE)) {
3582 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3583 if (dig) {
3584 dig->dig_encoder = dce_v10_0_pick_dig_encoder(encoder);
3585 if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3586 dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3587 }
3588 }
3589
3590 amdgpu_atombios_scratch_regs_lock(adev, true);
3591
3592 if (connector) {
3593 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3594
3595 /* select the clock/data port if it uses a router */
3596 if (amdgpu_connector->router.cd_valid)
3597 amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3598
3599 /* turn eDP panel on for mode set */
3600 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3601 amdgpu_atombios_encoder_set_edp_panel_power(connector,
3602 ATOM_TRANSMITTER_ACTION_POWER_ON);
3603 }
3604
3605 /* this is needed for the pll/ss setup to work correctly in some cases */
3606 amdgpu_atombios_encoder_set_crtc_source(encoder);
3607 /* set up the FMT blocks */
3608 dce_v10_0_program_fmt(encoder);
3609}
3610
3611static void dce_v10_0_encoder_commit(struct drm_encoder *encoder)
3612{
3613 struct drm_device *dev = encoder->dev;
3614 struct amdgpu_device *adev = dev->dev_private;
3615
3616 /* need to call this here as we need the crtc set up */
3617 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3618 amdgpu_atombios_scratch_regs_lock(adev, false);
3619}
3620
3621static void dce_v10_0_encoder_disable(struct drm_encoder *encoder)
3622{
3623 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3624 struct amdgpu_encoder_atom_dig *dig;
3625
3626 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3627
3628 if (amdgpu_atombios_encoder_is_digital(encoder)) {
3629 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3630 dce_v10_0_afmt_enable(encoder, false);
3631 dig = amdgpu_encoder->enc_priv;
3632 dig->dig_encoder = -1;
3633 }
3634 amdgpu_encoder->active_device = 0;
3635}
3636
3637/* these are handled by the primary encoders */
3638static void dce_v10_0_ext_prepare(struct drm_encoder *encoder)
3639{
3640
3641}
3642
3643static void dce_v10_0_ext_commit(struct drm_encoder *encoder)
3644{
3645
3646}
3647
3648static void
3649dce_v10_0_ext_mode_set(struct drm_encoder *encoder,
3650 struct drm_display_mode *mode,
3651 struct drm_display_mode *adjusted_mode)
3652{
3653
3654}
3655
3656static void dce_v10_0_ext_disable(struct drm_encoder *encoder)
3657{
3658
3659}
3660
3661static void
3662dce_v10_0_ext_dpms(struct drm_encoder *encoder, int mode)
3663{
3664
3665}
3666
3667static bool dce_v10_0_ext_mode_fixup(struct drm_encoder *encoder,
3668 const struct drm_display_mode *mode,
3669 struct drm_display_mode *adjusted_mode)
3670{
3671 return true;
3672}
3673
3674static const struct drm_encoder_helper_funcs dce_v10_0_ext_helper_funcs = {
3675 .dpms = dce_v10_0_ext_dpms,
3676 .mode_fixup = dce_v10_0_ext_mode_fixup,
3677 .prepare = dce_v10_0_ext_prepare,
3678 .mode_set = dce_v10_0_ext_mode_set,
3679 .commit = dce_v10_0_ext_commit,
3680 .disable = dce_v10_0_ext_disable,
3681 /* no detect for TMDS/LVDS yet */
3682};
3683
3684static const struct drm_encoder_helper_funcs dce_v10_0_dig_helper_funcs = {
3685 .dpms = amdgpu_atombios_encoder_dpms,
3686 .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3687 .prepare = dce_v10_0_encoder_prepare,
3688 .mode_set = dce_v10_0_encoder_mode_set,
3689 .commit = dce_v10_0_encoder_commit,
3690 .disable = dce_v10_0_encoder_disable,
3691 .detect = amdgpu_atombios_encoder_dig_detect,
3692};
3693
3694static const struct drm_encoder_helper_funcs dce_v10_0_dac_helper_funcs = {
3695 .dpms = amdgpu_atombios_encoder_dpms,
3696 .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3697 .prepare = dce_v10_0_encoder_prepare,
3698 .mode_set = dce_v10_0_encoder_mode_set,
3699 .commit = dce_v10_0_encoder_commit,
3700 .detect = amdgpu_atombios_encoder_dac_detect,
3701};
3702
3703static void dce_v10_0_encoder_destroy(struct drm_encoder *encoder)
3704{
3705 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3706 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3707 amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3708 kfree(amdgpu_encoder->enc_priv);
3709 drm_encoder_cleanup(encoder);
3710 kfree(amdgpu_encoder);
3711}
3712
3713static const struct drm_encoder_funcs dce_v10_0_encoder_funcs = {
3714 .destroy = dce_v10_0_encoder_destroy,
3715};
3716
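/**
 * dce_v10_0_encoder_add - add a new encoder or extend an existing one
 *
 * @adev: amdgpu_device pointer
 * @encoder_enum: encoder object enum from the bios tables
 * @supported_device: bitmask of devices this encoder can drive
 * @caps: encoder capability flags
 *
 * If an encoder with this enum already exists, just ORs in the new
 * devices; otherwise allocates an amdgpu_encoder and registers it with
 * the helper funcs matching its object id (DAC, DIG or external bridge).
 */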
3717static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
3718 uint32_t encoder_enum,
3719 uint32_t supported_device,
3720 u16 caps)
3721{
3722 struct drm_device *dev = adev->ddev;
3723 struct drm_encoder *encoder;
3724 struct amdgpu_encoder *amdgpu_encoder;
3725
3726 /* see if we already added it */
3727 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3728 amdgpu_encoder = to_amdgpu_encoder(encoder);
3729 if (amdgpu_encoder->encoder_enum == encoder_enum) {
3730 amdgpu_encoder->devices |= supported_device;
3731 return;
3732 }
3734 }
3735
3736 /* add a new one */
3737 amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
3738 if (!amdgpu_encoder)
3739 return;
3740
3741 encoder = &amdgpu_encoder->base;
3742 switch (adev->mode_info.num_crtc) {
3743 case 1:
3744 encoder->possible_crtcs = 0x1;
3745 break;
3746 case 2:
3747 default: /* other crtc counts fall back to the dual-crtc mask */
3748 encoder->possible_crtcs = 0x3;
3749 break;
3750 case 4:
3751 encoder->possible_crtcs = 0xf;
3752 break;
3753 case 6:
3754 encoder->possible_crtcs = 0x3f;
3755 break;
3756 }
3757
3758 amdgpu_encoder->enc_priv = NULL;
3759
3760 amdgpu_encoder->encoder_enum = encoder_enum;
3761 amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3762 amdgpu_encoder->devices = supported_device;
3763 amdgpu_encoder->rmx_type = RMX_OFF;
3764 amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3765 amdgpu_encoder->is_ext_encoder = false;
3766 amdgpu_encoder->caps = caps;
3767
3768 switch (amdgpu_encoder->encoder_id) {
3769 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3770 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3771 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3772 DRM_MODE_ENCODER_DAC);
3773 drm_encoder_helper_add(encoder, &dce_v10_0_dac_helper_funcs);
3774 break;
3775 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3776 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3777 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3778 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3779 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3780 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3781 amdgpu_encoder->rmx_type = RMX_FULL;
3782 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3783 DRM_MODE_ENCODER_LVDS);
3784 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3785 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3786 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3787 DRM_MODE_ENCODER_DAC);
3788 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3789 } else {
3790 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3791 DRM_MODE_ENCODER_TMDS);
3792 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3793 }
3794 drm_encoder_helper_add(encoder, &dce_v10_0_dig_helper_funcs);
3795 break;
3796 case ENCODER_OBJECT_ID_SI170B:
3797 case ENCODER_OBJECT_ID_CH7303:
3798 case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3799 case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3800 case ENCODER_OBJECT_ID_TITFP513:
3801 case ENCODER_OBJECT_ID_VT1623:
3802 case ENCODER_OBJECT_ID_HDMI_SI1930:
3803 case ENCODER_OBJECT_ID_TRAVIS:
3804 case ENCODER_OBJECT_ID_NUTMEG:
3805 /* these are handled by the primary encoders */
3806 amdgpu_encoder->is_ext_encoder = true;
3807 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3808 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3809 DRM_MODE_ENCODER_LVDS);
3810 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3811 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3812 DRM_MODE_ENCODER_DAC);
3813 else
3814 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3815 DRM_MODE_ENCODER_TMDS);
3816 drm_encoder_helper_add(encoder, &dce_v10_0_ext_helper_funcs);
3817 break;
3818 }
3819}
3820
3821static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
3822 .set_vga_render_state = &dce_v10_0_set_vga_render_state,
3823 .bandwidth_update = &dce_v10_0_bandwidth_update,
3824 .vblank_get_counter = &dce_v10_0_vblank_get_counter,
3825 .vblank_wait = &dce_v10_0_vblank_wait,
3826 .is_display_hung = &dce_v10_0_is_display_hung,
3827 .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3828 .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3829 .hpd_sense = &dce_v10_0_hpd_sense,
3830 .hpd_set_polarity = &dce_v10_0_hpd_set_polarity,
3831 .hpd_get_gpio_reg = &dce_v10_0_hpd_get_gpio_reg,
3832 .page_flip = &dce_v10_0_page_flip,
3833 .page_flip_get_scanoutpos = &dce_v10_0_crtc_get_scanoutpos,
3834 .add_encoder = &dce_v10_0_encoder_add,
3835 .add_connector = &amdgpu_connector_add,
3836 .stop_mc_access = &dce_v10_0_stop_mc_access,
3837 .resume_mc_access = &dce_v10_0_resume_mc_access,
3838};
3839
3840static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev)
3841{
3842 if (adev->mode_info.funcs == NULL)
3843 adev->mode_info.funcs = &dce_v10_0_display_funcs;
3844}
3845
3846static const struct amdgpu_irq_src_funcs dce_v10_0_crtc_irq_funcs = {
3847 .set = dce_v10_0_set_crtc_irq_state,
3848 .process = dce_v10_0_crtc_irq,
3849};
3850
3851static const struct amdgpu_irq_src_funcs dce_v10_0_pageflip_irq_funcs = {
3852 .set = dce_v10_0_set_pageflip_irq_state,
3853 .process = dce_v10_0_pageflip_irq,
3854};
3855
3856static const struct amdgpu_irq_src_funcs dce_v10_0_hpd_irq_funcs = {
3857 .set = dce_v10_0_set_hpd_irq_state,
3858 .process = dce_v10_0_hpd_irq,
3859};
3860
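/**
 * dce_v10_0_set_irq_funcs - wire up the DCE v10 irq sources
 *
 * @adev: amdgpu_device pointer
 *
 * Points the crtc, pageflip and hpd irq sources at the set/process
 * callbacks defined above.
 */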
3861static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev)
3862{
3863 adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
3864 adev->crtc_irq.funcs = &dce_v10_0_crtc_irq_funcs;
3865
3866 adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
3867 adev->pageflip_irq.funcs = &dce_v10_0_pageflip_irq_funcs;
3868
3869 adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
3870 adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs;
3871}