author	Ken Wang <Qingqing.Wang@amd.com>	2016-01-19 01:03:24 -0500
committer	Alex Deucher <alexander.deucher@amd.com>	2016-08-31 12:10:19 -0400
commit	e2cdf640cbb5b7d6643e1c8ad54bf3bfc99d4d48 (patch)
tree	bc3b3adfd9e0dadbb03c0c5c4b1e9a42f26852df /drivers/gpu/drm/amd/amdgpu
parent	27ae10641e9c99f32db004cc54cb0639cd58d6d1 (diff)
drm/amdgpu: add display controller implementation for si v10
v4: rebase fixups
v5: more fixes based on dce8 code
v6: squash in dmif offset fix
v7: rebase fixups
v8: rebase fixups, drop some debugging remnants
v9: fix BE build
v10: include Marek's tiling fixes, add support for page_flip_target,
     set MASTER_UPDATE_MODE=0, fix cursor

Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Ken Wang <Qingqing.Wang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/dce_v6_0.c	3160
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/dce_v6_0.h	29
2 files changed, 3189 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
new file mode 100644
index 000000000000..d3512f381e53
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -0,0 +1,3160 @@
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
#include "si/si_reg.h"
#include "si/sid.h"

static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[6] =
{
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET
};

static const uint32_t dig_offsets[] = {
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};

static const struct {
	uint32_t reg;
	uint32_t vblank;
	uint32_t vline;
	uint32_t hpd;
} interrupt_status_offsets[6] = { {
	.reg = DISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = DISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = DISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = DISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = DISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = DISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

static const uint32_t hpd_int_control_offsets[6] = {
	DC_HPD1_INT_CONTROL,
	DC_HPD2_INT_CONTROL,
	DC_HPD3_INT_CONTROL,
	DC_HPD4_INT_CONTROL,
	DC_HPD5_INT_CONTROL,
	DC_HPD6_INT_CONTROL,
};

static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	DRM_INFO("xxxx: dce_v6_0_audio_endpt_rreg ----no impl!!!!\n");
	return 0;
}

static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	DRM_INFO("xxxx: dce_v6_0_audio_endpt_wreg ----no impl!!!!\n");
}

static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
{
	if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
		return true;
	else
		return false;
}

static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
{
	u32 pos1, pos2;

	pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
	pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);

	if (pos1 != pos2)
		return true;
	else
		return false;
}
/**
 * dce_v6_0_vblank_wait - vblank wait asic callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (evergreen+).
 */
static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
	unsigned i = 0;

	if (crtc >= adev->mode_info.num_crtc)
		return;

	if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
		return;

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (dce_v6_0_is_in_vblank(adev, crtc)) {
		if (i++ % 100 == 0) {
			if (!dce_v6_0_is_counter_moving(adev, crtc))
				break;
		}
	}

	while (!dce_v6_0_is_in_vblank(adev, crtc)) {
		if (i++ % 100 == 0) {
			if (!dce_v6_0_is_counter_moving(adev, crtc))
				break;
		}
	}
}
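
/*
 * Editorial note: both polling loops above re-check, every 100 iterations,
 * that the raw scanout position is still advancing; if the counter has
 * stopped (e.g. the crtc was disabled mid-wait), the loop bails out rather
 * than spinning forever.
 */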

static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Enable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* Disable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}
/**
 * dce_v6_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Triggers the actual pageflip by updating the primary
 * surface base address (evergreen+).
 */
static void dce_v6_0_page_flip(struct amdgpu_device *adev,
			       int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	/* flip at hsync for async, default is vsync */
	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
	       EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
	/* update the scanout addresses */
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)crtc_base);

	/* post the write */
	RREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}
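
/*
 * Worked example (editorial, hypothetical address): for crtc_base =
 * 0x123456000, upper_32_bits() yields 0x1 for the _HIGH register and the
 * (u32) cast yields 0x23456000 for the low register; the trailing RREG32
 * posts the writes so the flip is committed before returning.
 */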

static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	*vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}

/**
 * dce_v6_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
			       enum amdgpu_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case AMDGPU_HPD_1:
		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case AMDGPU_HPD_2:
		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case AMDGPU_HPD_3:
		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case AMDGPU_HPD_4:
		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case AMDGPU_HPD_5:
		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case AMDGPU_HPD_6:
		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	default:
		break;
	}

	return connected;
}

/**
 * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
				      enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v6_0_hpd_sense(adev, hpd);

	switch (hpd) {
	case AMDGPU_HPD_1:
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_2:
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_3:
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_4:
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_5:
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_6:
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}

/**
 * dce_v6_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
		  DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS to avoid
			 * breaking the aux dp channel on imac; this helps
			 * (but does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * and also avoids interrupt storms during dpms.
			 */
			continue;
		}
		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			WREG32(DC_HPD1_CONTROL, tmp);
			break;
		case AMDGPU_HPD_2:
			WREG32(DC_HPD2_CONTROL, tmp);
			break;
		case AMDGPU_HPD_3:
			WREG32(DC_HPD3_CONTROL, tmp);
			break;
		case AMDGPU_HPD_4:
			WREG32(DC_HPD4_CONTROL, tmp);
			break;
		case AMDGPU_HPD_5:
			WREG32(DC_HPD5_CONTROL, tmp);
			break;
		case AMDGPU_HPD_6:
			WREG32(DC_HPD6_CONTROL, tmp);
			break;
		default:
			break;
		}
		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

/**
 * dce_v6_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			WREG32(DC_HPD1_CONTROL, 0);
			break;
		case AMDGPU_HPD_2:
			WREG32(DC_HPD2_CONTROL, 0);
			break;
		case AMDGPU_HPD_3:
			WREG32(DC_HPD3_CONTROL, 0);
			break;
		case AMDGPU_HPD_4:
			WREG32(DC_HPD4_CONTROL, 0);
			break;
		case AMDGPU_HPD_5:
			WREG32(DC_HPD5_CONTROL, 0);
			break;
		case AMDGPU_HPD_6:
			WREG32(DC_HPD6_CONTROL, 0);
			break;
		default:
			break;
		}
		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return SI_DC_GPIO_HPD_A;
}

static bool dce_v6_0_is_display_hung(struct amdgpu_device *adev)
{
	DRM_INFO("xxxx: dce_v6_0_is_display_hung ----no imp!!!!!\n");

	return true;
}

static u32 evergreen_get_vblank_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
				    struct amdgpu_mode_mc_save *save)
{
	u32 crtc_enabled, tmp, frame_count;
	int i, j;

	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);

	/* disable VGA render */
	WREG32(VGA_RENDER_CONTROL, 0);

	/* blank the display controllers */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
		if (crtc_enabled) {
			save->crtc_enabled[i] = true;
			tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);

			if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
				dce_v6_0_vblank_wait(adev, i);
				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
				WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
			/* wait for the next frame */
			frame_count = evergreen_get_vblank_counter(adev, i);
			for (j = 0; j < adev->usec_timeout; j++) {
				if (evergreen_get_vblank_counter(adev, i) != frame_count)
					break;
				udelay(1);
			}

			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
			tmp &= ~EVERGREEN_CRTC_MASTER_EN;
			WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			save->crtc_enabled[i] = false;
			/* ***** */
		} else {
			save->crtc_enabled[i] = false;
		}
	}
}

static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
				      struct amdgpu_mode_mc_save *save)
{
	u32 tmp;
	int i, j;

	/* update crtc base addresses */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);
	}

	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);

	/* unlock regs and wait for update */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (save->crtc_enabled[i]) {
			tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
			if ((tmp & 0x7) != 3) {
				tmp &= ~0x7;
				tmp |= 0x3;
				WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
			if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
				tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
				WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (tmp & 1) {
				tmp &= ~1;
				WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
			for (j = 0; j < adev->usec_timeout; j++) {
				tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
				if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
					break;
				udelay(1);
			}
		}
	}

	/* Unlock vga access */
	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}

static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
					  bool render)
{
	if (!render)
		WREG32(R_000300_VGA_RENDER_CONTROL,
		       RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
}

static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
				FMT_SPATIAL_DITHER_EN);
		else
			tmp |= FMT_TRUNCATE_EN;
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
				FMT_RGB_RANDOM_ENABLE |
				FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
		else
			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
		break;
	case 10:
	default:
		/* not needed */
		break;
	}

	WREG32(FMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}

/**
 * si_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (SI).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(MC_SHARED_CHMAP);

	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}
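
/*
 * Worked example (editorial): a board with a 384-bit memory bus would
 * report NOOFCHAN = 7 in MC_SHARED_CHMAP, which the switch above decodes
 * to 12 channels (12 x 32-bit = 384-bit).
 */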

struct dce6_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};

/**
 * dce_v6_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}
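
/*
 * Worked example (editorial, hypothetical numbers): with yclk = 1000000 kHz
 * (1 GHz effective per pin) and 8 channels, the math above gives
 * (1000000 / 1000) * (8 * 4) * 0.7 = 22400 MBytes/s.
 */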

/**
 * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}
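
/*
 * Worked example (editorial, hypothetical numbers): at sclk = 800000 kHz the
 * return path moves 32 bytes per engine clock at 0.8 efficiency, i.e.
 * (800000 / 1000) * 32 * 0.8 = 20480 MBytes/s.
 */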

/**
 * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

/**
 * dce_v6_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}
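
/*
 * Worked example (editorial, hypothetical 1080p mode): a 148500 kHz pixel
 * clock gives pixel_period = 1000000 / 148500 ~= 6 ns, so a 2200-pixel
 * htotal line takes ~13200 ns; with 4 bytes per pixel and vsc = 1 the
 * average bandwidth is 1920 * 4 / 13.2 ~= 581 MBytes/s.
 */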

/**
 * dce_v6_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (SI).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(tmp, dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}
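
/*
 * Editorial summary: the returned watermark is the memory latency plus the
 * time other heads spend returning chunk/cursor data plus the dc pipe
 * latency, extended by any shortfall when the line buffer cannot be
 * refilled (lb_fill_bw) within the active display time.
 */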

/**
 * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	if (dce_v6_0_average_bandwidth(wm) <=
	    (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
{
	if (dce_v6_0_average_bandwidth(wm) <=
	    (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v6_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}
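
/*
 * Worked example (editorial, hypothetical numbers): with lb_size = 16384,
 * src_width = 1920 and vsc <= 1, lb_partitions = 8 > vtaps + 1, so two
 * latency-tolerant lines are assumed; at a 13200 ns line time with 1680 ns
 * of blanking this hides 2 * 13200 + 1680 = 28080 ns of latency.
 */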

/**
 * dce_v6_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (SI).
 */
static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce6_wm_params wm_low, wm_high;
	u32 dram_channels;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 tmp, arb_control3;
	fixed20_12 a, b, c;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		dram_channels = si_get_number_of_dram_channels(adev);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = dram_channels;
		wm_high.num_heads = num_heads;

		if (adev->pm.dpm_enabled) {
			/* watermark for low clocks */
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = dram_channels;
		wm_low.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
		/* set for low clocks */
		latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v6_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v6_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
	}

	/* select wm A */
	arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);

	/* write the priority marks */
	WREG32(PRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
	WREG32(PRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;
}
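
/*
 * Editorial note on units: the priority mark computed above converts the
 * watermark from ns to pixels (ns * pixel clock in GHz), scales it by the
 * horizontal scale ratio, and is programmed in units of 16 pixels; e.g.
 * with hsc = 1, 28000 ns * 0.1485 GHz / 16 ~= 259.
 */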

/* watermark setup */
static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
				       struct amdgpu_crtc *amdgpu_crtc,
				       struct drm_display_mode *mode,
				       struct drm_display_mode *other_mode)
{
	u32 tmp, buffer_alloc, i;
	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers. The partitioning is done via one of four
	 * preset allocations specified in bits 21:20; only two are used here:
	 * 0 - half lb
	 * 2 - whole lb, other crtc must be disabled
	 */
	/* this can get tricky if we have two large displays on a paired group
	 * of crtcs. Ideally for multiple large displays we'd assign them to
	 * non-linked crtcs for maximum line buffer allocation.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		if (other_mode) {
			tmp = 0; /* 1/2 */
			buffer_alloc = 1;
		} else {
			tmp = 2; /* whole */
			buffer_alloc = 2;
		}
	} else {
		tmp = 0;
		buffer_alloc = 0;
	}

	WREG32(DC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
	       DC_LB_MEMORY_CONFIG(tmp));

	WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
	       DMIF_BUFFERS_ALLOCATED(buffer_alloc));
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
		    DMIF_BUFFERS_ALLOCATED_COMPLETED)
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 2:
			return 8192 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}
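
/*
 * Worked example (editorial): if both crtcs of a pair drive displays, each
 * gets half of the shared line buffer and this returns 4096 * 2 = 8192
 * entries; a crtc whose partner is disabled gets the whole buffer,
 * 8192 * 2 = 16384 entries.
 */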

/**
 * dce_v6_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (SI).
 */
static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	if (!adev->mode_info.mode_config_initialized)
		return;

	amdgpu_update_display_priority(adev);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
		mode0 = &adev->mode_info.crtcs[i]->base.mode;
		mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}

/*
static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	int i;
	u32 offset, tmp;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		offset = adev->mode_info.audio.pin[i].offset;
		tmp = RREG32_AUDIO_ENDPT(offset,
					 AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
		if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1)
			adev->mode_info.audio.pin[i].connected = false;
		else
			adev->mode_info.audio.pin[i].connected = true;
	}
}

static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
{
	int i;

	dce_v6_0_audio_get_connected_pins(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		if (adev->mode_info.audio.pin[i].connected)
			return &adev->mode_info.audio.pin[i];
	}
	DRM_ERROR("No connected audio pins found!\n");
	return NULL;
}

static void dce_v6_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->offset;

	WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
	       AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
}

static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
						struct drm_display_mode *mode)
{
	DRM_INFO("xxxx: dce_v6_0_audio_write_latency_fields---no imp!!!!!\n");
}

static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
	DRM_INFO("xxxx: dce_v6_0_audio_write_speaker_allocation---no imp!!!!!\n");
}

static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	DRM_INFO("xxxx: dce_v6_0_audio_write_sad_regs---no imp!!!!!\n");
}
*/
static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
				  struct amdgpu_audio_pin *pin,
				  bool enable)
{
	DRM_INFO("xxxx: dce_v6_0_audio_enable---no imp!!!!!\n");
}

static const u32 pin_offsets[7] =
{
	(0x1780 - 0x1780),
	(0x1786 - 0x1780),
	(0x178c - 0x1780),
	(0x1792 - 0x1780),
	(0x1798 - 0x1780),
	(0x179d - 0x1780),
	(0x17a4 - 0x1780),
};

static int dce_v6_0_audio_init(struct amdgpu_device *adev)
{
	return 0;
}

static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
{
}

/*
static void dce_v6_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
	DRM_INFO("xxxx: dce_v6_0_afmt_update_ACR---no imp!!!!!\n");
}
*/
/*
 * build a HDMI Video Info Frame
 */
/*
static void dce_v6_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
					       void *buffer, size_t size)
{
	DRM_INFO("xxxx: dce_v6_0_afmt_update_avi_infoframe---no imp!!!!!\n");
}

static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
	DRM_INFO("xxxx: dce_v6_0_audio_set_dto---no imp!!!!!\n");
}
*/
/*
 * update the info frames with the data from the current display mode
 */
static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
				  struct drm_display_mode *mode)
{
	DRM_INFO("xxxx: dce_v6_0_afmt_setmode ----no impl !!!!!!!!\n");
}

static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (enable && dig->afmt->enabled)
		return;
	if (!enable && !dig->afmt->enabled)
		return;

	if (!enable && dig->afmt->pin) {
		dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
		dig->afmt->pin = NULL;
	}

	dig->afmt->enabled = enable;

	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
}

static void dce_v6_0_afmt_init(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->mode_info.num_dig; i++)
		adev->mode_info.afmt[i] = NULL;

	/* DCE6 has audio blocks tied to DIG encoders */
1431 for (i = 0; i < adev->mode_info.num_dig; i++) {
1432 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1433 if (adev->mode_info.afmt[i]) {
1434 adev->mode_info.afmt[i]->offset = dig_offsets[i];
1435 adev->mode_info.afmt[i]->id = i;
1436 }
1437 }
1438}
1439
1440static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
1441{
1442 int i;
1443
1444 for (i = 0; i < adev->mode_info.num_dig; i++) {
1445 kfree(adev->mode_info.afmt[i]);
1446 adev->mode_info.afmt[i] = NULL;
1447 }
1448}
1449
1450static const u32 vga_control_regs[6] =
1451{
1452 AVIVO_D1VGA_CONTROL,
1453 AVIVO_D2VGA_CONTROL,
1454 EVERGREEN_D3VGA_CONTROL,
1455 EVERGREEN_D4VGA_CONTROL,
1456 EVERGREEN_D5VGA_CONTROL,
1457 EVERGREEN_D6VGA_CONTROL,
1458};
1459
1460static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
1461{
1462 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1463 struct drm_device *dev = crtc->dev;
1464 struct amdgpu_device *adev = dev->dev_private;
1465 u32 vga_control;
1466
1467 vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1468 if (enable)
1469 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
1470 else
1471 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
1472}
1473
1474static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
1475{
1476 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1477 struct drm_device *dev = crtc->dev;
1478 struct amdgpu_device *adev = dev->dev_private;
1479
1480 if (enable)
1481 WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
1482 else
1483 WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
1484}
1485
1486static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
1487 struct drm_framebuffer *fb,
1488 int x, int y, int atomic)
1489{
1490 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1491 struct drm_device *dev = crtc->dev;
1492 struct amdgpu_device *adev = dev->dev_private;
1493 struct amdgpu_framebuffer *amdgpu_fb;
1494 struct drm_framebuffer *target_fb;
1495 struct drm_gem_object *obj;
1496 struct amdgpu_bo *rbo;
1497 uint64_t fb_location, tiling_flags;
1498 uint32_t fb_format, fb_pitch_pixels, pipe_config;
1499 u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
1500 u32 viewport_w, viewport_h;
1501 int r;
1502 bool bypass_lut = false;
1503
1504 /* no fb bound */
1505 if (!atomic && !crtc->primary->fb) {
1506 DRM_DEBUG_KMS("No FB bound\n");
1507 return 0;
1508 }
1509
1510 if (atomic) {
1511 amdgpu_fb = to_amdgpu_framebuffer(fb);
1512 target_fb = fb;
1513 }
1514 else {
1515 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
1516 target_fb = crtc->primary->fb;
1517 }
1518
1519 /* If atomic, assume fb object is pinned & idle & fenced and
1520 * just update base pointers
1521 */
1522 obj = amdgpu_fb->obj;
1523 rbo = gem_to_amdgpu_bo(obj);
1524 r = amdgpu_bo_reserve(rbo, false);
1525 if (unlikely(r != 0))
1526 return r;
1527
1528 if (atomic)
1529 fb_location = amdgpu_bo_gpu_offset(rbo);
1530 else {
1531 r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
1532 if (unlikely(r != 0)) {
1533 amdgpu_bo_unreserve(rbo);
1534 return -EINVAL;
1535 }
1536 }
1537
1538 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
1539 amdgpu_bo_unreserve(rbo);
1540
1541 switch (target_fb->pixel_format) {
1542 case DRM_FORMAT_C8:
1543 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
1544 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
1545 break;
1546 case DRM_FORMAT_XRGB4444:
1547 case DRM_FORMAT_ARGB4444:
1548 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
1549 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB4444));
1550#ifdef __BIG_ENDIAN
1551 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
1552#endif
1553 break;
1554 case DRM_FORMAT_XRGB1555:
1555 case DRM_FORMAT_ARGB1555:
1556 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
1557 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
1558#ifdef __BIG_ENDIAN
1559 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
1560#endif
1561 break;
1562 case DRM_FORMAT_BGRX5551:
1563 case DRM_FORMAT_BGRA5551:
1564 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
1565 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA5551));
1566#ifdef __BIG_ENDIAN
1567 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
1568#endif
1569 break;
1570 case DRM_FORMAT_RGB565:
1571 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
1572 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
1573#ifdef __BIG_ENDIAN
1574 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
1575#endif
1576 break;
1577 case DRM_FORMAT_XRGB8888:
1578 case DRM_FORMAT_ARGB8888:
1579 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
1580 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
1581#ifdef __BIG_ENDIAN
1582 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
1583#endif
1584 break;
1585 case DRM_FORMAT_XRGB2101010:
1586 case DRM_FORMAT_ARGB2101010:
1587 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
1588 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB2101010));
1589#ifdef __BIG_ENDIAN
1590 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
1591#endif
1592 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
1593 bypass_lut = true;
1594 break;
1595 case DRM_FORMAT_BGRX1010102:
1596 case DRM_FORMAT_BGRA1010102:
1597 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
1598 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA1010102));
1599#ifdef __BIG_ENDIAN
1600 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
1601#endif
1602 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
1603 bypass_lut = true;
1604 break;
1605 default:
1606 DRM_ERROR("Unsupported screen format %s\n",
1607 drm_get_format_name(target_fb->pixel_format));
1608 return -EINVAL;
1609 }
1610
1611 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1612 unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1613
1614 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1615 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1616 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1617 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1618 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1619
1620 fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
1621 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
1622 fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
1623 fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
1624 fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
1625 fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
1626 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1)
1627 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
1628
1629 pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1630 fb_format |= SI_GRPH_PIPE_CONFIG(pipe_config);
1631
1632 dce_v6_0_vga_enable(crtc, false);
1633
1634 /* Make sure surface address is updated at vertical blank rather than
1635 * horizontal blank
1636 */
1637 WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
1638
1639 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1640 upper_32_bits(fb_location));
1641 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1642 upper_32_bits(fb_location));
1643 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1644 (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
1645 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1646 (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
1647 WREG32(EVERGREEN_GRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
1648 WREG32(EVERGREEN_GRPH_SWAP_CONTROL + amdgpu_crtc->crtc_offset, fb_swap);
1649
1650 /*
1651 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
1652 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
1653 * retain the full precision throughout the pipeline.
1654 */
1655 WREG32_P(EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
1656 (bypass_lut ? EVERGREEN_LUT_10BIT_BYPASS_EN : 0),
1657 ~EVERGREEN_LUT_10BIT_BYPASS_EN);
1658
1659 if (bypass_lut)
1660 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
1661
1662 WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
1663 WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
1664 WREG32(EVERGREEN_GRPH_X_START + amdgpu_crtc->crtc_offset, 0);
1665 WREG32(EVERGREEN_GRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
1666 WREG32(EVERGREEN_GRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
1667 WREG32(EVERGREEN_GRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
1668
1669 fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
1670 WREG32(EVERGREEN_GRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
1671
1672 dce_v6_0_grph_enable(crtc, true);
1673
1674 WREG32(EVERGREEN_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
1675 target_fb->height);
1676 x &= ~3;
1677 y &= ~1;
1678 WREG32(EVERGREEN_VIEWPORT_START + amdgpu_crtc->crtc_offset,
1679 (x << 16) | y);
1680 viewport_w = crtc->mode.hdisplay;
1681 viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1682
1683 WREG32(EVERGREEN_VIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
1684 (viewport_w << 16) | viewport_h);
1685
1686 /* set pageflip to happen anywhere in vblank interval */
1687 WREG32(EVERGREEN_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
1688
1689 if (!atomic && fb && fb != crtc->primary->fb) {
1690 amdgpu_fb = to_amdgpu_framebuffer(fb);
1691 rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
1692 r = amdgpu_bo_reserve(rbo, false);
1693 if (unlikely(r != 0))
1694 return r;
1695 amdgpu_bo_unpin(rbo);
1696 amdgpu_bo_unreserve(rbo);
1697 }
1698
1699 /* Bytes per pixel may have changed */
1700 dce_v6_0_bandwidth_update(adev);
1701
1702 return 0;
1703
1704}
1705
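/*
 * Editorial worked example (not part of the patch): GRPH_PITCH above is
 * programmed in pixels while drm_framebuffer stores the pitch in bytes.
 * For a hypothetical 1920x1080 XRGB8888 fb with pitches[0] = 7680 and
 * bits_per_pixel = 32:
 *
 *	fb_pitch_pixels = 7680 / (32 / 8) = 1920
 *
 * The x &= ~3 / y &= ~1 masking likewise aligns the viewport start to a
 * 4-pixel / 2-line boundary, presumably a hardware alignment requirement.
 */
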
1706static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
1707 struct drm_display_mode *mode)
1708{
1709 struct drm_device *dev = crtc->dev;
1710 struct amdgpu_device *adev = dev->dev_private;
1711 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1712
1713 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1714 WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset,
1715 EVERGREEN_INTERLEAVE_EN);
1716 else
1717 WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
1718}
1719
1720static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
1721{
1722
1723 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1724 struct drm_device *dev = crtc->dev;
1725 struct amdgpu_device *adev = dev->dev_private;
1726 int i;
1727
1728 DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
1729
1730 WREG32(NI_INPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
1731 (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
1732 NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
1733 WREG32(NI_PRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
1734 NI_GRPH_PRESCALE_BYPASS);
1735 WREG32(NI_PRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
1736 NI_OVL_PRESCALE_BYPASS);
1737 WREG32(NI_INPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
1738 (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
1739 NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
1740
1743 WREG32(EVERGREEN_DC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
1744
1745 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
1746 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
1747 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
1748
1749 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
1750 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
1751 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
1752
1753 WREG32(EVERGREEN_DC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
1754 WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
1755
1756 WREG32(EVERGREEN_DC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
1757 for (i = 0; i < 256; i++) {
1758 WREG32(EVERGREEN_DC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
1759 (amdgpu_crtc->lut_r[i] << 20) |
1760 (amdgpu_crtc->lut_g[i] << 10) |
1761 (amdgpu_crtc->lut_b[i] << 0));
1762 }
1763
1764 WREG32(NI_DEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
1765 (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
1766 NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
1767 NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
1768 NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
1769 WREG32(NI_GAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
1770 (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
1771 NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
1772 WREG32(NI_REGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
1773 (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
1774 NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
1775 WREG32(NI_OUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
1776 (NI_OUTPUT_CSC_GRPH_MODE(0) |
1777 NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
1778 /* XXX match this to the depth of the crtc fmt block, move to modeset? */
1779 WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
1780
1782}
1783
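/*
 * Editorial sketch (hypothetical helper, not used by the driver): the
 * DC_LUT_30_COLOR writes above pack three 10-bit components into one
 * 30-bit word, red in bits 29:20, green in 19:10, blue in 9:0.
 */
static inline u32 dce_v6_0_lut_30_color_word(u16 r, u16 g, u16 b)
{
	/* components are assumed to already be reduced to 10 bits */
	return ((u32)(r & 0x3ff) << 20) |
	       ((u32)(g & 0x3ff) << 10) |
	       ((u32)(b & 0x3ff) << 0);
}
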
1784static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
1785{
1786 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1787 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1788
1789 switch (amdgpu_encoder->encoder_id) {
1790 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1791 if (dig->linkb)
1792 return 1;
1793 else
1794 return 0;
1795 break;
1796 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1797 if (dig->linkb)
1798 return 3;
1799 else
1800 return 2;
1801 break;
1802 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1803 if (dig->linkb)
1804 return 5;
1805 else
1806 return 4;
1807 break;
1808 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
1809 return 6;
1810 break;
1811 default:
1812 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
1813 return 0;
1814 }
1815}
1816
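/*
 * Editorial note (illustrative, not part of the patch): for
 * UNIPHY0..UNIPHY2 the switch above amounts to two DIG encoders per
 * UNIPHY block, one per link. A hypothetical equivalent:
 */
static inline int dce_v6_0_dig_from_uniphy(int uniphy_instance, bool linkb)
{
	/* UNIPHY0 -> DIG0/1, UNIPHY1 -> DIG2/3, UNIPHY2 -> DIG4/5 */
	return 2 * uniphy_instance + (linkb ? 1 : 0);
}
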
1817/**
1818 * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
1819 *
1820 * @crtc: drm crtc
1821 *
1822 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
1823 * a single PPLL can be used for all DP crtcs/encoders. For non-DP
1824 * monitors a dedicated PPLL must be used. If a particular board has
1825 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
1826 * as there is no need to program the PLL itself. If we are not able to
1827 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
1828 * avoid messing up an existing monitor.
1829 *
1830 *
1831 */
1832static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
1833{
1834 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1835 struct drm_device *dev = crtc->dev;
1836 struct amdgpu_device *adev = dev->dev_private;
1837 u32 pll_in_use;
1838 int pll;
1839
1840 if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
1841 if (adev->clock.dp_extclk)
1842 /* skip PPLL programming if using ext clock */
1843 return ATOM_PPLL_INVALID;
1844 else
1845 return ATOM_PPLL0;
1846 } else {
1847 /* use the same PPLL for all monitors with the same clock */
1848 pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
1849 if (pll != ATOM_PPLL_INVALID)
1850 return pll;
1851 }
1852
1853 /* PPLL1, and PPLL2 */
1854 pll_in_use = amdgpu_pll_get_use_mask(crtc);
1855 if (!(pll_in_use & (1 << ATOM_PPLL2)))
1856 return ATOM_PPLL2;
1857 if (!(pll_in_use & (1 << ATOM_PPLL1)))
1858 return ATOM_PPLL1;
1859 DRM_ERROR("unable to allocate a PPLL\n");
1860 return ATOM_PPLL_INVALID;
1861}
1862
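/*
 * Editorial worked example (not part of the patch): pll_in_use is a
 * bitmask with one bit per PPLL. If another crtc already owns PPLL2:
 *
 *	pll_in_use = 1 << ATOM_PPLL2;
 *	pll_in_use & (1 << ATOM_PPLL2)	-> nonzero, PPLL2 is skipped
 *	pll_in_use & (1 << ATOM_PPLL1)	-> zero, PPLL1 is returned
 */
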
1863static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
1864{
1865 struct amdgpu_device *adev = crtc->dev->dev_private;
1866 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1867 uint32_t cur_lock;
1868
1869 cur_lock = RREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset);
1870 if (lock)
1871 cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
1872 else
1873 cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
1874 WREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
1875}
1876
1877static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
1878{
1879 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1880 struct amdgpu_device *adev = crtc->dev->dev_private;
1881
1882 WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
1883 EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
1884 EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
1885
1887}
1888
1889static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
1890{
1891 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1892 struct amdgpu_device *adev = crtc->dev->dev_private;
1893
1894 WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1895 upper_32_bits(amdgpu_crtc->cursor_addr));
1896 WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1897 lower_32_bits(amdgpu_crtc->cursor_addr));
1898
1899 WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
1900 EVERGREEN_CURSOR_EN |
1901 EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
1902 EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
1903
1904}
1905
1906static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
1907 int x, int y)
1908{
1909 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1910 struct amdgpu_device *adev = crtc->dev->dev_private;
1911 int xorigin = 0, yorigin = 0;
1912
1913 int w = amdgpu_crtc->cursor_width;
1914
1915	/* avivo cursors are offset into the total surface */
1916 x += crtc->x;
1917 y += crtc->y;
1918 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
1919
1920 if (x < 0) {
1921 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
1922 x = 0;
1923 }
1924 if (y < 0) {
1925 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
1926 y = 0;
1927 }
1928
1929 WREG32(EVERGREEN_CUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
1930 WREG32(EVERGREEN_CUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
1931 WREG32(EVERGREEN_CUR_SIZE + amdgpu_crtc->crtc_offset,
1932 ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
1933
1934 amdgpu_crtc->cursor_x = x;
1935 amdgpu_crtc->cursor_y = y;
1936 return 0;
1937}
1938
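/*
 * Editorial worked example (not part of the patch): if the cursor is
 * dragged partly off the left edge so that x == -5 after the crtc->x
 * adjustment, the clamping above yields xorigin = 5 and x = 0, i.e.
 * CUR_POSITION stays on-screen while CUR_HOT_SPOT hides the first five
 * columns of the cursor image.
 */
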
1939static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
1940 int x, int y)
1941{
1942 int ret;
1943
1944 dce_v6_0_lock_cursor(crtc, true);
1945 ret = dce_v6_0_cursor_move_locked(crtc, x, y);
1946 dce_v6_0_lock_cursor(crtc, false);
1947
1948 return ret;
1949}
1950
1951static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
1952 struct drm_file *file_priv,
1953 uint32_t handle,
1954 uint32_t width,
1955 uint32_t height,
1956 int32_t hot_x,
1957 int32_t hot_y)
1958{
1959 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1960 struct drm_gem_object *obj;
1961 struct amdgpu_bo *aobj;
1962 int ret;
1963
1964 if (!handle) {
1965 /* turn off cursor */
1966 dce_v6_0_hide_cursor(crtc);
1967 obj = NULL;
1968 goto unpin;
1969 }
1970
1971 if ((width > amdgpu_crtc->max_cursor_width) ||
1972 (height > amdgpu_crtc->max_cursor_height)) {
1973 DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
1974 return -EINVAL;
1975 }
1976
1977 obj = drm_gem_object_lookup(file_priv, handle);
1978 if (!obj) {
1979 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
1980 return -ENOENT;
1981 }
1982
1983 aobj = gem_to_amdgpu_bo(obj);
1984 ret = amdgpu_bo_reserve(aobj, false);
1985 if (ret != 0) {
1986 drm_gem_object_unreference_unlocked(obj);
1987 return ret;
1988 }
1989
1990 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
1991 amdgpu_bo_unreserve(aobj);
1992 if (ret) {
1993 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
1994 drm_gem_object_unreference_unlocked(obj);
1995 return ret;
1996 }
1997
1998 amdgpu_crtc->cursor_width = width;
1999 amdgpu_crtc->cursor_height = height;
2000
2001 dce_v6_0_lock_cursor(crtc, true);
2002
2003 if (hot_x != amdgpu_crtc->cursor_hot_x ||
2004 hot_y != amdgpu_crtc->cursor_hot_y) {
2005 int x, y;
2006
2007 x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2008 y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2009
2010 dce_v6_0_cursor_move_locked(crtc, x, y);
2011
2012 amdgpu_crtc->cursor_hot_x = hot_x;
2013 amdgpu_crtc->cursor_hot_y = hot_y;
2014 }
2015
2016 dce_v6_0_show_cursor(crtc);
2017 dce_v6_0_lock_cursor(crtc, false);
2018
2019unpin:
2020 if (amdgpu_crtc->cursor_bo) {
2021 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2022 ret = amdgpu_bo_reserve(aobj, false);
2023 if (likely(ret == 0)) {
2024 amdgpu_bo_unpin(aobj);
2025 amdgpu_bo_unreserve(aobj);
2026 }
2027 drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
2028 }
2029
2030 amdgpu_crtc->cursor_bo = obj;
2031 return 0;
2032}
2033
2034static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
2035{
2036 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2037
2038 if (amdgpu_crtc->cursor_bo) {
2039 dce_v6_0_lock_cursor(crtc, true);
2040
2041 dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2042 amdgpu_crtc->cursor_y);
2043
2044 dce_v6_0_show_cursor(crtc);
2045
2046 dce_v6_0_lock_cursor(crtc, false);
2047 }
2048}
2049
2050static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2051 u16 *blue, uint32_t size)
2052{
2053 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2054 int i;
2055
2056 /* userspace palettes are always correct as is */
2057 for (i = 0; i < size; i++) {
2058 amdgpu_crtc->lut_r[i] = red[i] >> 6;
2059 amdgpu_crtc->lut_g[i] = green[i] >> 6;
2060 amdgpu_crtc->lut_b[i] = blue[i] >> 6;
2061 }
2062 dce_v6_0_crtc_load_lut(crtc);
2063
2064 return 0;
2065}
2066
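/*
 * Editorial note (not part of the patch): userspace supplies 16-bit
 * gamma entries while the hardware LUT is 10 bits per component, hence
 * the >> 6 above; e.g. 0xffff >> 6 = 0x3ff, so full scale maps to full
 * scale.
 */
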
2067static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
2068{
2069 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2070
2071 drm_crtc_cleanup(crtc);
2072 kfree(amdgpu_crtc);
2073}
2074
2075static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
2076 .cursor_set2 = dce_v6_0_crtc_cursor_set2,
2077 .cursor_move = dce_v6_0_crtc_cursor_move,
2078 .gamma_set = dce_v6_0_crtc_gamma_set,
2079 .set_config = amdgpu_crtc_set_config,
2080 .destroy = dce_v6_0_crtc_destroy,
2081 .page_flip_target = amdgpu_crtc_page_flip_target,
2082};
2083
2084static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2085{
2086 struct drm_device *dev = crtc->dev;
2087 struct amdgpu_device *adev = dev->dev_private;
2088 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2089 unsigned type;
2090
2091 switch (mode) {
2092 case DRM_MODE_DPMS_ON:
2093 amdgpu_crtc->enabled = true;
2094 amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2095 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2096 /* Make sure VBLANK and PFLIP interrupts are still enabled */
2097 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2098 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2099 amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2100 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
2101 dce_v6_0_crtc_load_lut(crtc);
2102 break;
2103 case DRM_MODE_DPMS_STANDBY:
2104 case DRM_MODE_DPMS_SUSPEND:
2105 case DRM_MODE_DPMS_OFF:
2106 drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
2107 if (amdgpu_crtc->enabled)
2108 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2109 amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2110 amdgpu_crtc->enabled = false;
2111 break;
2112 }
2113 /* adjust pm to dpms */
2114 amdgpu_pm_compute_clocks(adev);
2115}
2116
2117static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
2118{
2119 /* disable crtc pair power gating before programming */
2120 amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2121 amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2122 dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2123}
2124
2125static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
2126{
2127 dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2128 amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2129}
2130
2131static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
2132{
2133
2134 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2135 struct drm_device *dev = crtc->dev;
2136 struct amdgpu_device *adev = dev->dev_private;
2137 struct amdgpu_atom_ss ss;
2138 int i;
2139
2140 dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2141 if (crtc->primary->fb) {
2142 int r;
2143 struct amdgpu_framebuffer *amdgpu_fb;
2144 struct amdgpu_bo *rbo;
2145
2146 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2147 rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2148 r = amdgpu_bo_reserve(rbo, false);
2149 if (unlikely(r))
2150 DRM_ERROR("failed to reserve rbo before unpin\n");
2151 else {
2152 amdgpu_bo_unpin(rbo);
2153 amdgpu_bo_unreserve(rbo);
2154 }
2155 }
2156 /* disable the GRPH */
2157 dce_v6_0_grph_enable(crtc, false);
2158
2159 amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2160
2161 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2162 if (adev->mode_info.crtcs[i] &&
2163 adev->mode_info.crtcs[i]->enabled &&
2164 i != amdgpu_crtc->crtc_id &&
2165 amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2166			/* one other crtc is using this pll, don't turn
2167			 * off the pll
2168			 */
2169 goto done;
2170 }
2171 }
2172
2173 switch (amdgpu_crtc->pll_id) {
2174 case ATOM_PPLL1:
2175 case ATOM_PPLL2:
2176 /* disable the ppll */
2177 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2178 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2179 break;
2180 default:
2181 break;
2182 }
2183done:
2184 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2185 amdgpu_crtc->adjusted_clock = 0;
2186 amdgpu_crtc->encoder = NULL;
2187 amdgpu_crtc->connector = NULL;
2188}
2189
2190static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
2191 struct drm_display_mode *mode,
2192 struct drm_display_mode *adjusted_mode,
2193 int x, int y, struct drm_framebuffer *old_fb)
2194{
2195 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2196
2197 if (!amdgpu_crtc->adjusted_clock)
2198 return -EINVAL;
2199
2200 amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2201 amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2202 dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2203 amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2204 amdgpu_atombios_crtc_scaler_setup(crtc);
2205 dce_v6_0_cursor_reset(crtc);
2206	/* update the stored hw mode for dpm */
2207 amdgpu_crtc->hw_mode = *adjusted_mode;
2208
2209 return 0;
2210}
2211
2212static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
2213 const struct drm_display_mode *mode,
2214 struct drm_display_mode *adjusted_mode)
2215{
2216
2217 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2218 struct drm_device *dev = crtc->dev;
2219 struct drm_encoder *encoder;
2220
2221 /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2222 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2223 if (encoder->crtc == crtc) {
2224 amdgpu_crtc->encoder = encoder;
2225 amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2226 break;
2227 }
2228 }
2229 if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2230 amdgpu_crtc->encoder = NULL;
2231 amdgpu_crtc->connector = NULL;
2232 return false;
2233 }
2234 if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2235 return false;
2236 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2237 return false;
2238 /* pick pll */
2239 amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
2240 /* if we can't get a PPLL for a non-DP encoder, fail */
2241 if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2242 !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2243 return false;
2244
2245 return true;
2246}
2247
2248static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2249 struct drm_framebuffer *old_fb)
2250{
2251 return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2252}
2253
2254static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2255 struct drm_framebuffer *fb,
2256 int x, int y, enum mode_set_atomic state)
2257{
2258 return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
2259}
2260
2261static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
2262 .dpms = dce_v6_0_crtc_dpms,
2263 .mode_fixup = dce_v6_0_crtc_mode_fixup,
2264 .mode_set = dce_v6_0_crtc_mode_set,
2265 .mode_set_base = dce_v6_0_crtc_set_base,
2266 .mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
2267 .prepare = dce_v6_0_crtc_prepare,
2268 .commit = dce_v6_0_crtc_commit,
2269 .load_lut = dce_v6_0_crtc_load_lut,
2270 .disable = dce_v6_0_crtc_disable,
2271};
2272
2273static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
2274{
2275 struct amdgpu_crtc *amdgpu_crtc;
2276 int i;
2277
2278 amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2279 (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2280 if (amdgpu_crtc == NULL)
2281 return -ENOMEM;
2282
2283 drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
2284
2285 drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2286 amdgpu_crtc->crtc_id = index;
2287 adev->mode_info.crtcs[index] = amdgpu_crtc;
2288
2289 amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
2290 amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
2291 adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2292 adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2293
2294 for (i = 0; i < 256; i++) {
2295 amdgpu_crtc->lut_r[i] = i << 2;
2296 amdgpu_crtc->lut_g[i] = i << 2;
2297 amdgpu_crtc->lut_b[i] = i << 2;
2298 }
2299
2300 amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2301
2302 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2303 amdgpu_crtc->adjusted_clock = 0;
2304 amdgpu_crtc->encoder = NULL;
2305 amdgpu_crtc->connector = NULL;
2306 drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
2307
2308 return 0;
2309}
2310
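/*
 * Editorial note (not part of the patch): the i << 2 initialization
 * above builds an approximate 8-bit to 10-bit identity ramp for the
 * LUT, e.g. index 255 maps to 1020 (0x3fc) rather than 1023.
 */
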
2311static int dce_v6_0_early_init(void *handle)
2312{
2313 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2314
2315 adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
2316 adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
2317
2318 dce_v6_0_set_display_funcs(adev);
2319 dce_v6_0_set_irq_funcs(adev);
2320
2321 switch (adev->asic_type) {
2322 case CHIP_TAHITI:
2323 case CHIP_PITCAIRN:
2324 case CHIP_VERDE:
2325 adev->mode_info.num_crtc = 6;
2326 adev->mode_info.num_hpd = 6;
2327 adev->mode_info.num_dig = 6;
2328 break;
2329 case CHIP_OLAND:
2330 adev->mode_info.num_crtc = 2;
2331 adev->mode_info.num_hpd = 2;
2332 adev->mode_info.num_dig = 2;
2333 break;
2334 default:
2335 /* FIXME: not supported yet */
2336 return -EINVAL;
2337 }
2338
2339 return 0;
2340}
2341
2342static int dce_v6_0_sw_init(void *handle)
2343{
2344 int r, i;
2345 bool ret;
2346 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2347
2348 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2349 r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
2350 if (r)
2351 return r;
2352 }
2353
2354 for (i = 8; i < 20; i += 2) {
2355 r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
2356 if (r)
2357 return r;
2358 }
2359
2360 /* HPD hotplug */
2361 r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
2362 if (r)
2363 return r;
2364
2365 adev->mode_info.mode_config_initialized = true;
2366
2367 adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
2368
2369 adev->ddev->mode_config.async_page_flip = true;
2370
2371 adev->ddev->mode_config.max_width = 16384;
2372 adev->ddev->mode_config.max_height = 16384;
2373
2374 adev->ddev->mode_config.preferred_depth = 24;
2375 adev->ddev->mode_config.prefer_shadow = 1;
2376
2377 adev->ddev->mode_config.fb_base = adev->mc.aper_base;
2378
2379 r = amdgpu_modeset_create_props(adev);
2380 if (r)
2381 return r;
2382
2386 /* allocate crtcs */
2387 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2388 r = dce_v6_0_crtc_init(adev, i);
2389 if (r)
2390 return r;
2391 }
2392
2393 ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
2394 if (ret)
2395 amdgpu_print_display_setup(adev->ddev);
2396 else
2397 return -EINVAL;
2398
2399 /* setup afmt */
2400 dce_v6_0_afmt_init(adev);
2401
2402 r = dce_v6_0_audio_init(adev);
2403 if (r)
2404 return r;
2405
2406 drm_kms_helper_poll_init(adev->ddev);
2407
2408 return r;
2409}
2410
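/*
 * Editorial summary of the interrupt source ids wired up in sw_init()
 * above (inferred from the loops, not an authoritative list):
 *
 *	crtc i vblank/vline:	src_id = i + 1		(1..6)
 *	crtc i pageflip:	src_id = 8 + 2 * i	(8, 10, ..., 18)
 *	hpd:			src_id = 42
 *
 * dce_v6_0_pageflip_irq() below inverts the pageflip mapping with
 * crtc_id = (entry->src_id - 8) >> 1.
 */
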
2411static int dce_v6_0_sw_fini(void *handle)
2412{
2413 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2414
2415 kfree(adev->mode_info.bios_hardcoded_edid);
2416
2417 drm_kms_helper_poll_fini(adev->ddev);
2418
2419 dce_v6_0_audio_fini(adev);
2420
2421 dce_v6_0_afmt_fini(adev);
2422
2423 drm_mode_config_cleanup(adev->ddev);
2424 adev->mode_info.mode_config_initialized = false;
2425
2426 return 0;
2427}
2428
2429static int dce_v6_0_hw_init(void *handle)
2430{
2431 int i;
2432 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2433
2434 /* init dig PHYs, disp eng pll */
2435 amdgpu_atombios_encoder_init_dig(adev);
2436 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2437
2438 /* initialize hpd */
2439 dce_v6_0_hpd_init(adev);
2440
2441 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2442 dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2443 }
2444
2445 dce_v6_0_pageflip_interrupt_init(adev);
2446
2447 return 0;
2448}
2449
2450static int dce_v6_0_hw_fini(void *handle)
2451{
2452 int i;
2453 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2454
2455 dce_v6_0_hpd_fini(adev);
2456
2457 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2458 dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2459 }
2460
2461 dce_v6_0_pageflip_interrupt_fini(adev);
2462
2463 return 0;
2464}
2465
2466static int dce_v6_0_suspend(void *handle)
2467{
2468 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2469
2470 amdgpu_atombios_scratch_regs_save(adev);
2471
2472 return dce_v6_0_hw_fini(handle);
2473}
2474
2475static int dce_v6_0_resume(void *handle)
2476{
2477 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2478 int ret;
2479
2480 ret = dce_v6_0_hw_init(handle);
2481
2482 amdgpu_atombios_scratch_regs_restore(adev);
2483
2484 /* turn on the BL */
2485 if (adev->mode_info.bl_encoder) {
2486 u8 bl_level = amdgpu_display_backlight_get_level(adev,
2487 adev->mode_info.bl_encoder);
2488 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2489 bl_level);
2490 }
2491
2492 return ret;
2493}
2494
2495static bool dce_v6_0_is_idle(void *handle)
2496{
2497 return true;
2498}
2499
2500static int dce_v6_0_wait_for_idle(void *handle)
2501{
2502 return 0;
2503}
2504
2505static int dce_v6_0_soft_reset(void *handle)
2506{
2507	DRM_INFO("dce_v6_0_soft_reset: not implemented\n");
2508 return 0;
2509}
2510
2511static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2512 int crtc,
2513 enum amdgpu_interrupt_state state)
2514{
2515 u32 reg_block, interrupt_mask;
2516
2517 if (crtc >= adev->mode_info.num_crtc) {
2518 DRM_DEBUG("invalid crtc %d\n", crtc);
2519 return;
2520 }
2521
2522 switch (crtc) {
2523 case 0:
2524 reg_block = SI_CRTC0_REGISTER_OFFSET;
2525 break;
2526 case 1:
2527 reg_block = SI_CRTC1_REGISTER_OFFSET;
2528 break;
2529 case 2:
2530 reg_block = SI_CRTC2_REGISTER_OFFSET;
2531 break;
2532 case 3:
2533 reg_block = SI_CRTC3_REGISTER_OFFSET;
2534 break;
2535 case 4:
2536 reg_block = SI_CRTC4_REGISTER_OFFSET;
2537 break;
2538 case 5:
2539 reg_block = SI_CRTC5_REGISTER_OFFSET;
2540 break;
2541 default:
2542 DRM_DEBUG("invalid crtc %d\n", crtc);
2543 return;
2544 }
2545
2546 switch (state) {
2547 case AMDGPU_IRQ_STATE_DISABLE:
2548 interrupt_mask = RREG32(INT_MASK + reg_block);
2549 interrupt_mask &= ~VBLANK_INT_MASK;
2550 WREG32(INT_MASK + reg_block, interrupt_mask);
2551 break;
2552 case AMDGPU_IRQ_STATE_ENABLE:
2553 interrupt_mask = RREG32(INT_MASK + reg_block);
2554 interrupt_mask |= VBLANK_INT_MASK;
2555 WREG32(INT_MASK + reg_block, interrupt_mask);
2556 break;
2557 default:
2558 break;
2559 }
2560}
2561
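/*
 * Editorial sketch (hypothetical helper, not part of the patch): the
 * switch above duplicates the crtc_offsets[] table, so after the bounds
 * check it could collapse to a table lookup plus the usual
 * read-modify-write:
 */
static inline void dce_v6_0_vblank_int_rmw(struct amdgpu_device *adev,
					   int crtc, bool enable)
{
	u32 reg_block = crtc_offsets[crtc];	/* assumes crtc < num_crtc */
	u32 mask = RREG32(INT_MASK + reg_block);

	if (enable)
		mask |= VBLANK_INT_MASK;
	else
		mask &= ~VBLANK_INT_MASK;
	WREG32(INT_MASK + reg_block, mask);
}
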
2562static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2563 int crtc,
2564 enum amdgpu_interrupt_state state)
2565{
2566
2567}
2568
2569static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
2570 struct amdgpu_irq_src *src,
2571 unsigned type,
2572 enum amdgpu_interrupt_state state)
2573{
2574 u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
2575
2576 switch (type) {
2577 case AMDGPU_HPD_1:
2578 dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
2579 break;
2580 case AMDGPU_HPD_2:
2581 dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
2582 break;
2583 case AMDGPU_HPD_3:
2584 dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
2585 break;
2586 case AMDGPU_HPD_4:
2587 dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
2588 break;
2589 case AMDGPU_HPD_5:
2590 dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
2591 break;
2592 case AMDGPU_HPD_6:
2593 dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
2594 break;
2595 default:
2596		DRM_DEBUG("invalid hpd %d\n", type);
2597 return 0;
2598 }
2599
2600 switch (state) {
2601 case AMDGPU_IRQ_STATE_DISABLE:
2602 dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
2603 dc_hpd_int_cntl &= ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
2604 WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
2605 break;
2606 case AMDGPU_IRQ_STATE_ENABLE:
2607 dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
2608 dc_hpd_int_cntl |= (DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
2609 WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
2610 break;
2611 default:
2612 break;
2613 }
2614
2615 return 0;
2616}
2617
2618static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
2619 struct amdgpu_irq_src *src,
2620 unsigned type,
2621 enum amdgpu_interrupt_state state)
2622{
2623 switch (type) {
2624 case AMDGPU_CRTC_IRQ_VBLANK1:
2625 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
2626 break;
2627 case AMDGPU_CRTC_IRQ_VBLANK2:
2628 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
2629 break;
2630 case AMDGPU_CRTC_IRQ_VBLANK3:
2631 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
2632 break;
2633 case AMDGPU_CRTC_IRQ_VBLANK4:
2634 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
2635 break;
2636 case AMDGPU_CRTC_IRQ_VBLANK5:
2637 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
2638 break;
2639 case AMDGPU_CRTC_IRQ_VBLANK6:
2640 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
2641 break;
2642 case AMDGPU_CRTC_IRQ_VLINE1:
2643 dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
2644 break;
2645 case AMDGPU_CRTC_IRQ_VLINE2:
2646 dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
2647 break;
2648 case AMDGPU_CRTC_IRQ_VLINE3:
2649 dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
2650 break;
2651 case AMDGPU_CRTC_IRQ_VLINE4:
2652 dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
2653 break;
2654 case AMDGPU_CRTC_IRQ_VLINE5:
2655 dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
2656 break;
2657 case AMDGPU_CRTC_IRQ_VLINE6:
2658 dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
2659 break;
2660 default:
2661 break;
2662 }
2663 return 0;
2664}
2665
2666static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
2667 struct amdgpu_irq_src *source,
2668 struct amdgpu_iv_entry *entry)
2669{
2670 unsigned crtc = entry->src_id - 1;
2671 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
2672 unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
2673
2674 switch (entry->src_data) {
2675 case 0: /* vblank */
2676 if (disp_int & interrupt_status_offsets[crtc].vblank)
2677 WREG32(VBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
2678 else
2679 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2680
2681 if (amdgpu_irq_enabled(adev, source, irq_type)) {
2682 drm_handle_vblank(adev->ddev, crtc);
2683 }
2684 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
2685 break;
2686 case 1: /* vline */
2687 if (disp_int & interrupt_status_offsets[crtc].vline)
2688 WREG32(VLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
2689 else
2690 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2691
2692 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
2693 break;
2694 default:
2695 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
2696 break;
2697 }
2698
2699 return 0;
2700}
2701
2702static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
2703 struct amdgpu_irq_src *src,
2704 unsigned type,
2705 enum amdgpu_interrupt_state state)
2706{
2707 u32 reg;
2708
2709 if (type >= adev->mode_info.num_crtc) {
2710 DRM_ERROR("invalid pageflip crtc %d\n", type);
2711 return -EINVAL;
2712 }
2713
2714 reg = RREG32(GRPH_INT_CONTROL + crtc_offsets[type]);
2715 if (state == AMDGPU_IRQ_STATE_DISABLE)
2716 WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
2717 reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
2718 else
2719 WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
2720 reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
2721
2722 return 0;
2723}
2724
2725static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
2726 struct amdgpu_irq_src *source,
2727 struct amdgpu_iv_entry *entry)
2728{
2729 unsigned long flags;
2730 unsigned crtc_id;
2731 struct amdgpu_crtc *amdgpu_crtc;
2732 struct amdgpu_flip_work *works;
2733
2734	crtc_id = (entry->src_id - 8) >> 1;
2735
2736	if (crtc_id >= adev->mode_info.num_crtc) {
2737		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
2738		return -EINVAL;
2739	}
2740	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
2741
2742 if (RREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id]) &
2743 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
2744 WREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id],
2745 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
2746
2747	/* the IRQ can fire during early init, before the crtc is set up */
2748 if (amdgpu_crtc == NULL)
2749 return 0;
2750
2751 spin_lock_irqsave(&adev->ddev->event_lock, flags);
2752 works = amdgpu_crtc->pflip_works;
2753	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
2754 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
2755 "AMDGPU_FLIP_SUBMITTED(%d)\n",
2756 amdgpu_crtc->pflip_status,
2757 AMDGPU_FLIP_SUBMITTED);
2758 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
2759 return 0;
2760 }
2761
2762 /* page flip completed. clean up */
2763 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
2764 amdgpu_crtc->pflip_works = NULL;
2765
2766	/* wake up userspace */
2767 if (works->event)
2768 drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
2769
2770 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
2771
2772 drm_crtc_vblank_put(&amdgpu_crtc->base);
2773 schedule_work(&works->unpin_work);
2774
2775 return 0;
2776}
2777
2778static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
2779 struct amdgpu_irq_src *source,
2780 struct amdgpu_iv_entry *entry)
2781{
2782 uint32_t disp_int, mask, int_control, tmp;
2783 unsigned hpd;
2784
2785	if (entry->src_data >= adev->mode_info.num_hpd) {
2786 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
2787 return 0;
2788 }
2789
2790 hpd = entry->src_data;
2791 disp_int = RREG32(interrupt_status_offsets[hpd].reg);
2792 mask = interrupt_status_offsets[hpd].hpd;
2793 int_control = hpd_int_control_offsets[hpd];
2794
2795 if (disp_int & mask) {
2796 tmp = RREG32(int_control);
2797 tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
2798 WREG32(int_control, tmp);
2799 schedule_work(&adev->hotplug_work);
2800 DRM_INFO("IH: HPD%d\n", hpd + 1);
2801 }
2802
2803 return 0;
2804
2805}
2806
2807static int dce_v6_0_set_clockgating_state(void *handle,
2808 enum amd_clockgating_state state)
2809{
2810 return 0;
2811}
2812
2813static int dce_v6_0_set_powergating_state(void *handle,
2814 enum amd_powergating_state state)
2815{
2816 return 0;
2817}
2818
2819const struct amd_ip_funcs dce_v6_0_ip_funcs = {
2820 .name = "dce_v6_0",
2821 .early_init = dce_v6_0_early_init,
2822 .late_init = NULL,
2823 .sw_init = dce_v6_0_sw_init,
2824 .sw_fini = dce_v6_0_sw_fini,
2825 .hw_init = dce_v6_0_hw_init,
2826 .hw_fini = dce_v6_0_hw_fini,
2827 .suspend = dce_v6_0_suspend,
2828 .resume = dce_v6_0_resume,
2829 .is_idle = dce_v6_0_is_idle,
2830 .wait_for_idle = dce_v6_0_wait_for_idle,
2831 .soft_reset = dce_v6_0_soft_reset,
2832 .set_clockgating_state = dce_v6_0_set_clockgating_state,
2833 .set_powergating_state = dce_v6_0_set_powergating_state,
2834};
2835
2836static void
2837dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
2838 struct drm_display_mode *mode,
2839 struct drm_display_mode *adjusted_mode)
2840{
2841
2842 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2843
2844 amdgpu_encoder->pixel_clock = adjusted_mode->clock;
2845
2846 /* need to call this here rather than in prepare() since we need some crtc info */
2847 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
2848
2849	/* setting the scaler clears this on some chips */
2850 dce_v6_0_set_interleave(encoder->crtc, mode);
2851
2852 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
2853 dce_v6_0_afmt_enable(encoder, true);
2854 dce_v6_0_afmt_setmode(encoder, adjusted_mode);
2855 }
2856}
2857
2858static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
2859{
2860
2861 struct amdgpu_device *adev = encoder->dev->dev_private;
2862 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2863 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
2864
2865 if ((amdgpu_encoder->active_device &
2866 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
2867 (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
2868 ENCODER_OBJECT_ID_NONE)) {
2869 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2870 if (dig) {
2871 dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
2872 if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
2873 dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
2874 }
2875 }
2876
2877 amdgpu_atombios_scratch_regs_lock(adev, true);
2878
2879 if (connector) {
2880 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
2881
2882 /* select the clock/data port if it uses a router */
2883 if (amdgpu_connector->router.cd_valid)
2884 amdgpu_i2c_router_select_cd_port(amdgpu_connector);
2885
2886 /* turn eDP panel on for mode set */
2887 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
2888 amdgpu_atombios_encoder_set_edp_panel_power(connector,
2889 ATOM_TRANSMITTER_ACTION_POWER_ON);
2890 }
2891
2892 /* this is needed for the pll/ss setup to work correctly in some cases */
2893 amdgpu_atombios_encoder_set_crtc_source(encoder);
2894 /* set up the FMT blocks */
2895 dce_v6_0_program_fmt(encoder);
2896}
2897
2898static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
2899{
2900
2901 struct drm_device *dev = encoder->dev;
2902 struct amdgpu_device *adev = dev->dev_private;
2903
2904 /* need to call this here as we need the crtc set up */
2905 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
2906 amdgpu_atombios_scratch_regs_lock(adev, false);
2907}
2908
2909static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
2910{
2911
2912 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2913 struct amdgpu_encoder_atom_dig *dig;
2914
2915 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
2916
2917 if (amdgpu_atombios_encoder_is_digital(encoder)) {
2918 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
2919 dce_v6_0_afmt_enable(encoder, false);
2920 dig = amdgpu_encoder->enc_priv;
2921 dig->dig_encoder = -1;
2922 }
2923 amdgpu_encoder->active_device = 0;
2924}
2925
2926/* these are handled by the primary encoders */
2927static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
2928{
2929
2930}
2931
2932static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
2933{
2934
2935}
2936
2937static void
2938dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
2939 struct drm_display_mode *mode,
2940 struct drm_display_mode *adjusted_mode)
2941{
2942
2943}
2944
2945static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
2946{
2947
2948}
2949
2950static void
2951dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
2952{
2953
2954}
2955
2956static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
2957 const struct drm_display_mode *mode,
2958 struct drm_display_mode *adjusted_mode)
2959{
2960 return true;
2961}
2962
2963static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
2964 .dpms = dce_v6_0_ext_dpms,
2965 .mode_fixup = dce_v6_0_ext_mode_fixup,
2966 .prepare = dce_v6_0_ext_prepare,
2967 .mode_set = dce_v6_0_ext_mode_set,
2968 .commit = dce_v6_0_ext_commit,
2969 .disable = dce_v6_0_ext_disable,
2970 /* no detect for TMDS/LVDS yet */
2971};
2972
2973static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
2974 .dpms = amdgpu_atombios_encoder_dpms,
2975 .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
2976 .prepare = dce_v6_0_encoder_prepare,
2977 .mode_set = dce_v6_0_encoder_mode_set,
2978 .commit = dce_v6_0_encoder_commit,
2979 .disable = dce_v6_0_encoder_disable,
2980 .detect = amdgpu_atombios_encoder_dig_detect,
2981};
2982
2983static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
2984 .dpms = amdgpu_atombios_encoder_dpms,
2985 .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
2986 .prepare = dce_v6_0_encoder_prepare,
2987 .mode_set = dce_v6_0_encoder_mode_set,
2988 .commit = dce_v6_0_encoder_commit,
2989 .detect = amdgpu_atombios_encoder_dac_detect,
2990};
2991
2992static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
2993{
2994 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2995 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
2996 amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
2997 kfree(amdgpu_encoder->enc_priv);
2998 drm_encoder_cleanup(encoder);
2999 kfree(amdgpu_encoder);
3000}
3001
3002static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
3003 .destroy = dce_v6_0_encoder_destroy,
3004};
3005
3006static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
3007 uint32_t encoder_enum,
3008 uint32_t supported_device,
3009 u16 caps)
3010{
3011 struct drm_device *dev = adev->ddev;
3012 struct drm_encoder *encoder;
3013 struct amdgpu_encoder *amdgpu_encoder;
3014
3015 /* see if we already added it */
3016 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3017 amdgpu_encoder = to_amdgpu_encoder(encoder);
3018 if (amdgpu_encoder->encoder_enum == encoder_enum) {
3019 amdgpu_encoder->devices |= supported_device;
3020 return;
3021 }
3022
3023 }
3024
3025 /* add a new one */
3026 amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3027 if (!amdgpu_encoder)
3028 return;
3029
3030 encoder = &amdgpu_encoder->base;
3031 switch (adev->mode_info.num_crtc) {
3032 case 1:
3033 encoder->possible_crtcs = 0x1;
3034 break;
3035 case 2:
3036 default:
3037 encoder->possible_crtcs = 0x3;
3038 break;
3039 case 4:
3040 encoder->possible_crtcs = 0xf;
3041 break;
3042 case 6:
3043 encoder->possible_crtcs = 0x3f;
3044 break;
3045 }
3046
3047 amdgpu_encoder->enc_priv = NULL;
3048
3049 amdgpu_encoder->encoder_enum = encoder_enum;
3050 amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3051 amdgpu_encoder->devices = supported_device;
3052 amdgpu_encoder->rmx_type = RMX_OFF;
3053 amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3054 amdgpu_encoder->is_ext_encoder = false;
3055 amdgpu_encoder->caps = caps;
3056
3057 switch (amdgpu_encoder->encoder_id) {
3058 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3059 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3060 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3061 DRM_MODE_ENCODER_DAC, NULL);
3062 drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
3063 break;
3064 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3065 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3066 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3067 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3068 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3069 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3070 amdgpu_encoder->rmx_type = RMX_FULL;
3071 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3072 DRM_MODE_ENCODER_LVDS, NULL);
3073 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3074 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3075 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3076 DRM_MODE_ENCODER_DAC, NULL);
3077 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3078 } else {
3079 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3080 DRM_MODE_ENCODER_TMDS, NULL);
3081 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3082 }
3083 drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
3084 break;
3085 case ENCODER_OBJECT_ID_SI170B:
3086 case ENCODER_OBJECT_ID_CH7303:
3087 case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3088 case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3089 case ENCODER_OBJECT_ID_TITFP513:
3090 case ENCODER_OBJECT_ID_VT1623:
3091 case ENCODER_OBJECT_ID_HDMI_SI1930:
3092 case ENCODER_OBJECT_ID_TRAVIS:
3093 case ENCODER_OBJECT_ID_NUTMEG:
3094 /* these are handled by the primary encoders */
3095 amdgpu_encoder->is_ext_encoder = true;
3096 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3097 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3098 DRM_MODE_ENCODER_LVDS, NULL);
3099 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3100 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3101 DRM_MODE_ENCODER_DAC, NULL);
3102 else
3103 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3104 DRM_MODE_ENCODER_TMDS, NULL);
3105 drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
3106 break;
3107 }
3108}
3109
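/*
 * Editorial note (not part of the patch): for the crtc counts handled
 * here (1, 2, 4, 6) the possible_crtcs switch above is equivalent to
 * masking in the first num_crtc crtcs:
 *
 *	encoder->possible_crtcs = (1u << adev->mode_info.num_crtc) - 1;
 */
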
3110static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
3111 .set_vga_render_state = &dce_v6_0_set_vga_render_state,
3112 .bandwidth_update = &dce_v6_0_bandwidth_update,
3113 .vblank_get_counter = &dce_v6_0_vblank_get_counter,
3114 .vblank_wait = &dce_v6_0_vblank_wait,
3115 .is_display_hung = &dce_v6_0_is_display_hung,
3116 .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3117 .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3118 .hpd_sense = &dce_v6_0_hpd_sense,
3119 .hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
3120 .hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
3121 .page_flip = &dce_v6_0_page_flip,
3122 .page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
3123 .add_encoder = &dce_v6_0_encoder_add,
3124 .add_connector = &amdgpu_connector_add,
3125 .stop_mc_access = &dce_v6_0_stop_mc_access,
3126 .resume_mc_access = &dce_v6_0_resume_mc_access,
3127};
3128
3129static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
3130{
3131 if (adev->mode_info.funcs == NULL)
3132 adev->mode_info.funcs = &dce_v6_0_display_funcs;
3133}
3134
3135static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
3136 .set = dce_v6_0_set_crtc_interrupt_state,
3137 .process = dce_v6_0_crtc_irq,
3138};
3139
3140static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
3141 .set = dce_v6_0_set_pageflip_interrupt_state,
3142 .process = dce_v6_0_pageflip_irq,
3143};
3144
3145static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
3146 .set = dce_v6_0_set_hpd_interrupt_state,
3147 .process = dce_v6_0_hpd_irq,
3148};
3149
3150static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
3151{
3152 adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
3153 adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
3154
3155 adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
3156 adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
3157
3158 adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
3159 adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
3160}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
new file mode 100644
index 000000000000..6a5528105bb6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef __DCE_V6_0_H__
25#define __DCE_V6_0_H__
26
27extern const struct amd_ip_funcs dce_v6_0_ip_funcs;
28
29#endif