path: root/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/dce_v8_0.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 293
1 file changed, 99 insertions(+), 194 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 5966166ec94c..979aedf4b74d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -31,6 +31,7 @@
 #include "atombios_encoders.h"
 #include "amdgpu_pll.h"
 #include "amdgpu_connectors.h"
+#include "dce_v8_0.h"
 
 #include "dce/dce_8_0_d.h"
 #include "dce/dce_8_0_sh_mask.h"
@@ -56,6 +57,16 @@ static const u32 crtc_offsets[6] =
 	CRTC5_REGISTER_OFFSET
 };
 
+static const u32 hpd_offsets[] =
+{
+	HPD0_REGISTER_OFFSET,
+	HPD1_REGISTER_OFFSET,
+	HPD2_REGISTER_OFFSET,
+	HPD3_REGISTER_OFFSET,
+	HPD4_REGISTER_OFFSET,
+	HPD5_REGISTER_OFFSET
+};
+
 static const uint32_t dig_offsets[] = {
 	CRTC0_REGISTER_OFFSET,
 	CRTC1_REGISTER_OFFSET,
@@ -104,15 +115,6 @@ static const struct {
 		.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
 } };
 
-static const uint32_t hpd_int_control_offsets[6] = {
-	mmDC_HPD1_INT_CONTROL,
-	mmDC_HPD2_INT_CONTROL,
-	mmDC_HPD3_INT_CONTROL,
-	mmDC_HPD4_INT_CONTROL,
-	mmDC_HPD5_INT_CONTROL,
-	mmDC_HPD6_INT_CONTROL,
-};
-
 static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
 				     u32 block_offset, u32 reg)
 {
@@ -278,34 +280,12 @@ static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
 {
 	bool connected = false;
 
-	switch (hpd) {
-	case AMDGPU_HPD_1:
-		if (RREG32(mmDC_HPD1_INT_STATUS) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
-			connected = true;
-		break;
-	case AMDGPU_HPD_2:
-		if (RREG32(mmDC_HPD2_INT_STATUS) & DC_HPD2_INT_STATUS__DC_HPD2_SENSE_MASK)
-			connected = true;
-		break;
-	case AMDGPU_HPD_3:
-		if (RREG32(mmDC_HPD3_INT_STATUS) & DC_HPD3_INT_STATUS__DC_HPD3_SENSE_MASK)
-			connected = true;
-		break;
-	case AMDGPU_HPD_4:
-		if (RREG32(mmDC_HPD4_INT_STATUS) & DC_HPD4_INT_STATUS__DC_HPD4_SENSE_MASK)
-			connected = true;
-		break;
-	case AMDGPU_HPD_5:
-		if (RREG32(mmDC_HPD5_INT_STATUS) & DC_HPD5_INT_STATUS__DC_HPD5_SENSE_MASK)
-			connected = true;
-		break;
-	case AMDGPU_HPD_6:
-		if (RREG32(mmDC_HPD6_INT_STATUS) & DC_HPD6_INT_STATUS__DC_HPD6_SENSE_MASK)
-			connected = true;
-		break;
-	default:
-		break;
-	}
+	if (hpd >= adev->mode_info.num_hpd)
+		return connected;
+
+	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
+	    DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
+		connected = true;
 
 	return connected;
 }
@@ -324,58 +304,15 @@ static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
 	u32 tmp;
 	bool connected = dce_v8_0_hpd_sense(adev, hpd);
 
-	switch (hpd) {
-	case AMDGPU_HPD_1:
-		tmp = RREG32(mmDC_HPD1_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
-		else
-			tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
-		WREG32(mmDC_HPD1_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_2:
-		tmp = RREG32(mmDC_HPD2_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
-		else
-			tmp |= DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
-		WREG32(mmDC_HPD2_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_3:
-		tmp = RREG32(mmDC_HPD3_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
-		else
-			tmp |= DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
-		WREG32(mmDC_HPD3_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_4:
-		tmp = RREG32(mmDC_HPD4_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
-		else
-			tmp |= DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
-		WREG32(mmDC_HPD4_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_5:
-		tmp = RREG32(mmDC_HPD5_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
-		else
-			tmp |= DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
-		WREG32(mmDC_HPD5_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_6:
-		tmp = RREG32(mmDC_HPD6_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
-		else
-			tmp |= DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
-		WREG32(mmDC_HPD6_INT_CONTROL, tmp);
-		break;
-	default:
-		break;
-	}
+	if (hpd >= adev->mode_info.num_hpd)
+		return;
+
+	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+	if (connected)
+		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
+	else
+		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
+	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 }
 
 /**
@@ -390,35 +327,17 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 {
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
-	u32 tmp = (0x9c4 << DC_HPD1_CONTROL__DC_HPD1_CONNECTION_TIMER__SHIFT) |
-		(0xfa << DC_HPD1_CONTROL__DC_HPD1_RX_INT_TIMER__SHIFT) |
-		DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+	u32 tmp;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		switch (amdgpu_connector->hpd.hpd) {
-		case AMDGPU_HPD_1:
-			WREG32(mmDC_HPD1_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_2:
-			WREG32(mmDC_HPD2_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_3:
-			WREG32(mmDC_HPD3_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_4:
-			WREG32(mmDC_HPD4_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_5:
-			WREG32(mmDC_HPD5_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_6:
-			WREG32(mmDC_HPD6_CONTROL, tmp);
-			break;
-		default:
-			break;
-		}
+		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+			continue;
+
+		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 
 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
 		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -427,34 +346,9 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
 			 * also avoid interrupt storms during dpms.
 			 */
-			u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
-
-			switch (amdgpu_connector->hpd.hpd) {
-			case AMDGPU_HPD_1:
-				dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_2:
-				dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_3:
-				dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_4:
-				dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_5:
-				dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_6:
-				dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
-				break;
-			default:
-				continue;
-			}
-
-			dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
-			dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
-			WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 			continue;
 		}
 
@@ -475,32 +369,18 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
 {
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
+	u32 tmp;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		switch (amdgpu_connector->hpd.hpd) {
-		case AMDGPU_HPD_1:
-			WREG32(mmDC_HPD1_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_2:
-			WREG32(mmDC_HPD2_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_3:
-			WREG32(mmDC_HPD3_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_4:
-			WREG32(mmDC_HPD4_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_5:
-			WREG32(mmDC_HPD5_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_6:
-			WREG32(mmDC_HPD6_CONTROL, 0);
-			break;
-		default:
-			break;
-		}
+		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+			continue;
+
+		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0);
+
 		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 	}
 }
@@ -3204,42 +3084,23 @@ static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
 					    unsigned type,
 					    enum amdgpu_interrupt_state state)
 {
-	u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+	u32 dc_hpd_int_cntl;
 
-	switch (type) {
-	case AMDGPU_HPD_1:
-		dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_2:
-		dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_3:
-		dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_4:
-		dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_5:
-		dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_6:
-		dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
-		break;
-	default:
+	if (type >= adev->mode_info.num_hpd) {
 		DRM_DEBUG("invalid hdp %d\n", type);
 		return 0;
 	}
 
 	switch (state) {
 	case AMDGPU_IRQ_STATE_DISABLE:
-		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
 		dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
-		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
 		break;
 	case AMDGPU_IRQ_STATE_ENABLE:
-		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
 		dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
-		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
 		break;
 	default:
 		break;
@@ -3412,7 +3273,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
 			    struct amdgpu_irq_src *source,
 			    struct amdgpu_iv_entry *entry)
 {
-	uint32_t disp_int, mask, int_control, tmp;
+	uint32_t disp_int, mask, tmp;
 	unsigned hpd;
 
 	if (entry->src_data >= adev->mode_info.num_hpd) {
@@ -3423,12 +3284,11 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
 	hpd = entry->src_data;
 	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
 	mask = interrupt_status_offsets[hpd].hpd;
-	int_control = hpd_int_control_offsets[hpd];
 
 	if (disp_int & mask) {
-		tmp = RREG32(int_control);
+		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
 		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
-		WREG32(int_control, tmp);
+		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 		schedule_work(&adev->hotplug_work);
 		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
 	}
@@ -3449,7 +3309,7 @@ static int dce_v8_0_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs dce_v8_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
 	.name = "dce_v8_0",
 	.early_init = dce_v8_0_early_init,
 	.late_init = NULL,
@@ -3779,3 +3639,48 @@ static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
 	adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
 }
+
+const struct amdgpu_ip_block_version dce_v8_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 8,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_1_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 8,
+	.minor = 1,
+	.rev = 0,
+	.funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_2_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 8,
+	.minor = 2,
+	.rev = 0,
+	.funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_3_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 8,
+	.minor = 3,
+	.rev = 0,
+	.funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_5_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 8,
+	.minor = 5,
+	.rev = 0,
+	.funcs = &dce_v8_0_ip_funcs,
+};
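
The change above collapses six copy-pasted switch cases per function into one indexed lookup: the register block for HPD pad N sits at a fixed offset from the HPD0 block, so any per-pad register is addressed as mmDC_HPD1_<reg> + hpd_offsets[n] after a bounds check against adev->mode_info.num_hpd. Below is a minimal stand-alone C sketch of that idiom; the register address, mask value, MMIO array and the read_reg()/write_reg() helpers are illustrative placeholders, not the real amdgpu accessors or DCE8 values.

#include <stdint.h>
#include <stdio.h>

/* Illustrative register map: each HPD pad's registers sit at a fixed
 * offset from the HPD0 block.  Address, offsets and mask are made up. */
#define mmDC_HPD_INT_CONTROL		0x1898u
#define DC_HPD_INT_CONTROL__EN_MASK	0x00010000u

static const uint32_t hpd_offsets[] = { 0x00, 0x0c, 0x18, 0x24, 0x30, 0x3c };
#define NUM_HPD (sizeof(hpd_offsets) / sizeof(hpd_offsets[0]))

/* Stand-ins for MMIO space and the RREG32()/WREG32() accessors. */
static uint32_t regs[0x4000];
static uint32_t read_reg(uint32_t reg)              { return regs[reg]; }
static void     write_reg(uint32_t reg, uint32_t v) { regs[reg] = v; }

/* One code path serves every pad: bounds check, then base + offset,
 * mirroring the structure of dce_v8_0_set_hpd_interrupt_state(). */
static int set_hpd_int(unsigned int hpd, int enable)
{
	uint32_t tmp;

	if (hpd >= NUM_HPD)	/* replaces the old "default:" case */
		return -1;

	tmp = read_reg(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
	if (enable)
		tmp |= DC_HPD_INT_CONTROL__EN_MASK;
	else
		tmp &= ~DC_HPD_INT_CONTROL__EN_MASK;
	write_reg(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
	return 0;
}

int main(void)
{
	set_hpd_int(2, 1);	/* enable the interrupt for pad HPD3 */
	printf("HPD3 int control = 0x%08x\n",
	       (unsigned int)read_reg(mmDC_HPD_INT_CONTROL + hpd_offsets[2]));
	return 0;
}

Adding or dropping a pad then only touches hpd_offsets[], and the single bounds check replaces the per-function default: branches of the old switch statements.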