Diffstat (limited to 'drivers/gpu/drm/i915/intel_psr.c')
 drivers/gpu/drm/i915/intel_psr.c | 248 +-
 1 file changed, 130 insertions(+), 118 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index b6838b525502..54fa17a5596a 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -71,6 +71,10 @@ static bool psr_global_enabled(u32 debug)
 static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
			       const struct intel_crtc_state *crtc_state)
 {
+	/* Disable PSR2 by default for all platforms */
+	if (i915_modparams.enable_psr == -1)
+		return false;
+
 	switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
 	case I915_PSR_DEBUG_FORCE_PSR1:
 		return false;
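
With the module parameter at its default (i915.enable_psr=-1), intel_psr2_enabled() now returns false before reaching the debug-mode switch, so PSR2 is only selected when the user opts in explicitly. A minimal standalone sketch of the gate this hunk adds (the variable and helper names are illustrative, not the real i915 definitions):

	#include <stdbool.h>

	static int enable_psr = -1;	/* stand-in for i915_modparams.enable_psr */

	static bool psr2_allowed(void)
	{
		/* the default (-1) never selects PSR2; the real code then
		 * goes on to the psr.debug mode switch shown above */
		return enable_psr != -1;
	}
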
@@ -79,25 +83,42 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
 	}
 }
 
+static int edp_psr_shift(enum transcoder cpu_transcoder)
+{
+	switch (cpu_transcoder) {
+	case TRANSCODER_A:
+		return EDP_PSR_TRANSCODER_A_SHIFT;
+	case TRANSCODER_B:
+		return EDP_PSR_TRANSCODER_B_SHIFT;
+	case TRANSCODER_C:
+		return EDP_PSR_TRANSCODER_C_SHIFT;
+	default:
+		MISSING_CASE(cpu_transcoder);
+		/* fallthrough */
+	case TRANSCODER_EDP:
+		return EDP_PSR_TRANSCODER_EDP_SHIFT;
+	}
+}
+
 void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug)
 {
 	u32 debug_mask, mask;
+	enum transcoder cpu_transcoder;
+	u32 transcoders = BIT(TRANSCODER_EDP);
+
+	if (INTEL_GEN(dev_priv) >= 8)
+		transcoders |= BIT(TRANSCODER_A) |
+			       BIT(TRANSCODER_B) |
+			       BIT(TRANSCODER_C);
 
-	mask = EDP_PSR_ERROR(TRANSCODER_EDP);
-	debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) |
-		     EDP_PSR_PRE_ENTRY(TRANSCODER_EDP);
-
-	if (INTEL_GEN(dev_priv) >= 8) {
-		mask |= EDP_PSR_ERROR(TRANSCODER_A) |
-			EDP_PSR_ERROR(TRANSCODER_B) |
-			EDP_PSR_ERROR(TRANSCODER_C);
-
-		debug_mask |= EDP_PSR_POST_EXIT(TRANSCODER_A) |
-			      EDP_PSR_PRE_ENTRY(TRANSCODER_A) |
-			      EDP_PSR_POST_EXIT(TRANSCODER_B) |
-			      EDP_PSR_PRE_ENTRY(TRANSCODER_B) |
-			      EDP_PSR_POST_EXIT(TRANSCODER_C) |
-			      EDP_PSR_PRE_ENTRY(TRANSCODER_C);
+	debug_mask = 0;
+	mask = 0;
+	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+		int shift = edp_psr_shift(cpu_transcoder);
+
+		mask |= EDP_PSR_ERROR(shift);
+		debug_mask |= EDP_PSR_POST_EXIT(shift) |
+			      EDP_PSR_PRE_ENTRY(shift);
 	}
 
 	if (debug & I915_PSR_DEBUG_IRQ)
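
Note the macro arguments changed from enum transcoder values to the result of edp_psr_shift(), which suggests EDP_PSR_ERROR()/EDP_PSR_POST_EXIT()/EDP_PSR_PRE_ENTRY() now take an explicit bit position in the PSR IIR/IMR registers, decoupled from the enum encoding (TRANSCODER_EDP in particular need not match the hardware field order). A hedged sketch of the macro shape this implies; the actual shift constants live in i915_reg.h and may differ:

	/* Assumption: each transcoder owns a 3-bit field at its shift. */
	#define EDP_PSR_PRE_ENTRY(shift)	(1 << (shift))
	#define EDP_PSR_POST_EXIT(shift)	(1 << ((shift) + 1))
	#define EDP_PSR_ERROR(shift)		(1 << ((shift) + 2))

Building mask and debug_mask in one loop over the transcoders bitmask also collapses the old gen >= 8 duplication into the single place where transcoders is extended.
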
@@ -155,18 +176,20 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
 			      BIT(TRANSCODER_C);
 
 	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+		int shift = edp_psr_shift(cpu_transcoder);
+
 		/* FIXME: Exit PSR and link train manually when this happens. */
-		if (psr_iir & EDP_PSR_ERROR(cpu_transcoder))
+		if (psr_iir & EDP_PSR_ERROR(shift))
 			DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n",
 				      transcoder_name(cpu_transcoder));
 
-		if (psr_iir & EDP_PSR_PRE_ENTRY(cpu_transcoder)) {
+		if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
 			dev_priv->psr.last_entry_attempt = time_ns;
 			DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
 				      transcoder_name(cpu_transcoder));
 		}
 
-		if (psr_iir & EDP_PSR_POST_EXIT(cpu_transcoder)) {
+		if (psr_iir & EDP_PSR_POST_EXIT(shift)) {
 			dev_priv->psr.last_exit = time_ns;
 			DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
 				      transcoder_name(cpu_transcoder));
@@ -294,7 +317,8 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
 		psr_vsc.sdp_header.HB3 = 0x8;
 	}
 
-	intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state,
+	intel_dig_port->write_infoframe(&intel_dig_port->base,
+					crtc_state,
 					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
 }
 
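Passing &intel_dig_port->base instead of &intel_dig_port->base.base drops one level of containment, which indicates the write_infoframe() hook now takes the driver's intel_encoder rather than the core drm_encoder. A reduced sketch of that containment (fields elided; see the driver headers for the real layouts):

	struct drm_encoder { int placeholder; };	/* DRM core type, elided */

	struct intel_encoder {
		struct drm_encoder base;	/* was: &intel_dig_port->base.base */
		/* ... */
	};

	struct intel_digital_port {
		struct intel_encoder base;	/* now: &intel_dig_port->base */
		/* ... */
	};
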
@@ -553,11 +577,31 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
 	dev_priv->psr.active = true;
 }
 
+static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv,
+					 enum transcoder cpu_transcoder)
+{
+	static const i915_reg_t regs[] = {
+		[TRANSCODER_A] = CHICKEN_TRANS_A,
+		[TRANSCODER_B] = CHICKEN_TRANS_B,
+		[TRANSCODER_C] = CHICKEN_TRANS_C,
+		[TRANSCODER_EDP] = CHICKEN_TRANS_EDP,
+	};
+
+	WARN_ON(INTEL_GEN(dev_priv) < 9);
+
+	if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) ||
+		    !regs[cpu_transcoder].reg))
+		cpu_transcoder = TRANSCODER_A;
+
+	return regs[cpu_transcoder];
+}
+
 static void intel_psr_enable_source(struct intel_dp *intel_dp,
 				    const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+	u32 mask;
 
 	/* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+
 	 * use hardcoded values PSR AUX transactions
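
gen9_chicken_trans_reg() is a guarded lookup table: designated initializers index registers by enum, and the WARN_ON() checks fall back to a known-good entry rather than returning a zero register offset (any enum value missing from the initializer leaves a zero-filled slot, which is what the !regs[cpu_transcoder].reg test catches). A standalone sketch of the same pattern; the 0x420xx offsets are illustrative stand-ins, not authoritative CHICKEN_TRANS_* values:

	#include <stdint.h>

	typedef struct { uint32_t reg; } reg_t;

	enum trans_idx { IDX_A, IDX_B, IDX_C, IDX_EDP };

	static reg_t chicken_trans_for(unsigned int trans)
	{
		static const reg_t regs[] = {
			[IDX_A]   = { 0x420c0 },
			[IDX_B]   = { 0x420c4 },
			[IDX_C]   = { 0x420c8 },
			[IDX_EDP] = { 0x420cc },
		};

		/* out of range or unpopulated: fall back to a safe default */
		if (trans >= sizeof(regs) / sizeof(regs[0]) || !regs[trans].reg)
			trans = IDX_A;

		return regs[trans];
	}
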
@@ -566,37 +610,34 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
 		hsw_psr_setup_aux(intel_dp);
 
 	if (dev_priv->psr.psr2_enabled) {
-		u32 chicken = I915_READ(CHICKEN_TRANS(cpu_transcoder));
+		i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
+							cpu_transcoder);
+		u32 chicken = I915_READ(reg);
 
-		if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv))
+		if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
 			chicken |= (PSR2_VSC_ENABLE_PROG_HEADER
 				   | PSR2_ADD_VERTICAL_LINE_COUNT);
 
 		else
 			chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL;
-		I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
-
-		I915_WRITE(EDP_PSR_DEBUG,
-			   EDP_PSR_DEBUG_MASK_MEMUP |
-			   EDP_PSR_DEBUG_MASK_HPD |
-			   EDP_PSR_DEBUG_MASK_LPSP |
-			   EDP_PSR_DEBUG_MASK_MAX_SLEEP |
-			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
-	} else {
-		/*
-		 * Per Spec: Avoid continuous PSR exit by masking MEMUP
-		 * and HPD. also mask LPSP to avoid dependency on other
-		 * drivers that might block runtime_pm besides
-		 * preventing other hw tracking issues now we can rely
-		 * on frontbuffer tracking.
-		 */
-		I915_WRITE(EDP_PSR_DEBUG,
-			   EDP_PSR_DEBUG_MASK_MEMUP |
-			   EDP_PSR_DEBUG_MASK_HPD |
-			   EDP_PSR_DEBUG_MASK_LPSP |
-			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE |
-			   EDP_PSR_DEBUG_MASK_MAX_SLEEP);
+		I915_WRITE(reg, chicken);
 	}
+
+	/*
+	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD also
+	 * mask LPSP to avoid dependency on other drivers that might block
+	 * runtime_pm besides preventing other hw tracking issues now we
+	 * can rely on frontbuffer tracking.
+	 */
+	mask = EDP_PSR_DEBUG_MASK_MEMUP |
+	       EDP_PSR_DEBUG_MASK_HPD |
+	       EDP_PSR_DEBUG_MASK_LPSP |
+	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;
+
+	if (INTEL_GEN(dev_priv) < 11)
+		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
+
+	I915_WRITE(EDP_PSR_DEBUG, mask);
 }
 
 static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
@@ -656,49 +697,34 @@ unlock:
 	mutex_unlock(&dev_priv->psr.lock);
 }
 
-static void
-intel_psr_disable_source(struct intel_dp *intel_dp)
+static void intel_psr_exit(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
-	if (dev_priv->psr.active) {
-		i915_reg_t psr_status;
-		u32 psr_status_mask;
-
-		if (dev_priv->psr.psr2_enabled) {
-			psr_status = EDP_PSR2_STATUS;
-			psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
-
-			I915_WRITE(EDP_PSR2_CTL,
-				   I915_READ(EDP_PSR2_CTL) &
-				   ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
-
-		} else {
-			psr_status = EDP_PSR_STATUS;
-			psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
-
-			I915_WRITE(EDP_PSR_CTL,
-				   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
-		}
+	u32 val;
 
-		/* Wait till PSR is idle */
-		if (intel_wait_for_register(dev_priv,
-					    psr_status, psr_status_mask, 0,
-					    2000))
-			DRM_ERROR("Timed out waiting for PSR Idle State\n");
-
-		dev_priv->psr.active = false;
+	if (!dev_priv->psr.active) {
+		if (INTEL_GEN(dev_priv) >= 9)
+			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
+		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+		return;
+	}
+
+	if (dev_priv->psr.psr2_enabled) {
+		val = I915_READ(EDP_PSR2_CTL);
+		WARN_ON(!(val & EDP_PSR2_ENABLE));
+		I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
 	} else {
-		if (dev_priv->psr.psr2_enabled)
-			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
-		else
-			WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+		val = I915_READ(EDP_PSR_CTL);
+		WARN_ON(!(val & EDP_PSR_ENABLE));
+		I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
 	}
+	dev_priv->psr.active = false;
 }
 
 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	i915_reg_t psr_status;
+	u32 psr_status_mask;
 
 	lockdep_assert_held(&dev_priv->psr.lock);
 
@@ -707,7 +733,21 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
 
 	DRM_DEBUG_KMS("Disabling PSR%s\n",
 		      dev_priv->psr.psr2_enabled ? "2" : "1");
-	intel_psr_disable_source(intel_dp);
+
+	intel_psr_exit(dev_priv);
+
+	if (dev_priv->psr.psr2_enabled) {
+		psr_status = EDP_PSR2_STATUS;
+		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
+	} else {
+		psr_status = EDP_PSR_STATUS;
+		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
+	}
+
+	/* Wait till PSR is idle */
+	if (intel_wait_for_register(dev_priv, psr_status, psr_status_mask, 0,
+				    2000))
+		DRM_ERROR("Timed out waiting PSR idle state\n");
 
 	/* Disable PSR on Sink */
 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
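
With intel_psr_disable_source() folded away, the disable sequence reads top to bottom: intel_psr_exit() clears the enable bit, then the status register is polled until the PSR state machine reports idle (2000 ms timeout) before the sink's DPCD is touched. A generic poll-with-deadline in the spirit of intel_wait_for_register(); mmio_read() and monotonic_ms() are hypothetical stand-ins, not i915 API:

	#include <stdint.h>

	extern uint32_t mmio_read(uint32_t reg);	/* hypothetical accessor */
	extern uint64_t monotonic_ms(void);		/* hypothetical clock */

	/* Return 0 once (read & mask) == value, -1 on timeout. */
	static int wait_for_register(uint32_t reg, uint32_t mask, uint32_t value,
				     unsigned int timeout_ms)
	{
		uint64_t deadline = monotonic_ms() + timeout_ms;

		do {
			if ((mmio_read(reg) & mask) == value)
				return 0;
		} while (monotonic_ms() < deadline);

		return -1;	/* caller reports, as DRM_ERROR() does above */
	}
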
@@ -925,25 +965,6 @@ unlock:
 	mutex_unlock(&dev_priv->psr.lock);
 }
 
-static void intel_psr_exit(struct drm_i915_private *dev_priv)
-{
-	u32 val;
-
-	if (!dev_priv->psr.active)
-		return;
-
-	if (dev_priv->psr.psr2_enabled) {
-		val = I915_READ(EDP_PSR2_CTL);
-		WARN_ON(!(val & EDP_PSR2_ENABLE));
-		I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
-	} else {
-		val = I915_READ(EDP_PSR_CTL);
-		WARN_ON(!(val & EDP_PSR_ENABLE));
-		I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
-	}
-	dev_priv->psr.active = false;
-}
-
 /**
  * intel_psr_invalidate - Invalidade PSR
  * @dev_priv: i915 device
@@ -1026,20 +1047,16 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
 
 	/* By definition flush = invalidate + flush */
 	if (frontbuffer_bits) {
-		if (dev_priv->psr.psr2_enabled) {
-			intel_psr_exit(dev_priv);
-		} else {
-			/*
-			 * Display WA #0884: all
-			 * This documented WA for bxt can be safely applied
-			 * broadly so we can force HW tracking to exit PSR
-			 * instead of disabling and re-enabling.
-			 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
-			 * but it makes more sense write to the current active
-			 * pipe.
-			 */
-			I915_WRITE(CURSURFLIVE(pipe), 0);
-		}
+		/*
+		 * Display WA #0884: all
+		 * This documented WA for bxt can be safely applied
+		 * broadly so we can force HW tracking to exit PSR
+		 * instead of disabling and re-enabling.
+		 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
+		 * but it makes more sense write to the current active
+		 * pipe.
+		 */
+		I915_WRITE(CURSURFLIVE(pipe), 0);
 	}
 
 	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
@@ -1065,12 +1082,9 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
 	if (!dev_priv->psr.sink_support)
 		return;
 
-	if (i915_modparams.enable_psr == -1) {
-		i915_modparams.enable_psr = dev_priv->vbt.psr.enable;
-
-		/* Per platform default: all disabled. */
-		i915_modparams.enable_psr = 0;
-	}
+	if (i915_modparams.enable_psr == -1)
+		if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
+			i915_modparams.enable_psr = 0;
 
 	/* Set link_standby x link_off defaults */
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
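
The old block copied the VBT value and then unconditionally overwrote it with 0; the replacement only forces 0 when the platform is pre-gen9 or the VBT leaves PSR off, so the -1 default can survive on newer parts with VBT opt-in (and the first hunk of this diff still keeps PSR2 off in that case). A compact sketch of the resolved policy as an illustrative helper that is not part of the patch:

	#include <stdbool.h>

	static int resolve_enable_psr(int modparam, int gen, bool vbt_enable)
	{
		if (modparam != -1)
			return modparam;	/* explicit user choice wins */
		if (gen < 9 || !vbt_enable)
			return 0;		/* default: PSR off */
		return -1;			/* left as auto: PSR1 may enable */
	}
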
@@ -1130,8 +1144,6 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
 		intel_psr_disable_locked(intel_dp);
 		/* clear status register */
 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
-
-	/* TODO: handle PSR2 errors */
 exit:
 	mutex_unlock(&psr->lock);
 }