Diffstat (limited to 'drivers/gpu/drm/i915/intel_dp.c')
 drivers/gpu/drm/i915/intel_dp.c | 745 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 479 insertions(+), 266 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1a431377d83b..eb8139da9763 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -38,6 +38,32 @@
 
 #define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
 
+struct dp_link_dpll {
+	int link_bw;
+	struct dpll dpll;
+};
+
+static const struct dp_link_dpll gen4_dpll[] = {
+	{ DP_LINK_BW_1_62,
+		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
+	{ DP_LINK_BW_2_7,
+		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
+};
+
+static const struct dp_link_dpll pch_dpll[] = {
+	{ DP_LINK_BW_1_62,
+		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
+	{ DP_LINK_BW_2_7,
+		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
+};
+
+static const struct dp_link_dpll vlv_dpll[] = {
+	{ DP_LINK_BW_1_62,
+		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
+	{ DP_LINK_BW_2_7,
+		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
+};
+
 /**
  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
  * @intel_dp: DP struct
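
The three dp_link_dpll tables above feed the table-driven rework of intel_dp_set_clock() later in this patch: instead of assigning each divider field by hand in per-platform if/else ladders, the function scans its platform's table for the requested link_bw. A minimal sketch of that lookup pattern (struct dpll is reduced to the divider fields used here, and pick_dpll() is a hypothetical helper, not an i915 function):

/* Sketch only: mirrors the lookup intel_dp_set_clock() performs below. */
struct dpll { int p1, p2, n, m1, m2; };

struct dp_link_dpll {
	int link_bw;
	struct dpll dpll;
};

static const struct dpll *
pick_dpll(const struct dp_link_dpll *table, int count, int link_bw)
{
	int i;

	for (i = 0; i < count; i++)
		if (table[i].link_bw == link_bw)
			return &table[i].dpll; /* caller copies into pipe_config */
	return NULL; /* no match: pipe_config->clock_set stays false */
}
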
@@ -211,24 +237,77 @@ intel_hrawclk(struct drm_device *dev)
 	}
 }
 
+static void
+intel_dp_init_panel_power_sequencer(struct drm_device *dev,
+				    struct intel_dp *intel_dp,
+				    struct edp_power_seq *out);
+static void
+intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
+					      struct intel_dp *intel_dp,
+					      struct edp_power_seq *out);
+
+static enum pipe
+vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = intel_dig_port->port;
+	enum pipe pipe;
+
+	/* modeset should have pipe */
+	if (crtc)
+		return to_intel_crtc(crtc)->pipe;
+
+	/* init time, try to find a pipe with this port selected */
+	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
+		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
+			PANEL_PORT_SELECT_MASK;
+		if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
+			return pipe;
+		if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
+			return pipe;
+	}
+
+	/* shrug */
+	return PIPE_A;
+}
+
+static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+	if (HAS_PCH_SPLIT(dev))
+		return PCH_PP_CONTROL;
+	else
+		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
+}
+
+static u32 _pp_stat_reg(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+	if (HAS_PCH_SPLIT(dev))
+		return PCH_PP_STATUS;
+	else
+		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
+}
+
 static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 pp_stat_reg;
 
-	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
-	return (I915_READ(pp_stat_reg) & PP_ON) != 0;
+	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
 }
 
 static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 pp_ctrl_reg;
 
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
-	return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0;
+	return (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
 }
 
 static void
@@ -236,19 +315,15 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 pp_stat_reg, pp_ctrl_reg;
 
 	if (!is_edp(intel_dp))
 		return;
 
-	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
-
 	if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
 		WARN(1, "eDP powered off while attempting aux channel communication.\n");
 		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
-			      I915_READ(pp_stat_reg),
-			      I915_READ(pp_ctrl_reg));
+			      I915_READ(_pp_stat_reg(intel_dp)),
+			      I915_READ(_pp_ctrl_reg(intel_dp)));
 	}
 }
 
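
The two helpers above become the single point where the panel-power register block is chosen: PCH platforms use the fixed PCH_PP_CONTROL/PCH_PP_STATUS pair, while Valleyview has one block per pipe, selected through vlv_power_sequencer_pipe(). When no CRTC is attached yet (driver init), that function recovers the pipe by reading back which port each pipe's power sequencer was left driving. A condensed sketch of the probe, with port_sel_of() and expected_port_sel() as hypothetical stand-ins for reading VLV_PIPE_PP_ON_DELAYS(pipe) and masking PANEL_PORT_SELECT_MASK:

/* Sketch of the init-time probe in vlv_power_sequencer_pipe() above. */
static enum pipe find_pps_pipe(enum port port)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++)
		if (port_sel_of(pipe) == expected_port_sel(port))
			return pipe; /* BIOS left this PPS wired to our port */

	return PIPE_A; /* nothing matched: arbitrary fallback */
}
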
@@ -330,6 +405,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 	uint32_t status;
 	int try, precharge, clock = 0;
 	bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
+	uint32_t timeout;
 
 	/* dp aux is extremely sensitive to irq latency, hence request the
 	 * lowest possible wakeup latency and so prevent the cpu from going into
@@ -344,6 +420,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 	else
 		precharge = 5;
 
+	if (IS_BROADWELL(dev) && ch_ctl == DPA_AUX_CH_CTL)
+		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
+	else
+		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
+
 	intel_aux_display_runtime_get(dev_priv);
 
 	/* Try to wait for any previous AUX channel activity */
@@ -361,6 +442,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 		goto out;
 	}
 
+	/* Only 5 data registers! */
+	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
+		ret = -E2BIG;
+		goto out;
+	}
+
 	while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
 		/* Must try at least 3 times according to DP spec */
 		for (try = 0; try < 5; try++) {
@@ -373,7 +460,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 			I915_WRITE(ch_ctl,
 				   DP_AUX_CH_CTL_SEND_BUSY |
 				   (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
-				   DP_AUX_CH_CTL_TIME_OUT_400us |
+				   timeout |
 				   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
 				   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
 				   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
@@ -451,9 +538,10 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
 	int msg_bytes;
 	uint8_t ack;
 
+	if (WARN_ON(send_bytes > 16))
+		return -E2BIG;
+
 	intel_dp_check_edp(intel_dp);
-	if (send_bytes > 16)
-		return -1;
 	msg[0] = AUX_NATIVE_WRITE << 4;
 	msg[1] = address >> 8;
 	msg[2] = address & 0xff;
@@ -494,6 +582,9 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
 	uint8_t ack;
 	int ret;
 
+	if (WARN_ON(recv_bytes > 19))
+		return -E2BIG;
+
 	intel_dp_check_edp(intel_dp);
 	msg[0] = AUX_NATIVE_READ << 4;
 	msg[1] = address >> 8;
@@ -538,6 +629,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 	int reply_bytes;
 	int ret;
 
+	ironlake_edp_panel_vdd_on(intel_dp);
 	intel_dp_check_edp(intel_dp);
 	/* Set up the command byte */
 	if (mode & MODE_I2C_READ)
@@ -569,13 +661,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 		break;
 	}
 
-	for (retry = 0; retry < 5; retry++) {
+	/*
+	 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
+	 * required to retry at least seven times upon receiving AUX_DEFER
+	 * before giving up the AUX transaction.
+	 */
+	for (retry = 0; retry < 7; retry++) {
 		ret = intel_dp_aux_ch(intel_dp,
 				      msg, msg_bytes,
 				      reply, reply_bytes);
 		if (ret < 0) {
 			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
-			return ret;
+			goto out;
 		}
 
 		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
@@ -586,7 +683,8 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 			break;
 		case AUX_NATIVE_REPLY_NACK:
 			DRM_DEBUG_KMS("aux_ch native nack\n");
-			return -EREMOTEIO;
+			ret = -EREMOTEIO;
+			goto out;
 		case AUX_NATIVE_REPLY_DEFER:
 			/*
 			 * For now, just give more slack to branch devices. We
@@ -604,7 +702,8 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 		default:
 			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
 				  reply[0]);
-			return -EREMOTEIO;
+			ret = -EREMOTEIO;
+			goto out;
 		}
 
 		switch (reply[0] & AUX_I2C_REPLY_MASK) {
@@ -612,22 +711,29 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 			if (mode == MODE_I2C_READ) {
 				*read_byte = reply[1];
 			}
-			return reply_bytes - 1;
+			ret = reply_bytes - 1;
+			goto out;
 		case AUX_I2C_REPLY_NACK:
 			DRM_DEBUG_KMS("aux_i2c nack\n");
-			return -EREMOTEIO;
+			ret = -EREMOTEIO;
+			goto out;
 		case AUX_I2C_REPLY_DEFER:
 			DRM_DEBUG_KMS("aux_i2c defer\n");
 			udelay(100);
 			break;
 		default:
 			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
-			return -EREMOTEIO;
+			ret = -EREMOTEIO;
+			goto out;
 		}
 	}
 
 	DRM_ERROR("too many retries, giving up\n");
-	return -EREMOTEIO;
+	ret = -EREMOTEIO;
+
+out:
+	ironlake_edp_panel_vdd_off(intel_dp, false);
+	return ret;
 }
 
 static int
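
Two behavioural changes land in intel_dp_i2c_aux_ch() here: every exit path now funnels through the out label so the VDD reference taken at entry is always dropped, and the AUX_DEFER retry count rises from five to seven, the minimum DP 1.2 requires of a source. A condensed sketch of that retry contract, where do_aux_transfer() is a hypothetical stand-in for intel_dp_aux_ch() plus reply decoding that returns -EBUSY on AUX_DEFER:

/* Sketch of the DP 1.2 defer handling: retry at least seven times. */
static int aux_with_defer_retries(void)
{
	int retry, ret = -EREMOTEIO;

	for (retry = 0; retry < 7; retry++) {
		ret = do_aux_transfer();
		if (ret != -EBUSY) /* only AUX_DEFER is worth retrying */
			break;
		udelay(100); /* give the sink time before the next try */
	}

	return ret;
}
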
@@ -647,11 +753,9 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
 	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
 	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
 	intel_dp->adapter.algo_data = &intel_dp->algo;
-	intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
+	intel_dp->adapter.dev.parent = intel_connector->base.kdev;
 
-	ironlake_edp_panel_vdd_on(intel_dp);
 	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
-	ironlake_edp_panel_vdd_off(intel_dp, false);
 	return ret;
 }
 
@@ -660,41 +764,30 @@ intel_dp_set_clock(struct intel_encoder *encoder,
 		   struct intel_crtc_config *pipe_config, int link_bw)
 {
 	struct drm_device *dev = encoder->base.dev;
+	const struct dp_link_dpll *divisor = NULL;
+	int i, count = 0;
 
 	if (IS_G4X(dev)) {
-		if (link_bw == DP_LINK_BW_1_62) {
-			pipe_config->dpll.p1 = 2;
-			pipe_config->dpll.p2 = 10;
-			pipe_config->dpll.n = 2;
-			pipe_config->dpll.m1 = 23;
-			pipe_config->dpll.m2 = 8;
-		} else {
-			pipe_config->dpll.p1 = 1;
-			pipe_config->dpll.p2 = 10;
-			pipe_config->dpll.n = 1;
-			pipe_config->dpll.m1 = 14;
-			pipe_config->dpll.m2 = 2;
-		}
-		pipe_config->clock_set = true;
+		divisor = gen4_dpll;
+		count = ARRAY_SIZE(gen4_dpll);
 	} else if (IS_HASWELL(dev)) {
 		/* Haswell has special-purpose DP DDI clocks. */
 	} else if (HAS_PCH_SPLIT(dev)) {
-		if (link_bw == DP_LINK_BW_1_62) {
-			pipe_config->dpll.n = 1;
-			pipe_config->dpll.p1 = 2;
-			pipe_config->dpll.p2 = 10;
-			pipe_config->dpll.m1 = 12;
-			pipe_config->dpll.m2 = 9;
-		} else {
-			pipe_config->dpll.n = 2;
-			pipe_config->dpll.p1 = 1;
-			pipe_config->dpll.p2 = 10;
-			pipe_config->dpll.m1 = 14;
-			pipe_config->dpll.m2 = 8;
-		}
-		pipe_config->clock_set = true;
+		divisor = pch_dpll;
+		count = ARRAY_SIZE(pch_dpll);
 	} else if (IS_VALLEYVIEW(dev)) {
-		/* FIXME: Need to figure out optimized DP clocks for vlv. */
+		divisor = vlv_dpll;
+		count = ARRAY_SIZE(vlv_dpll);
+	}
+
+	if (divisor && count) {
+		for (i = 0; i < count; i++) {
+			if (link_bw == divisor[i].link_bw) {
+				pipe_config->dpll = divisor[i].dpll;
+				pipe_config->clock_set = true;
+				break;
+			}
+		}
 	}
 }
 
@@ -737,19 +830,22 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 
 	DRM_DEBUG_KMS("DP link computation with max lane count %i "
 		      "max bw %02x pixel clock %iKHz\n",
-		      max_lane_count, bws[max_clock], adjusted_mode->clock);
+		      max_lane_count, bws[max_clock],
+		      adjusted_mode->crtc_clock);
 
 	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
 	 * bpc in between. */
 	bpp = pipe_config->pipe_bpp;
-	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) {
+	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
+	    dev_priv->vbt.edp_bpp < bpp) {
 		DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
 			      dev_priv->vbt.edp_bpp);
-		bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);
+		bpp = dev_priv->vbt.edp_bpp;
 	}
 
 	for (; bpp >= 6*3; bpp -= 2*3) {
-		mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
+		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
+						   bpp);
 
 		for (clock = 0; clock <= max_clock; clock++) {
 			for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
@@ -794,7 +890,8 @@ found:
 		       mode_rate, link_avail);
 
 	intel_link_compute_m_n(bpp, lane_count,
-			       adjusted_mode->clock, pipe_config->port_clock,
+			       adjusted_mode->crtc_clock,
+			       pipe_config->port_clock,
 			       &pipe_config->dp_m_n);
 
 	intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
@@ -802,21 +899,6 @@ found:
802 return true; 899 return true;
803} 900}
804 901
-void intel_dp_init_link_config(struct intel_dp *intel_dp)
-{
-	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
-	intel_dp->link_configuration[0] = intel_dp->link_bw;
-	intel_dp->link_configuration[1] = intel_dp->lane_count;
-	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
-	/*
-	 * Check for DPCD version > 1.1 and enhanced framing support
-	 */
-	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
-	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
-		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-	}
-}
-
 static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -889,8 +971,6 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
 		intel_write_eld(&encoder->base, adjusted_mode);
 	}
 
-	intel_dp_init_link_config(intel_dp);
-
 	/* Split out the IBX/CPU vs CPT settings */
 
 	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
@@ -900,7 +980,7 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
 			intel_dp->DP |= DP_SYNC_VS_HIGH;
 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
 
-		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
 			intel_dp->DP |= DP_ENHANCED_FRAMING;
 
 		intel_dp->DP |= crtc->pipe << 29;
@@ -914,7 +994,7 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
 			intel_dp->DP |= DP_SYNC_VS_HIGH;
 		intel_dp->DP |= DP_LINK_TRAIN_OFF;
 
-		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
 			intel_dp->DP |= DP_ENHANCED_FRAMING;
 
 		if (crtc->pipe == 1)
@@ -944,8 +1024,8 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp_stat_reg, pp_ctrl_reg;
 
-	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+	pp_stat_reg = _pp_stat_reg(intel_dp);
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
 	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
 		      mask, value,
@@ -987,11 +1067,8 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 control;
-	u32 pp_ctrl_reg;
-
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
-	control = I915_READ(pp_ctrl_reg);
 
+	control = I915_READ(_pp_ctrl_reg(intel_dp));
 	control &= ~PANEL_UNLOCK_MASK;
 	control |= PANEL_UNLOCK_REGS;
 	return control;
@@ -1006,17 +1083,16 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
 
 	if (!is_edp(intel_dp))
 		return;
-	DRM_DEBUG_KMS("Turn eDP VDD on\n");
 
 	WARN(intel_dp->want_panel_vdd,
 	     "eDP VDD already requested on\n");
 
 	intel_dp->want_panel_vdd = true;
 
-	if (ironlake_edp_have_panel_vdd(intel_dp)) {
-		DRM_DEBUG_KMS("eDP VDD already on\n");
+	if (ironlake_edp_have_panel_vdd(intel_dp))
 		return;
-	}
+
+	DRM_DEBUG_KMS("Turning eDP VDD on\n");
 
 	if (!ironlake_edp_have_panel_power(intel_dp))
 		ironlake_wait_panel_power_cycle(intel_dp);
@@ -1024,8 +1100,8 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
 	pp = ironlake_get_pp_control(intel_dp);
 	pp |= EDP_FORCE_VDD;
 
-	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+	pp_stat_reg = _pp_stat_reg(intel_dp);
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
 	I915_WRITE(pp_ctrl_reg, pp);
 	POSTING_READ(pp_ctrl_reg);
@@ -1050,11 +1126,13 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
 	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 
 	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
+		DRM_DEBUG_KMS("Turning eDP VDD off\n");
+
 		pp = ironlake_get_pp_control(intel_dp);
 		pp &= ~EDP_FORCE_VDD;
 
-		pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
-		pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+		pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+		pp_stat_reg = _pp_stat_reg(intel_dp);
 
 		I915_WRITE(pp_ctrl_reg, pp);
 		POSTING_READ(pp_ctrl_reg);
@@ -1082,7 +1160,6 @@ void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 	if (!is_edp(intel_dp))
 		return;
 
-	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
 	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
 
 	intel_dp->want_panel_vdd = false;
@@ -1119,20 +1196,19 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
 
 	ironlake_wait_panel_power_cycle(intel_dp);
 
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 	pp = ironlake_get_pp_control(intel_dp);
 	if (IS_GEN5(dev)) {
 		/* ILK workaround: disable reset around power sequence */
 		pp &= ~PANEL_POWER_RESET;
-		I915_WRITE(PCH_PP_CONTROL, pp);
-		POSTING_READ(PCH_PP_CONTROL);
+		I915_WRITE(pp_ctrl_reg, pp);
+		POSTING_READ(pp_ctrl_reg);
 	}
 
 	pp |= POWER_TARGET_ON;
 	if (!IS_GEN5(dev))
 		pp |= PANEL_POWER_RESET;
 
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
-
 	I915_WRITE(pp_ctrl_reg, pp);
 	POSTING_READ(pp_ctrl_reg);
 
@@ -1140,8 +1216,8 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
 
 	if (IS_GEN5(dev)) {
 		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
-		I915_WRITE(PCH_PP_CONTROL, pp);
-		POSTING_READ(PCH_PP_CONTROL);
+		I915_WRITE(pp_ctrl_reg, pp);
+		POSTING_READ(pp_ctrl_reg);
 	}
 }
 
@@ -1164,7 +1240,7 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
 	 * panels get very unhappy and cease to work. */
 	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
 
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
 	I915_WRITE(pp_ctrl_reg, pp);
 	POSTING_READ(pp_ctrl_reg);
@@ -1179,7 +1255,6 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
 	u32 pp;
 	u32 pp_ctrl_reg;
 
@@ -1197,12 +1272,12 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
 	pp = ironlake_get_pp_control(intel_dp);
 	pp |= EDP_BLC_ENABLE;
 
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
 	I915_WRITE(pp_ctrl_reg, pp);
 	POSTING_READ(pp_ctrl_reg);
 
-	intel_panel_enable_backlight(dev, pipe);
+	intel_panel_enable_backlight(intel_dp->attached_connector);
 }
 
 void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
@@ -1215,13 +1290,13 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
 	if (!is_edp(intel_dp))
 		return;
 
-	intel_panel_disable_backlight(dev);
+	intel_panel_disable_backlight(intel_dp->attached_connector);
 
 	DRM_DEBUG_KMS("\n");
 	pp = ironlake_get_pp_control(intel_dp);
 	pp &= ~EDP_BLC_ENABLE;
 
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
 	I915_WRITE(pp_ctrl_reg, pp);
 	POSTING_READ(pp_ctrl_reg);
@@ -1368,6 +1443,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum port port = dp_to_dig_port(intel_dp)->port;
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	int dotclock;
 
 	if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
 		tmp = I915_READ(intel_dp->output_reg);
@@ -1395,13 +1471,25 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
 
 	pipe_config->adjusted_mode.flags |= flags;
 
-	if (dp_to_dig_port(intel_dp)->port == PORT_A) {
+	pipe_config->has_dp_encoder = true;
+
+	intel_dp_get_m_n(crtc, pipe_config);
+
+	if (port == PORT_A) {
 		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
 			pipe_config->port_clock = 162000;
 		else
 			pipe_config->port_clock = 270000;
 	}
 
+	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+					    &pipe_config->dp_m_n);
+
+	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
+		ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+	pipe_config->adjusted_mode.crtc_clock = dotclock;
+
 	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
 	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
 		/*
@@ -1423,20 +1511,21 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
 	}
 }
 
-static bool is_edp_psr(struct intel_dp *intel_dp)
+static bool is_edp_psr(struct drm_device *dev)
 {
-	return is_edp(intel_dp) &&
-	       intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	return dev_priv->psr.sink_support;
 }
 
 static bool intel_edp_is_psr_enabled(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!IS_HASWELL(dev))
+	if (!HAS_PSR(dev))
 		return false;
 
-	return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
+	return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
 }
 
 static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
@@ -1486,7 +1575,7 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
 	intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
 
 	/* Avoid continuous PSR exit by masking memup and hpd */
-	I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
+	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
 		   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
 
 	intel_dp->psr_setup_done = true;
@@ -1511,9 +1600,9 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
 			    DP_PSR_MAIN_LINK_ACTIVE);
 
 	/* Setup AUX registers */
-	I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND);
-	I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION);
-	I915_WRITE(EDP_PSR_AUX_CTL,
+	I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
+	I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
+	I915_WRITE(EDP_PSR_AUX_CTL(dev),
 		   DP_AUX_CH_CTL_TIME_OUT_400us |
 		   (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
 		   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
@@ -1527,6 +1616,7 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
 	uint32_t max_sleep_time = 0x1f;
 	uint32_t idle_frames = 1;
 	uint32_t val = 0x0;
+	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
 
 	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
 		val |= EDP_PSR_LINK_STANDBY;
@@ -1536,8 +1626,8 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
 	} else
 		val |= EDP_PSR_LINK_DISABLE;
 
-	I915_WRITE(EDP_PSR_CTL, val |
-		   EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES |
+	I915_WRITE(EDP_PSR_CTL(dev), val |
+		   IS_BROADWELL(dev) ? 0 : link_entry_time |
 		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
 		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
 		   EDP_PSR_ENABLE);
@@ -1553,42 +1643,33 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
 	struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
 
-	if (!IS_HASWELL(dev)) {
+	dev_priv->psr.source_ok = false;
+
+	if (!HAS_PSR(dev)) {
 		DRM_DEBUG_KMS("PSR not supported on this platform\n");
-		dev_priv->no_psr_reason = PSR_NO_SOURCE;
 		return false;
 	}
 
 	if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
 	    (dig_port->port != PORT_A)) {
 		DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
-		dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA;
-		return false;
-	}
-
-	if (!is_edp_psr(intel_dp)) {
-		DRM_DEBUG_KMS("PSR not supported by this panel\n");
-		dev_priv->no_psr_reason = PSR_NO_SINK;
 		return false;
 	}
 
 	if (!i915_enable_psr) {
 		DRM_DEBUG_KMS("PSR disable by flag\n");
-		dev_priv->no_psr_reason = PSR_MODULE_PARAM;
 		return false;
 	}
 
 	crtc = dig_port->base.base.crtc;
 	if (crtc == NULL) {
 		DRM_DEBUG_KMS("crtc not active for PSR\n");
-		dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
 		return false;
 	}
 
 	intel_crtc = to_intel_crtc(crtc);
-	if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) {
+	if (!intel_crtc_active(crtc)) {
 		DRM_DEBUG_KMS("crtc not active for PSR\n");
-		dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
 		return false;
 	}
 
@@ -1596,29 +1677,26 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
 	if (obj->tiling_mode != I915_TILING_X ||
 	    obj->fence_reg == I915_FENCE_REG_NONE) {
 		DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
-		dev_priv->no_psr_reason = PSR_NOT_TILED;
 		return false;
 	}
 
 	if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
 		DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
-		dev_priv->no_psr_reason = PSR_SPRITE_ENABLED;
 		return false;
 	}
 
 	if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
 	    S3D_ENABLE) {
 		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
-		dev_priv->no_psr_reason = PSR_S3D_ENABLED;
 		return false;
 	}
 
-	if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
+	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
 		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
-		dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED;
 		return false;
 	}
 
+	dev_priv->psr.source_ok = true;
 	return true;
 }
 
@@ -1657,10 +1735,11 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp)
 	if (!intel_edp_is_psr_enabled(dev))
 		return;
 
-	I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
+	I915_WRITE(EDP_PSR_CTL(dev),
+		   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
 
 	/* Wait till PSR is idle */
-	if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
+	if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
 		       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
 		DRM_ERROR("Timed out waiting for PSR Idle State\n");
 }
@@ -1674,7 +1753,7 @@ void intel_edp_psr_update(struct drm_device *dev)
 		if (encoder->type == INTEL_OUTPUT_EDP) {
 			intel_dp = enc_to_intel_dp(&encoder->base);
 
-			if (!is_edp_psr(intel_dp))
+			if (!is_edp_psr(dev))
 				return;
 
 			if (!intel_edp_psr_match_conditions(intel_dp))
@@ -1733,14 +1812,24 @@ static void intel_enable_dp(struct intel_encoder *encoder)
 	ironlake_edp_panel_vdd_off(intel_dp, true);
 	intel_dp_complete_link_train(intel_dp);
 	intel_dp_stop_link_train(intel_dp);
+}
+
+static void g4x_enable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+	intel_enable_dp(encoder);
 	ironlake_edp_backlight_on(intel_dp);
 }
 
 static void vlv_enable_dp(struct intel_encoder *encoder)
 {
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+	ironlake_edp_backlight_on(intel_dp);
 }
 
-static void intel_pre_enable_dp(struct intel_encoder *encoder)
+static void g4x_pre_enable_dp(struct intel_encoder *encoder)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
@@ -1758,53 +1847,59 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
 	int port = vlv_dport_to_channel(dport);
 	int pipe = intel_crtc->pipe;
+	struct edp_power_seq power_seq;
 	u32 val;
 
 	mutex_lock(&dev_priv->dpio_lock);
 
-	val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
+	val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
 	val = 0;
 	if (pipe)
 		val |= (1<<21);
 	else
 		val &= ~(1<<21);
 	val |= 0x001000c4;
-	vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
-	vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
-	vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
+	vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
 
 	mutex_unlock(&dev_priv->dpio_lock);
 
+	/* init power sequencer on this pipe and port */
+	intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+						      &power_seq);
+
 	intel_enable_dp(encoder);
 
 	vlv_wait_port_ready(dev_priv, port);
 }
 
-static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
+static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
 {
 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(encoder->base.crtc);
 	int port = vlv_dport_to_channel(dport);
-
-	if (!IS_VALLEYVIEW(dev))
-		return;
+	int pipe = intel_crtc->pipe;
 
 	/* Program Tx lane resets to default */
 	mutex_lock(&dev_priv->dpio_lock);
-	vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
 		       DPIO_PCS_TX_LANE2_RESET |
 		       DPIO_PCS_TX_LANE1_RESET);
-	vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
 		       DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
 		       DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
 		       (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
 		       DPIO_PCS_CLK_SOFT_RESET);
 
 	/* Fix up inter-pair skew failure */
-	vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
-	vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
-	vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
 	mutex_unlock(&dev_priv->dpio_lock);
 }
 
@@ -1869,7 +1964,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	enum port port = dp_to_dig_port(intel_dp)->port;
 
-	if (IS_VALLEYVIEW(dev))
+	if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev))
 		return DP_TRAIN_VOLTAGE_SWING_1200;
 	else if (IS_GEN7(dev) && port == PORT_A)
 		return DP_TRAIN_VOLTAGE_SWING_800;
@@ -1885,7 +1980,18 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	enum port port = dp_to_dig_port(intel_dp)->port;
 
-	if (HAS_DDI(dev)) {
+	if (IS_BROADWELL(dev)) {
+		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_400:
+		case DP_TRAIN_VOLTAGE_SWING_600:
+			return DP_TRAIN_PRE_EMPHASIS_6;
+		case DP_TRAIN_VOLTAGE_SWING_800:
+			return DP_TRAIN_PRE_EMPHASIS_3_5;
+		case DP_TRAIN_VOLTAGE_SWING_1200:
+		default:
+			return DP_TRAIN_PRE_EMPHASIS_0;
+		}
+	} else if (IS_HASWELL(dev)) {
 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
 		case DP_TRAIN_VOLTAGE_SWING_400:
 			return DP_TRAIN_PRE_EMPHASIS_9_5;
@@ -1939,10 +2045,13 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(dport->base.base.crtc);
 	unsigned long demph_reg_value, preemph_reg_value,
 		uniqtranscale_reg_value;
 	uint8_t train_set = intel_dp->train_set[0];
 	int port = vlv_dport_to_channel(dport);
+	int pipe = intel_crtc->pipe;
 
 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
 	case DP_TRAIN_PRE_EMPHASIS_0:
@@ -2018,21 +2127,22 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
 	}
 
 	mutex_lock(&dev_priv->dpio_lock);
-	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
-	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
-	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x00000000);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port), demph_reg_value);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
 		       uniqtranscale_reg_value);
-	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 0x0C782040);
-	vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
-	vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
-	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port), 0x0C782040);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x80000000);
 	mutex_unlock(&dev_priv->dpio_lock);
 
 	return 0;
 }
 
 static void
-intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
+intel_get_adjust_train(struct intel_dp *intel_dp,
+		       const uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
 	uint8_t v = 0;
 	uint8_t p = 0;
@@ -2193,6 +2303,41 @@ intel_hsw_signal_levels(uint8_t train_set)
 	}
 }
 
+static uint32_t
+intel_bdw_signal_levels(uint8_t train_set)
+{
+	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+					 DP_TRAIN_PRE_EMPHASIS_MASK);
+	switch (signal_levels) {
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+		return DDI_BUF_EMP_400MV_0DB_BDW;	/* Sel0 */
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return DDI_BUF_EMP_400MV_3_5DB_BDW;	/* Sel1 */
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+		return DDI_BUF_EMP_400MV_6DB_BDW;	/* Sel2 */
+
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+		return DDI_BUF_EMP_600MV_0DB_BDW;	/* Sel3 */
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return DDI_BUF_EMP_600MV_3_5DB_BDW;	/* Sel4 */
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+		return DDI_BUF_EMP_600MV_6DB_BDW;	/* Sel5 */
+
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+		return DDI_BUF_EMP_800MV_0DB_BDW;	/* Sel6 */
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return DDI_BUF_EMP_800MV_3_5DB_BDW;	/* Sel7 */
+
+	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
+		return DDI_BUF_EMP_1200MV_0DB_BDW;	/* Sel8 */
+
+	default:
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+			      "0x%x\n", signal_levels);
+		return DDI_BUF_EMP_400MV_0DB_BDW;	/* Sel0 */
+	}
+}
+
 /* Properly updates "DP" with the correct signal levels. */
 static void
 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
@@ -2203,7 +2348,10 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
 	uint32_t signal_levels, mask;
 	uint8_t train_set = intel_dp->train_set[0];
 
-	if (HAS_DDI(dev)) {
+	if (IS_BROADWELL(dev)) {
+		signal_levels = intel_bdw_signal_levels(train_set);
+		mask = DDI_BUF_EMP_MASK;
+	} else if (IS_HASWELL(dev)) {
 		signal_levels = intel_hsw_signal_levels(train_set);
 		mask = DDI_BUF_EMP_MASK;
 	} else if (IS_VALLEYVIEW(dev)) {
@@ -2227,14 +2375,15 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
 
 static bool
 intel_dp_set_link_train(struct intel_dp *intel_dp,
-			uint32_t dp_reg_value,
+			uint32_t *DP,
 			uint8_t dp_train_pat)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum port port = intel_dig_port->port;
-	int ret;
+	uint8_t buf[sizeof(intel_dp->train_set) + 1];
+	int ret, len;
 
 	if (HAS_DDI(dev)) {
 		uint32_t temp = I915_READ(DP_TP_CTL(port));
@@ -2263,62 +2412,93 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
2263 I915_WRITE(DP_TP_CTL(port), temp); 2412 I915_WRITE(DP_TP_CTL(port), temp);
2264 2413
2265 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) { 2414 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2266 dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT; 2415 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2267 2416
2268 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 2417 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2269 case DP_TRAINING_PATTERN_DISABLE: 2418 case DP_TRAINING_PATTERN_DISABLE:
2270 dp_reg_value |= DP_LINK_TRAIN_OFF_CPT; 2419 *DP |= DP_LINK_TRAIN_OFF_CPT;
2271 break; 2420 break;
2272 case DP_TRAINING_PATTERN_1: 2421 case DP_TRAINING_PATTERN_1:
2273 dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT; 2422 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2274 break; 2423 break;
2275 case DP_TRAINING_PATTERN_2: 2424 case DP_TRAINING_PATTERN_2:
2276 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; 2425 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2277 break; 2426 break;
2278 case DP_TRAINING_PATTERN_3: 2427 case DP_TRAINING_PATTERN_3:
2279 DRM_ERROR("DP training pattern 3 not supported\n"); 2428 DRM_ERROR("DP training pattern 3 not supported\n");
2280 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; 2429 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2281 break; 2430 break;
2282 } 2431 }
2283 2432
2284 } else { 2433 } else {
2285 dp_reg_value &= ~DP_LINK_TRAIN_MASK; 2434 *DP &= ~DP_LINK_TRAIN_MASK;
2286 2435
2287 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 2436 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2288 case DP_TRAINING_PATTERN_DISABLE: 2437 case DP_TRAINING_PATTERN_DISABLE:
2289 dp_reg_value |= DP_LINK_TRAIN_OFF; 2438 *DP |= DP_LINK_TRAIN_OFF;
2290 break; 2439 break;
2291 case DP_TRAINING_PATTERN_1: 2440 case DP_TRAINING_PATTERN_1:
2292 dp_reg_value |= DP_LINK_TRAIN_PAT_1; 2441 *DP |= DP_LINK_TRAIN_PAT_1;
2293 break; 2442 break;
2294 case DP_TRAINING_PATTERN_2: 2443 case DP_TRAINING_PATTERN_2:
2295 dp_reg_value |= DP_LINK_TRAIN_PAT_2; 2444 *DP |= DP_LINK_TRAIN_PAT_2;
2296 break; 2445 break;
2297 case DP_TRAINING_PATTERN_3: 2446 case DP_TRAINING_PATTERN_3:
2298 DRM_ERROR("DP training pattern 3 not supported\n"); 2447 DRM_ERROR("DP training pattern 3 not supported\n");
2299 dp_reg_value |= DP_LINK_TRAIN_PAT_2; 2448 *DP |= DP_LINK_TRAIN_PAT_2;
2300 break; 2449 break;
2301 } 2450 }
2302 } 2451 }
2303 2452
2304 I915_WRITE(intel_dp->output_reg, dp_reg_value); 2453 I915_WRITE(intel_dp->output_reg, *DP);
2305 POSTING_READ(intel_dp->output_reg); 2454 POSTING_READ(intel_dp->output_reg);
2306 2455
2307 intel_dp_aux_native_write_1(intel_dp, 2456 buf[0] = dp_train_pat;
2308 DP_TRAINING_PATTERN_SET, 2457 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
2309 dp_train_pat);
2310
2311 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
2312 DP_TRAINING_PATTERN_DISABLE) { 2458 DP_TRAINING_PATTERN_DISABLE) {
2313 ret = intel_dp_aux_native_write(intel_dp, 2459 /* don't write DP_TRAINING_LANEx_SET on disable */
2314 DP_TRAINING_LANE0_SET, 2460 len = 1;
2315 intel_dp->train_set, 2461 } else {
2316 intel_dp->lane_count); 2462 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
2317 if (ret != intel_dp->lane_count) 2463 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
2318 return false; 2464 len = intel_dp->lane_count + 1;
2319 } 2465 }
2320 2466
2321 return true; 2467 ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_PATTERN_SET,
2468 buf, len);
2469
2470 return ret == len;
2471}
2472
2473static bool
2474intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
2475 uint8_t dp_train_pat)
2476{
2477 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
2478 intel_dp_set_signal_levels(intel_dp, DP);
2479 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
2480}
2481
2482static bool
2483intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
2484 const uint8_t link_status[DP_LINK_STATUS_SIZE])
2485{
2486 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2487 struct drm_device *dev = intel_dig_port->base.base.dev;
2488 struct drm_i915_private *dev_priv = dev->dev_private;
2489 int ret;
2490
2491 intel_get_adjust_train(intel_dp, link_status);
2492 intel_dp_set_signal_levels(intel_dp, DP);
2493
2494 I915_WRITE(intel_dp->output_reg, *DP);
2495 POSTING_READ(intel_dp->output_reg);
2496
2497 ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET,
2498 intel_dp->train_set,
2499 intel_dp->lane_count);
2500
2501 return ret == intel_dp->lane_count;
2322} 2502}
2323 2503
2324static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) 2504static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
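
The three helpers above restructure the link-training AUX traffic. Since DPCD address 0x102 (DP_TRAINING_PATTERN_SET) is immediately followed by 0x103-0x106 (DP_TRAINING_LANE0_SET through LANE3_SET), the pattern byte and the per-lane drive settings can go out in a single AUX burst instead of two separate writes. A minimal user-space sketch of that packing; dp_aux_write() is a hypothetical stand-in for intel_dp_aux_native_write():

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    #define DP_TRAINING_PATTERN_SET     0x102 /* 0x103..0x106: LANE0..3_SET */
    #define DP_TRAINING_PATTERN_MASK    0x3
    #define DP_TRAINING_PATTERN_DISABLE 0x0

    /* stub standing in for intel_dp_aux_native_write(); pretends success */
    int dp_aux_write(uint16_t address, const uint8_t *buf, int len)
    {
            (void)address; (void)buf;
            return len;
    }

    bool set_link_train(uint8_t dp_train_pat,
                        const uint8_t *train_set, int lane_count)
    {
            uint8_t buf[5]; /* one pattern byte + up to four lane bytes */
            int len;

            buf[0] = dp_train_pat;
            if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
                DP_TRAINING_PATTERN_DISABLE) {
                    len = 1;        /* never touch LANEx_SET on disable */
            } else {
                    memcpy(buf + 1, train_set, (size_t)lane_count);
                    len = lane_count + 1;
            }

            return dp_aux_write(DP_TRAINING_PATTERN_SET, buf, len) == len;
    }

intel_dp_reset_link_train() layers the (re)start sequence on top of this (zero the train set, program signal levels, select a pattern), while intel_dp_update_link_train() covers the steady-state adjust step, rewriting the port register and pushing only the LANEx_SET bytes.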
@@ -2362,32 +2542,37 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
2362 uint8_t voltage; 2542 uint8_t voltage;
2363 int voltage_tries, loop_tries; 2543 int voltage_tries, loop_tries;
2364 uint32_t DP = intel_dp->DP; 2544 uint32_t DP = intel_dp->DP;
2545 uint8_t link_config[2];
2365 2546
2366 if (HAS_DDI(dev)) 2547 if (HAS_DDI(dev))
2367 intel_ddi_prepare_link_retrain(encoder); 2548 intel_ddi_prepare_link_retrain(encoder);
2368 2549
2369 /* Write the link configuration data */ 2550 /* Write the link configuration data */
2370 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, 2551 link_config[0] = intel_dp->link_bw;
2371 intel_dp->link_configuration, 2552 link_config[1] = intel_dp->lane_count;
2372 DP_LINK_CONFIGURATION_SIZE); 2553 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2554 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
2555 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2);
2556
2557 link_config[0] = 0;
2558 link_config[1] = DP_SET_ANSI_8B10B;
2559 intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2);
2373 2560
2374 DP |= DP_PORT_EN; 2561 DP |= DP_PORT_EN;
2375 2562
2376 memset(intel_dp->train_set, 0, 4); 2563 /* clock recovery */
2564 if (!intel_dp_reset_link_train(intel_dp, &DP,
2565 DP_TRAINING_PATTERN_1 |
2566 DP_LINK_SCRAMBLING_DISABLE)) {
2567 DRM_ERROR("failed to enable link training\n");
2568 return;
2569 }
2570
2377 voltage = 0xff; 2571 voltage = 0xff;
2378 voltage_tries = 0; 2572 voltage_tries = 0;
2379 loop_tries = 0; 2573 loop_tries = 0;
2380 for (;;) { 2574 for (;;) {
2381 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 2575 uint8_t link_status[DP_LINK_STATUS_SIZE];
2382 uint8_t link_status[DP_LINK_STATUS_SIZE];
2383
2384 intel_dp_set_signal_levels(intel_dp, &DP);
2385
2386 /* Set training pattern 1 */
2387 if (!intel_dp_set_link_train(intel_dp, DP,
2388 DP_TRAINING_PATTERN_1 |
2389 DP_LINK_SCRAMBLING_DISABLE))
2390 break;
2391 2576
2392 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd); 2577 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
2393 if (!intel_dp_get_link_status(intel_dp, link_status)) { 2578 if (!intel_dp_get_link_status(intel_dp, link_status)) {
@@ -2407,10 +2592,12 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
2407 if (i == intel_dp->lane_count) { 2592 if (i == intel_dp->lane_count) {
2408 ++loop_tries; 2593 ++loop_tries;
2409 if (loop_tries == 5) { 2594 if (loop_tries == 5) {
2410 DRM_DEBUG_KMS("too many full retries, give up\n"); 2595 DRM_ERROR("too many full retries, give up\n");
2411 break; 2596 break;
2412 } 2597 }
2413 memset(intel_dp->train_set, 0, 4); 2598 intel_dp_reset_link_train(intel_dp, &DP,
2599 DP_TRAINING_PATTERN_1 |
2600 DP_LINK_SCRAMBLING_DISABLE);
2414 voltage_tries = 0; 2601 voltage_tries = 0;
2415 continue; 2602 continue;
2416 } 2603 }
@@ -2419,15 +2606,18 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
2419 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { 2606 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
2420 ++voltage_tries; 2607 ++voltage_tries;
2421 if (voltage_tries == 5) { 2608 if (voltage_tries == 5) {
2422 DRM_DEBUG_KMS("too many voltage retries, give up\n"); 2609 DRM_ERROR("too many voltage retries, give up\n");
2423 break; 2610 break;
2424 } 2611 }
2425 } else 2612 } else
2426 voltage_tries = 0; 2613 voltage_tries = 0;
2427 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 2614 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
2428 2615
2429 /* Compute new intel_dp->train_set as requested by target */ 2616 /* Update training set as requested by target */
2430 intel_get_adjust_train(intel_dp, link_status); 2617 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
2618 DRM_ERROR("failed to update link training\n");
2619 break;
2620 }
2431 } 2621 }
2432 2622
2433 intel_dp->DP = DP; 2623 intel_dp->DP = DP;
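
With the intel_dp->link_configuration cache gone, intel_dp_start_link_train() rebuilds the link parameters on the stack as two two-byte bursts: DPCD 0x100/0x101 (link rate and lane count, plus the enhanced-framing flag) and 0x107/0x108 (downspread control and ANSI 8B10B channel coding). A sketch of the same packing, reusing the dp_aux_write() stub from the sketch above:

    #include <stdbool.h>
    #include <stdint.h>

    #define DP_LINK_BW_SET                  0x100 /* 0x101: DP_LANE_COUNT_SET */
    #define DP_LANE_COUNT_ENHANCED_FRAME_EN 0x80
    #define DP_DOWNSPREAD_CTRL              0x107 /* 0x108: channel coding set */
    #define DP_SET_ANSI_8B10B               0x1

    int dp_aux_write(uint16_t address, const uint8_t *buf, int len); /* as above */

    void write_link_config(uint8_t link_bw, uint8_t lane_count,
                           bool enhanced_framing)
    {
            uint8_t link_config[2];

            link_config[0] = link_bw;       /* e.g. DP_LINK_BW_2_7 */
            link_config[1] = lane_count;
            if (enhanced_framing)
                    link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
            dp_aux_write(DP_LINK_BW_SET, link_config, 2);

            link_config[0] = 0;             /* no downspread */
            link_config[1] = DP_SET_ANSI_8B10B;
            dp_aux_write(DP_DOWNSPREAD_CTRL, link_config, 2);
    }

The clock-recovery loop also changes shape: DP_TRAINING_PATTERN_1 is now selected once, via intel_dp_reset_link_train() ahead of the loop (and again only on a full reset after max-swing exhaustion), while each iteration merely polls link status and pushes the adjusted levels through intel_dp_update_link_train().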
@@ -2441,11 +2631,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
2441 uint32_t DP = intel_dp->DP; 2631 uint32_t DP = intel_dp->DP;
2442 2632
2443 /* channel equalization */ 2633 /* channel equalization */
2634 if (!intel_dp_set_link_train(intel_dp, &DP,
2635 DP_TRAINING_PATTERN_2 |
2636 DP_LINK_SCRAMBLING_DISABLE)) {
2637 DRM_ERROR("failed to start channel equalization\n");
2638 return;
2639 }
2640
2444 tries = 0; 2641 tries = 0;
2445 cr_tries = 0; 2642 cr_tries = 0;
2446 channel_eq = false; 2643 channel_eq = false;
2447 for (;;) { 2644 for (;;) {
2448 uint8_t link_status[DP_LINK_STATUS_SIZE]; 2645 uint8_t link_status[DP_LINK_STATUS_SIZE];
2449 2646
2450 if (cr_tries > 5) { 2647 if (cr_tries > 5) {
2451 DRM_ERROR("failed to train DP, aborting\n"); 2648 DRM_ERROR("failed to train DP, aborting\n");
@@ -2453,21 +2650,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
2453 break; 2650 break;
2454 } 2651 }
2455 2652
2456 intel_dp_set_signal_levels(intel_dp, &DP);
2457
2458 /* channel eq pattern */
2459 if (!intel_dp_set_link_train(intel_dp, DP,
2460 DP_TRAINING_PATTERN_2 |
2461 DP_LINK_SCRAMBLING_DISABLE))
2462 break;
2463
2464 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd); 2653 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
2465 if (!intel_dp_get_link_status(intel_dp, link_status)) 2654 if (!intel_dp_get_link_status(intel_dp, link_status)) {
2655 DRM_ERROR("failed to get link status\n");
2466 break; 2656 break;
2657 }
2467 2658
2468 /* Make sure clock is still ok */ 2659 /* Make sure clock is still ok */
2469 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { 2660 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
2470 intel_dp_start_link_train(intel_dp); 2661 intel_dp_start_link_train(intel_dp);
2662 intel_dp_set_link_train(intel_dp, &DP,
2663 DP_TRAINING_PATTERN_2 |
2664 DP_LINK_SCRAMBLING_DISABLE);
2471 cr_tries++; 2665 cr_tries++;
2472 continue; 2666 continue;
2473 } 2667 }
@@ -2481,13 +2675,19 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
2481 if (tries > 5) { 2675 if (tries > 5) {
2482 intel_dp_link_down(intel_dp); 2676 intel_dp_link_down(intel_dp);
2483 intel_dp_start_link_train(intel_dp); 2677 intel_dp_start_link_train(intel_dp);
2678 intel_dp_set_link_train(intel_dp, &DP,
2679 DP_TRAINING_PATTERN_2 |
2680 DP_LINK_SCRAMBLING_DISABLE);
2484 tries = 0; 2681 tries = 0;
2485 cr_tries++; 2682 cr_tries++;
2486 continue; 2683 continue;
2487 } 2684 }
2488 2685
2489 /* Compute new intel_dp->train_set as requested by target */ 2686 /* Update training set as requested by target */
2490 intel_get_adjust_train(intel_dp, link_status); 2687 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
2688 DRM_ERROR("failed to update link training\n");
2689 break;
2690 }
2491 ++tries; 2691 ++tries;
2492 } 2692 }
2493 2693
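
Channel equalization follows the same pattern: DP_TRAINING_PATTERN_2 is selected once before the loop, and any fallback through intel_dp_start_link_train() must re-select it, since the restart path leaves the sink on pattern 1. A toy model of the retry accounting only; the status checks are fabricated stand-ins for the drm_dp_* helpers:

    #include <stdbool.h>

    /* toy link-status source: CR drops once, eq passes on the 4th poll */
    static int polls;
    static bool clock_recovery_ok(void) { return polls != 2; }
    static bool channel_eq_ok(void)     { return polls >= 4; }

    bool complete_link_train(void)
    {
            int tries = 0, cr_tries = 0;

            for (;;) {
                    if (cr_tries > 5)
                            return false;   /* failed to train DP, aborting */

                    polls++;

                    if (!clock_recovery_ok()) {
                            /* restart from pattern 1, re-select pattern 2 */
                            cr_tries++;
                            continue;
                    }

                    if (channel_eq_ok())
                            return true;

                    if (tries > 5) {        /* eq stalled: restart as well */
                            tries = 0;
                            cr_tries++;
                            continue;
                    }

                    tries++;                /* update train_set, poll again */
            }
    }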
@@ -2502,7 +2702,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
2502 2702
2503void intel_dp_stop_link_train(struct intel_dp *intel_dp) 2703void intel_dp_stop_link_train(struct intel_dp *intel_dp)
2504{ 2704{
2505 intel_dp_set_link_train(intel_dp, intel_dp->DP, 2705 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2506 DP_TRAINING_PATTERN_DISABLE); 2706 DP_TRAINING_PATTERN_DISABLE);
2507} 2707}
2508 2708
@@ -2589,6 +2789,10 @@ intel_dp_link_down(struct intel_dp *intel_dp)
2589static bool 2789static bool
2590intel_dp_get_dpcd(struct intel_dp *intel_dp) 2790intel_dp_get_dpcd(struct intel_dp *intel_dp)
2591{ 2791{
2792 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2793 struct drm_device *dev = dig_port->base.base.dev;
2794 struct drm_i915_private *dev_priv = dev->dev_private;
2795
2592 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3]; 2796 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
2593 2797
2594 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, 2798 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
@@ -2604,11 +2808,16 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
2604 2808
2605 /* Check if the panel supports PSR */ 2809 /* Check if the panel supports PSR */
2606 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd)); 2810 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
2607 intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT, 2811 if (is_edp(intel_dp)) {
2608 intel_dp->psr_dpcd, 2812 intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
2609 sizeof(intel_dp->psr_dpcd)); 2813 intel_dp->psr_dpcd,
2610 if (is_edp_psr(intel_dp)) 2814 sizeof(intel_dp->psr_dpcd));
2611 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n"); 2815 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
2816 dev_priv->psr.sink_support = true;
2817 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
2818 }
2819 }
2820
2612 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 2821 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
2613 DP_DWN_STRM_PORT_PRESENT)) 2822 DP_DWN_STRM_PORT_PRESENT))
2614 return true; /* native DP sink */ 2823 return true; /* native DP sink */
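
In intel_dp_get_dpcd(), the PSR capability read is now restricted to eDP, and the result is cached in dev_priv->psr.sink_support rather than re-derived from the raw DPCD bytes each time. A hedged sketch of the probe; DP_PSR_SUPPORT is DPCD 0x070, and the stub AUX read fakes a PSR-capable panel:

    #include <stdbool.h>
    #include <stdint.h>

    #define DP_PSR_SUPPORT      0x070
    #define DP_PSR_IS_SUPPORTED 0x1

    /* stub AUX read faking a PSR-capable panel */
    static bool dp_aux_read(uint16_t address, uint8_t *buf, int len)
    {
            (void)address; (void)len;
            buf[0] = DP_PSR_IS_SUPPORTED;
            return true;
    }

    bool sink_supports_psr(bool is_edp)
    {
            uint8_t psr_dpcd[2] = {0};      /* 0x070 support, 0x071 caps */

            if (!is_edp)
                    return false;   /* PSR is a panel feature, skip for DP */

            if (!dp_aux_read(DP_PSR_SUPPORT, psr_dpcd, sizeof(psr_dpcd)))
                    return false;

            return psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
    }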
@@ -2728,7 +2937,6 @@ static enum drm_connector_status
2728intel_dp_detect_dpcd(struct intel_dp *intel_dp) 2937intel_dp_detect_dpcd(struct intel_dp *intel_dp)
2729{ 2938{
2730 uint8_t *dpcd = intel_dp->dpcd; 2939 uint8_t *dpcd = intel_dp->dpcd;
2731 bool hpd;
2732 uint8_t type; 2940 uint8_t type;
2733 2941
2734 if (!intel_dp_get_dpcd(intel_dp)) 2942 if (!intel_dp_get_dpcd(intel_dp))
@@ -2739,8 +2947,8 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
2739 return connector_status_connected; 2947 return connector_status_connected;
2740 2948
2741 /* If we're HPD-aware, SINK_COUNT changes dynamically */ 2949 /* If we're HPD-aware, SINK_COUNT changes dynamically */
2742 hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD); 2950 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
2743 if (hpd) { 2951 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
2744 uint8_t reg; 2952 uint8_t reg;
2745 if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT, 2953 if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
2746 &reg, 1)) 2954 &reg, 1))
@@ -2754,9 +2962,18 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
2754 return connector_status_connected; 2962 return connector_status_connected;
2755 2963
2756 /* Well we tried, say unknown for unreliable port types */ 2964 /* Well we tried, say unknown for unreliable port types */
2757 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; 2965 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
2758 if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID) 2966 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
2759 return connector_status_unknown; 2967 if (type == DP_DS_PORT_TYPE_VGA ||
2968 type == DP_DS_PORT_TYPE_NON_EDID)
2969 return connector_status_unknown;
2970 } else {
2971 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
2972 DP_DWN_STRM_PORT_TYPE_MASK;
2973 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
2974 type == DP_DWN_STRM_PORT_TYPE_OTHER)
2975 return connector_status_unknown;
2976 }
2760 2977
2761 /* Anything else is out of spec, warn and ignore */ 2978 /* Anything else is out of spec, warn and ignore */
2762 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n"); 2979 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
@@ -2830,19 +3047,11 @@ intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
2830 3047
2831 /* use cached edid if we have one */ 3048 /* use cached edid if we have one */
2832 if (intel_connector->edid) { 3049 if (intel_connector->edid) {
2833 struct edid *edid;
2834 int size;
2835
2836 /* invalid edid */ 3050 /* invalid edid */
2837 if (IS_ERR(intel_connector->edid)) 3051 if (IS_ERR(intel_connector->edid))
2838 return NULL; 3052 return NULL;
2839 3053
2840 size = (intel_connector->edid->extensions + 1) * EDID_LENGTH; 3054 return drm_edid_duplicate(intel_connector->edid);
2841 edid = kmemdup(intel_connector->edid, size, GFP_KERNEL);
2842 if (!edid)
2843 return NULL;
2844
2845 return edid;
2846 } 3055 }
2847 3056
2848 return drm_get_edid(connector, adapter); 3057 return drm_get_edid(connector, adapter);
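
The cached-EDID path drops its open-coded kmemdup in favor of the new drm_edid_duplicate() helper; the size computation is the same one the removed lines used, a 128-byte base block plus one 128-byte block per extension. A user-space equivalent of what the helper does:

    #include <stdlib.h>
    #include <string.h>

    #define EDID_LENGTH 128 /* one EDID block */

    /* duplicate a cached EDID: base block + one block per extension */
    unsigned char *edid_duplicate(const unsigned char *edid)
    {
            /* byte 126 of the base block holds the extension count */
            size_t size = ((size_t)edid[126] + 1) * EDID_LENGTH;
            unsigned char *copy = malloc(size);

            if (copy)
                    memcpy(copy, edid, size);
            return copy;
    }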
@@ -3050,7 +3259,6 @@ intel_dp_connector_destroy(struct drm_connector *connector)
3050 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 3259 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3051 intel_panel_fini(&intel_connector->panel); 3260 intel_panel_fini(&intel_connector->panel);
3052 3261
3053 drm_sysfs_connector_remove(connector);
3054 drm_connector_cleanup(connector); 3262 drm_connector_cleanup(connector);
3055 kfree(connector); 3263 kfree(connector);
3056} 3264}
@@ -3121,7 +3329,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
3121bool intel_dpd_is_edp(struct drm_device *dev) 3329bool intel_dpd_is_edp(struct drm_device *dev)
3122{ 3330{
3123 struct drm_i915_private *dev_priv = dev->dev_private; 3331 struct drm_i915_private *dev_priv = dev->dev_private;
3124 struct child_device_config *p_child; 3332 union child_device_config *p_child;
3125 int i; 3333 int i;
3126 3334
3127 if (!dev_priv->vbt.child_dev_num) 3335 if (!dev_priv->vbt.child_dev_num)
@@ -3130,8 +3338,9 @@ bool intel_dpd_is_edp(struct drm_device *dev)
3130 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { 3338 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
3131 p_child = dev_priv->vbt.child_dev + i; 3339 p_child = dev_priv->vbt.child_dev + i;
3132 3340
3133 if (p_child->dvo_port == PORT_IDPD && 3341 if (p_child->common.dvo_port == PORT_IDPD &&
3134 p_child->device_type == DEVICE_TYPE_eDP) 3342 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
3343 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
3135 return true; 3344 return true;
3136 } 3345 }
3137 return false; 3346 return false;
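
Besides switching to the new union child_device_config, intel_dpd_is_edp() stops requiring an exact device_type match: the VBT field is a bitmask that BIOSes populate inconsistently, so only the eDP-distinguishing bits (DEVICE_TYPE_eDP_BITS) are compared and everything else is ignored. Schematically, with the actual mask values left to the driver headers:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Masked compare: only the bits named in edp_bits participate, so a
     * BIOS that sets extra device_type flags still matches. The real
     * DEVICE_TYPE_eDP / DEVICE_TYPE_eDP_BITS values are not reproduced.
     */
    bool is_edp_device_type(uint16_t device_type,
                            uint16_t edp_type, uint16_t edp_bits)
    {
            return (device_type & edp_bits) == (edp_type & edp_bits);
    }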
@@ -3164,24 +3373,26 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
3164 struct drm_i915_private *dev_priv = dev->dev_private; 3373 struct drm_i915_private *dev_priv = dev->dev_private;
3165 struct edp_power_seq cur, vbt, spec, final; 3374 struct edp_power_seq cur, vbt, spec, final;
3166 u32 pp_on, pp_off, pp_div, pp; 3375 u32 pp_on, pp_off, pp_div, pp;
3167 int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg; 3376 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
3168 3377
3169 if (HAS_PCH_SPLIT(dev)) { 3378 if (HAS_PCH_SPLIT(dev)) {
3170 pp_control_reg = PCH_PP_CONTROL; 3379 pp_ctrl_reg = PCH_PP_CONTROL;
3171 pp_on_reg = PCH_PP_ON_DELAYS; 3380 pp_on_reg = PCH_PP_ON_DELAYS;
3172 pp_off_reg = PCH_PP_OFF_DELAYS; 3381 pp_off_reg = PCH_PP_OFF_DELAYS;
3173 pp_div_reg = PCH_PP_DIVISOR; 3382 pp_div_reg = PCH_PP_DIVISOR;
3174 } else { 3383 } else {
3175 pp_control_reg = PIPEA_PP_CONTROL; 3384 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
3176 pp_on_reg = PIPEA_PP_ON_DELAYS; 3385
3177 pp_off_reg = PIPEA_PP_OFF_DELAYS; 3386 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
3178 pp_div_reg = PIPEA_PP_DIVISOR; 3387 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
3388 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
3389 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
3179 } 3390 }
3180 3391
3181 /* Workaround: Need to write PP_CONTROL with the unlock key as 3392 /* Workaround: Need to write PP_CONTROL with the unlock key as
3182 * the very first thing. */ 3393 * the very first thing. */
3183 pp = ironlake_get_pp_control(intel_dp); 3394 pp = ironlake_get_pp_control(intel_dp);
3184 I915_WRITE(pp_control_reg, pp); 3395 I915_WRITE(pp_ctrl_reg, pp);
3185 3396
3186 pp_on = I915_READ(pp_on_reg); 3397 pp_on = I915_READ(pp_on_reg);
3187 pp_off = I915_READ(pp_off_reg); 3398 pp_off = I915_READ(pp_off_reg);
@@ -3269,9 +3480,11 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
3269 pp_off_reg = PCH_PP_OFF_DELAYS; 3480 pp_off_reg = PCH_PP_OFF_DELAYS;
3270 pp_div_reg = PCH_PP_DIVISOR; 3481 pp_div_reg = PCH_PP_DIVISOR;
3271 } else { 3482 } else {
3272 pp_on_reg = PIPEA_PP_ON_DELAYS; 3483 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
3273 pp_off_reg = PIPEA_PP_OFF_DELAYS; 3484
3274 pp_div_reg = PIPEA_PP_DIVISOR; 3485 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
3486 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
3487 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
3275 } 3488 }
3276 3489
3277 /* And finally store the new values in the power sequencer. */ 3490 /* And finally store the new values in the power sequencer. */
@@ -3288,12 +3501,15 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
3288 /* Haswell doesn't have any port selection bits for the panel 3501 /* Haswell doesn't have any port selection bits for the panel
3289 * power sequencer any more. */ 3502 * power sequencer any more. */
3290 if (IS_VALLEYVIEW(dev)) { 3503 if (IS_VALLEYVIEW(dev)) {
3291 port_sel = I915_READ(pp_on_reg) & 0xc0000000; 3504 if (dp_to_dig_port(intel_dp)->port == PORT_B)
3505 port_sel = PANEL_PORT_SELECT_DPB_VLV;
3506 else
3507 port_sel = PANEL_PORT_SELECT_DPC_VLV;
3292 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { 3508 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
3293 if (dp_to_dig_port(intel_dp)->port == PORT_A) 3509 if (dp_to_dig_port(intel_dp)->port == PORT_A)
3294 port_sel = PANEL_POWER_PORT_DP_A; 3510 port_sel = PANEL_PORT_SELECT_DPA;
3295 else 3511 else
3296 port_sel = PANEL_POWER_PORT_DP_D; 3512 port_sel = PANEL_PORT_SELECT_DPD;
3297 } 3513 }
3298 3514
3299 pp_on |= port_sel; 3515 pp_on |= port_sel;
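
Rather than preserving whatever the BIOS left in the port-select field (the old code masked PP_ON_DELAYS with 0xc0000000, i.e. bits 31:30), the VLV path now programs the field explicitly from the DP port in use. A sketch of the field update; only the bit position is taken from the old mask, and the DPB/DPC encodings here are assumptions:

    #include <stdint.h>

    #define PANEL_PORT_SELECT_MASK 0xc0000000u /* bits 31:30, per old code */

    /* assumed field encodings for the two VLV eDP-capable ports */
    enum vlv_pp_port { PP_PORT_DPB = 1, PP_PORT_DPC = 2 };

    uint32_t set_pp_port_select(uint32_t pp_on, enum vlv_pp_port port)
    {
            pp_on &= ~PANEL_PORT_SELECT_MASK;
            pp_on |= (uint32_t)port << 30;
            return pp_on;
    }

Writing the field explicitly also makes the HEAD-side pipe lookup at init time deterministic, since the register now always names the port that owns the power sequencer.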
@@ -3346,7 +3562,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3346 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, 3562 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
3347 &power_seq); 3563 &power_seq);
3348 3564
3349 ironlake_edp_panel_vdd_on(intel_dp);
3350 edid = drm_get_edid(connector, &intel_dp->adapter); 3565 edid = drm_get_edid(connector, &intel_dp->adapter);
3351 if (edid) { 3566 if (edid) {
3352 if (drm_add_edid_modes(connector, edid)) { 3567 if (drm_add_edid_modes(connector, edid)) {
@@ -3378,8 +3593,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3378 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 3593 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
3379 } 3594 }
3380 3595
3381 ironlake_edp_panel_vdd_off(intel_dp, false);
3382
3383 intel_panel_init(&intel_connector->panel, fixed_mode); 3596 intel_panel_init(&intel_connector->panel, fixed_mode);
3384 intel_panel_setup_backlight(connector); 3597 intel_panel_setup_backlight(connector);
3385 3598
@@ -3536,11 +3749,11 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
3536 struct drm_encoder *encoder; 3749 struct drm_encoder *encoder;
3537 struct intel_connector *intel_connector; 3750 struct intel_connector *intel_connector;
3538 3751
3539 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); 3752 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
3540 if (!intel_dig_port) 3753 if (!intel_dig_port)
3541 return; 3754 return;
3542 3755
3543 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 3756 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
3544 if (!intel_connector) { 3757 if (!intel_connector) {
3545 kfree(intel_dig_port); 3758 kfree(intel_dig_port);
3546 return; 3759 return;
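
The kzalloc() calls switch to the sizeof(*ptr) idiom, tying the allocation size to the variable rather than a repeated type name, so it cannot go stale if the type ever changes. The same idiom in plain C:

    #include <stdlib.h>

    struct example_connector { int dummy; }; /* stand-in type */

    int main(void)
    {
            /* sizeof(*c) tracks the pointee type; nothing to keep in sync */
            struct example_connector *c = calloc(1, sizeof(*c));

            free(c);
            return 0;
    }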
@@ -3559,12 +3772,12 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
3559 intel_encoder->get_hw_state = intel_dp_get_hw_state; 3772 intel_encoder->get_hw_state = intel_dp_get_hw_state;
3560 intel_encoder->get_config = intel_dp_get_config; 3773 intel_encoder->get_config = intel_dp_get_config;
3561 if (IS_VALLEYVIEW(dev)) { 3774 if (IS_VALLEYVIEW(dev)) {
3562 intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable; 3775 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
3563 intel_encoder->pre_enable = vlv_pre_enable_dp; 3776 intel_encoder->pre_enable = vlv_pre_enable_dp;
3564 intel_encoder->enable = vlv_enable_dp; 3777 intel_encoder->enable = vlv_enable_dp;
3565 } else { 3778 } else {
3566 intel_encoder->pre_enable = intel_pre_enable_dp; 3779 intel_encoder->pre_enable = g4x_pre_enable_dp;
3567 intel_encoder->enable = intel_enable_dp; 3780 intel_encoder->enable = g4x_enable_dp;
3568 } 3781 }
3569 3782
3570 intel_dig_port->port = port; 3783 intel_dig_port->port = port;