aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/i915/intel_display.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/i915/intel_display.c')
-rw-r--r--drivers/gpu/drm/i915/intel_display.c355
1 files changed, 267 insertions, 88 deletions
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index de51489de23c..5d127e068950 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1506,24 +1506,26 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1506 1506
1507/* SBI access */ 1507/* SBI access */
1508static void 1508static void
1509intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value) 1509intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
1510 enum intel_sbi_destination destination)
1510{ 1511{
1511 unsigned long flags; 1512 unsigned long flags;
1513 u32 tmp;
1512 1514
1513 spin_lock_irqsave(&dev_priv->dpio_lock, flags); 1515 spin_lock_irqsave(&dev_priv->dpio_lock, flags);
1514 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 1516 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
1515 100)) {
1516 DRM_ERROR("timeout waiting for SBI to become ready\n"); 1517 DRM_ERROR("timeout waiting for SBI to become ready\n");
1517 goto out_unlock; 1518 goto out_unlock;
1518 } 1519 }
1519 1520
1520 I915_WRITE(SBI_ADDR, 1521 I915_WRITE(SBI_ADDR, (reg << 16));
1521 (reg << 16)); 1522 I915_WRITE(SBI_DATA, value);
1522 I915_WRITE(SBI_DATA, 1523
1523 value); 1524 if (destination == SBI_ICLK)
1524 I915_WRITE(SBI_CTL_STAT, 1525 tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
1525 SBI_BUSY | 1526 else
1526 SBI_CTL_OP_CRWR); 1527 tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
1528 I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
1527 1529
1528 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, 1530 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
1529 100)) { 1531 100)) {
@@ -1536,23 +1538,25 @@ out_unlock:
1536} 1538}
1537 1539
1538static u32 1540static u32
1539intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg) 1541intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
1542 enum intel_sbi_destination destination)
1540{ 1543{
1541 unsigned long flags; 1544 unsigned long flags;
1542 u32 value = 0; 1545 u32 value = 0;
1543 1546
1544 spin_lock_irqsave(&dev_priv->dpio_lock, flags); 1547 spin_lock_irqsave(&dev_priv->dpio_lock, flags);
1545 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 1548 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
1546 100)) {
1547 DRM_ERROR("timeout waiting for SBI to become ready\n"); 1549 DRM_ERROR("timeout waiting for SBI to become ready\n");
1548 goto out_unlock; 1550 goto out_unlock;
1549 } 1551 }
1550 1552
1551 I915_WRITE(SBI_ADDR, 1553 I915_WRITE(SBI_ADDR, (reg << 16));
1552 (reg << 16)); 1554
1553 I915_WRITE(SBI_CTL_STAT, 1555 if (destination == SBI_ICLK)
1554 SBI_BUSY | 1556 value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
1555 SBI_CTL_OP_CRRD); 1557 else
1558 value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
1559 I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
1556 1560
1557 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, 1561 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
1558 100)) { 1562 100)) {
@@ -2424,18 +2428,6 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
2424 FDI_FE_ERRC_ENABLE); 2428 FDI_FE_ERRC_ENABLE);
2425} 2429}
2426 2430
2427static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
2428{
2429 struct drm_i915_private *dev_priv = dev->dev_private;
2430 u32 flags = I915_READ(SOUTH_CHICKEN1);
2431
2432 flags |= FDI_PHASE_SYNC_OVR(pipe);
2433 I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
2434 flags |= FDI_PHASE_SYNC_EN(pipe);
2435 I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
2436 POSTING_READ(SOUTH_CHICKEN1);
2437}
2438
2439static void ivb_modeset_global_resources(struct drm_device *dev) 2431static void ivb_modeset_global_resources(struct drm_device *dev)
2440{ 2432{
2441 struct drm_i915_private *dev_priv = dev->dev_private; 2433 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2610,8 +2602,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
2610 POSTING_READ(reg); 2602 POSTING_READ(reg);
2611 udelay(150); 2603 udelay(150);
2612 2604
2613 cpt_phase_pointer_enable(dev, pipe);
2614
2615 for (i = 0; i < 4; i++) { 2605 for (i = 0; i < 4; i++) {
2616 reg = FDI_TX_CTL(pipe); 2606 reg = FDI_TX_CTL(pipe);
2617 temp = I915_READ(reg); 2607 temp = I915_READ(reg);
@@ -2744,8 +2734,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2744 POSTING_READ(reg); 2734 POSTING_READ(reg);
2745 udelay(150); 2735 udelay(150);
2746 2736
2747 cpt_phase_pointer_enable(dev, pipe);
2748
2749 for (i = 0; i < 4; i++) { 2737 for (i = 0; i < 4; i++) {
2750 reg = FDI_TX_CTL(pipe); 2738 reg = FDI_TX_CTL(pipe);
2751 temp = I915_READ(reg); 2739 temp = I915_READ(reg);
@@ -2884,17 +2872,6 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
2884 udelay(100); 2872 udelay(100);
2885} 2873}
2886 2874
2887static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2888{
2889 struct drm_i915_private *dev_priv = dev->dev_private;
2890 u32 flags = I915_READ(SOUTH_CHICKEN1);
2891
2892 flags &= ~(FDI_PHASE_SYNC_EN(pipe));
2893 I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
2894 flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
2895 I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
2896 POSTING_READ(SOUTH_CHICKEN1);
2897}
2898static void ironlake_fdi_disable(struct drm_crtc *crtc) 2875static void ironlake_fdi_disable(struct drm_crtc *crtc)
2899{ 2876{
2900 struct drm_device *dev = crtc->dev; 2877 struct drm_device *dev = crtc->dev;
@@ -2921,8 +2898,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
2921 /* Ironlake workaround, disable clock pointer after downing FDI */ 2898 /* Ironlake workaround, disable clock pointer after downing FDI */
2922 if (HAS_PCH_IBX(dev)) { 2899 if (HAS_PCH_IBX(dev)) {
2923 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 2900 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2924 } else if (HAS_PCH_CPT(dev)) {
2925 cpt_phase_pointer_disable(dev, pipe);
2926 } 2901 }
2927 2902
2928 /* still set train pattern 1 */ 2903 /* still set train pattern 1 */
@@ -3024,8 +2999,9 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
3024 2999
3025 /* Disable SSCCTL */ 3000 /* Disable SSCCTL */
3026 intel_sbi_write(dev_priv, SBI_SSCCTL6, 3001 intel_sbi_write(dev_priv, SBI_SSCCTL6,
3027 intel_sbi_read(dev_priv, SBI_SSCCTL6) | 3002 intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
3028 SBI_SSCCTL_DISABLE); 3003 SBI_SSCCTL_DISABLE,
3004 SBI_ICLK);
3029 3005
3030 /* 20MHz is a corner case which is out of range for the 7-bit divisor */ 3006 /* 20MHz is a corner case which is out of range for the 7-bit divisor */
3031 if (crtc->mode.clock == 20000) { 3007 if (crtc->mode.clock == 20000) {
@@ -3066,33 +3042,25 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
3066 phaseinc); 3042 phaseinc);
3067 3043
3068 /* Program SSCDIVINTPHASE6 */ 3044 /* Program SSCDIVINTPHASE6 */
3069 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6); 3045 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3070 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; 3046 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3071 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel); 3047 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3072 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK; 3048 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3073 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc); 3049 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3074 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir); 3050 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3075 temp |= SBI_SSCDIVINTPHASE_PROPAGATE; 3051 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3076 3052 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3077 intel_sbi_write(dev_priv,
3078 SBI_SSCDIVINTPHASE6,
3079 temp);
3080 3053
3081 /* Program SSCAUXDIV */ 3054 /* Program SSCAUXDIV */
3082 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6); 3055 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3083 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1); 3056 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3084 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv); 3057 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3085 intel_sbi_write(dev_priv, 3058 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3086 SBI_SSCAUXDIV6,
3087 temp);
3088
3089 3059
3090 /* Enable modulator and associated divider */ 3060 /* Enable modulator and associated divider */
3091 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6); 3061 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3092 temp &= ~SBI_SSCCTL_DISABLE; 3062 temp &= ~SBI_SSCCTL_DISABLE;
3093 intel_sbi_write(dev_priv, 3063 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3094 SBI_SSCCTL6,
3095 temp);
3096 3064
3097 /* Wait for initialization time */ 3065 /* Wait for initialization time */
3098 udelay(24); 3066 udelay(24);
@@ -4878,10 +4846,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4878 return ret; 4846 return ret;
4879} 4847}
4880 4848
4881/* 4849static void ironlake_init_pch_refclk(struct drm_device *dev)
4882 * Initialize reference clocks when the driver loads
4883 */
4884void ironlake_init_pch_refclk(struct drm_device *dev)
4885{ 4850{
4886 struct drm_i915_private *dev_priv = dev->dev_private; 4851 struct drm_i915_private *dev_priv = dev->dev_private;
4887 struct drm_mode_config *mode_config = &dev->mode_config; 4852 struct drm_mode_config *mode_config = &dev->mode_config;
@@ -4995,6 +4960,182 @@ void ironlake_init_pch_refclk(struct drm_device *dev)
4995 } 4960 }
4996} 4961}
4997 4962
/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O.
 *
 * Only runs when an analog (VGA) encoder is present, since that is the
 * output that needs the FDI reference clock here. The sequence is
 * strictly ordered hardware programming: iCLK SSCCTL setup, an optional
 * mPHY reset handshake, then a long series of mPHY tuning writes.
 * NOTE(review): the bare hex offsets (0x8008, 0x2008, ...) are raw mPHY
 * register addresses taken from the PCH programming sequence; their
 * individual meanings are not documented in this file — do not reorder.
 */
static void lpt_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	bool has_vga = false;
	bool is_sdv = false;
	u32 tmp;

	/* Scan registered encoders for an analog output. */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_vga = true;
			break;
		}
	}

	if (!has_vga)
		return;

	/* XXX: Rip out SDV support once Haswell ships for real. */
	if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
		is_sdv = true;

	/* Un-disable SSC but keep the alternate path selected first... */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	/* ...let it settle... */
	udelay(24);

	/* ...then switch off the alternate path. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	if (!is_sdv) {
		/* Assert the FDI mPHY reset and wait for it to latch. */
		tmp = I915_READ(SOUTH_CHICKEN2);
		tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
		I915_WRITE(SOUTH_CHICKEN2, tmp);

		if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
				       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
			DRM_ERROR("FDI mPHY reset assert timeout\n");

		/* De-assert the reset and wait for completion. */
		tmp = I915_READ(SOUTH_CHICKEN2);
		tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
		I915_WRITE(SOUTH_CHICKEN2, tmp);

		if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
				        FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
				       100))
			DRM_ERROR("FDI mPHY reset de-assert timeout\n");
	}

	/* mPHY tuning writes: read-modify-write each register in order. */
	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	if (!is_sdv) {
		tmp = intel_sbi_read(dev_priv, 0x808C, SBI_MPHY);
		tmp &= ~(0x3 << 6);
		tmp |= (1 << 6) | (1 << 0);
		intel_sbi_write(dev_priv, 0x808C, tmp, SBI_MPHY);
	}

	if (is_sdv) {
		tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
		tmp |= 0x7FFF;
		intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
	}

	/* The following pairs program the two FDI channels symmetrically
	 * (0x2xxx and 0x21xx offsets mirror each other). */
	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	if (is_sdv) {
		tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
		tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
		intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);

		tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
		tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
		intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);

		tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
		tmp |= (0x3F << 8);
		intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);

		tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
		tmp |= (0x3F << 8);
		intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
	}

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	if (!is_sdv) {
		tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
		tmp &= ~(7 << 13);
		tmp |= (5 << 13);
		intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

		tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
		tmp &= ~(7 << 13);
		tmp |= (5 << 13);
		intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
	}

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	if (!is_sdv) {
		tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
		tmp |= (1 << 27);
		intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

		tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
		tmp |= (1 << 27);
		intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

		tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
		tmp &= ~(0xF << 28);
		tmp |= (4 << 28);
		intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

		tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
		tmp &= ~(0xF << 28);
		tmp |= (4 << 28);
		intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
	}

	/* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
	tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
	tmp |= SBI_DBUFF0_ENABLE;
	intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
}
5127
5128/*
5129 * Initialize reference clocks when the driver loads
5130 */
/*
 * Initialize reference clocks when the driver loads: dispatch to the
 * PCH-generation-specific sequence (Ibex Peak/CougarPoint vs LynxPoint).
 */
void intel_init_pch_refclk(struct drm_device *dev)
{
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		ironlake_init_pch_refclk(dev);
		return;
	}

	if (HAS_PCH_LPT(dev))
		lpt_init_pch_refclk(dev);
}
5138
4998static int ironlake_get_refclk(struct drm_crtc *crtc) 5139static int ironlake_get_refclk(struct drm_crtc *crtc)
4999{ 5140{
5000 struct drm_device *dev = crtc->dev; 5141 struct drm_device *dev = crtc->dev;
@@ -5239,6 +5380,17 @@ static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
5239 } 5380 }
5240} 5381}
5241 5382
/*
 * Compute the minimum number of FDI lanes required to carry
 * @target_clock (kHz) at @bpp bits per pixel over a link running at
 * @link_bw, padding the bandwidth so spread spectrum cannot
 * oversubscribe the link.
 */
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Max center spread is 2.5%; pad by 5% (i.e. * 21 / 20) for
	 * safety's sake.
	 */
	uint32_t padded_bps = target_clock * bpp * 21 / 20;
	uint32_t lane_bps = link_bw * 8;

	return padded_bps / lane_bps + 1;
}
5393
5242static void ironlake_set_m_n(struct drm_crtc *crtc, 5394static void ironlake_set_m_n(struct drm_crtc *crtc,
5243 struct drm_display_mode *mode, 5395 struct drm_display_mode *mode,
5244 struct drm_display_mode *adjusted_mode) 5396 struct drm_display_mode *adjusted_mode)
@@ -5292,15 +5444,9 @@ static void ironlake_set_m_n(struct drm_crtc *crtc,
5292 else 5444 else
5293 target_clock = adjusted_mode->clock; 5445 target_clock = adjusted_mode->clock;
5294 5446
5295 if (!lane) { 5447 if (!lane)
5296 /* 5448 lane = ironlake_get_lanes_required(target_clock, link_bw,
5297 * Account for spread spectrum to avoid 5449 intel_crtc->bpp);
5298 * oversubscribing the link. Max center spread
5299 * is 2.5%; use 5% for safety's sake.
5300 */
5301 u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
5302 lane = bps / (link_bw * 8) + 1;
5303 }
5304 5450
5305 intel_crtc->fdi_lanes = lane; 5451 intel_crtc->fdi_lanes = lane;
5306 5452
@@ -6940,11 +7086,18 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
6940 7086
6941 spin_lock_irqsave(&dev->event_lock, flags); 7087 spin_lock_irqsave(&dev->event_lock, flags);
6942 work = intel_crtc->unpin_work; 7088 work = intel_crtc->unpin_work;
6943 if (work == NULL || !work->pending) { 7089
7090 /* Ensure we don't miss a work->pending update ... */
7091 smp_rmb();
7092
7093 if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
6944 spin_unlock_irqrestore(&dev->event_lock, flags); 7094 spin_unlock_irqrestore(&dev->event_lock, flags);
6945 return; 7095 return;
6946 } 7096 }
6947 7097
7098 /* and that the unpin work is consistent wrt ->pending. */
7099 smp_rmb();
7100
6948 intel_crtc->unpin_work = NULL; 7101 intel_crtc->unpin_work = NULL;
6949 7102
6950 if (work->event) 7103 if (work->event)
@@ -6988,16 +7141,25 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
6988 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); 7141 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
6989 unsigned long flags; 7142 unsigned long flags;
6990 7143
7144 /* NB: An MMIO update of the plane base pointer will also
7145 * generate a page-flip completion irq, i.e. every modeset
7146 * is also accompanied by a spurious intel_prepare_page_flip().
7147 */
6991 spin_lock_irqsave(&dev->event_lock, flags); 7148 spin_lock_irqsave(&dev->event_lock, flags);
6992 if (intel_crtc->unpin_work) { 7149 if (intel_crtc->unpin_work)
6993 if ((++intel_crtc->unpin_work->pending) > 1) 7150 atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
6994 DRM_ERROR("Prepared flip multiple times\n");
6995 } else {
6996 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
6997 }
6998 spin_unlock_irqrestore(&dev->event_lock, flags); 7151 spin_unlock_irqrestore(&dev->event_lock, flags);
6999} 7152}
7000 7153
7154inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
7155{
7156 /* Ensure that the work item is consistent when activating it ... */
7157 smp_wmb();
7158 atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
7159 /* and that it is marked active as soon as the irq could fire. */
7160 smp_wmb();
7161}
7162
7001static int intel_gen2_queue_flip(struct drm_device *dev, 7163static int intel_gen2_queue_flip(struct drm_device *dev,
7002 struct drm_crtc *crtc, 7164 struct drm_crtc *crtc,
7003 struct drm_framebuffer *fb, 7165 struct drm_framebuffer *fb,
@@ -7031,6 +7193,8 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
7031 intel_ring_emit(ring, fb->pitches[0]); 7193 intel_ring_emit(ring, fb->pitches[0]);
7032 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); 7194 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
7033 intel_ring_emit(ring, 0); /* aux display base address, unused */ 7195 intel_ring_emit(ring, 0); /* aux display base address, unused */
7196
7197 intel_mark_page_flip_active(intel_crtc);
7034 intel_ring_advance(ring); 7198 intel_ring_advance(ring);
7035 return 0; 7199 return 0;
7036 7200
@@ -7071,6 +7235,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
7071 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); 7235 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
7072 intel_ring_emit(ring, MI_NOOP); 7236 intel_ring_emit(ring, MI_NOOP);
7073 7237
7238 intel_mark_page_flip_active(intel_crtc);
7074 intel_ring_advance(ring); 7239 intel_ring_advance(ring);
7075 return 0; 7240 return 0;
7076 7241
@@ -7117,6 +7282,8 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
7117 pf = 0; 7282 pf = 0;
7118 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 7283 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7119 intel_ring_emit(ring, pf | pipesrc); 7284 intel_ring_emit(ring, pf | pipesrc);
7285
7286 intel_mark_page_flip_active(intel_crtc);
7120 intel_ring_advance(ring); 7287 intel_ring_advance(ring);
7121 return 0; 7288 return 0;
7122 7289
@@ -7159,6 +7326,8 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
7159 pf = 0; 7326 pf = 0;
7160 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 7327 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7161 intel_ring_emit(ring, pf | pipesrc); 7328 intel_ring_emit(ring, pf | pipesrc);
7329
7330 intel_mark_page_flip_active(intel_crtc);
7162 intel_ring_advance(ring); 7331 intel_ring_advance(ring);
7163 return 0; 7332 return 0;
7164 7333
@@ -7213,6 +7382,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
7213 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); 7382 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
7214 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); 7383 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
7215 intel_ring_emit(ring, (MI_NOOP)); 7384 intel_ring_emit(ring, (MI_NOOP));
7385
7386 intel_mark_page_flip_active(intel_crtc);
7216 intel_ring_advance(ring); 7387 intel_ring_advance(ring);
7217 return 0; 7388 return 0;
7218 7389
@@ -8394,8 +8565,7 @@ static void intel_setup_outputs(struct drm_device *dev)
8394 intel_encoder_clones(encoder); 8565 intel_encoder_clones(encoder);
8395 } 8566 }
8396 8567
8397 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 8568 intel_init_pch_refclk(dev);
8398 ironlake_init_pch_refclk(dev);
8399 8569
8400 drm_helper_move_panel_connectors_to_head(dev); 8570 drm_helper_move_panel_connectors_to_head(dev);
8401} 8571}
@@ -8999,7 +9169,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
8999 9169
9000/* Scan out the current hw modeset state, sanitizes it and maps it into the drm 9170/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
9001 * and i915 state tracking structures. */ 9171 * and i915 state tracking structures. */
9002void intel_modeset_setup_hw_state(struct drm_device *dev) 9172void intel_modeset_setup_hw_state(struct drm_device *dev,
9173 bool force_restore)
9003{ 9174{
9004 struct drm_i915_private *dev_priv = dev->dev_private; 9175 struct drm_i915_private *dev_priv = dev->dev_private;
9005 enum pipe pipe; 9176 enum pipe pipe;
@@ -9098,7 +9269,15 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
9098 intel_sanitize_crtc(crtc); 9269 intel_sanitize_crtc(crtc);
9099 } 9270 }
9100 9271
9101 intel_modeset_update_staged_output_state(dev); 9272 if (force_restore) {
9273 for_each_pipe(pipe) {
9274 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
9275 intel_set_mode(&crtc->base, &crtc->base.mode,
9276 crtc->base.x, crtc->base.y, crtc->base.fb);
9277 }
9278 } else {
9279 intel_modeset_update_staged_output_state(dev);
9280 }
9102 9281
9103 intel_modeset_check_state(dev); 9282 intel_modeset_check_state(dev);
9104 9283
@@ -9111,7 +9290,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
9111 9290
9112 intel_setup_overlay(dev); 9291 intel_setup_overlay(dev);
9113 9292
9114 intel_modeset_setup_hw_state(dev); 9293 intel_modeset_setup_hw_state(dev, false);
9115} 9294}
9116 9295
9117void intel_modeset_cleanup(struct drm_device *dev) 9296void intel_modeset_cleanup(struct drm_device *dev)