author    Dave Airlie <airlied@redhat.com>  2010-10-25 19:23:22 -0400
committer Dave Airlie <airlied@redhat.com>  2010-10-25 19:23:22 -0400
commit    e3ce8a0b277438591844847ac7c89a980b4cfa6d (patch)
tree      c9bf47675403a54be2e0c54df9357d2b9c65326b /drivers/gpu/drm/i915/intel_display.c
parent    e1efc9b6ac22c605fd326b3f6af9b393325d43b4 (diff)
parent    641934069d29211baf82afb93622a426172b67b6 (diff)
Merge remote branch 'intel/drm-intel-next' of ../drm-next into drm-core-next
* 'intel/drm-intel-next' of ../drm-next: (63 commits)
drm/i915: Move gpu_write_list to per-ring
drm/i915: Invalidate the to-ring, flush the old-ring when updating domains
drm/i915/ringbuffer: Write the value passed in to the tail register
agp/intel: Restore valid PTE bit for Sandybridge after bdd3072
drm/i915: Fix flushing regression from 9af90d19f
drm/i915/sdvo: Remove unused encoding member
i915: enable AVI infoframe for intel_hdmi.c [v4]
drm/i915: Fix current fb blocking for page flip
drm/i915: IS_IRONLAKE is synonymous with gen == 5
drm/i915: Enable SandyBridge blitter ring
drm/i915/ringbuffer: Remove broken intel_fill_struct()
drm/i915/ringbuffer: Fix emit batch buffer regression from 8187a2b
drm/i915: Copy the updated reloc->presumed_offset back to the user
drm/i915: Track objects in global active list (as well as per-ring)
drm/i915: Simplify most HAS_BSD() checks
drm/i915: cache the last object lookup during pin_and_relocate()
drm/i915: Do interrupible mutex lock first to avoid locking for unreference
drivers: gpu: drm: i915: Fix a typo.
agp/intel: Also add B43.1 to list of supported devices
drm/i915: rearrange mutex acquisition for pread
...
Diffstat (limited to 'drivers/gpu/drm/i915/intel_display.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 175
1 file changed, 94 insertions, 81 deletions
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 96d08a9f3aaa..990f065374b2 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -345,8 +345,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
 static inline u32 /* units of 100MHz */
 intel_fdi_link_freq(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
+	if (IS_GEN5(dev)) {
+		struct drm_i915_private *dev_priv = dev->dev_private;
+		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
+	} else
+		return 27;
 }
 
 static const intel_limit_t intel_limits_i8xx_dvo = {
@@ -932,10 +935,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	intel_clock_t clock;
 
-	/* return directly when it is eDP */
-	if (HAS_eDP)
-		return true;
-
 	if (target < 200000) {
 		clock.n = 1;
 		clock.p1 = 2;
@@ -1719,6 +1718,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 	POSTING_READ(reg);
 	udelay(150);
 
+	/* Ironlake workaround, enable clock pointer after FDI enable*/
+	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_ENABLE);
+
 	reg = FDI_RX_IIR(pipe);
 	for (tries = 0; tries < 5; tries++) {
 		temp = I915_READ(reg);
@@ -1764,6 +1766,28 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 		DRM_ERROR("FDI train 2 fail!\n");
 
 	DRM_DEBUG_KMS("FDI train done\n");
+
+	/* enable normal train */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+	I915_WRITE(reg, temp);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	if (HAS_PCH_CPT(dev)) {
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_NONE;
+	}
+	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+	/* wait one idle pattern time */
+	POSTING_READ(reg);
+	udelay(1000);
 }
 
 static const int const snb_b_fdi_train_param [] = {
@@ -2002,8 +2026,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 
 	/* Enable panel fitting for LVDS */
 	if (dev_priv->pch_pf_size &&
-	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
-	     || HAS_eDP || intel_pch_has_edp(crtc))) {
+	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
 		/* Force use of hard-coded filter coefficients
 		 * as some pre-programmed values are broken,
 		 * e.g. x201.
@@ -2022,7 +2045,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	if ((temp & PIPECONF_ENABLE) == 0) {
 		I915_WRITE(reg, temp | PIPECONF_ENABLE);
 		POSTING_READ(reg);
-		udelay(100);
+		intel_wait_for_vblank(dev, intel_crtc->pipe);
 	}
 
 	/* configure and enable CPU plane */
@@ -2067,28 +2090,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
 	I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
 
-	/* enable normal train */
-	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
-	temp &= ~FDI_LINK_TRAIN_NONE;
-	temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
-	I915_WRITE(reg, temp);
-
-	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
-	if (HAS_PCH_CPT(dev)) {
-		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
-	} else {
-		temp &= ~FDI_LINK_TRAIN_NONE;
-		temp |= FDI_LINK_TRAIN_NONE;
-	}
-	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
-
-	/* wait one idle pattern time */
-	POSTING_READ(reg);
-	udelay(100);
-
 	/* For PCH DP, enable TRANS_DP_CTL */
 	if (HAS_PCH_CPT(dev) &&
 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
@@ -2134,7 +2135,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
 	I915_WRITE(reg, temp | TRANS_ENABLE);
 	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
-		DRM_ERROR("failed to enable transcoder\n");
+		DRM_ERROR("failed to enable transcoder %d\n", pipe);
 
 	intel_crtc_load_lut(crtc);
 	intel_update_fbc(dev);
@@ -2174,9 +2175,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	temp = I915_READ(reg);
 	if (temp & PIPECONF_ENABLE) {
 		I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
+		POSTING_READ(reg);
 		/* wait for cpu pipe off, pipe state */
-		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 50))
-			DRM_ERROR("failed to turn off cpu pipe\n");
+		intel_wait_for_pipe_off(dev, intel_crtc->pipe);
 	}
 
 	/* Disable PF */
@@ -2198,6 +2199,11 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	POSTING_READ(reg);
 	udelay(100);
 
+	/* Ironlake workaround, disable clock pointer after downing FDI */
+	I915_WRITE(FDI_RX_CHICKEN(pipe),
+		   I915_READ(FDI_RX_CHICKEN(pipe) &
+			     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+
 	/* still set train pattern 1 */
 	reg = FDI_TX_CTL(pipe);
 	temp = I915_READ(reg);
@@ -3623,7 +3629,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 			      refclk / 1000);
 	} else if (!IS_GEN2(dev)) {
 		refclk = 96000;
-		if (HAS_PCH_SPLIT(dev))
+		if (HAS_PCH_SPLIT(dev) &&
+		    (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)))
 			refclk = 120000; /* 120Mhz refclk */
 	} else {
 		refclk = 48000;
@@ -3685,16 +3692,16 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	/* FDI link */
 	if (HAS_PCH_SPLIT(dev)) {
 		int lane = 0, link_bw, bpp;
-		/* eDP doesn't require FDI link, so just set DP M/N
+		/* CPU eDP doesn't require FDI link, so just set DP M/N
 		   according to current link config */
-		if (has_edp_encoder) {
+		if (has_edp_encoder && !intel_encoder_is_pch_edp(&encoder->base)) {
 			target_clock = mode->clock;
 			intel_edp_link_config(has_edp_encoder,
 					      &lane, &link_bw);
 		} else {
-			/* DP over FDI requires target mode clock
+			/* [e]DP over FDI requires target mode clock
 			   instead of link clock */
-			if (is_dp)
+			if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
 				target_clock = mode->clock;
 			else
 				target_clock = adjusted_mode->clock;
@@ -3718,7 +3725,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 			temp |= PIPE_8BPC;
 		else
 			temp |= PIPE_6BPC;
-	} else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) {
+	} else if (has_edp_encoder) {
 		switch (dev_priv->edp.bpp/3) {
 		case 8:
 			temp |= PIPE_8BPC;
@@ -3794,13 +3801,25 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 
 				POSTING_READ(PCH_DREF_CONTROL);
 				udelay(200);
+			}
+			temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
 
-				temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
-				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+			/* Enable CPU source on CPU attached eDP */
+			if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+				if (dev_priv->lvds_use_ssc)
+					temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+				else
+					temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
 			} else {
-				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+				/* Enable SSC on PCH eDP if needed */
+				if (dev_priv->lvds_use_ssc) {
+					DRM_ERROR("enabling SSC on PCH\n");
+					temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
+				}
 			}
 			I915_WRITE(PCH_DREF_CONTROL, temp);
+			POSTING_READ(PCH_DREF_CONTROL);
+			udelay(200);
 		}
 	}
 
@@ -3835,7 +3854,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		}
 		dpll |= DPLL_DVO_HIGH_SPEED;
 	}
-	if (is_dp)
+	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
 		dpll |= DPLL_DVO_HIGH_SPEED;
 
 	/* compute bitmask from p1 value */
@@ -3934,7 +3953,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		dpll_reg = DPLL(pipe);
 	}
 
-	if (!has_edp_encoder) {
+	/* PCH eDP needs FDI, but CPU eDP does not */
+	if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
 		I915_WRITE(fp_reg, fp);
 		I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
 
@@ -4011,9 +4031,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		}
 	}
 
-	if (is_dp)
+	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
 		intel_dp_set_m_n(crtc, mode, adjusted_mode);
-	else if (HAS_PCH_SPLIT(dev)) {
+	} else if (HAS_PCH_SPLIT(dev)) {
 		/* For non-DP output, clear any trans DP clock recovery setting.*/
 		if (pipe == 0) {
 			I915_WRITE(TRANSA_DATA_M1, 0);
@@ -4028,7 +4048,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		}
 	}
 
-	if (!has_edp_encoder) {
+	if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
 		I915_WRITE(fp_reg, fp);
 		I915_WRITE(dpll_reg, dpll);
 
@@ -4122,29 +4142,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
 		I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
 
-		if (has_edp_encoder) {
+		if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
 			ironlake_set_pll_edp(crtc, adjusted_mode->clock);
-		} else {
-			/* enable FDI RX PLL too */
-			reg = FDI_RX_CTL(pipe);
-			temp = I915_READ(reg);
-			I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
-
-			POSTING_READ(reg);
-			udelay(200);
-
-			/* enable FDI TX PLL too */
-			reg = FDI_TX_CTL(pipe);
-			temp = I915_READ(reg);
-			I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
-
-			/* enable FDI RX PCDCLK */
-			reg = FDI_RX_CTL(pipe);
-			temp = I915_READ(reg);
-			I915_WRITE(reg, temp | FDI_PCDCLK);
-
-			POSTING_READ(reg);
-			udelay(200);
 		}
 	}
 
@@ -4153,7 +4152,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 
 	intel_wait_for_vblank(dev, pipe);
 
-	if (IS_IRONLAKE(dev)) {
+	if (IS_GEN5(dev)) {
 		/* enable address swizzle for tiling buffer */
 		temp = I915_READ(DISP_ARB_CTL);
 		I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
@@ -4992,11 +4991,10 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	obj_priv = to_intel_bo(work->pending_flip_obj);
-
-	/* Initial scanout buffer will have a 0 pending flip count */
-	if ((atomic_read(&obj_priv->pending_flip) == 0) ||
-	    atomic_dec_and_test(&obj_priv->pending_flip))
+	obj_priv = to_intel_bo(work->old_fb_obj);
+	atomic_clear_mask(1 << intel_crtc->plane,
+			  &obj_priv->pending_flip.counter);
+	if (atomic_read(&obj_priv->pending_flip) == 0)
 		wake_up(&dev_priv->pending_flip_queue);
 	schedule_work(&work->work);
 
@@ -5092,9 +5090,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	if (ret)
 		goto cleanup_objs;
 
-	obj_priv = to_intel_bo(obj);
-	atomic_inc(&obj_priv->pending_flip);
+	/* Block clients from rendering to the new back buffer until
+	 * the flip occurs and the object is no longer visible.
+	 */
+	atomic_add(1 << intel_crtc->plane,
+		   &to_intel_bo(work->old_fb_obj)->pending_flip);
+
 	work->pending_flip_obj = obj;
+	obj_priv = to_intel_bo(obj);
 
 	if (IS_GEN3(dev) || IS_GEN2(dev)) {
 		u32 flip_mask;
@@ -5736,7 +5739,7 @@ void intel_init_clock_gating(struct drm_device *dev)
 	if (HAS_PCH_SPLIT(dev)) {
 		uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
 
-		if (IS_IRONLAKE(dev)) {
+		if (IS_GEN5(dev)) {
 			/* Required for FBC */
 			dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE;
 			/* Required for CxSR */
@@ -5750,13 +5753,20 @@ void intel_init_clock_gating(struct drm_device *dev)
 		I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
 
 		/*
+		 * On Ibex Peak and Cougar Point, we need to disable clock
+		 * gating for the panel power sequencer or it will fail to
+		 * start up when no ports are active.
+		 */
+		I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+
+		/*
 		 * According to the spec the following bits should be set in
 		 * order to enable memory self-refresh
 		 * The bit 22/21 of 0x42004
 		 * The bit 5 of 0x42020
 		 * The bit 15 of 0x45000
 		 */
-		if (IS_IRONLAKE(dev)) {
+		if (IS_GEN5(dev)) {
 			I915_WRITE(ILK_DISPLAY_CHICKEN2,
 				   (I915_READ(ILK_DISPLAY_CHICKEN2) |
 				    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
@@ -5932,7 +5942,7 @@ static void intel_init_display(struct drm_device *dev)
 
 	/* For FIFO watermark updates */
 	if (HAS_PCH_SPLIT(dev)) {
-		if (IS_IRONLAKE(dev)) {
+		if (IS_GEN5(dev)) {
 			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
 				dev_priv->display.update_wm = ironlake_update_wm;
 			else {
@@ -6131,6 +6141,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	drm_kms_helper_poll_fini(dev);
 	mutex_lock(&dev->struct_mutex);
 
+	intel_unregister_dsm_handler();
+
+
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		/* Skip inactive CRTCs */
 		if (!crtc->fb)