path: root/drivers/gpu/drm/i915/intel_display.c
author     Linus Torvalds <torvalds@linux-foundation.org>  2010-08-05 19:02:01 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-08-05 19:02:01 -0400
commit     fc1caf6eafb30ea185720e29f7f5eccca61ecd60 (patch)
tree       666dabc25a9b02e5c05f9eba32fa6b0d8027341a  /drivers/gpu/drm/i915/intel_display.c
parent     9779714c8af09d57527f18d9aa2207dcc27a8687 (diff)
parent     96576a9e1a0cdb8a43d3af5846be0948f52b4460 (diff)
Merge branch 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (204 commits)
  agp: intel-agp: do not use PCI resources before pci_enable_device()
  agp: efficeon-agp: do not use PCI resources before pci_enable_device()
  drm: kill BKL from common code
  drm/kms: Simplify setup of the initial I2C encoder config.
  drm,io-mapping: Specify slot to use for atomic mappings
  drm/radeon/kms: only expose underscan on avivo chips
  drm/radeon: add new pci ids
  drm: Cleanup after failing to create master->unique and dev->name
  drm/radeon: tone down overchatty acpi debug messages.
  drm/radeon/kms: enable underscan option for digital connectors
  drm/radeon/kms: fix calculation of h/v scaling factors
  drm/radeon/kms/igp: sideport is AMD only
  drm/radeon/kms: handle the case of no active displays properly in the bandwidth code
  drm: move ttm global code to core drm
  drm/i915: Clear the Ironlake dithering flags when the pipe doesn't want it.
  drm/radeon/kms: make sure HPD is set to NONE on analog-only connectors
  drm/radeon/kms: make sure rio_mem is valid before unmapping it
  drm/agp/i915: trim stolen space to 32M
  drm/i915: Unset cursor if out-of-bounds upon mode change (v4)
  drm/i915: Unreference object not handle on creation
  ...
Diffstat (limited to 'drivers/gpu/drm/i915/intel_display.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 586
1 files changed, 423 insertions, 163 deletions
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 714bf539918..1e5e0d379fa 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -33,6 +33,7 @@
 #include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
+#include "i915_trace.h"
 #include "drm_dp_helper.h"
 
 #include "drm_crtc_helper.h"
@@ -42,6 +43,7 @@
 bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
 static void intel_update_watermarks(struct drm_device *dev);
 static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule);
+static void intel_crtc_update_cursor(struct drm_crtc *crtc);
 
 typedef struct {
         /* given values */
@@ -322,6 +324,9 @@ struct intel_limit {
 #define IRONLAKE_DP_P1_MIN         1
 #define IRONLAKE_DP_P1_MAX         2
 
+/* FDI */
+#define IRONLAKE_FDI_FREQ          2700000 /* in kHz for mode->clock */
+
 static bool
 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
                     int target, int refclk, intel_clock_t *best_clock);
@@ -1125,6 +1130,67 @@ static bool g4x_fbc_enabled(struct drm_device *dev)
         return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
+static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+        struct drm_device *dev = crtc->dev;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_framebuffer *fb = crtc->fb;
+        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+        struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+        int plane = (intel_crtc->plane == 0) ? DPFC_CTL_PLANEA :
+                                               DPFC_CTL_PLANEB;
+        unsigned long stall_watermark = 200;
+        u32 dpfc_ctl;
+
+        dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
+        dev_priv->cfb_fence = obj_priv->fence_reg;
+        dev_priv->cfb_plane = intel_crtc->plane;
+
+        dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+        dpfc_ctl &= DPFC_RESERVED;
+        dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
+        if (obj_priv->tiling_mode != I915_TILING_NONE) {
+                dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence);
+                I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
+        } else {
+                I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY);
+        }
+
+        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+        I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+                   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+                   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+        I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
+        I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID);
+        /* enable it... */
+        I915_WRITE(ILK_DPFC_CONTROL, I915_READ(ILK_DPFC_CONTROL) |
+                   DPFC_CTL_EN);
+
+        DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+}
+
+void ironlake_disable_fbc(struct drm_device *dev)
+{
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        u32 dpfc_ctl;
+
+        /* Disable compression */
+        dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+        dpfc_ctl &= ~DPFC_CTL_EN;
+        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+        intel_wait_for_vblank(dev);
+
+        DRM_DEBUG_KMS("disabled FBC\n");
+}
+
+static bool ironlake_fbc_enabled(struct drm_device *dev)
+{
+        struct drm_i915_private *dev_priv = dev->dev_private;
+
+        return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
 bool intel_fbc_enabled(struct drm_device *dev)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1286,7 +1352,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
 
         switch (obj_priv->tiling_mode) {
         case I915_TILING_NONE:
-                alignment = 64 * 1024;
+                if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+                        alignment = 128 * 1024;
+                else if (IS_I965G(dev))
+                        alignment = 4 * 1024;
+                else
+                        alignment = 64 * 1024;
                 break;
         case I915_TILING_X:
                 /* pin() will align the object as required by fence */
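The untiled case above now picks the pin alignment per generation instead of always using 64K. Restated as a stand-alone helper purely for illustration (not part of the patch; IS_BROADWATER/IS_CRESTLINE/IS_I965G are the driver's existing platform checks):

    /* Illustrative only: mirrors the new policy for untiled scanout buffers. */
    static u32 untiled_fb_alignment(struct drm_device *dev)
    {
            if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
                    return 128 * 1024;   /* these 965 variants want 128K */
            else if (IS_I965G(dev))
                    return 4 * 1024;     /* other 965-class parts only need page alignment */
            else
                    return 64 * 1024;    /* pre-965 default, as before */
    }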
@@ -1653,6 +1724,15 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
         int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
         u32 temp, tries = 0;
 
+        /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
+           for train result */
+        temp = I915_READ(fdi_rx_imr_reg);
+        temp &= ~FDI_RX_SYMBOL_LOCK;
+        temp &= ~FDI_RX_BIT_LOCK;
+        I915_WRITE(fdi_rx_imr_reg, temp);
+        I915_READ(fdi_rx_imr_reg);
+        udelay(150);
+
         /* enable CPU FDI TX and PCH FDI RX */
         temp = I915_READ(fdi_tx_reg);
         temp |= FDI_TX_ENABLE;
@@ -1670,16 +1750,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
         I915_READ(fdi_rx_reg);
         udelay(150);
 
-        /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
-           for train result */
-        temp = I915_READ(fdi_rx_imr_reg);
-        temp &= ~FDI_RX_SYMBOL_LOCK;
-        temp &= ~FDI_RX_BIT_LOCK;
-        I915_WRITE(fdi_rx_imr_reg, temp);
-        I915_READ(fdi_rx_imr_reg);
-        udelay(150);
-
-        for (;;) {
+        for (tries = 0; tries < 5; tries++) {
                 temp = I915_READ(fdi_rx_iir_reg);
                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
 
@@ -1689,14 +1760,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
                                    temp | FDI_RX_BIT_LOCK);
                         break;
                 }
-
-                tries++;
-
-                if (tries > 5) {
-                        DRM_DEBUG_KMS("FDI train 1 fail!\n");
-                        break;
-                }
         }
+        if (tries == 5)
+                DRM_DEBUG_KMS("FDI train 1 fail!\n");
 
         /* Train 2 */
         temp = I915_READ(fdi_tx_reg);
@@ -1712,7 +1778,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 
         tries = 0;
 
-        for (;;) {
+        for (tries = 0; tries < 5; tries++) {
                 temp = I915_READ(fdi_rx_iir_reg);
                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
 
@@ -1722,14 +1788,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
                         DRM_DEBUG_KMS("FDI train 2 done.\n");
                         break;
                 }
-
-                tries++;
-
-                if (tries > 5) {
-                        DRM_DEBUG_KMS("FDI train 2 fail!\n");
-                        break;
-                }
         }
+        if (tries == 5)
+                DRM_DEBUG_KMS("FDI train 2 fail!\n");
 
         DRM_DEBUG_KMS("FDI train done\n");
 }
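Both training stages now use a bounded loop instead of an open-ended for (;;) with an in-loop counter check, and the failure is reported once after the loop; the old loop polled up to six times before giving up, the new one polls five and logs the failure afterwards. The shape of the pattern, as a sketch with check_lock() standing in for the FDI_RX_IIR symbol/bit lock test:

    int tries;

    for (tries = 0; tries < 5; tries++) {
            if (check_lock())        /* placeholder for the FDI_RX_IIR test */
                    break;
    }
    if (tries == 5)
            DRM_DEBUG_KMS("FDI train 1 fail!\n");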
@@ -1754,6 +1815,15 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
         int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
         u32 temp, i;
 
+        /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
+           for train result */
+        temp = I915_READ(fdi_rx_imr_reg);
+        temp &= ~FDI_RX_SYMBOL_LOCK;
+        temp &= ~FDI_RX_BIT_LOCK;
+        I915_WRITE(fdi_rx_imr_reg, temp);
+        I915_READ(fdi_rx_imr_reg);
+        udelay(150);
+
         /* enable CPU FDI TX and PCH FDI RX */
         temp = I915_READ(fdi_tx_reg);
         temp |= FDI_TX_ENABLE;
@@ -1779,15 +1849,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
         I915_READ(fdi_rx_reg);
         udelay(150);
 
-        /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
-           for train result */
-        temp = I915_READ(fdi_rx_imr_reg);
-        temp &= ~FDI_RX_SYMBOL_LOCK;
-        temp &= ~FDI_RX_BIT_LOCK;
-        I915_WRITE(fdi_rx_imr_reg, temp);
-        I915_READ(fdi_rx_imr_reg);
-        udelay(150);
-
         for (i = 0; i < 4; i++ ) {
                 temp = I915_READ(fdi_tx_reg);
                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -1942,7 +2003,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
         }
 
         /* Enable panel fitting for LVDS */
-        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
+            || HAS_eDP || intel_pch_has_edp(crtc)) {
                 temp = I915_READ(pf_ctl_reg);
                 I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3);
 
@@ -2037,9 +2099,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
                 reg = I915_READ(trans_dp_ctl);
                 reg &= ~TRANS_DP_PORT_SEL_MASK;
                 reg = TRANS_DP_OUTPUT_ENABLE |
-                      TRANS_DP_ENH_FRAMING |
-                      TRANS_DP_VSYNC_ACTIVE_HIGH |
-                      TRANS_DP_HSYNC_ACTIVE_HIGH;
+                      TRANS_DP_ENH_FRAMING;
+
+                if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+                        reg |= TRANS_DP_HSYNC_ACTIVE_HIGH;
+                if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+                        reg |= TRANS_DP_VSYNC_ACTIVE_HIGH;
 
                 switch (intel_trans_dp_port_sel(crtc)) {
                 case PCH_DP_B:
@@ -2079,6 +2144,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 
                 intel_crtc_load_lut(crtc);
 
+                intel_update_fbc(crtc, &crtc->mode);
+
                 break;
         case DRM_MODE_DPMS_OFF:
                 DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
@@ -2093,6 +2160,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
                         I915_READ(dspbase_reg);
                 }
 
+                if (dev_priv->cfb_plane == plane &&
+                    dev_priv->display.disable_fbc)
+                        dev_priv->display.disable_fbc(dev);
+
                 i915_disable_vga(dev);
 
                 /* disable cpu pipe, disable after all planes disabled */
@@ -2472,8 +2543,8 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
         struct drm_device *dev = crtc->dev;
         if (HAS_PCH_SPLIT(dev)) {
                 /* FDI link clock is fixed at 2.7G */
-                if (mode->clock * 3 > 27000 * 4)
-                        return MODE_CLOCK_HIGH;
+                if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
+                        return false;
         }
         return true;
 }
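intel_crtc_mode_fixup() is a bool hook, so the old return MODE_CLOCK_HIGH (a non-zero drm_mode_status value) read as "fixup succeeded" and the bandwidth limit never actually rejected anything; returning false makes the check effective, and the bare 27000 is replaced by the new IRONLAKE_FDI_FREQ define. Rough arithmetic for the new limit (mode->clock is in kHz, 3 bytes per pixel; the example dot clock is just an illustration):

    /* e.g. a mode with a 154000 kHz (~154 MHz) dot clock:       */
    /*   154000 * 3             =    462,000                     */
    /*   IRONLAKE_FDI_FREQ * 4  = 2700000 * 4 = 10,800,000       */
    /* 462,000 <= 10,800,000, so the mode passes the FDI check.  */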
@@ -2655,6 +2726,20 @@ static struct intel_watermark_params g4x_wm_info = {
         2,
         G4X_FIFO_LINE_SIZE,
 };
+static struct intel_watermark_params g4x_cursor_wm_info = {
+        I965_CURSOR_FIFO,
+        I965_CURSOR_MAX_WM,
+        I965_CURSOR_DFT_WM,
+        2,
+        G4X_FIFO_LINE_SIZE,
+};
+static struct intel_watermark_params i965_cursor_wm_info = {
+        I965_CURSOR_FIFO,
+        I965_CURSOR_MAX_WM,
+        I965_CURSOR_DFT_WM,
+        2,
+        I915_FIFO_LINE_SIZE,
+};
 static struct intel_watermark_params i945_wm_info = {
         I945_FIFO_SIZE,
         I915_MAX_WM,
@@ -2692,6 +2777,14 @@ static struct intel_watermark_params ironlake_display_wm_info = {
         ILK_FIFO_LINE_SIZE
 };
 
+static struct intel_watermark_params ironlake_cursor_wm_info = {
+        ILK_CURSOR_FIFO,
+        ILK_CURSOR_MAXWM,
+        ILK_CURSOR_DFTWM,
+        2,
+        ILK_FIFO_LINE_SIZE
+};
+
 static struct intel_watermark_params ironlake_display_srwm_info = {
         ILK_DISPLAY_SR_FIFO,
         ILK_DISPLAY_MAX_SRWM,
@@ -2741,7 +2834,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
          */
         entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
                 1000;
-        entries_required /= wm->cacheline_size;
+        entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
 
         DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required);
 
@@ -2752,8 +2845,14 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
         /* Don't promote wm_size to unsigned... */
         if (wm_size > (long)wm->max_wm)
                 wm_size = wm->max_wm;
-        if (wm_size <= 0)
+        if (wm_size <= 0) {
                 wm_size = wm->default_wm;
+                DRM_ERROR("Insufficient FIFO for plane, expect flickering:"
+                          " entries required = %ld, available = %lu.\n",
+                          entries_required + wm->guard_size,
+                          wm->fifo_size);
+        }
+
         return wm_size;
 }
 
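Two behavioural changes in intel_calculate_wm(): the entry count is now rounded up rather than truncated, and an undersized FIFO is reported instead of being silently papered over by the default watermark. Worked numbers for the rounding change (values invented for the arithmetic):

    /* clock_in_khz = 100000, pixel_size = 4, latency_ns = 400, cacheline = 64:  */
    /*   entries_required = (100000 / 1000) * 4 * 400 / 1000 = 160               */
    /*   old:  160 / 64              = 2   (truncates, under-estimates)          */
    /*   new:  DIV_ROUND_UP(160, 64) = 3   (rounds up, never under-reserves)     */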
@@ -2862,11 +2961,9 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
         uint32_t dsparb = I915_READ(DSPARB);
         int size;
 
-        if (plane == 0)
-                size = dsparb & 0x7f;
-        else
-                size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
-                        (dsparb & 0x7f);
+        size = dsparb & 0x7f;
+        if (plane)
+                size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
 
         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
                       plane ? "B" : "A", size);
@@ -2880,11 +2977,9 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane)
         uint32_t dsparb = I915_READ(DSPARB);
         int size;
 
-        if (plane == 0)
-                size = dsparb & 0x1ff;
-        else
-                size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) -
-                        (dsparb & 0x1ff);
+        size = dsparb & 0x1ff;
+        if (plane)
+                size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
         size >>= 1; /* Convert to cachelines */
 
         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
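Both FIFO-size helpers are simplified to compute plane A's share first and derive plane B's share from it. A reading of the i9xx version, with the field meanings as implied by the code rather than restated from hardware documentation:

    /* If DSPARB's low 7 bits read 64 and the CSTART field reads 96:   */
    /*   plane A: size = 64                                            */
    /*   plane B: size = 96 - 64 = 32                                  */
    /* i.e. plane B owns the space between the end of A and the start  */
    /* of the next region; old and new code compute the same values.   */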
@@ -2925,7 +3020,8 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
 }
 
 static void pineview_update_wm(struct drm_device *dev, int planea_clock,
-                          int planeb_clock, int sr_hdisplay, int pixel_size)
+                          int planeb_clock, int sr_hdisplay, int unused,
+                          int pixel_size)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
         u32 reg;
@@ -2990,7 +3086,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
 }
 
 static void g4x_update_wm(struct drm_device *dev, int planea_clock,
-                          int planeb_clock, int sr_hdisplay, int pixel_size)
+                          int planeb_clock, int sr_hdisplay, int sr_htotal,
+                          int pixel_size)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
         int total_size, cacheline_size;
@@ -3014,12 +3111,12 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
          */
         entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) /
                 1000;
-        entries_required /= G4X_FIFO_LINE_SIZE;
+        entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE);
         planea_wm = entries_required + planea_params.guard_size;
 
         entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) /
                 1000;
-        entries_required /= G4X_FIFO_LINE_SIZE;
+        entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE);
         planeb_wm = entries_required + planeb_params.guard_size;
 
         cursora_wm = cursorb_wm = 16;
@@ -3033,13 +3130,24 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
                 static const int sr_latency_ns = 12000;
 
                 sr_clock = planea_clock ? planea_clock : planeb_clock;
-                line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+                line_time_us = ((sr_htotal * 1000) / sr_clock);
 
                 /* Use ns/us then divide to preserve precision */
-                sr_entries = (((sr_latency_ns / line_time_us) + 1) *
-                              pixel_size * sr_hdisplay) / 1000;
-                sr_entries = roundup(sr_entries / cacheline_size, 1);
-                DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
+                sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+                        pixel_size * sr_hdisplay;
+                sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
+
+                entries_required = (((sr_latency_ns / line_time_us) +
+                                     1000) / 1000) * pixel_size * 64;
+                entries_required = DIV_ROUND_UP(entries_required,
+                                                g4x_cursor_wm_info.cacheline_size);
+                cursor_sr = entries_required + g4x_cursor_wm_info.guard_size;
+
+                if (cursor_sr > g4x_cursor_wm_info.max_wm)
+                        cursor_sr = g4x_cursor_wm_info.max_wm;
+                DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+                              "cursor %d\n", sr_entries, cursor_sr);
+
                 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
         } else {
                 /* Turn off self refresh if both pipes are enabled */
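The self-refresh math in g4x_update_wm() (and in the i965/i9xx variants below) switches sr_hdisplay for sr_htotal when computing the line time, and replaces the old "+ 1 ... / 1000" expression with the "(x + 1000) / 1000" idiom so the ns/µs division is rounded to whole lines before multiplying by the surface width (a fixed 64 pixels for the new cursor term). Example with invented numbers:

    /* sr_latency_ns = 12000, line_time_us = 18:                        */
    /*   12000 / 18          = 666   (latency in thousandths of a line) */
    /*   (666 + 1000) / 1000 = 1     -> buffer at least one full line   */
    /* cursor entries = 1 line * 64 pixels * pixel_size, then           */
    /* DIV_ROUND_UP by the cacheline size and add the guard entries.    */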
@@ -3064,11 +3172,13 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
 }
 
 static void i965_update_wm(struct drm_device *dev, int planea_clock,
-                           int planeb_clock, int sr_hdisplay, int pixel_size)
+                           int planeb_clock, int sr_hdisplay, int sr_htotal,
+                           int pixel_size)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
         unsigned long line_time_us;
         int sr_clock, sr_entries, srwm = 1;
+        int cursor_sr = 16;
 
         /* Calc sr entries for one plane configs */
         if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
@@ -3076,17 +3186,31 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
                 static const int sr_latency_ns = 12000;
 
                 sr_clock = planea_clock ? planea_clock : planeb_clock;
-                line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+                line_time_us = ((sr_htotal * 1000) / sr_clock);
 
                 /* Use ns/us then divide to preserve precision */
-                sr_entries = (((sr_latency_ns / line_time_us) + 1) *
-                              pixel_size * sr_hdisplay) / 1000;
-                sr_entries = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1);
+                sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+                        pixel_size * sr_hdisplay;
+                sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE);
                 DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
-                srwm = I945_FIFO_SIZE - sr_entries;
+                srwm = I965_FIFO_SIZE - sr_entries;
                 if (srwm < 0)
                         srwm = 1;
-                srwm &= 0x3f;
+                srwm &= 0x1ff;
+
+                sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+                        pixel_size * 64;
+                sr_entries = DIV_ROUND_UP(sr_entries,
+                                          i965_cursor_wm_info.cacheline_size);
+                cursor_sr = i965_cursor_wm_info.fifo_size -
+                        (sr_entries + i965_cursor_wm_info.guard_size);
+
+                if (cursor_sr > i965_cursor_wm_info.max_wm)
+                        cursor_sr = i965_cursor_wm_info.max_wm;
+
+                DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+                              "cursor %d\n", srwm, cursor_sr);
+
                 if (IS_I965GM(dev))
                         I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
         } else {
@@ -3103,10 +3227,13 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
         I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) |
                    (8 << 0));
         I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
+        /* update cursor SR watermark */
+        I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
 static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
-                           int planeb_clock, int sr_hdisplay, int pixel_size)
+                           int planeb_clock, int sr_hdisplay, int sr_htotal,
+                           int pixel_size)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
         uint32_t fwater_lo;
@@ -3151,12 +3278,12 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
                 static const int sr_latency_ns = 6000;
 
                 sr_clock = planea_clock ? planea_clock : planeb_clock;
-                line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+                line_time_us = ((sr_htotal * 1000) / sr_clock);
 
                 /* Use ns/us then divide to preserve precision */
-                sr_entries = (((sr_latency_ns / line_time_us) + 1) *
-                              pixel_size * sr_hdisplay) / 1000;
-                sr_entries = roundup(sr_entries / cacheline_size, 1);
+                sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+                        pixel_size * sr_hdisplay;
+                sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
                 DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
                 srwm = total_size - sr_entries;
                 if (srwm < 0)
@@ -3194,7 +3321,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
 }
 
 static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
-                           int unused2, int pixel_size)
+                           int unused2, int unused3, int pixel_size)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
         uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff;
@@ -3212,9 +3339,11 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
 }
 
 #define ILK_LP0_PLANE_LATENCY          700
+#define ILK_LP0_CURSOR_LATENCY         1300
 
 static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
-                       int planeb_clock, int sr_hdisplay, int pixel_size)
+                       int planeb_clock, int sr_hdisplay, int sr_htotal,
+                       int pixel_size)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
         int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -3222,20 +3351,48 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
         unsigned long line_time_us;
         int sr_clock, entries_required;
         u32 reg_value;
+        int line_count;
+        int planea_htotal = 0, planeb_htotal = 0;
+        struct drm_crtc *crtc;
+        struct intel_crtc *intel_crtc;
+
+        /* Need htotal for all active display plane */
+        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+                intel_crtc = to_intel_crtc(crtc);
+                if (crtc->enabled) {
+                        if (intel_crtc->plane == 0)
+                                planea_htotal = crtc->mode.htotal;
+                        else
+                                planeb_htotal = crtc->mode.htotal;
+                }
+        }
 
         /* Calculate and update the watermark for plane A */
         if (planea_clock) {
                 entries_required = ((planea_clock / 1000) * pixel_size *
                                     ILK_LP0_PLANE_LATENCY) / 1000;
                 entries_required = DIV_ROUND_UP(entries_required,
                                                 ironlake_display_wm_info.cacheline_size);
                 planea_wm = entries_required +
                             ironlake_display_wm_info.guard_size;
 
                 if (planea_wm > (int)ironlake_display_wm_info.max_wm)
                         planea_wm = ironlake_display_wm_info.max_wm;
 
-                cursora_wm = 16;
+                /* Use the large buffer method to calculate cursor watermark */
+                line_time_us = (planea_htotal * 1000) / planea_clock;
+
+                /* Use ns/us then divide to preserve precision */
+                line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
+
+                /* calculate the cursor watermark for cursor A */
+                entries_required = line_count * 64 * pixel_size;
+                entries_required = DIV_ROUND_UP(entries_required,
+                                                ironlake_cursor_wm_info.cacheline_size);
+                cursora_wm = entries_required + ironlake_cursor_wm_info.guard_size;
+                if (cursora_wm > ironlake_cursor_wm_info.max_wm)
+                        cursora_wm = ironlake_cursor_wm_info.max_wm;
+
                 reg_value = I915_READ(WM0_PIPEA_ILK);
                 reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
                 reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
@@ -3249,14 +3406,27 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
                 entries_required = ((planeb_clock / 1000) * pixel_size *
                                     ILK_LP0_PLANE_LATENCY) / 1000;
                 entries_required = DIV_ROUND_UP(entries_required,
                                                 ironlake_display_wm_info.cacheline_size);
                 planeb_wm = entries_required +
                             ironlake_display_wm_info.guard_size;
 
                 if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
                         planeb_wm = ironlake_display_wm_info.max_wm;
 
-                cursorb_wm = 16;
+                /* Use the large buffer method to calculate cursor watermark */
+                line_time_us = (planeb_htotal * 1000) / planeb_clock;
+
+                /* Use ns/us then divide to preserve precision */
+                line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
+
+                /* calculate the cursor watermark for cursor B */
+                entries_required = line_count * 64 * pixel_size;
+                entries_required = DIV_ROUND_UP(entries_required,
+                                                ironlake_cursor_wm_info.cacheline_size);
+                cursorb_wm = entries_required + ironlake_cursor_wm_info.guard_size;
+                if (cursorb_wm > ironlake_cursor_wm_info.max_wm)
+                        cursorb_wm = ironlake_cursor_wm_info.max_wm;
+
                 reg_value = I915_READ(WM0_PIPEB_ILK);
                 reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
                 reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) |
@@ -3271,12 +3441,12 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
          * display plane is used.
          */
         if (!planea_clock || !planeb_clock) {
-                int line_count;
+
                 /* Read the self-refresh latency. The unit is 0.5us */
                 int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
 
                 sr_clock = planea_clock ? planea_clock : planeb_clock;
-                line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+                line_time_us = ((sr_htotal * 1000) / sr_clock);
 
                 /* Use ns/us then divide to preserve precision */
                 line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
@@ -3285,14 +3455,14 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
                 /* calculate the self-refresh watermark for display plane */
                 entries_required = line_count * sr_hdisplay * pixel_size;
                 entries_required = DIV_ROUND_UP(entries_required,
                                                 ironlake_display_srwm_info.cacheline_size);
                 sr_wm = entries_required +
                         ironlake_display_srwm_info.guard_size;
 
                 /* calculate the self-refresh watermark for display cursor */
                 entries_required = line_count * pixel_size * 64;
                 entries_required = DIV_ROUND_UP(entries_required,
                                                 ironlake_cursor_srwm_info.cacheline_size);
                 cursor_wm = entries_required +
                         ironlake_cursor_srwm_info.guard_size;
 
@@ -3336,6 +3506,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
  *   bytes per pixel
  * where
  *   line time = htotal / dotclock
+ *   surface width = hdisplay for normal plane and 64 for cursor
  * and latency is assumed to be high, as above.
  *
  * The final value programmed to the register should always be rounded up,
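The comment's "large buffer" formula, as used by the new cursor-watermark code above, works out to roughly the following (a paraphrase of the fragment visible here, not the exact register programming):

    entries = ceil(latency / line_time) * surface_width * bytes_per_pixel / cacheline_size
    wm      = entries + guard_size

with surface_width being hdisplay for a display plane and the fixed 64-pixel span for the cursor, which is why the cursor paths multiply line_count by 64 * pixel_size.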
@@ -3352,6 +3523,7 @@ static void intel_update_watermarks(struct drm_device *dev)
         int sr_hdisplay = 0;
         unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
         int enabled = 0, pixel_size = 0;
+        int sr_htotal = 0;
 
         if (!dev_priv->display.update_wm)
                 return;
@@ -3372,6 +3544,7 @@ static void intel_update_watermarks(struct drm_device *dev)
                 }
                 sr_hdisplay = crtc->mode.hdisplay;
                 sr_clock = crtc->mode.clock;
+                sr_htotal = crtc->mode.htotal;
                 if (crtc->fb)
                         pixel_size = crtc->fb->bits_per_pixel / 8;
                 else
@@ -3383,7 +3556,7 @@ static void intel_update_watermarks(struct drm_device *dev)
                 return;
 
         dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
-                                    sr_hdisplay, pixel_size);
+                                    sr_hdisplay, sr_htotal, pixel_size);
 }
 
 static int intel_crtc_mode_set(struct drm_crtc *crtc,
@@ -3502,6 +3675,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                 return -EINVAL;
         }
 
+        /* Ensure that the cursor is valid for the new mode before changing... */
+        intel_crtc_update_cursor(crtc);
+
         if (is_lvds && dev_priv->lvds_downclock_avail) {
                 has_reduced_clock = limit->find_pll(limit, crtc,
                                                     dev_priv->lvds_downclock,
@@ -3568,7 +3744,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                         temp |= PIPE_8BPC;
                 else
                         temp |= PIPE_6BPC;
-        } else if (is_edp) {
+        } else if (is_edp || (is_dp && intel_pch_has_edp(crtc))) {
                 switch (dev_priv->edp_bpp/3) {
                 case 8:
                         temp |= PIPE_8BPC;
@@ -3811,6 +3987,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                 udelay(150);
         }
 
+        if (HAS_PCH_SPLIT(dev)) {
+                pipeconf &= ~PIPE_ENABLE_DITHER;
+                pipeconf &= ~PIPE_DITHER_TYPE_MASK;
+        }
+
         /* The LVDS pin pair needs to be on before the DPLLs are enabled.
          * This is an exception to the general rule that mode_set doesn't turn
          * things on.
@@ -3853,16 +4034,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                 if (dev_priv->lvds_dither) {
                         if (HAS_PCH_SPLIT(dev)) {
                                 pipeconf |= PIPE_ENABLE_DITHER;
-                                pipeconf &= ~PIPE_DITHER_TYPE_MASK;
                                 pipeconf |= PIPE_DITHER_TYPE_ST01;
                         } else
                                 lvds |= LVDS_ENABLE_DITHER;
                 } else {
-                        if (HAS_PCH_SPLIT(dev)) {
-                                pipeconf &= ~PIPE_ENABLE_DITHER;
-                                pipeconf &= ~PIPE_DITHER_TYPE_MASK;
-                        } else
+                        if (!HAS_PCH_SPLIT(dev)) {
                                 lvds &= ~LVDS_ENABLE_DITHER;
+                        }
                 }
         }
         I915_WRITE(lvds_reg, lvds);
@@ -4038,6 +4216,85 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
         }
 }
 
+/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
+static void intel_crtc_update_cursor(struct drm_crtc *crtc)
+{
+        struct drm_device *dev = crtc->dev;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+        int pipe = intel_crtc->pipe;
+        int x = intel_crtc->cursor_x;
+        int y = intel_crtc->cursor_y;
+        uint32_t base, pos;
+        bool visible;
+
+        pos = 0;
+
+        if (crtc->fb) {
+                base = intel_crtc->cursor_addr;
+                if (x > (int) crtc->fb->width)
+                        base = 0;
+
+                if (y > (int) crtc->fb->height)
+                        base = 0;
+        } else
+                base = 0;
+
+        if (x < 0) {
+                if (x + intel_crtc->cursor_width < 0)
+                        base = 0;
+
+                pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
+                x = -x;
+        }
+        pos |= x << CURSOR_X_SHIFT;
+
+        if (y < 0) {
+                if (y + intel_crtc->cursor_height < 0)
+                        base = 0;
+
+                pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
+                y = -y;
+        }
+        pos |= y << CURSOR_Y_SHIFT;
+
+        visible = base != 0;
+        if (!visible && !intel_crtc->cursor_visble)
+                return;
+
+        I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos);
+        if (intel_crtc->cursor_visble != visible) {
+                uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR);
+                if (base) {
+                        /* Hooray for CUR*CNTR differences */
+                        if (IS_MOBILE(dev) || IS_I9XX(dev)) {
+                                cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
+                                cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+                                cntl |= pipe << 28; /* Connect to correct pipe */
+                        } else {
+                                cntl &= ~(CURSOR_FORMAT_MASK);
+                                cntl |= CURSOR_ENABLE;
+                                cntl |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
+                        }
+                } else {
+                        if (IS_MOBILE(dev) || IS_I9XX(dev)) {
+                                cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
+                                cntl |= CURSOR_MODE_DISABLE;
+                        } else {
+                                cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
+                        }
+                }
+                I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl);
+
+                intel_crtc->cursor_visble = visible;
+        }
+        /* and commit changes on next vblank */
+        I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base);
+
+        if (visible)
+                intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
+}
+
 static int intel_crtc_cursor_set(struct drm_crtc *crtc,
                                  struct drm_file *file_priv,
                                  uint32_t handle,
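The new helper centralises cursor programming: CURPOS takes sign/magnitude coordinates, and the base register is forced to 0 (cursor hidden) once the cursor lies entirely outside the framebuffer, which is the hang case called out in the function comment and in the merge summary ("Unset cursor if out-of-bounds upon mode change"). A packing example taken directly from the code above:

    /* A cursor at (-10, 20):                                  */
    /*   pos  = CURSOR_POS_SIGN << CURSOR_X_SHIFT;  x = 10;    */
    /*   pos |= 10 << CURSOR_X_SHIFT;                          */
    /*   pos |= 20 << CURSOR_Y_SHIFT;                          */
    /* base stays non-zero because only part of the cursor is  */
    /* off-screen (x + cursor_width >= 0).                     */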
@@ -4048,11 +4305,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
         struct drm_gem_object *bo;
         struct drm_i915_gem_object *obj_priv;
-        int pipe = intel_crtc->pipe;
-        uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
-        uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
-        uint32_t temp = I915_READ(control);
-        size_t addr;
+        uint32_t addr;
         int ret;
 
         DRM_DEBUG_KMS("\n");
@@ -4060,12 +4313,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
         /* if we want to turn off the cursor ignore width and height */
         if (!handle) {
                 DRM_DEBUG_KMS("cursor off\n");
-                if (IS_MOBILE(dev) || IS_I9XX(dev)) {
-                        temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
-                        temp |= CURSOR_MODE_DISABLE;
-                } else {
-                        temp &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
-                }
                 addr = 0;
                 bo = NULL;
                 mutex_lock(&dev->struct_mutex);
@@ -4107,7 +4354,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 
                 addr = obj_priv->gtt_offset;
         } else {
-                ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
+                ret = i915_gem_attach_phys_object(dev, bo,
+                                                  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
                 if (ret) {
                         DRM_ERROR("failed to attach phys object\n");
                         goto fail_locked;
@@ -4118,21 +4366,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
         if (!IS_I9XX(dev))
                 I915_WRITE(CURSIZE, (height << 12) | width);
 
-        /* Hooray for CUR*CNTR differences */
-        if (IS_MOBILE(dev) || IS_I9XX(dev)) {
-                temp &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
-                temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
-                temp |= (pipe << 28); /* Connect to correct pipe */
-        } else {
-                temp &= ~(CURSOR_FORMAT_MASK);
-                temp |= CURSOR_ENABLE;
-                temp |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
-        }
-
 finish:
-        I915_WRITE(control, temp);
-        I915_WRITE(base, addr);
-
         if (intel_crtc->cursor_bo) {
                 if (dev_priv->info->cursor_needs_physical) {
                         if (intel_crtc->cursor_bo != bo)
@@ -4146,6 +4380,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 
         intel_crtc->cursor_addr = addr;
         intel_crtc->cursor_bo = bo;
+        intel_crtc->cursor_width = width;
+        intel_crtc->cursor_height = height;
+
+        intel_crtc_update_cursor(crtc);
 
         return 0;
 fail_unpin:
@@ -4159,34 +4397,12 @@ fail:
 
 static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 {
-        struct drm_device *dev = crtc->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-        struct intel_framebuffer *intel_fb;
-        int pipe = intel_crtc->pipe;
-        uint32_t temp = 0;
-        uint32_t adder;
 
-        if (crtc->fb) {
-                intel_fb = to_intel_framebuffer(crtc->fb);
-                intel_mark_busy(dev, intel_fb->obj);
-        }
+        intel_crtc->cursor_x = x;
+        intel_crtc->cursor_y = y;
 
-        if (x < 0) {
-                temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
-                x = -x;
-        }
-        if (y < 0) {
-                temp |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
-                y = -y;
-        }
-
-        temp |= x << CURSOR_X_SHIFT;
-        temp |= y << CURSOR_Y_SHIFT;
-
-        adder = intel_crtc->cursor_addr;
-        I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
-        I915_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
+        intel_crtc_update_cursor(crtc);
 
         return 0;
 }
@@ -4770,6 +4986,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
             atomic_dec_and_test(&obj_priv->pending_flip))
                 DRM_WAKEUP(&dev_priv->pending_flip_queue);
         schedule_work(&work->work);
+
+        trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
 }
 
 void intel_finish_page_flip(struct drm_device *dev, int pipe)
@@ -4847,27 +5065,22 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
         mutex_lock(&dev->struct_mutex);
         ret = intel_pin_and_fence_fb_obj(dev, obj);
-        if (ret != 0) {
-                mutex_unlock(&dev->struct_mutex);
-
-                spin_lock_irqsave(&dev->event_lock, flags);
-                intel_crtc->unpin_work = NULL;
-                spin_unlock_irqrestore(&dev->event_lock, flags);
-
-                kfree(work);
-
-                DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
-                                 to_intel_bo(obj));
-                return ret;
-        }
+        if (ret)
+                goto cleanup_work;
 
         /* Reference the objects for the scheduled work. */
         drm_gem_object_reference(work->old_fb_obj);
         drm_gem_object_reference(obj);
 
         crtc->fb = fb;
-        i915_gem_object_flush_write_domain(obj);
-        drm_vblank_get(dev, intel_crtc->pipe);
+        ret = i915_gem_object_flush_write_domain(obj);
+        if (ret)
+                goto cleanup_objs;
+
+        ret = drm_vblank_get(dev, intel_crtc->pipe);
+        if (ret)
+                goto cleanup_objs;
+
         obj_priv = to_intel_bo(obj);
         atomic_inc(&obj_priv->pending_flip);
         work->pending_flip_obj = obj;
@@ -4905,7 +5118,23 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
         mutex_unlock(&dev->struct_mutex);
 
+        trace_i915_flip_request(intel_crtc->plane, obj);
+
         return 0;
+
+cleanup_objs:
+        drm_gem_object_unreference(work->old_fb_obj);
+        drm_gem_object_unreference(obj);
+cleanup_work:
+        mutex_unlock(&dev->struct_mutex);
+
+        spin_lock_irqsave(&dev->event_lock, flags);
+        intel_crtc->unpin_work = NULL;
+        spin_unlock_irqrestore(&dev->event_lock, flags);
+
+        kfree(work);
+
+        return ret;
 }
 
 static const struct drm_crtc_helper_funcs intel_helper_funcs = {
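intel_crtc_page_flip() now checks the return values of i915_gem_object_flush_write_domain() and drm_vblank_get() and unwinds through labelled cleanup blocks, instead of the old single inline teardown that only handled the pin failure. The shape of the pattern as a generic sketch (step_a/step_b and the cleanup helpers are placeholders):

    ret = step_a();
    if (ret)
            goto cleanup_work;
    take_references();
    ret = step_b();
    if (ret)
            goto cleanup_objs;
    return 0;

    cleanup_objs:
            drop_references();
    cleanup_work:
            undo_queue_setup();
            return ret;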
@@ -5032,19 +5261,26 @@ static void intel_setup_outputs(struct drm_device *dev)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
         struct drm_encoder *encoder;
+        bool dpd_is_edp = false;
 
-        intel_crt_init(dev);
-
-        /* Set up integrated LVDS */
         if (IS_MOBILE(dev) && !IS_I830(dev))
                 intel_lvds_init(dev);
 
         if (HAS_PCH_SPLIT(dev)) {
-                int found;
+                dpd_is_edp = intel_dpd_is_edp(dev);
 
                 if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
                         intel_dp_init(dev, DP_A);
 
+                if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
+                        intel_dp_init(dev, PCH_DP_D);
+        }
+
+        intel_crt_init(dev);
+
+        if (HAS_PCH_SPLIT(dev)) {
+                int found;
+
                 if (I915_READ(HDMIB) & PORT_DETECTED) {
                         /* PCH SDVOB multiplex with HDMIB */
                         found = intel_sdvo_init(dev, PCH_SDVOB);
@@ -5063,7 +5299,7 @@ static void intel_setup_outputs(struct drm_device *dev)
                 if (I915_READ(PCH_DP_C) & DP_DETECTED)
                         intel_dp_init(dev, PCH_DP_C);
 
-                if (I915_READ(PCH_DP_D) & DP_DETECTED)
+                if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
                         intel_dp_init(dev, PCH_DP_D);
 
         } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
@@ -5472,6 +5708,26 @@ void intel_init_clock_gating(struct drm_device *dev)
                            (I915_READ(DISP_ARB_CTL) |
                             DISP_FBC_WM_DIS));
                 }
+                /*
+                 * Based on the document from hardware guys the following bits
+                 * should be set unconditionally in order to enable FBC.
+                 * The bit 22 of 0x42000
+                 * The bit 22 of 0x42004
+                 * The bit 7,8,9 of 0x42020.
+                 */
+                if (IS_IRONLAKE_M(dev)) {
+                        I915_WRITE(ILK_DISPLAY_CHICKEN1,
+                                   I915_READ(ILK_DISPLAY_CHICKEN1) |
+                                   ILK_FBCQ_DIS);
+                        I915_WRITE(ILK_DISPLAY_CHICKEN2,
+                                   I915_READ(ILK_DISPLAY_CHICKEN2) |
+                                   ILK_DPARB_GATE);
+                        I915_WRITE(ILK_DSPCLK_GATE,
+                                   I915_READ(ILK_DSPCLK_GATE) |
+                                   ILK_DPFC_DIS1 |
+                                   ILK_DPFC_DIS2 |
+                                   ILK_CLK_FBC);
+                }
                 return;
         } else if (IS_G4X(dev)) {
                 uint32_t dspclk_gate;
@@ -5550,7 +5806,11 @@ static void intel_init_display(struct drm_device *dev)
                 dev_priv->display.dpms = i9xx_crtc_dpms;
 
         if (I915_HAS_FBC(dev)) {
-                if (IS_GM45(dev)) {
+                if (IS_IRONLAKE_M(dev)) {
+                        dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+                        dev_priv->display.enable_fbc = ironlake_enable_fbc;
+                        dev_priv->display.disable_fbc = ironlake_disable_fbc;
+                } else if (IS_GM45(dev)) {
                         dev_priv->display.fbc_enabled = g4x_fbc_enabled;
                         dev_priv->display.enable_fbc = g4x_enable_fbc;
                         dev_priv->display.disable_fbc = g4x_disable_fbc;
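The Ironlake-M FBC callbacks registered here are reached only through the dev_priv->display function-pointer table, so callers stay platform agnostic; the DPMS-off path earlier in this diff already calls dev_priv->display.disable_fbc(dev) after a NULL check. An illustrative enable-side caller (the real dispatch lives in intel_update_fbc(), which is not part of this diff, and the interval value is made up):

    if (dev_priv->display.enable_fbc && dev_priv->display.fbc_enabled &&
        !dev_priv->display.fbc_enabled(dev))
            dev_priv->display.enable_fbc(crtc, 500 /* recompression interval, illustrative */);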