Diffstat (limited to 'drivers/gpu/drm/i915/intel_display.c')
 -rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 228 +++++++++++++-------
 1 file changed, 174 insertions(+), 54 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 98967f3b7724..49fb54fd9a18 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1213,6 +1213,26 @@ static bool g4x_fbc_enabled(struct drm_device *dev)
 	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
+static void sandybridge_blit_fbc_update(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 blt_ecoskpd;
+
+	/* Make sure blitter notifies FBC of writes */
+	__gen6_gt_force_wake_get(dev_priv);
+	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
+	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
+		GEN6_BLITTER_LOCK_SHIFT;
+	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
+	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
+			 GEN6_BLITTER_LOCK_SHIFT);
+	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+	POSTING_READ(GEN6_BLITTER_ECOSKPD);
+	__gen6_gt_force_wake_put(dev_priv);
+}
+
 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 {
 	struct drm_device *dev = crtc->dev;
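Note: GEN6_BLITTER_ECOSKPD is one of the gen6 "masked" registers, where the upper bits act as per-bit write enables for the lower bits; that is why the FBC_NOTIFY bit is written three times above (unlock, set, re-lock). A minimal sketch of the protocol as a hypothetical helper (masked_bit_set() is illustrative and not part of this patch):

    static void masked_bit_set(struct drm_i915_private *dev_priv,
                               u32 reg, u32 bit, int lock_shift)
    {
            u32 val = I915_READ(reg);

            val |= bit << lock_shift;       /* unlock: enable writes to this bit */
            I915_WRITE(reg, val);
            val |= bit;                     /* the value write now takes effect */
            I915_WRITE(reg, val);
            val &= ~(bit << lock_shift);    /* re-lock the bit */
            I915_WRITE(reg, val);
            POSTING_READ(reg);              /* flush before dropping force wake */
    }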
@@ -1266,6 +1286,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 		I915_WRITE(SNB_DPFC_CTL_SA,
 			   SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
 		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+		sandybridge_blit_fbc_update(dev);
 	}
 
 	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
@@ -1609,19 +1630,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
 
 		wait_event(dev_priv->pending_flip_queue,
+			   atomic_read(&dev_priv->mm.wedged) ||
 			   atomic_read(&obj->pending_flip) == 0);
 
 		/* Big Hammer, we also need to ensure that any pending
 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
 		 * current scanout is retired before unpinning the old
 		 * framebuffer.
+		 *
+		 * This should only fail upon a hung GPU, in which case we
+		 * can safely continue.
 		 */
 		ret = i915_gem_object_flush_gpu(obj, false);
-		if (ret) {
-			i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
-			mutex_unlock(&dev->struct_mutex);
-			return ret;
-		}
+		(void) ret;
 	}
 
 	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
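Note: the extra mm.wedged test gives the wait a second wake-up condition, so a flip that can never complete on a hung GPU no longer blocks the modeset forever; for the same reason the i915_gem_object_flush_gpu() result is now deliberately ignored. This relies on the error path waking the queue when it marks the GPU wedged, roughly as follows (a sketch of the assumed counterpart, not part of this hunk):

    /* in the GPU error handler (sketch) */
    atomic_set(&dev_priv->mm.wedged, 1);
    wake_up_all(&dev_priv->pending_flip_queue);  /* sleepers re-check the condition */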
@@ -2024,6 +2045,31 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 		   atomic_read(&obj->pending_flip) == 0);
 }
 
+static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_encoder *encoder;
+
+	/*
+	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
+	 * must be driven by its own crtc; no sharing is possible.
+	 */
+	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+		if (encoder->base.crtc != crtc)
+			continue;
+
+		switch (encoder->type) {
+		case INTEL_OUTPUT_EDP:
+			if (!intel_encoder_is_pch_edp(&encoder->base))
+				return false;
+			continue;
+		}
+	}
+
+	return true;
+}
+
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
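Note: the helper returns false only when the crtc drives CPU eDP (port DP_A); every other output on a PCH-split platform is routed through the PCH. The same predicate spelled out the other way around (crtc_has_cpu_edp() is hypothetical, for illustration only):

    static bool crtc_has_cpu_edp(struct drm_crtc *crtc)
    {
            struct intel_encoder *encoder;

            list_for_each_entry(encoder,
                                &crtc->dev->mode_config.encoder_list,
                                base.head)
                    if (encoder->base.crtc == crtc &&
                        encoder->type == INTEL_OUTPUT_EDP &&
                        !intel_encoder_is_pch_edp(&encoder->base))
                            return true;    /* CPU eDP: pipe bypasses the PCH */

            return false;   /* intel_crtc_driving_pch() == !crtc_has_cpu_edp() */
    }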
@@ -2032,6 +2078,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
 	u32 reg, temp;
+	bool is_pch_port = false;
 
 	if (intel_crtc->active)
 		return;
@@ -2045,7 +2092,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 		I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
 	}
 
-	ironlake_fdi_enable(crtc);
+	is_pch_port = intel_crtc_driving_pch(crtc);
+
+	if (is_pch_port)
+		ironlake_fdi_enable(crtc);
+	else {
+		/* disable CPU FDI tx and PCH FDI rx */
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
+		I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+		POSTING_READ(reg);
+
+		reg = FDI_RX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~(0x7 << 16);
+		temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+		I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+		POSTING_READ(reg);
+		udelay(100);
+
+		/* Ironlake workaround, disable clock pointer after downing FDI */
+		if (HAS_PCH_IBX(dev))
+			I915_WRITE(FDI_RX_CHICKEN(pipe),
+				   I915_READ(FDI_RX_CHICKEN(pipe) &
+					     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+
+		/* still set train pattern 1 */
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_PATTERN_1;
+		I915_WRITE(reg, temp);
+
+		reg = FDI_RX_CTL(pipe);
+		temp = I915_READ(reg);
+		if (HAS_PCH_CPT(dev)) {
+			temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+			temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+		} else {
+			temp &= ~FDI_LINK_TRAIN_NONE;
+			temp |= FDI_LINK_TRAIN_PATTERN_1;
+		}
+		/* BPC in FDI rx is consistent with that in PIPECONF */
+		temp &= ~(0x07 << 16);
+		temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+		I915_WRITE(reg, temp);
+
+		POSTING_READ(reg);
+		udelay(100);
+	}
 
 	/* Enable panel fitting for LVDS */
 	if (dev_priv->pch_pf_size &&
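Note: the write/POSTING_READ()/udelay() triple used throughout the else branch is the usual idiom for ordering MMIO against a delay, since I915_WRITE() may be buffered as a posted write:

    I915_WRITE(reg, temp);   /* may sit in a write buffer */
    POSTING_READ(reg);       /* read-back forces the write out to the device */
    udelay(100);             /* the settle time starts after the write landed */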
@@ -2079,6 +2175,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 		intel_flush_display_plane(dev, plane);
 	}
 
+	/* Skip the PCH stuff if possible */
+	if (!is_pch_port)
+		goto done;
+
 	/* For PCH output, training FDI link */
 	if (IS_GEN6(dev))
 		gen6_fdi_link_train(crtc);
@@ -2163,7 +2263,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	I915_WRITE(reg, temp | TRANS_ENABLE);
 	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
 		DRM_ERROR("failed to enable transcoder %d\n", pipe);
-
+done:
 	intel_crtc_load_lut(crtc);
 	intel_update_fbc(dev);
 	intel_crtc_update_cursor(crtc, true);
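Note: taken together, the hunks above restructure the enable path around a single predicate; the condensed control flow looks roughly like this (a sketch, not the literal function body):

    static void ironlake_crtc_enable(struct drm_crtc *crtc)    /* sketch */
    {
            bool is_pch_port = intel_crtc_driving_pch(crtc);

            if (is_pch_port)
                    ironlake_fdi_enable(crtc);
            /* else: force FDI tx/rx off, leaving train pattern 1 selected */

            /* ... panel fitter, pipe and plane enable ... */

            if (!is_pch_port)
                    goto done;      /* CPU eDP: no FDI training, no transcoder */

            /* ... FDI link training, PCH DPLL and transcoder enable ... */
    done:
            intel_crtc_load_lut(crtc);
            /* ... fbc, cursor ... */
    }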
@@ -5530,6 +5630,16 @@ cleanup_work:
 	return ret;
 }
 
+static void intel_crtc_reset(struct drm_crtc *crtc)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	/* Reset flags back to the 'unknown' status so that they
+	 * will be correctly set on the initial modeset.
+	 */
+	intel_crtc->dpms_mode = -1;
+}
+
 static struct drm_crtc_helper_funcs intel_helper_funcs = {
 	.dpms = intel_crtc_dpms,
 	.mode_fixup = intel_crtc_mode_fixup,
@@ -5541,6 +5651,7 @@ static struct drm_crtc_helper_funcs intel_helper_funcs = {
 };
 
 static const struct drm_crtc_funcs intel_crtc_funcs = {
+	.reset = intel_crtc_reset,
 	.cursor_set = intel_crtc_cursor_set,
 	.cursor_move = intel_crtc_cursor_move,
 	.gamma_set = intel_crtc_gamma_set,
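Note: the drm core calls drm_crtc_funcs.reset from drm_mode_config_reset(), which drivers invoke on resume or after a GPU reset; a simplified sketch of the core loop (the real helper also resets encoder and connector state):

    void drm_mode_config_reset(struct drm_device *dev)    /* simplified */
    {
            struct drm_crtc *crtc;

            list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
                    if (crtc->funcs->reset)
                            crtc->funcs->reset(crtc);
    }

Forcing dpms_mode back to -1 through this hook guarantees the first modeset after resume reprograms the pipe instead of being short-circuited as a no-op.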
@@ -5631,8 +5742,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
 	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
 
-	intel_crtc->cursor_addr = 0;
-	intel_crtc->dpms_mode = -1;
+	intel_crtc_reset(&intel_crtc->base);
 	intel_crtc->active = true; /* force the pipe off on setup_init_config */
 
 	if (HAS_PCH_SPLIT(dev)) {
@@ -6172,7 +6282,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	 * userspace...
 	 */
 	I915_WRITE(GEN6_RC_STATE, 0);
-	__gen6_force_wake_get(dev_priv);
+	__gen6_gt_force_wake_get(dev_priv);
 
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -6270,7 +6380,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	/* enable all PM interrupts */
 	I915_WRITE(GEN6_PMINTRMSK, 0);
 
-	__gen6_force_wake_put(dev_priv);
+	__gen6_gt_force_wake_put(dev_priv);
 }
 
 void intel_enable_clock_gating(struct drm_device *dev)
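Note: only the helper names change in these two hunks (__gen6_force_wake_* became __gen6_gt_force_wake_*); the bracket pattern itself is unchanged. GT registers are only accessible while a force-wake reference holds the power well awake:

    __gen6_gt_force_wake_get(dev_priv);     /* wake the GT and wait for the ack */
    /* ... GT register accesses, e.g. I915_WRITE(GEN6_RC_CONTROL, 0) ... */
    __gen6_gt_force_wake_put(dev_priv);     /* drop the reference, GT may sleep */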
@@ -6286,7 +6396,9 @@ void intel_enable_clock_gating(struct drm_device *dev)
 
 	if (IS_GEN5(dev)) {
 		/* Required for FBC */
-		dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE;
+		dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
+			DPFCRUNIT_CLOCK_GATE_DISABLE |
+			DPFDUNIT_CLOCK_GATE_DISABLE;
 		/* Required for CxSR */
 		dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
 
@@ -6429,52 +6541,60 @@ void intel_enable_clock_gating(struct drm_device *dev)
 	}
 }
 
-void intel_disable_clock_gating(struct drm_device *dev)
+static void ironlake_teardown_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (dev_priv->renderctx) {
-		struct drm_i915_gem_object *obj = dev_priv->renderctx;
-
-		I915_WRITE(CCID, 0);
-		POSTING_READ(CCID);
-
-		i915_gem_object_unpin(obj);
-		drm_gem_object_unreference(&obj->base);
+		i915_gem_object_unpin(dev_priv->renderctx);
+		drm_gem_object_unreference(&dev_priv->renderctx->base);
 		dev_priv->renderctx = NULL;
 	}
 
 	if (dev_priv->pwrctx) {
-		struct drm_i915_gem_object *obj = dev_priv->pwrctx;
+		i915_gem_object_unpin(dev_priv->pwrctx);
+		drm_gem_object_unreference(&dev_priv->pwrctx->base);
+		dev_priv->pwrctx = NULL;
+	}
+}
+
+static void ironlake_disable_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (I915_READ(PWRCTXA)) {
+		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
+		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
+		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
+			 50);
 
 		I915_WRITE(PWRCTXA, 0);
 		POSTING_READ(PWRCTXA);
 
-		i915_gem_object_unpin(obj);
-		drm_gem_object_unreference(&obj->base);
-		dev_priv->pwrctx = NULL;
+		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+		POSTING_READ(RSTDBYCTL);
 	}
+
+	ironlake_teardown_rc6(dev);
 }
 
-static void ironlake_disable_rc6(struct drm_device *dev)
+static int ironlake_setup_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
-	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
-	wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
-		 10);
-	POSTING_READ(CCID);
-	I915_WRITE(PWRCTXA, 0);
-	POSTING_READ(PWRCTXA);
-	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
-	POSTING_READ(RSTDBYCTL);
-	i915_gem_object_unpin(dev_priv->renderctx);
-	drm_gem_object_unreference(&dev_priv->renderctx->base);
-	dev_priv->renderctx = NULL;
-	i915_gem_object_unpin(dev_priv->pwrctx);
-	drm_gem_object_unreference(&dev_priv->pwrctx->base);
-	dev_priv->pwrctx = NULL;
-}
+	if (dev_priv->renderctx == NULL)
+		dev_priv->renderctx = intel_alloc_context_page(dev);
+	if (!dev_priv->renderctx)
+		return -ENOMEM;
+
+	if (dev_priv->pwrctx == NULL)
+		dev_priv->pwrctx = intel_alloc_context_page(dev);
+	if (!dev_priv->pwrctx) {
+		ironlake_teardown_rc6(dev);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
 
 void ironlake_enable_rc6(struct drm_device *dev)
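Note: the split makes the two halves safe to call in any state: ironlake_setup_rc6() only allocates whichever context page is still missing, and ironlake_teardown_rc6() tolerates NULL pages, so every failure path can call it unconditionally. Typical caller shape (a sketch; the real caller is ironlake_enable_rc6() below, and emit_rc6_context() is hypothetical):

    if (ironlake_setup_rc6(dev))
            return;                         /* allocation failed, nothing to undo */

    if (emit_rc6_context(dev))              /* hypothetical ring-emission step */
            ironlake_teardown_rc6(dev);     /* safe even after partial setup */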
@@ -6482,15 +6602,26 @@ void ironlake_enable_rc6(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	/* rc6 disabled by default due to repeated reports of hanging during
+	 * boot and resume.
+	 */
+	if (!i915_enable_rc6)
+		return;
+
+	ret = ironlake_setup_rc6(dev);
+	if (ret)
+		return;
+
 	/*
 	 * GPU can automatically power down the render unit if given a page
 	 * to save state.
 	 */
 	ret = BEGIN_LP_RING(6);
 	if (ret) {
-		ironlake_disable_rc6(dev);
+		ironlake_teardown_rc6(dev);
 		return;
 	}
+
 	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
 	OUT_RING(MI_SET_CONTEXT);
 	OUT_RING(dev_priv->renderctx->gtt_offset |
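Note: i915_enable_rc6 is a module parameter, off by default because of the hang reports cited in the comment above. Its declaration lives outside this file (in i915_drv.c); assumed shape, shown here for context only:

    unsigned int i915_enable_rc6 = 0;    /* assumed default: RC6 disabled */
    module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);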
@@ -6507,6 +6638,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
 	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
 }
 
+
 /* Set up chip specific display functions */
 static void intel_init_display(struct drm_device *dev)
 {
@@ -6749,21 +6881,9 @@ void intel_modeset_init(struct drm_device *dev)
 	if (IS_GEN6(dev))
 		gen6_enable_rps(dev_priv);
 
-	if (IS_IRONLAKE_M(dev)) {
-		dev_priv->renderctx = intel_alloc_context_page(dev);
-		if (!dev_priv->renderctx)
-			goto skip_rc6;
-		dev_priv->pwrctx = intel_alloc_context_page(dev);
-		if (!dev_priv->pwrctx) {
-			i915_gem_object_unpin(dev_priv->renderctx);
-			drm_gem_object_unreference(&dev_priv->renderctx->base);
-			dev_priv->renderctx = NULL;
-			goto skip_rc6;
-		}
+	if (IS_IRONLAKE_M(dev))
 		ironlake_enable_rc6(dev);
-	}
 
-skip_rc6:
 	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
 	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
 		    (unsigned long)dev);