Diffstat (limited to 'drivers/gpu/drm/i915/intel_display.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c	414
1 file changed, 175 insertions(+), 239 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 52cd9b006da2..002612fae717 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -262,6 +262,14 @@ struct intel_limit {
 #define IRONLAKE_P2_LVDS_FAST	   7  /* double channel */
 #define IRONLAKE_P2_DOT_LIMIT	   225000 /* 225Mhz */
 
+#define IRONLAKE_P_DISPLAY_PORT_MIN	10
+#define IRONLAKE_P_DISPLAY_PORT_MAX	20
+#define IRONLAKE_P2_DISPLAY_PORT_FAST	10
+#define IRONLAKE_P2_DISPLAY_PORT_SLOW	10
+#define IRONLAKE_P2_DISPLAY_PORT_LIMIT	0
+#define IRONLAKE_P1_DISPLAY_PORT_MIN	1
+#define IRONLAKE_P1_DISPLAY_PORT_MAX	2
+
 static bool
 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 		    int target, int refclk, intel_clock_t *best_clock);
@@ -271,9 +279,6 @@ intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 static bool
 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 			int target, int refclk, intel_clock_t *best_clock);
-static bool
-intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-			     int target, int refclk, intel_clock_t *best_clock);
 
 static bool
 intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
@@ -496,7 +501,7 @@ static const intel_limit_t intel_limits_ironlake_sdvo = {
 	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
 		 .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
 		 .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
-	.find_pll = intel_ironlake_find_best_PLL,
+	.find_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_ironlake_lvds = {
@@ -511,7 +516,30 @@ static const intel_limit_t intel_limits_ironlake_lvds = {
 	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
 		 .p2_slow = IRONLAKE_P2_LVDS_SLOW,
 		 .p2_fast = IRONLAKE_P2_LVDS_FAST },
-	.find_pll = intel_ironlake_find_best_PLL,
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_display_port = {
+	.dot = { .min = IRONLAKE_DOT_MIN,
+		 .max = IRONLAKE_DOT_MAX },
+	.vco = { .min = IRONLAKE_VCO_MIN,
+		 .max = IRONLAKE_VCO_MAX},
+	.n   = { .min = IRONLAKE_N_MIN,
+		 .max = IRONLAKE_N_MAX },
+	.m   = { .min = IRONLAKE_M_MIN,
+		 .max = IRONLAKE_M_MAX },
+	.m1  = { .min = IRONLAKE_M1_MIN,
+		 .max = IRONLAKE_M1_MAX },
+	.m2  = { .min = IRONLAKE_M2_MIN,
+		 .max = IRONLAKE_M2_MAX },
+	.p   = { .min = IRONLAKE_P_DISPLAY_PORT_MIN,
+		 .max = IRONLAKE_P_DISPLAY_PORT_MAX },
+	.p1  = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN,
+		 .max = IRONLAKE_P1_DISPLAY_PORT_MAX},
+	.p2  = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT,
+		 .p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW,
+		 .p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST },
+	.find_pll = intel_find_pll_ironlake_dp,
 };
 
 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
@@ -519,6 +547,9 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
 	const intel_limit_t *limit;
 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
 		limit = &intel_limits_ironlake_lvds;
+	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+		 HAS_eDP)
+		limit = &intel_limits_ironlake_display_port;
 	else
 		limit = &intel_limits_ironlake_sdvo;
 
@@ -791,7 +822,13 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 	found = false;
 
 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+		int lvds_reg;
+
+		if (IS_IRONLAKE(dev))
+			lvds_reg = PCH_LVDS;
+		else
+			lvds_reg = LVDS;
+		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
 		    LVDS_CLKB_POWER_UP)
 			clock.p2 = limit->p2.p2_fast;
 		else
@@ -839,6 +876,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
 {
 	struct drm_device *dev = crtc->dev;
 	intel_clock_t clock;
+
+	/* return directly when it is eDP */
+	if (HAS_eDP)
+		return true;
+
 	if (target < 200000) {
 		clock.n = 1;
 		clock.p1 = 2;
@@ -857,68 +899,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
 	return true;
 }
 
-static bool
-intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-			     int target, int refclk, intel_clock_t *best_clock)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	intel_clock_t clock;
-	int err_most = 47;
-	int err_min = 10000;
-
-	/* eDP has only 2 clock choice, no n/m/p setting */
-	if (HAS_eDP)
-		return true;
-
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
-		return intel_find_pll_ironlake_dp(limit, crtc, target,
-						  refclk, best_clock);
-
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
-		    LVDS_CLKB_POWER_UP)
-			clock.p2 = limit->p2.p2_fast;
-		else
-			clock.p2 = limit->p2.p2_slow;
-	} else {
-		if (target < limit->p2.dot_limit)
-			clock.p2 = limit->p2.p2_slow;
-		else
-			clock.p2 = limit->p2.p2_fast;
-	}
-
-	memset(best_clock, 0, sizeof(*best_clock));
-	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
-		/* based on hardware requriment prefer smaller n to precision */
-		for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
-			/* based on hardware requirment prefere larger m1,m2 */
-			for (clock.m1 = limit->m1.max;
-			     clock.m1 >= limit->m1.min; clock.m1--) {
-				for (clock.m2 = limit->m2.max;
-				     clock.m2 >= limit->m2.min; clock.m2--) {
-					int this_err;
-
-					intel_clock(dev, refclk, &clock);
-					if (!intel_PLL_is_valid(crtc, &clock))
-						continue;
-					this_err = abs((10000 - (target*10000/clock.dot)));
-					if (this_err < err_most) {
-						*best_clock = clock;
-						/* found on first matching */
-						goto out;
-					} else if (this_err < err_min) {
-						*best_clock = clock;
-						err_min = this_err;
-					}
-				}
-			}
-		}
-	}
-out:
-	return true;
-}
-
 /* DisplayPort has only two frequencies, 162MHz and 270MHz */
 static bool
 intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -1493,6 +1473,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 	int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
 	u32 temp;
 	int tries = 5, j, n;
+	u32 pipe_bpc;
+
+	temp = I915_READ(pipeconf_reg);
+	pipe_bpc = temp & PIPE_BPC_MASK;
 
 	/* XXX: When our outputs are all unaware of DPMS modes other than off
 	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
@@ -1524,6 +1508,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 
 		/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
 		temp = I915_READ(fdi_rx_reg);
+		/*
+		 * make the BPC in FDI Rx be consistent with that in
+		 * pipeconf reg.
+		 */
+		temp &= ~(0x7 << 16);
+		temp |= (pipe_bpc << 11);
 		I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
 				FDI_SEL_PCDCLK |
 				FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
@@ -1666,6 +1656,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 
 		/* enable PCH transcoder */
 		temp = I915_READ(transconf_reg);
+		/*
+		 * make the BPC in transcoder be consistent with
+		 * that in pipeconf reg.
+		 */
+		temp &= ~PIPE_BPC_MASK;
+		temp |= pipe_bpc;
 		I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
 		I915_READ(transconf_reg);
 
@@ -1745,6 +1741,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 		I915_READ(fdi_tx_reg);
 
 		temp = I915_READ(fdi_rx_reg);
+		/* BPC in FDI rx is consistent with that in pipeconf */
+		temp &= ~(0x07 << 16);
+		temp |= (pipe_bpc << 11);
 		I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
 		I915_READ(fdi_rx_reg);
 
@@ -1789,7 +1788,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 				}
 			}
 		}
-
+		temp = I915_READ(transconf_reg);
+		/* BPC in transcoder is consistent with that in pipeconf */
+		temp &= ~PIPE_BPC_MASK;
+		temp |= pipe_bpc;
+		I915_WRITE(transconf_reg, temp);
+		I915_READ(transconf_reg);
 		udelay(100);
 
 		/* disable PCH DPLL */
@@ -2448,7 +2452,7 @@ static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
  * A value of 5us seems to be a good balance; safe for very low end
  * platforms but not overly aggressive on lower latency configs.
  */
-const static int latency_ns = 5000;
+static const int latency_ns = 5000;
 
 static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
 {
@@ -2559,7 +2563,7 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
 	/* Calc sr entries for one plane configs */
 	if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
 		/* self-refresh has much higher latency */
-		const static int sr_latency_ns = 12000;
+		static const int sr_latency_ns = 12000;
 
 		sr_clock = planea_clock ? planea_clock : planeb_clock;
 		line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2598,7 +2602,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
 	/* Calc sr entries for one plane configs */
 	if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
 		/* self-refresh has much higher latency */
-		const static int sr_latency_ns = 12000;
+		static const int sr_latency_ns = 12000;
 
 		sr_clock = planea_clock ? planea_clock : planeb_clock;
 		line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2667,7 +2671,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
 	if (HAS_FW_BLC(dev) && sr_hdisplay &&
 	    (!planea_clock || !planeb_clock)) {
 		/* self-refresh has much higher latency */
-		const static int sr_latency_ns = 6000;
+		static const int sr_latency_ns = 6000;
 
 		sr_clock = planea_clock ? planea_clock : planeb_clock;
 		line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2969,6 +2973,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 
 	/* determine panel color depth */
 	temp = I915_READ(pipeconf_reg);
+	temp &= ~PIPE_BPC_MASK;
+	if (is_lvds) {
+		int lvds_reg = I915_READ(PCH_LVDS);
+		/* the BPC will be 6 if it is 18-bit LVDS panel */
+		if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
+			temp |= PIPE_8BPC;
+		else
+			temp |= PIPE_6BPC;
+	} else
+		temp |= PIPE_8BPC;
+	I915_WRITE(pipeconf_reg, temp);
+	I915_READ(pipeconf_reg);
 
 	switch (temp & PIPE_BPC_MASK) {
 	case PIPE_8BPC:
@@ -3195,7 +3211,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		 * appropriately here, but we need to look more thoroughly into how
 		 * panels behave in the two modes.
 		 */
-
+		/* set the dithering flag */
+		if (IS_I965G(dev)) {
+			if (dev_priv->lvds_dither) {
+				if (IS_IRONLAKE(dev))
+					pipeconf |= PIPE_ENABLE_DITHER;
+				else
+					lvds |= LVDS_ENABLE_DITHER;
+			} else {
+				if (IS_IRONLAKE(dev))
+					pipeconf &= ~PIPE_ENABLE_DITHER;
+				else
+					lvds &= ~LVDS_ENABLE_DITHER;
+			}
+		}
 		I915_WRITE(lvds_reg, lvds);
 		I915_READ(lvds_reg);
 	}
@@ -3385,7 +3414,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 
 	/* we only need to pin inside GTT if cursor is non-phy */
 	mutex_lock(&dev->struct_mutex);
-	if (!dev_priv->cursor_needs_physical) {
+	if (!dev_priv->info->cursor_needs_physical) {
 		ret = i915_gem_object_pin(bo, PAGE_SIZE);
 		if (ret) {
 			DRM_ERROR("failed to pin cursor bo\n");
@@ -3420,7 +3449,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 	I915_WRITE(base, addr);
 
 	if (intel_crtc->cursor_bo) {
-		if (dev_priv->cursor_needs_physical) {
+		if (dev_priv->info->cursor_needs_physical) {
 			if (intel_crtc->cursor_bo != bo)
 				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
 		} else
@@ -3779,125 +3808,6 @@ static void intel_gpu_idle_timer(unsigned long arg)
 	queue_work(dev_priv->wq, &dev_priv->idle_work);
 }
 
-void intel_increase_renderclock(struct drm_device *dev, bool schedule)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	if (IS_IRONLAKE(dev))
-		return;
-
-	if (!dev_priv->render_reclock_avail) {
-		DRM_DEBUG_DRIVER("not reclocking render clock\n");
-		return;
-	}
-
-	/* Restore render clock frequency to original value */
-	if (IS_G4X(dev) || IS_I9XX(dev))
-		pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock);
-	else if (IS_I85X(dev))
-		pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock);
-	DRM_DEBUG_DRIVER("increasing render clock frequency\n");
-
-	/* Schedule downclock */
-	if (schedule)
-		mod_timer(&dev_priv->idle_timer, jiffies +
-			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
-}
-
-void intel_decrease_renderclock(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	if (IS_IRONLAKE(dev))
-		return;
-
-	if (!dev_priv->render_reclock_avail) {
-		DRM_DEBUG_DRIVER("not reclocking render clock\n");
-		return;
-	}
-
-	if (IS_G4X(dev)) {
-		u16 gcfgc;
-
-		/* Adjust render clock... */
-		pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
-		/* Down to minimum... */
-		gcfgc &= ~GM45_GC_RENDER_CLOCK_MASK;
-		gcfgc |= GM45_GC_RENDER_CLOCK_266_MHZ;
-
-		pci_write_config_word(dev->pdev, GCFGC, gcfgc);
-	} else if (IS_I965G(dev)) {
-		u16 gcfgc;
-
-		/* Adjust render clock... */
-		pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
-		/* Down to minimum... */
-		gcfgc &= ~I965_GC_RENDER_CLOCK_MASK;
-		gcfgc |= I965_GC_RENDER_CLOCK_267_MHZ;
-
-		pci_write_config_word(dev->pdev, GCFGC, gcfgc);
-	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
-		u16 gcfgc;
-
-		/* Adjust render clock... */
-		pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
-		/* Down to minimum... */
-		gcfgc &= ~I945_GC_RENDER_CLOCK_MASK;
-		gcfgc |= I945_GC_RENDER_CLOCK_166_MHZ;
-
-		pci_write_config_word(dev->pdev, GCFGC, gcfgc);
-	} else if (IS_I915G(dev)) {
-		u16 gcfgc;
-
-		/* Adjust render clock... */
-		pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
-		/* Down to minimum... */
-		gcfgc &= ~I915_GC_RENDER_CLOCK_MASK;
-		gcfgc |= I915_GC_RENDER_CLOCK_166_MHZ;
-
-		pci_write_config_word(dev->pdev, GCFGC, gcfgc);
-	} else if (IS_I85X(dev)) {
-		u16 hpllcc;
-
-		/* Adjust render clock... */
-		pci_read_config_word(dev->pdev, HPLLCC, &hpllcc);
-
-		/* Up to maximum... */
-		hpllcc &= ~GC_CLOCK_CONTROL_MASK;
-		hpllcc |= GC_CLOCK_133_200;
-
-		pci_write_config_word(dev->pdev, HPLLCC, hpllcc);
-	}
-	DRM_DEBUG_DRIVER("decreasing render clock frequency\n");
-}
-
-/* Note that no increase function is needed for this - increase_renderclock()
- * will also rewrite these bits
- */
-void intel_decrease_displayclock(struct drm_device *dev)
-{
-	if (IS_IRONLAKE(dev))
-		return;
-
-	if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) ||
-	    IS_I915GM(dev)) {
-		u16 gcfgc;
-
-		/* Adjust render clock... */
-		pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
-		/* Down to minimum... */
-		gcfgc &= ~0xf0;
-		gcfgc |= 0x80;
-
-		pci_write_config_word(dev->pdev, GCFGC, gcfgc);
-	}
-}
-
 #define CRTC_IDLE_TIMEOUT 1000 /* ms */
 
 static void intel_crtc_idle_timer(unsigned long arg)
@@ -4011,12 +3921,6 @@ static void intel_idle_update(struct work_struct *work)
 
 	mutex_lock(&dev->struct_mutex);
 
-	/* GPU isn't processing, downclock it. */
-	if (!dev_priv->busy) {
-		intel_decrease_renderclock(dev);
-		intel_decrease_displayclock(dev);
-	}
-
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		/* Skip inactive CRTCs */
 		if (!crtc->fb)
@@ -4050,13 +3954,11 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	if (!dev_priv->busy) {
+	if (!dev_priv->busy)
 		dev_priv->busy = true;
-		intel_increase_renderclock(dev, true);
-	} else {
+	else
 		mod_timer(&dev_priv->idle_timer, jiffies +
 			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
-	}
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		if (!crtc->fb)
@@ -4400,29 +4302,43 @@ static void intel_setup_outputs(struct drm_device *dev)
 		bool found = false;
 
 		if (I915_READ(SDVOB) & SDVO_DETECTED) {
+			DRM_DEBUG_KMS("probing SDVOB\n");
 			found = intel_sdvo_init(dev, SDVOB);
-			if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
+			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
+				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
 				intel_hdmi_init(dev, SDVOB);
+			}
 
-			if (!found && SUPPORTS_INTEGRATED_DP(dev))
+			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
+				DRM_DEBUG_KMS("probing DP_B\n");
 				intel_dp_init(dev, DP_B);
+			}
 		}
 
 		/* Before G4X SDVOC doesn't have its own detect register */
 
-		if (I915_READ(SDVOB) & SDVO_DETECTED)
+		if (I915_READ(SDVOB) & SDVO_DETECTED) {
+			DRM_DEBUG_KMS("probing SDVOC\n");
 			found = intel_sdvo_init(dev, SDVOC);
+		}
 
 		if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
 
-			if (SUPPORTS_INTEGRATED_HDMI(dev))
+			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
+				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
 				intel_hdmi_init(dev, SDVOC);
-			if (SUPPORTS_INTEGRATED_DP(dev))
+			}
+			if (SUPPORTS_INTEGRATED_DP(dev)) {
+				DRM_DEBUG_KMS("probing DP_C\n");
 				intel_dp_init(dev, DP_C);
+			}
 		}
 
-		if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
+		if (SUPPORTS_INTEGRATED_DP(dev) &&
+		    (I915_READ(DP_D) & DP_DETECTED)) {
+			DRM_DEBUG_KMS("probing DP_D\n");
 			intel_dp_init(dev, DP_D);
+		}
 	} else if (IS_I8XX(dev))
 		intel_dvo_init(dev);
 
@@ -4527,6 +4443,42 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
 	.fb_changed = intelfb_probe,
 };
 
+static struct drm_gem_object *
+intel_alloc_power_context(struct drm_device *dev)
+{
+	struct drm_gem_object *pwrctx;
+	int ret;
+
+	pwrctx = drm_gem_object_alloc(dev, 4096);
+	if (!pwrctx) {
+		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
+		return NULL;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_gem_object_pin(pwrctx, 4096);
+	if (ret) {
+		DRM_ERROR("failed to pin power context: %d\n", ret);
+		goto err_unref;
+	}
+
+	ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1);
+	if (ret) {
+		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
+		goto err_unpin;
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	return pwrctx;
+
+err_unpin:
+	i915_gem_object_unpin(pwrctx);
+err_unref:
+	drm_gem_object_unreference(pwrctx);
+	mutex_unlock(&dev->struct_mutex);
+	return NULL;
+}
+
 void intel_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4579,42 +4531,27 @@ void intel_init_clock_gating(struct drm_device *dev)
 	 * GPU can automatically power down the render unit if given a page
 	 * to save state.
 	 */
-	if (I915_HAS_RC6(dev)) {
-		struct drm_gem_object *pwrctx;
-		struct drm_i915_gem_object *obj_priv;
-		int ret;
+	if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
+		struct drm_i915_gem_object *obj_priv = NULL;
 
 		if (dev_priv->pwrctx) {
 			obj_priv = dev_priv->pwrctx->driver_private;
 		} else {
-			pwrctx = drm_gem_object_alloc(dev, 4096);
-			if (!pwrctx) {
-				DRM_DEBUG("failed to alloc power context, "
-					  "RC6 disabled\n");
-				goto out;
-			}
+			struct drm_gem_object *pwrctx;
 
-			ret = i915_gem_object_pin(pwrctx, 4096);
-			if (ret) {
-				DRM_ERROR("failed to pin power context: %d\n",
-					  ret);
-				drm_gem_object_unreference(pwrctx);
-				goto out;
-			}
-
-			i915_gem_object_set_to_gtt_domain(pwrctx, 1);
-
-			dev_priv->pwrctx = pwrctx;
-			obj_priv = pwrctx->driver_private;
+			pwrctx = intel_alloc_power_context(dev);
+			if (pwrctx) {
+				dev_priv->pwrctx = pwrctx;
+				obj_priv = pwrctx->driver_private;
+			}
 		}
 
-		I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
-		I915_WRITE(MCHBAR_RENDER_STANDBY,
-			   I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
+		if (obj_priv) {
+			I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
+			I915_WRITE(MCHBAR_RENDER_STANDBY,
+				   I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
+		}
 	}
-
-out:
-	return;
 }
 
 /* Set up chip specific display functions */
@@ -4770,7 +4707,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
 		del_timer_sync(&intel_crtc->idle_timer);
 	}
 
-	intel_increase_renderclock(dev, false);
 	del_timer_sync(&dev_priv->idle_timer);
 
 	if (dev_priv->display.disable_fbc)