Diffstat (limited to 'drivers/gpu/drm/i915/intel_display.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 304
1 file changed, 235 insertions(+), 69 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f469a84cacfd..cc8131ff319f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1029,19 +1029,28 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 void i8xx_disable_fbc(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long timeout = jiffies + msecs_to_jiffies(1);
 	u32 fbc_ctl;
 
 	if (!I915_HAS_FBC(dev))
 		return;
 
+	if (!(I915_READ(FBC_CONTROL) & FBC_CTL_EN))
+		return;	/* Already off, just return */
+
 	/* Disable compression */
 	fbc_ctl = I915_READ(FBC_CONTROL);
 	fbc_ctl &= ~FBC_CTL_EN;
 	I915_WRITE(FBC_CONTROL, fbc_ctl);
 
 	/* Wait for compressing bit to clear */
-	while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING)
-		; /* nothing */
+	while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) {
+		if (time_after(jiffies, timeout)) {
+			DRM_DEBUG_DRIVER("FBC idle timed out\n");
+			break;
+		}
+		; /* do nothing */
+	}
 
 	intel_wait_for_vblank(dev);
 
@@ -1239,10 +1248,11 @@ static void intel_update_fbc(struct drm_crtc *crtc,
 		return;
 
 out_disable:
-	DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
 	/* Multiple disables should be harmless */
-	if (intel_fbc_enabled(dev))
+	if (intel_fbc_enabled(dev)) {
+		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
 		intel_disable_fbc(dev);
+	}
 }
 
 static int
@@ -1386,7 +1396,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	Start = obj_priv->gtt_offset;
 	Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
 
-	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
+	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+		      Start, Offset, x, y, crtc->fb->pitch);
 	I915_WRITE(dspstride, crtc->fb->pitch);
 	if (IS_I965G(dev)) {
 		I915_WRITE(dspbase, Offset);
@@ -2345,6 +2356,8 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
 		if (mode->clock * 3 > 27000 * 4)
 			return MODE_CLOCK_HIGH;
 	}
+
+	drm_mode_set_crtcinfo(adjusted_mode, 0);
 	return true;
 }
 
@@ -2629,6 +2642,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
 
 struct cxsr_latency {
 	int is_desktop;
+	int is_ddr3;
 	unsigned long fsb_freq;
 	unsigned long mem_freq;
 	unsigned long display_sr;
@@ -2638,33 +2652,45 @@ struct cxsr_latency {
 };
 
 static struct cxsr_latency cxsr_latency_table[] = {
-	{1, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
-	{1, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
-	{1, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
-
-	{1, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
-	{1, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
-	{1, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
-
-	{1, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
-	{1, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
-	{1, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
-
-	{0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
-	{0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
-	{0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
-
-	{0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
-	{0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
-	{0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
-
-	{0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
-	{0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
-	{0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
+	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
+	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
+	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
+	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
+	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */
+
+	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
+	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
+	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
+	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
+	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */
+
+	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
+	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
+	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
+	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
+	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */
+
+	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
+	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
+	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
+	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
+	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */
+
+	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
+	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
+	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
+	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
+	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */
+
+	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
+	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
+	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
+	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
+	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
 };
 
-static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
-						   int mem)
+static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3,
+						   int fsb, int mem)
 {
 	int i;
 	struct cxsr_latency *latency;
@@ -2675,6 +2701,7 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
 	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
 		latency = &cxsr_latency_table[i];
 		if (is_desktop == latency->is_desktop &&
+		    is_ddr3 == latency->is_ddr3 &&
 		    fsb == latency->fsb_freq && mem == latency->mem_freq)
 			return latency;
 	}
@@ -2789,8 +2816,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
 	struct cxsr_latency *latency;
 	int sr_clock;
 
-	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
-					 dev_priv->mem_freq);
+	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
+					 dev_priv->fsb_freq, dev_priv->mem_freq);
 	if (!latency) {
 		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
 		pineview_disable_cxsr(dev);
@@ -3626,6 +3653,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
 	}
 
+	dspcntr |= DISPLAY_PLANE_ENABLE;
+	pipeconf |= PIPEACONF_ENABLE;
+	dpll |= DPLL_VCO_ENABLE;
+
+
 	/* Disable the panel fitter if it was on our pipe */
 	if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
 		I915_WRITE(PFIT_CONTROL, 0);
@@ -3772,6 +3804,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		}
 	}
 
+	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
+		/* the chip adds 2 halflines automatically */
+		adjusted_mode->crtc_vdisplay -= 1;
+		adjusted_mode->crtc_vtotal -= 1;
+		adjusted_mode->crtc_vblank_start -= 1;
+		adjusted_mode->crtc_vblank_end -= 1;
+		adjusted_mode->crtc_vsync_end -= 1;
+		adjusted_mode->crtc_vsync_start -= 1;
+	} else
+		pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
+
 	I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
 		   ((adjusted_mode->crtc_htotal - 1) << 16));
 	I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
@@ -3934,6 +3978,13 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 			DRM_ERROR("failed to pin cursor bo\n");
 			goto fail_locked;
 		}
+
+		ret = i915_gem_object_set_to_gtt_domain(bo, 0);
+		if (ret) {
+			DRM_ERROR("failed to move cursor bo into the GTT\n");
+			goto fail_unpin;
+		}
+
 		addr = obj_priv->gtt_offset;
 	} else {
 		ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
@@ -3977,6 +4028,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 	intel_crtc->cursor_bo = bo;
 
 	return 0;
+fail_unpin:
+	i915_gem_object_unpin(bo);
 fail_locked:
 	mutex_unlock(&dev->struct_mutex);
 fail:
@@ -4436,6 +4489,8 @@ static void intel_idle_update(struct work_struct *work)
 
 	mutex_lock(&dev->struct_mutex);
 
+	i915_update_gfx_val(dev_priv);
+
 	if (IS_I945G(dev) || IS_I945GM(dev)) {
 		DRM_DEBUG_DRIVER("enable memory self refresh on 945\n");
 		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
@@ -4564,12 +4619,6 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
 	spin_lock_irqsave(&dev->event_lock, flags);
 	work = intel_crtc->unpin_work;
 	if (work == NULL || !work->pending) {
-		if (work && !work->pending) {
-			obj_priv = to_intel_bo(work->pending_flip_obj);
-			DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
-					 obj_priv,
-					 atomic_read(&obj_priv->pending_flip));
-		}
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 		return;
 	}
@@ -4629,14 +4678,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	unsigned long flags;
 	int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
 	int ret, pipesrc;
-	RING_LOCALS;
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
 	if (work == NULL)
 		return -ENOMEM;
 
-	mutex_lock(&dev->struct_mutex);
-
 	work->event = event;
 	work->dev = crtc->dev;
 	intel_fb = to_intel_framebuffer(crtc->fb);
@@ -4646,10 +4692,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	/* We borrow the event spin lock for protecting unpin_work */
 	spin_lock_irqsave(&dev->event_lock, flags);
 	if (intel_crtc->unpin_work) {
-		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 		kfree(work);
-		mutex_unlock(&dev->struct_mutex);
+
+		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
 		return -EBUSY;
 	}
 	intel_crtc->unpin_work = work;
@@ -4658,13 +4704,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	intel_fb = to_intel_framebuffer(fb);
 	obj = intel_fb->obj;
 
+	mutex_lock(&dev->struct_mutex);
 	ret = intel_pin_and_fence_fb_obj(dev, obj);
 	if (ret != 0) {
-		DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
-				 to_intel_bo(obj));
-		kfree(work);
-		intel_crtc->unpin_work = NULL;
 		mutex_unlock(&dev->struct_mutex);
+
+		spin_lock_irqsave(&dev->event_lock, flags);
+		intel_crtc->unpin_work = NULL;
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+
+		kfree(work);
+
+		DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
+				 to_intel_bo(obj));
 		return ret;
 	}
 
@@ -5023,10 +5075,32 @@ err_unref:
 	return NULL;
 }
 
+bool ironlake_set_drps(struct drm_device *dev, u8 val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u16 rgvswctl;
+
+	rgvswctl = I915_READ16(MEMSWCTL);
+	if (rgvswctl & MEMCTL_CMD_STS) {
+		DRM_DEBUG("gpu busy, RCS change rejected\n");
+		return false; /* still busy with another command */
+	}
+
+	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+	I915_WRITE16(MEMSWCTL, rgvswctl);
+	POSTING_READ16(MEMSWCTL);
+
+	rgvswctl |= MEMCTL_CMD_STS;
+	I915_WRITE16(MEMSWCTL, rgvswctl);
+
+	return true;
+}
+
 void ironlake_enable_drps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 rgvmodectl = I915_READ(MEMMODECTL), rgvswctl;
+	u32 rgvmodectl = I915_READ(MEMMODECTL);
 	u8 fmax, fmin, fstart, vstart;
 	int i = 0;
 
@@ -5045,13 +5119,21 @@ void ironlake_enable_drps(struct drm_device *dev)
 	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
 	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
 		MEMMODE_FSTART_SHIFT;
+	fstart = fmax;
+
 	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
 		PXVFREQ_PX_SHIFT;
 
-	dev_priv->max_delay = fstart; /* can't go to fmax w/o IPS */
+	dev_priv->fmax = fstart; /* IPS callback will increase this */
+	dev_priv->fstart = fstart;
+
+	dev_priv->max_delay = fmax;
 	dev_priv->min_delay = fmin;
 	dev_priv->cur_delay = fstart;
 
+	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin,
+			 fstart);
+
 	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
 
 	/*
@@ -5073,20 +5155,19 @@ void ironlake_enable_drps(struct drm_device *dev)
 	}
 	msleep(1);
 
-	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
-		(fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
-	I915_WRITE(MEMSWCTL, rgvswctl);
-	POSTING_READ(MEMSWCTL);
+	ironlake_set_drps(dev, fstart);
 
-	rgvswctl |= MEMCTL_CMD_STS;
-	I915_WRITE(MEMSWCTL, rgvswctl);
+	dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
+		I915_READ(0x112e0);
+	dev_priv->last_time1 = jiffies_to_msecs(jiffies);
+	dev_priv->last_count2 = I915_READ(0x112f4);
+	getrawmonotonic(&dev_priv->last_time2);
 }
 
 void ironlake_disable_drps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 rgvswctl;
-	u8 fstart;
+	u16 rgvswctl = I915_READ16(MEMSWCTL);
 
 	/* Ack interrupts, disable EFC interrupt */
 	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
@@ -5096,11 +5177,7 @@ void ironlake_disable_drps(struct drm_device *dev)
 	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
 
 	/* Go back to the starting frequency */
-	fstart = (I915_READ(MEMMODECTL) & MEMMODE_FSTART_MASK) >>
-		MEMMODE_FSTART_SHIFT;
-	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
-		(fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
-	I915_WRITE(MEMSWCTL, rgvswctl);
+	ironlake_set_drps(dev, dev_priv->fstart);
 	msleep(1);
 	rgvswctl |= MEMCTL_CMD_STS;
 	I915_WRITE(MEMSWCTL, rgvswctl);
@@ -5108,6 +5185,92 @@ void ironlake_disable_drps(struct drm_device *dev)
 
 }
 
+static unsigned long intel_pxfreq(u32 vidfreq)
+{
+	unsigned long freq;
+	int div = (vidfreq & 0x3f0000) >> 16;
+	int post = (vidfreq & 0x3000) >> 12;
+	int pre = (vidfreq & 0x7);
+
+	if (!pre)
+		return 0;
+
+	freq = ((div * 133333) / ((1<<post) * pre));
+
+	return freq;
+}
+
+void intel_init_emon(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 lcfuse;
+	u8 pxw[16];
+	int i;
+
+	/* Disable to program */
+	I915_WRITE(ECR, 0);
+	POSTING_READ(ECR);
+
+	/* Program energy weights for various events */
+	I915_WRITE(SDEW, 0x15040d00);
+	I915_WRITE(CSIEW0, 0x007f0000);
+	I915_WRITE(CSIEW1, 0x1e220004);
+	I915_WRITE(CSIEW2, 0x04000004);
+
+	for (i = 0; i < 5; i++)
+		I915_WRITE(PEW + (i * 4), 0);
+	for (i = 0; i < 3; i++)
+		I915_WRITE(DEW + (i * 4), 0);
+
+	/* Program P-state weights to account for frequency power adjustment */
+	for (i = 0; i < 16; i++) {
+		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
+		unsigned long freq = intel_pxfreq(pxvidfreq);
+		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
+			PXVFREQ_PX_SHIFT;
+		unsigned long val;
+
+		val = vid * vid;
+		val *= (freq / 1000);
+		val *= 255;
+		val /= (127*127*900);
+		if (val > 0xff)
+			DRM_ERROR("bad pxval: %ld\n", val);
+		pxw[i] = val;
+	}
+	/* Render standby states get 0 weight */
+	pxw[14] = 0;
+	pxw[15] = 0;
+
+	for (i = 0; i < 4; i++) {
+		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
+			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
+		I915_WRITE(PXW + (i * 4), val);
+	}
+
+	/* Adjust magic regs to magic values (more experimental results) */
+	I915_WRITE(OGW0, 0);
+	I915_WRITE(OGW1, 0);
+	I915_WRITE(EG0, 0x00007f00);
+	I915_WRITE(EG1, 0x0000000e);
+	I915_WRITE(EG2, 0x000e0000);
+	I915_WRITE(EG3, 0x68000300);
+	I915_WRITE(EG4, 0x42000000);
+	I915_WRITE(EG5, 0x00140031);
+	I915_WRITE(EG6, 0);
+	I915_WRITE(EG7, 0);
+
+	for (i = 0; i < 8; i++)
+		I915_WRITE(PXWL + (i * 4), 0);
+
+	/* Enable PMON + select events */
+	I915_WRITE(ECR, 0x80000019);
+
+	lcfuse = I915_READ(LCFUSE02);
+
+	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
+}
+
 void intel_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5277,11 +5440,13 @@ static void intel_init_display(struct drm_device *dev)
 		dev_priv->display.update_wm = NULL;
 	} else if (IS_PINEVIEW(dev)) {
 		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+					    dev_priv->is_ddr3,
 					    dev_priv->fsb_freq,
 					    dev_priv->mem_freq)) {
 			DRM_INFO("failed to find known CxSR latency "
-				 "(found fsb freq %d, mem freq %d), "
+				 "(found ddr%s fsb freq %d, mem freq %d), "
 				 "disabling CxSR\n",
+				 (dev_priv->is_ddr3 == 1) ? "3": "2",
 				 dev_priv->fsb_freq, dev_priv->mem_freq);
 			/* Disable CxSR and never update its watermark again */
 			pineview_disable_cxsr(dev);
@@ -5310,7 +5475,6 @@ static void intel_init_display(struct drm_device *dev)
 void intel_modeset_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int num_pipe;
 	int i;
 
 	drm_mode_config_init(dev);
@@ -5340,13 +5504,13 @@ void intel_modeset_init(struct drm_device *dev)
 	dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
 
 	if (IS_MOBILE(dev) || IS_I9XX(dev))
-		num_pipe = 2;
+		dev_priv->num_pipe = 2;
 	else
-		num_pipe = 1;
+		dev_priv->num_pipe = 1;
 	DRM_DEBUG_KMS("%d display pipe%s available.\n",
-		      num_pipe, num_pipe > 1 ? "s" : "");
+		      dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
 
-	for (i = 0; i < num_pipe; i++) {
+	for (i = 0; i < dev_priv->num_pipe; i++) {
 		intel_crtc_init(dev, i);
 	}
 
@@ -5354,8 +5518,10 @@ void intel_modeset_init(struct drm_device *dev)
 
 	intel_init_clock_gating(dev);
 
-	if (IS_IRONLAKE_M(dev))
+	if (IS_IRONLAKE_M(dev)) {
 		ironlake_enable_drps(dev);
+		intel_init_emon(dev);
+	}
 
 	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
 	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,