path: root/drivers/gpu/drm/i915/intel_display.c
author    Jesse Barnes <jbarnes@virtuousgeek.org>  2010-01-29 14:27:07 -0500
committer Eric Anholt <eric@anholt.net>            2010-02-22 11:46:54 -0500
commit    f97108d1d0facc7902134ebc453b226bbd4d1cdb (patch)
tree      563d14cb7c65b80e16df9246da25cade22f22fdd /drivers/gpu/drm/i915/intel_display.c
parent    ee980b8003a25fbfed50c3367f2b426c870eaf90 (diff)
drm/i915: add dynamic performance control support for Ironlake
Ironlake (and 965GM, which this patch doesn't support) supports a hardware performance and power management feature that allows it to adjust to changes in GPU load over time with software help. The goal of this is to maximize performance/power for a given workload. This patch enables that feature, which is also a requirement for supporting Intelligent Power Sharing, a feature that allows for dynamic budgeting of power between the CPU and GPU on Arrandale platforms.

Tested-by: ykzhao <yakui.zhao@intel.com>
[anholt: Resolved against the irq handler loop removal]
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Signed-off-by: Eric Anholt <eric@anholt.net>
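Only the intel_display.c half of the change is shown below: it programs the evaluation intervals and thresholds and records the fmin/fstart-derived delay limits, while the load-driven frequency stepping itself is done from the interrupt path (enabled in ironlake_irq_postinstall, in i915_irq.c) and is not part of this file's diff. A rough sketch of that idea only, with a hypothetical helper name and a simplified busy/idle input rather than the patch's real IRQ code:

/*
 * Illustrative sketch only -- not code from this commit.  It assumes that
 * lower "delay" values correspond to higher frequencies, and that the
 * busy/idle decision comes from the MEMINT_EVAL_CHG interrupt enabled in
 * ironlake_enable_drps() below.
 */
static void ironlake_step_freq_example(struct drm_device *dev, bool busier)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 new_delay = dev_priv->cur_delay;

	if (busier && new_delay > dev_priv->max_delay)
		new_delay--;	/* speed up, but not past fstart (no IPS) */
	else if (!busier && new_delay < dev_priv->min_delay)
		new_delay++;	/* slow down towards fmin */

	if (new_delay == dev_priv->cur_delay)
		return;

	/* Request the new frequency with the same MEMSWCTL CHFREQ command
	 * sequence the enable path uses (omitted here), then record it. */
	dev_priv->cur_delay = new_delay;
}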
Diffstat (limited to 'drivers/gpu/drm/i915/intel_display.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 100
1 file changed, 93 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index af9ec217cd1d..4a93f7a0f58d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4616,6 +4616,91 @@ err_unref:
 	return NULL;
 }
 
+void ironlake_enable_drps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 rgvmodectl = I915_READ(MEMMODECTL), rgvswctl;
+	u8 fmax, fmin, fstart, vstart;
+	int i = 0;
+
+	/* 100ms RC evaluation intervals */
+	I915_WRITE(RCUPEI, 100000);
+	I915_WRITE(RCDNEI, 100000);
+
+	/* Set max/min thresholds to 90ms and 80ms respectively */
+	I915_WRITE(RCBMAXAVG, 90000);
+	I915_WRITE(RCBMINAVG, 80000);
+
+	I915_WRITE(MEMIHYST, 1);
+
+	/* Set up min, max, and cur for interrupt handling */
+	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
+	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
+	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+		MEMMODE_FSTART_SHIFT;
+	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
+		PXVFREQ_PX_SHIFT;
+
+	dev_priv->max_delay = fstart; /* can't go to fmax w/o IPS */
+	dev_priv->min_delay = fmin;
+	dev_priv->cur_delay = fstart;
+
+	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+
+	/*
+	 * Interrupts will be enabled in ironlake_irq_postinstall
+	 */
+
+	I915_WRITE(VIDSTART, vstart);
+	POSTING_READ(VIDSTART);
+
+	rgvmodectl |= MEMMODE_SWMODE_EN;
+	I915_WRITE(MEMMODECTL, rgvmodectl);
+
+	while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) {
+		if (i++ > 100) {
+			DRM_ERROR("stuck trying to change perf mode\n");
+			break;
+		}
+		msleep(1);
+	}
+	msleep(1);
+
+	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+		(fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+	I915_WRITE(MEMSWCTL, rgvswctl);
+	POSTING_READ(MEMSWCTL);
+
+	rgvswctl |= MEMCTL_CMD_STS;
+	I915_WRITE(MEMSWCTL, rgvswctl);
+}
+
+void ironlake_disable_drps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 rgvswctl;
+	u8 fstart;
+
+	/* Ack interrupts, disable EFC interrupt */
+	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
+	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
+	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
+	I915_WRITE(DEIIR, DE_PCU_EVENT);
+	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
+
+	/* Go back to the starting frequency */
+	fstart = (I915_READ(MEMMODECTL) & MEMMODE_FSTART_MASK) >>
+		MEMMODE_FSTART_SHIFT;
+	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+		(fstart << MEMCTL_FREQ_SHIFT);
+	I915_WRITE(MEMSWCTL, rgvswctl);
+	msleep(1);
+	rgvswctl |= MEMCTL_CMD_STS;
+	I915_WRITE(MEMSWCTL, rgvswctl);
+	msleep(1);
+
+}
+
 void intel_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4685,8 +4770,8 @@ void intel_init_clock_gating(struct drm_device *dev)
 
 		if (obj_priv) {
 			I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
-			I915_WRITE(MCHBAR_RENDER_STANDBY,
-				   I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
+			I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) &
+				   ~RCX_SW_EXIT);
 		}
 	}
 }
@@ -4799,11 +4884,6 @@ void intel_modeset_init(struct drm_device *dev)
 	DRM_DEBUG_KMS("%d display pipe%s available.\n",
 		      num_pipe, num_pipe > 1 ? "s" : "");
 
-	if (IS_I85X(dev))
-		pci_read_config_word(dev->pdev, HPLLCC, &dev_priv->orig_clock);
-	else if (IS_I9XX(dev) || IS_G4X(dev))
-		pci_read_config_word(dev->pdev, GCFGC, &dev_priv->orig_clock);
-
 	for (i = 0; i < num_pipe; i++) {
 		intel_crtc_init(dev, i);
 	}
@@ -4812,6 +4892,9 @@ void intel_modeset_init(struct drm_device *dev)
 
 	intel_init_clock_gating(dev);
 
+	if (IS_IRONLAKE_M(dev))
+		ironlake_enable_drps(dev);
+
 	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
 	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
 		    (unsigned long)dev);
@@ -4859,6 +4942,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
 		drm_gem_object_unreference(dev_priv->pwrctx);
 	}
 
+	if (IS_IRONLAKE_M(dev))
+		ironlake_disable_drps(dev);
+
 	mutex_unlock(&dev->struct_mutex);
 
 	drm_mode_config_cleanup(dev);
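Both ironlake_enable_drps() and ironlake_disable_drps() above finish by issuing the same software frequency-change command through MEMSWCTL. A minimal sketch of that sequence factored into one helper, using only the register and bit names visible in the diff (the helper itself is hypothetical and not part of this commit, and the enable path additionally ORs in MEMCTL_SFCAVM, so this is a simplification):

/*
 * Hypothetical helper: request a switch to frequency state 'fid' through
 * the software memory frequency control register, mirroring the sequence
 * used by the DRPS enable/disable paths above.
 */
static void ironlake_send_freq_cmd(struct drm_i915_private *dev_priv, u8 fid)
{
	u32 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		       (fid << MEMCTL_FREQ_SHIFT);

	I915_WRITE(MEMSWCTL, rgvswctl);	/* load the command and target state */
	POSTING_READ(MEMSWCTL);		/* flush before setting the status bit */

	rgvswctl |= MEMCTL_CMD_STS;	/* write again with the status bit set,
					 * as the enable/disable paths do */
	I915_WRITE(MEMSWCTL, rgvswctl);
}

The busy-wait on MEMCTL_CMD_STS in the enable path suggests a caller should also wait for any previous command to complete before issuing a new one.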