-rw-r--r--  drivers/gpu/drm/i915/intel_display.c  513
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h        1
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c       513
3 files changed, 514 insertions(+), 513 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 03c015c3adb3..d3982e9c6ff6 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -25,7 +25,6 @@
25 */
26
27#include <linux/dmi.h>
28#include <linux/cpufreq.h>
29#include <linux/module.h>
30#include <linux/input.h>
31#include <linux/i2c.h>
@@ -6352,177 +6351,6 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
6352	.output_poll_changed = intel_fb_output_poll_changed,
6353};
6354
6355static struct drm_i915_gem_object *
6356intel_alloc_context_page(struct drm_device *dev)
6357{
6358 struct drm_i915_gem_object *ctx;
6359 int ret;
6360
6361 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
6362
6363 ctx = i915_gem_alloc_object(dev, 4096);
6364 if (!ctx) {
6365 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
6366 return NULL;
6367 }
6368
6369 ret = i915_gem_object_pin(ctx, 4096, true);
6370 if (ret) {
6371 DRM_ERROR("failed to pin power context: %d\n", ret);
6372 goto err_unref;
6373 }
6374
6375 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
6376 if (ret) {
6377 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
6378 goto err_unpin;
6379 }
6380
6381 return ctx;
6382
6383err_unpin:
6384 i915_gem_object_unpin(ctx);
6385err_unref:
6386 drm_gem_object_unreference(&ctx->base);
6387 mutex_unlock(&dev->struct_mutex);
6388 return NULL;
6389}
6390
6391bool ironlake_set_drps(struct drm_device *dev, u8 val)
6392{
6393 struct drm_i915_private *dev_priv = dev->dev_private;
6394 u16 rgvswctl;
6395
6396 rgvswctl = I915_READ16(MEMSWCTL);
6397 if (rgvswctl & MEMCTL_CMD_STS) {
6398 DRM_DEBUG("gpu busy, RCS change rejected\n");
6399 return false; /* still busy with another command */
6400 }
6401
6402 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
6403 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
6404 I915_WRITE16(MEMSWCTL, rgvswctl);
6405 POSTING_READ16(MEMSWCTL);
6406
6407 rgvswctl |= MEMCTL_CMD_STS;
6408 I915_WRITE16(MEMSWCTL, rgvswctl);
6409
6410 return true;
6411}
6412
6413void ironlake_enable_drps(struct drm_device *dev)
6414{
6415 struct drm_i915_private *dev_priv = dev->dev_private;
6416 u32 rgvmodectl = I915_READ(MEMMODECTL);
6417 u8 fmax, fmin, fstart, vstart;
6418
6419 /* Enable temp reporting */
6420 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
6421 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
6422
6423 /* 100ms RC evaluation intervals */
6424 I915_WRITE(RCUPEI, 100000);
6425 I915_WRITE(RCDNEI, 100000);
6426
6427 /* Set max/min thresholds to 90ms and 80ms respectively */
6428 I915_WRITE(RCBMAXAVG, 90000);
6429 I915_WRITE(RCBMINAVG, 80000);
6430
6431 I915_WRITE(MEMIHYST, 1);
6432
6433 /* Set up min, max, and cur for interrupt handling */
6434 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
6435 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
6436 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
6437 MEMMODE_FSTART_SHIFT;
6438
6439 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
6440 PXVFREQ_PX_SHIFT;
6441
6442 dev_priv->fmax = fmax; /* IPS callback will increase this */
6443 dev_priv->fstart = fstart;
6444
6445 dev_priv->max_delay = fstart;
6446 dev_priv->min_delay = fmin;
6447 dev_priv->cur_delay = fstart;
6448
6449 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
6450 fmax, fmin, fstart);
6451
6452 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
6453
6454 /*
6455 * Interrupts will be enabled in ironlake_irq_postinstall
6456 */
6457
6458 I915_WRITE(VIDSTART, vstart);
6459 POSTING_READ(VIDSTART);
6460
6461 rgvmodectl |= MEMMODE_SWMODE_EN;
6462 I915_WRITE(MEMMODECTL, rgvmodectl);
6463
6464 if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
6465 DRM_ERROR("stuck trying to change perf mode\n");
6466 msleep(1);
6467
6468 ironlake_set_drps(dev, fstart);
6469
6470 dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
6471 I915_READ(0x112e0);
6472 dev_priv->last_time1 = jiffies_to_msecs(jiffies);
6473 dev_priv->last_count2 = I915_READ(0x112f4);
6474 getrawmonotonic(&dev_priv->last_time2);
6475}
6476
6477void ironlake_disable_drps(struct drm_device *dev)
6478{
6479 struct drm_i915_private *dev_priv = dev->dev_private;
6480 u16 rgvswctl = I915_READ16(MEMSWCTL);
6481
6482 /* Ack interrupts, disable EFC interrupt */
6483 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
6484 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
6485 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
6486 I915_WRITE(DEIIR, DE_PCU_EVENT);
6487 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
6488
6489 /* Go back to the starting frequency */
6490 ironlake_set_drps(dev, dev_priv->fstart);
6491 msleep(1);
6492 rgvswctl |= MEMCTL_CMD_STS;
6493 I915_WRITE(MEMSWCTL, rgvswctl);
6494 msleep(1);
6495
6496}
6497
6498void gen6_set_rps(struct drm_device *dev, u8 val)
6499{
6500 struct drm_i915_private *dev_priv = dev->dev_private;
6501 u32 swreq;
6502
6503 swreq = (val & 0x3ff) << 25;
6504 I915_WRITE(GEN6_RPNSWREQ, swreq);
6505}
6506
6507void gen6_disable_rps(struct drm_device *dev)
6508{
6509 struct drm_i915_private *dev_priv = dev->dev_private;
6510
6511 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
6512 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
6513 I915_WRITE(GEN6_PMIER, 0);
6514 /* Complete PM interrupt masking here doesn't race with the rps work
6515 * item again unmasking PM interrupts because that is using a different
6516 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
6517 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
6518
6519 spin_lock_irq(&dev_priv->rps_lock);
6520 dev_priv->pm_iir = 0;
6521 spin_unlock_irq(&dev_priv->rps_lock);
6522
6523 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
6524}
6525
6526static unsigned long intel_pxfreq(u32 vidfreq)
6527{
6528	unsigned long freq;
@@ -6609,232 +6437,6 @@ void intel_init_emon(struct drm_device *dev)
6609	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
6610}
6611
6612int intel_enable_rc6(const struct drm_device *dev)
6613{
6614 /*
6615 * Respect the kernel parameter if it is set
6616 */
6617 if (i915_enable_rc6 >= 0)
6618 return i915_enable_rc6;
6619
6620 /*
6621 * Disable RC6 on Ironlake
6622 */
6623 if (INTEL_INFO(dev)->gen == 5)
6624 return 0;
6625
6626 /* Sorry Haswell, no RC6 for you for now. */
6627 if (IS_HASWELL(dev))
6628 return 0;
6629
6630 /*
6631 * Disable rc6 on Sandybridge
6632 */
6633 if (INTEL_INFO(dev)->gen == 6) {
6634 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
6635 return INTEL_RC6_ENABLE;
6636 }
6637 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
6638 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
6639}
6640
6641void gen6_enable_rps(struct drm_i915_private *dev_priv)
6642{
6643 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
6644 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
6645 u32 pcu_mbox, rc6_mask = 0;
6646 u32 gtfifodbg;
6647 int cur_freq, min_freq, max_freq;
6648 int rc6_mode;
6649 int i;
6650
6651 /* Here begins a magic sequence of register writes to enable
6652 * auto-downclocking.
6653 *
6654 * Perhaps there might be some value in exposing these to
6655 * userspace...
6656 */
6657 I915_WRITE(GEN6_RC_STATE, 0);
6658 mutex_lock(&dev_priv->dev->struct_mutex);
6659
6660 /* Clear the DBG now so we don't confuse earlier errors */
6661 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
6662 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
6663 I915_WRITE(GTFIFODBG, gtfifodbg);
6664 }
6665
6666 gen6_gt_force_wake_get(dev_priv);
6667
6668 /* disable the counters and set deterministic thresholds */
6669 I915_WRITE(GEN6_RC_CONTROL, 0);
6670
6671 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
6672 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
6673 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
6674 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
6675 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
6676
6677 for (i = 0; i < I915_NUM_RINGS; i++)
6678 I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
6679
6680 I915_WRITE(GEN6_RC_SLEEP, 0);
6681 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
6682 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
6683 I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
6684 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
6685
6686 rc6_mode = intel_enable_rc6(dev_priv->dev);
6687 if (rc6_mode & INTEL_RC6_ENABLE)
6688 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
6689
6690 if (rc6_mode & INTEL_RC6p_ENABLE)
6691 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
6692
6693 if (rc6_mode & INTEL_RC6pp_ENABLE)
6694 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
6695
6696 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
6697 (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
6698 (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
6699 (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
6700
6701 I915_WRITE(GEN6_RC_CONTROL,
6702 rc6_mask |
6703 GEN6_RC_CTL_EI_MODE(1) |
6704 GEN6_RC_CTL_HW_ENABLE);
6705
6706 I915_WRITE(GEN6_RPNSWREQ,
6707 GEN6_FREQUENCY(10) |
6708 GEN6_OFFSET(0) |
6709 GEN6_AGGRESSIVE_TURBO);
6710 I915_WRITE(GEN6_RC_VIDEO_FREQ,
6711 GEN6_FREQUENCY(12));
6712
6713 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
6714 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
6715 18 << 24 |
6716 6 << 16);
6717 I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
6718 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
6719 I915_WRITE(GEN6_RP_UP_EI, 100000);
6720 I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
6721 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
6722 I915_WRITE(GEN6_RP_CONTROL,
6723 GEN6_RP_MEDIA_TURBO |
6724 GEN6_RP_MEDIA_HW_MODE |
6725 GEN6_RP_MEDIA_IS_GFX |
6726 GEN6_RP_ENABLE |
6727 GEN6_RP_UP_BUSY_AVG |
6728 GEN6_RP_DOWN_IDLE_CONT);
6729
6730 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
6731 500))
6732 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
6733
6734 I915_WRITE(GEN6_PCODE_DATA, 0);
6735 I915_WRITE(GEN6_PCODE_MAILBOX,
6736 GEN6_PCODE_READY |
6737 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
6738 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
6739 500))
6740 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
6741
6742 min_freq = (rp_state_cap & 0xff0000) >> 16;
6743 max_freq = rp_state_cap & 0xff;
6744 cur_freq = (gt_perf_status & 0xff00) >> 8;
6745
6746 /* Check for overclock support */
6747 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
6748 500))
6749 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
6750 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
6751 pcu_mbox = I915_READ(GEN6_PCODE_DATA);
6752 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
6753 500))
6754 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
6755 if (pcu_mbox & (1<<31)) { /* OC supported */
6756 max_freq = pcu_mbox & 0xff;
6757 DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
6758 }
6759
6760 /* In units of 100MHz */
6761 dev_priv->max_delay = max_freq;
6762 dev_priv->min_delay = min_freq;
6763 dev_priv->cur_delay = cur_freq;
6764
6765 /* requires MSI enabled */
6766 I915_WRITE(GEN6_PMIER,
6767 GEN6_PM_MBOX_EVENT |
6768 GEN6_PM_THERMAL_EVENT |
6769 GEN6_PM_RP_DOWN_TIMEOUT |
6770 GEN6_PM_RP_UP_THRESHOLD |
6771 GEN6_PM_RP_DOWN_THRESHOLD |
6772 GEN6_PM_RP_UP_EI_EXPIRED |
6773 GEN6_PM_RP_DOWN_EI_EXPIRED);
6774 spin_lock_irq(&dev_priv->rps_lock);
6775 WARN_ON(dev_priv->pm_iir != 0);
6776 I915_WRITE(GEN6_PMIMR, 0);
6777 spin_unlock_irq(&dev_priv->rps_lock);
6778 /* enable all PM interrupts */
6779 I915_WRITE(GEN6_PMINTRMSK, 0);
6780
6781 gen6_gt_force_wake_put(dev_priv);
6782 mutex_unlock(&dev_priv->dev->struct_mutex);
6783}
6784
6785void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
6786{
6787 int min_freq = 15;
6788 int gpu_freq, ia_freq, max_ia_freq;
6789 int scaling_factor = 180;
6790
6791 max_ia_freq = cpufreq_quick_get_max(0);
6792 /*
6793 * Default to measured freq if none found, PCU will ensure we don't go
6794 * over
6795 */
6796 if (!max_ia_freq)
6797 max_ia_freq = tsc_khz;
6798
6799 /* Convert from kHz to MHz */
6800 max_ia_freq /= 1000;
6801
6802 mutex_lock(&dev_priv->dev->struct_mutex);
6803
6804 /*
6805 * For each potential GPU frequency, load a ring frequency we'd like
6806 * to use for memory access. We do this by specifying the IA frequency
6807 * the PCU should use as a reference to determine the ring frequency.
6808 */
6809 for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
6810 gpu_freq--) {
6811 int diff = dev_priv->max_delay - gpu_freq;
6812
6813 /*
6814 * For GPU frequencies less than 750MHz, just use the lowest
6815 * ring freq.
6816 */
6817 if (gpu_freq < min_freq)
6818 ia_freq = 800;
6819 else
6820 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
6821 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
6822
6823 I915_WRITE(GEN6_PCODE_DATA,
6824 (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
6825 gpu_freq);
6826 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
6827 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
6828 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
6829 GEN6_PCODE_READY) == 0, 10)) {
6830 DRM_ERROR("pcode write of freq table timed out\n");
6831 continue;
6832 }
6833 }
6834
6835 mutex_unlock(&dev_priv->dev->struct_mutex);
6836}
6837
6838static void ironlake_init_clock_gating(struct drm_device *dev)
6839{
6840	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7178,121 +6780,6 @@ static void cpt_init_clock_gating(struct drm_device *dev)
7178	I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
7179}
7180
7181static void ironlake_teardown_rc6(struct drm_device *dev)
7182{
7183 struct drm_i915_private *dev_priv = dev->dev_private;
7184
7185 if (dev_priv->renderctx) {
7186 i915_gem_object_unpin(dev_priv->renderctx);
7187 drm_gem_object_unreference(&dev_priv->renderctx->base);
7188 dev_priv->renderctx = NULL;
7189 }
7190
7191 if (dev_priv->pwrctx) {
7192 i915_gem_object_unpin(dev_priv->pwrctx);
7193 drm_gem_object_unreference(&dev_priv->pwrctx->base);
7194 dev_priv->pwrctx = NULL;
7195 }
7196}
7197
7198static void ironlake_disable_rc6(struct drm_device *dev)
7199{
7200 struct drm_i915_private *dev_priv = dev->dev_private;
7201
7202 if (I915_READ(PWRCTXA)) {
7203 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
7204 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
7205 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
7206 50);
7207
7208 I915_WRITE(PWRCTXA, 0);
7209 POSTING_READ(PWRCTXA);
7210
7211 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
7212 POSTING_READ(RSTDBYCTL);
7213 }
7214
7215 ironlake_teardown_rc6(dev);
7216}
7217
7218static int ironlake_setup_rc6(struct drm_device *dev)
7219{
7220 struct drm_i915_private *dev_priv = dev->dev_private;
7221
7222 if (dev_priv->renderctx == NULL)
7223 dev_priv->renderctx = intel_alloc_context_page(dev);
7224 if (!dev_priv->renderctx)
7225 return -ENOMEM;
7226
7227 if (dev_priv->pwrctx == NULL)
7228 dev_priv->pwrctx = intel_alloc_context_page(dev);
7229 if (!dev_priv->pwrctx) {
7230 ironlake_teardown_rc6(dev);
7231 return -ENOMEM;
7232 }
7233
7234 return 0;
7235}
7236
7237void ironlake_enable_rc6(struct drm_device *dev)
7238{
7239 struct drm_i915_private *dev_priv = dev->dev_private;
7240 int ret;
7241
7242 /* rc6 disabled by default due to repeated reports of hanging during
7243 * boot and resume.
7244 */
7245 if (!intel_enable_rc6(dev))
7246 return;
7247
7248 mutex_lock(&dev->struct_mutex);
7249 ret = ironlake_setup_rc6(dev);
7250 if (ret) {
7251 mutex_unlock(&dev->struct_mutex);
7252 return;
7253 }
7254
7255 /*
7256 * GPU can automatically power down the render unit if given a page
7257 * to save state.
7258 */
7259 ret = BEGIN_LP_RING(6);
7260 if (ret) {
7261 ironlake_teardown_rc6(dev);
7262 mutex_unlock(&dev->struct_mutex);
7263 return;
7264 }
7265
7266 OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
7267 OUT_RING(MI_SET_CONTEXT);
7268 OUT_RING(dev_priv->renderctx->gtt_offset |
7269 MI_MM_SPACE_GTT |
7270 MI_SAVE_EXT_STATE_EN |
7271 MI_RESTORE_EXT_STATE_EN |
7272 MI_RESTORE_INHIBIT);
7273 OUT_RING(MI_SUSPEND_FLUSH);
7274 OUT_RING(MI_NOOP);
7275 OUT_RING(MI_FLUSH);
7276 ADVANCE_LP_RING();
7277
7278 /*
7279 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
7280 * does an implicit flush, combined with MI_FLUSH above, it should be
7281 * safe to assume that renderctx is valid
7282 */
7283 ret = intel_wait_ring_idle(LP_RING(dev_priv));
7284 if (ret) {
7285		DRM_ERROR("failed to enable ironlake power savings\n");
7286 ironlake_teardown_rc6(dev);
7287 mutex_unlock(&dev->struct_mutex);
7288 return;
7289 }
7290
7291 I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
7292 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
7293 mutex_unlock(&dev->struct_mutex);
7294}
7295
7296void intel_init_clock_gating(struct drm_device *dev)
7297{
7298	struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f1e27ce18f8a..c87f29a2aeba 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -396,6 +396,7 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
396extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
397				     u16 *blue, int regno);
398extern void intel_enable_clock_gating(struct drm_device *dev);
399extern void ironlake_disable_rc6(struct drm_device *dev);
399extern void ironlake_enable_drps(struct drm_device *dev);
400extern void ironlake_disable_drps(struct drm_device *dev);
401extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index c5bc4c456baa..2f45de3339bf 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -25,6 +25,7 @@
25 *
26 */
27
28#include <linux/cpufreq.h>
28#include "i915_drv.h"
29#include "intel_drv.h"
30
@@ -1979,3 +1980,515 @@ void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
1979					    pixel_size);
1980}
1981
1983static struct drm_i915_gem_object *
1984intel_alloc_context_page(struct drm_device *dev)
1985{
1986 struct drm_i915_gem_object *ctx;
1987 int ret;
1988
1989 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
1990
1991 ctx = i915_gem_alloc_object(dev, 4096);
1992 if (!ctx) {
1993 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
1994 return NULL;
1995 }
1996
1997 ret = i915_gem_object_pin(ctx, 4096, true);
1998 if (ret) {
1999 DRM_ERROR("failed to pin power context: %d\n", ret);
2000 goto err_unref;
2001 }
2002
2003 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
2004 if (ret) {
2005 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
2006 goto err_unpin;
2007 }
2008
2009 return ctx;
2010
2011err_unpin:
2012 i915_gem_object_unpin(ctx);
2013err_unref:
2014 drm_gem_object_unreference(&ctx->base);
2015 mutex_unlock(&dev->struct_mutex);
2016 return NULL;
2017}
2018
2019bool ironlake_set_drps(struct drm_device *dev, u8 val)
2020{
2021 struct drm_i915_private *dev_priv = dev->dev_private;
2022 u16 rgvswctl;
2023
2024 rgvswctl = I915_READ16(MEMSWCTL);
2025 if (rgvswctl & MEMCTL_CMD_STS) {
2026 DRM_DEBUG("gpu busy, RCS change rejected\n");
2027 return false; /* still busy with another command */
2028 }
2029
2030 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
2031 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
2032 I915_WRITE16(MEMSWCTL, rgvswctl);
2033 POSTING_READ16(MEMSWCTL);
2034
2035 rgvswctl |= MEMCTL_CMD_STS;
2036 I915_WRITE16(MEMSWCTL, rgvswctl);
2037
2038 return true;
2039}
2040
2041void ironlake_enable_drps(struct drm_device *dev)
2042{
2043 struct drm_i915_private *dev_priv = dev->dev_private;
2044 u32 rgvmodectl = I915_READ(MEMMODECTL);
2045 u8 fmax, fmin, fstart, vstart;
2046
2047 /* Enable temp reporting */
2048 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
2049 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
2050
2051 /* 100ms RC evaluation intervals */
2052 I915_WRITE(RCUPEI, 100000);
2053 I915_WRITE(RCDNEI, 100000);
2054
2055 /* Set max/min thresholds to 90ms and 80ms respectively */
2056 I915_WRITE(RCBMAXAVG, 90000);
2057 I915_WRITE(RCBMINAVG, 80000);
2058
2059 I915_WRITE(MEMIHYST, 1);
2060
2061 /* Set up min, max, and cur for interrupt handling */
2062 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
2063 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
2064 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
2065 MEMMODE_FSTART_SHIFT;
2066
2067 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
2068 PXVFREQ_PX_SHIFT;
2069
2070 dev_priv->fmax = fmax; /* IPS callback will increase this */
2071 dev_priv->fstart = fstart;
2072
2073 dev_priv->max_delay = fstart;
2074 dev_priv->min_delay = fmin;
2075 dev_priv->cur_delay = fstart;
2076
2077 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
2078 fmax, fmin, fstart);
2079
2080 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
2081
2082 /*
2083 * Interrupts will be enabled in ironlake_irq_postinstall
2084 */
2085
2086 I915_WRITE(VIDSTART, vstart);
2087 POSTING_READ(VIDSTART);
2088
2089 rgvmodectl |= MEMMODE_SWMODE_EN;
2090 I915_WRITE(MEMMODECTL, rgvmodectl);
2091
2092 if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
2093 DRM_ERROR("stuck trying to change perf mode\n");
2094 msleep(1);
2095
2096 ironlake_set_drps(dev, fstart);
2097
2098 dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
2099 I915_READ(0x112e0);
2100 dev_priv->last_time1 = jiffies_to_msecs(jiffies);
2101 dev_priv->last_count2 = I915_READ(0x112f4);
2102 getrawmonotonic(&dev_priv->last_time2);
2103}
2104
2105void ironlake_disable_drps(struct drm_device *dev)
2106{
2107 struct drm_i915_private *dev_priv = dev->dev_private;
2108 u16 rgvswctl = I915_READ16(MEMSWCTL);
2109
2110 /* Ack interrupts, disable EFC interrupt */
2111 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
2112 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
2113 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
2114 I915_WRITE(DEIIR, DE_PCU_EVENT);
2115 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
2116
2117 /* Go back to the starting frequency */
2118 ironlake_set_drps(dev, dev_priv->fstart);
2119 msleep(1);
2120 rgvswctl |= MEMCTL_CMD_STS;
2121 I915_WRITE(MEMSWCTL, rgvswctl);
2122 msleep(1);
2123
2124}
2125
2126void gen6_set_rps(struct drm_device *dev, u8 val)
2127{
2128 struct drm_i915_private *dev_priv = dev->dev_private;
2129 u32 swreq;
2130
2131 swreq = (val & 0x3ff) << 25;
2132 I915_WRITE(GEN6_RPNSWREQ, swreq);
2133}
2134
2135void gen6_disable_rps(struct drm_device *dev)
2136{
2137 struct drm_i915_private *dev_priv = dev->dev_private;
2138
2139 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
2140 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
2141 I915_WRITE(GEN6_PMIER, 0);
2142 /* Complete PM interrupt masking here doesn't race with the rps work
2143 * item again unmasking PM interrupts because that is using a different
2144 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
2145 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
2146
2147 spin_lock_irq(&dev_priv->rps_lock);
2148 dev_priv->pm_iir = 0;
2149 spin_unlock_irq(&dev_priv->rps_lock);
2150
2151 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2152}
2153
2154int intel_enable_rc6(const struct drm_device *dev)
2155{
2156 /*
2157 * Respect the kernel parameter if it is set
2158 */
2159 if (i915_enable_rc6 >= 0)
2160 return i915_enable_rc6;
2161
2162 /*
2163 * Disable RC6 on Ironlake
2164 */
2165 if (INTEL_INFO(dev)->gen == 5)
2166 return 0;
2167
2168 /* Sorry Haswell, no RC6 for you for now. */
2169 if (IS_HASWELL(dev))
2170 return 0;
2171
2172 /*
2173 * Disable rc6 on Sandybridge
2174 */
2175 if (INTEL_INFO(dev)->gen == 6) {
2176 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
2177 return INTEL_RC6_ENABLE;
2178 }
2179 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
2180 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
2181}
2182
2183void gen6_enable_rps(struct drm_i915_private *dev_priv)
2184{
2185 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
2186 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
2187 u32 pcu_mbox, rc6_mask = 0;
2188 u32 gtfifodbg;
2189 int cur_freq, min_freq, max_freq;
2190 int rc6_mode;
2191 int i;
2192
2193 /* Here begins a magic sequence of register writes to enable
2194 * auto-downclocking.
2195 *
2196 * Perhaps there might be some value in exposing these to
2197 * userspace...
2198 */
2199 I915_WRITE(GEN6_RC_STATE, 0);
2200 mutex_lock(&dev_priv->dev->struct_mutex);
2201
2202 /* Clear the DBG now so we don't confuse earlier errors */
2203 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
2204 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
2205 I915_WRITE(GTFIFODBG, gtfifodbg);
2206 }
2207
2208 gen6_gt_force_wake_get(dev_priv);
2209
2210 /* disable the counters and set deterministic thresholds */
2211 I915_WRITE(GEN6_RC_CONTROL, 0);
2212
2213 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
2214 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
2215 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
2216 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
2217 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
2218
2219 for (i = 0; i < I915_NUM_RINGS; i++)
2220 I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
2221
2222 I915_WRITE(GEN6_RC_SLEEP, 0);
2223 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
2224 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
2225 I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
2226 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
2227
2228 rc6_mode = intel_enable_rc6(dev_priv->dev);
2229 if (rc6_mode & INTEL_RC6_ENABLE)
2230 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
2231
2232 if (rc6_mode & INTEL_RC6p_ENABLE)
2233 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
2234
2235 if (rc6_mode & INTEL_RC6pp_ENABLE)
2236 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
2237
2238 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
2239 (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
2240 (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
2241 (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
2242
2243 I915_WRITE(GEN6_RC_CONTROL,
2244 rc6_mask |
2245 GEN6_RC_CTL_EI_MODE(1) |
2246 GEN6_RC_CTL_HW_ENABLE);
2247
2248 I915_WRITE(GEN6_RPNSWREQ,
2249 GEN6_FREQUENCY(10) |
2250 GEN6_OFFSET(0) |
2251 GEN6_AGGRESSIVE_TURBO);
2252 I915_WRITE(GEN6_RC_VIDEO_FREQ,
2253 GEN6_FREQUENCY(12));
2254
2255 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
2256 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
2257 18 << 24 |
2258 6 << 16);
2259 I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
2260 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
2261 I915_WRITE(GEN6_RP_UP_EI, 100000);
2262 I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
2263 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
2264 I915_WRITE(GEN6_RP_CONTROL,
2265 GEN6_RP_MEDIA_TURBO |
2266 GEN6_RP_MEDIA_HW_MODE |
2267 GEN6_RP_MEDIA_IS_GFX |
2268 GEN6_RP_ENABLE |
2269 GEN6_RP_UP_BUSY_AVG |
2270 GEN6_RP_DOWN_IDLE_CONT);
2271
2272 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
2273 500))
2274 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
2275
2276 I915_WRITE(GEN6_PCODE_DATA, 0);
2277 I915_WRITE(GEN6_PCODE_MAILBOX,
2278 GEN6_PCODE_READY |
2279 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
2280 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
2281 500))
2282 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
2283
2284 min_freq = (rp_state_cap & 0xff0000) >> 16;
2285 max_freq = rp_state_cap & 0xff;
2286 cur_freq = (gt_perf_status & 0xff00) >> 8;
2287
2288 /* Check for overclock support */
2289 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
2290 500))
2291 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
2292 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
2293 pcu_mbox = I915_READ(GEN6_PCODE_DATA);
2294 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
2295 500))
2296 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
2297 if (pcu_mbox & (1<<31)) { /* OC supported */
2298 max_freq = pcu_mbox & 0xff;
2299 DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
2300 }
2301
2302 /* In units of 100MHz */
2303 dev_priv->max_delay = max_freq;
2304 dev_priv->min_delay = min_freq;
2305 dev_priv->cur_delay = cur_freq;
2306
2307 /* requires MSI enabled */
2308 I915_WRITE(GEN6_PMIER,
2309 GEN6_PM_MBOX_EVENT |
2310 GEN6_PM_THERMAL_EVENT |
2311 GEN6_PM_RP_DOWN_TIMEOUT |
2312 GEN6_PM_RP_UP_THRESHOLD |
2313 GEN6_PM_RP_DOWN_THRESHOLD |
2314 GEN6_PM_RP_UP_EI_EXPIRED |
2315 GEN6_PM_RP_DOWN_EI_EXPIRED);
2316 spin_lock_irq(&dev_priv->rps_lock);
2317 WARN_ON(dev_priv->pm_iir != 0);
2318 I915_WRITE(GEN6_PMIMR, 0);
2319 spin_unlock_irq(&dev_priv->rps_lock);
2320 /* enable all PM interrupts */
2321 I915_WRITE(GEN6_PMINTRMSK, 0);
2322
2323 gen6_gt_force_wake_put(dev_priv);
2324 mutex_unlock(&dev_priv->dev->struct_mutex);
2325}
2326
2327void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
2328{
2329 int min_freq = 15;
2330 int gpu_freq, ia_freq, max_ia_freq;
2331 int scaling_factor = 180;
2332
2333 max_ia_freq = cpufreq_quick_get_max(0);
2334 /*
2335 * Default to measured freq if none found, PCU will ensure we don't go
2336 * over
2337 */
2338 if (!max_ia_freq)
2339 max_ia_freq = tsc_khz;
2340
2341 /* Convert from kHz to MHz */
2342 max_ia_freq /= 1000;
2343
2344 mutex_lock(&dev_priv->dev->struct_mutex);
2345
2346 /*
2347 * For each potential GPU frequency, load a ring frequency we'd like
2348 * to use for memory access. We do this by specifying the IA frequency
2349 * the PCU should use as a reference to determine the ring frequency.
2350 */
2351 for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
2352 gpu_freq--) {
2353 int diff = dev_priv->max_delay - gpu_freq;
2354
2355 /*
2356 * For GPU frequencies less than 750MHz, just use the lowest
2357 * ring freq.
2358 */
2359 if (gpu_freq < min_freq)
2360 ia_freq = 800;
2361 else
2362 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
2363 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
2364
2365 I915_WRITE(GEN6_PCODE_DATA,
2366 (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
2367 gpu_freq);
2368 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
2369 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
2370 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
2371 GEN6_PCODE_READY) == 0, 10)) {
2372 DRM_ERROR("pcode write of freq table timed out\n");
2373 continue;
2374 }
2375 }
2376
2377 mutex_unlock(&dev_priv->dev->struct_mutex);
2378}
2379
2380static void ironlake_teardown_rc6(struct drm_device *dev)
2381{
2382 struct drm_i915_private *dev_priv = dev->dev_private;
2383
2384 if (dev_priv->renderctx) {
2385 i915_gem_object_unpin(dev_priv->renderctx);
2386 drm_gem_object_unreference(&dev_priv->renderctx->base);
2387 dev_priv->renderctx = NULL;
2388 }
2389
2390 if (dev_priv->pwrctx) {
2391 i915_gem_object_unpin(dev_priv->pwrctx);
2392 drm_gem_object_unreference(&dev_priv->pwrctx->base);
2393 dev_priv->pwrctx = NULL;
2394 }
2395}
2396
2397void ironlake_disable_rc6(struct drm_device *dev)
2398{
2399 struct drm_i915_private *dev_priv = dev->dev_private;
2400
2401 if (I915_READ(PWRCTXA)) {
2402 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
2403 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
2404 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
2405 50);
2406
2407 I915_WRITE(PWRCTXA, 0);
2408 POSTING_READ(PWRCTXA);
2409
2410 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
2411 POSTING_READ(RSTDBYCTL);
2412 }
2413
2414 ironlake_teardown_rc6(dev);
2415}
2416
2417static int ironlake_setup_rc6(struct drm_device *dev)
2418{
2419 struct drm_i915_private *dev_priv = dev->dev_private;
2420
2421 if (dev_priv->renderctx == NULL)
2422 dev_priv->renderctx = intel_alloc_context_page(dev);
2423 if (!dev_priv->renderctx)
2424 return -ENOMEM;
2425
2426 if (dev_priv->pwrctx == NULL)
2427 dev_priv->pwrctx = intel_alloc_context_page(dev);
2428 if (!dev_priv->pwrctx) {
2429 ironlake_teardown_rc6(dev);
2430 return -ENOMEM;
2431 }
2432
2433 return 0;
2434}
2435
2436void ironlake_enable_rc6(struct drm_device *dev)
2437{
2438 struct drm_i915_private *dev_priv = dev->dev_private;
2439 int ret;
2440
2441 /* rc6 disabled by default due to repeated reports of hanging during
2442 * boot and resume.
2443 */
2444 if (!intel_enable_rc6(dev))
2445 return;
2446
2447 mutex_lock(&dev->struct_mutex);
2448 ret = ironlake_setup_rc6(dev);
2449 if (ret) {
2450 mutex_unlock(&dev->struct_mutex);
2451 return;
2452 }
2453
2454 /*
2455 * GPU can automatically power down the render unit if given a page
2456 * to save state.
2457 */
2458 ret = BEGIN_LP_RING(6);
2459 if (ret) {
2460 ironlake_teardown_rc6(dev);
2461 mutex_unlock(&dev->struct_mutex);
2462 return;
2463 }
2464
2465 OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
2466 OUT_RING(MI_SET_CONTEXT);
2467 OUT_RING(dev_priv->renderctx->gtt_offset |
2468 MI_MM_SPACE_GTT |
2469 MI_SAVE_EXT_STATE_EN |
2470 MI_RESTORE_EXT_STATE_EN |
2471 MI_RESTORE_INHIBIT);
2472 OUT_RING(MI_SUSPEND_FLUSH);
2473 OUT_RING(MI_NOOP);
2474 OUT_RING(MI_FLUSH);
2475 ADVANCE_LP_RING();
2476
2477 /*
2478 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
2479 * does an implicit flush, combined with MI_FLUSH above, it should be
2480 * safe to assume that renderctx is valid
2481 */
2482 ret = intel_wait_ring_idle(LP_RING(dev_priv));
2483 if (ret) {
2484		DRM_ERROR("failed to enable ironlake power savings\n");
2485 ironlake_teardown_rc6(dev);
2486 mutex_unlock(&dev->struct_mutex);
2487 return;
2488 }
2489
2490 I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
2491 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
2492 mutex_unlock(&dev->struct_mutex);
2493}
2494
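
Note on the gen6 PCU handshake: gen6_enable_rps() and gen6_update_ring_freq() above open-code the same mailbox sequence three times — wait for GEN6_PCODE_READY to clear, write GEN6_PCODE_DATA, write the command into GEN6_PCODE_MAILBOX with GEN6_PCODE_READY set, then wait for the ready bit to clear again. A minimal sketch of that pattern as a single helper follows; the helper name, return convention, and error code are assumptions for illustration only and are not part of this patch.

/*
 * Illustrative sketch only (not part of this patch): the mailbox
 * handshake used above for GEN6_PCODE_WRITE_MIN_FREQ_TABLE and
 * GEN6_READ_OC_PARAMS, collapsed into one hypothetical helper.
 */
static int gen6_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
{
	/* Wait for any previous PCU command to complete. */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500)) {
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
		return -ETIMEDOUT;
	}

	/* Post the payload, then the command with the READY bit set. */
	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	/* The PCU clears READY once it has consumed the command. */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500)) {
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
		return -ETIMEDOUT;
	}

	return 0;
}

With such a helper, the min-frequency-table write in gen6_enable_rps() would reduce to gen6_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0), and the overclock query would pair the same handshake with a read of GEN6_PCODE_DATA afterwards.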