aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2014-03-28 04:03:34 -0400
committerDaniel Vetter <daniel.vetter@ffwll.ch>2014-03-31 04:46:35 -0400
commit2876ce734417b1893d48e86c9ade9100e1fcc297 (patch)
treee076de8373d6cd6eca60fc3b5553ff92826d328f /drivers/gpu
parenteb64cad1c13a22cd4f3b061720c71f35e44eec20 (diff)
drm/i915: Mask PM/RPS interrupt generation based on activity
The speculation is that we can conserve more power by masking off the interrupts at source (PMINTRMSK) rather than filtering them by the up/down thresholds (RPINTLIM). We can select which events we know will be active based on the current frequency versus our imposed range, i.e. if at minimum, we know we will not want to generate any more down-interrupts and vice versa. v2: We only need the TIMEOUT when above min frequency. v3: Tweak VLV at the same time Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: Deepak S <deepak.s@linux.intel.com> Reviewed-by: Deepak S <deepak.s@linux.intel.com> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c41
1 file changed, 25 insertions, 16 deletions
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index b6291835fb16..e9df08a55441 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3006,6 +3006,24 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
3006 dev_priv->rps.last_adj = 0; 3006 dev_priv->rps.last_adj = 0;
3007} 3007}
3008 3008
3009static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
3010{
3011 u32 mask = 0;
3012
3013 if (val > dev_priv->rps.min_freq_softlimit)
3014 mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
3015 if (val < dev_priv->rps.max_freq_softlimit)
3016 mask |= GEN6_PM_RP_UP_THRESHOLD;
3017
3018 /* IVB and SNB hard hangs on looping batchbuffer
3019 * if GEN6_PM_UP_EI_EXPIRED is masked.
3020 */
3021 if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
3022 mask |= GEN6_PM_RP_UP_EI_EXPIRED;
3023
3024 return ~mask;
3025}
3026
3009/* gen6_set_rps is called to update the frequency request, but should also be 3027/* gen6_set_rps is called to update the frequency request, but should also be
3010 * called when the range (min_delay and max_delay) is modified so that we can 3028 * called when the range (min_delay and max_delay) is modified so that we can
3011 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */ 3029 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
@@ -3037,6 +3055,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
3037 * until we hit the minimum or maximum frequencies. 3055 * until we hit the minimum or maximum frequencies.
3038 */ 3056 */
3039 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val)); 3057 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
3058 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3040 3059
3041 POSTING_READ(GEN6_RPNSWREQ); 3060 POSTING_READ(GEN6_RPNSWREQ);
3042 3061
@@ -3089,6 +3108,9 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
3089 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, 3108 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
3090 I915_READ(VLV_GTLC_SURVIVABILITY_REG) & 3109 I915_READ(VLV_GTLC_SURVIVABILITY_REG) &
3091 ~VLV_GFX_CLK_FORCE_ON_BIT); 3110 ~VLV_GFX_CLK_FORCE_ON_BIT);
3111
3112 I915_WRITE(GEN6_PMINTRMSK,
3113 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
3092} 3114}
3093 3115
3094void gen6_rps_idle(struct drm_i915_private *dev_priv) 3116void gen6_rps_idle(struct drm_i915_private *dev_priv)
@@ -3134,13 +3156,12 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
3134 dev_priv->rps.cur_freq, 3156 dev_priv->rps.cur_freq,
3135 vlv_gpu_freq(dev_priv, val), val); 3157 vlv_gpu_freq(dev_priv, val), val);
3136 3158
3137 if (val == dev_priv->rps.cur_freq) 3159 if (val != dev_priv->rps.cur_freq)
3138 return; 3160 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3139 3161
3140 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); 3162 I915_WRITE(GEN6_PMINTRMSK, val);
3141 3163
3142 dev_priv->rps.cur_freq = val; 3164 dev_priv->rps.cur_freq = val;
3143
3144 trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val)); 3165 trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
3145} 3166}
3146 3167
@@ -3218,24 +3239,12 @@ int intel_enable_rc6(const struct drm_device *dev)
3218static void gen6_enable_rps_interrupts(struct drm_device *dev) 3239static void gen6_enable_rps_interrupts(struct drm_device *dev)
3219{ 3240{
3220 struct drm_i915_private *dev_priv = dev->dev_private; 3241 struct drm_i915_private *dev_priv = dev->dev_private;
3221 u32 enabled_intrs;
3222 3242
3223 spin_lock_irq(&dev_priv->irq_lock); 3243 spin_lock_irq(&dev_priv->irq_lock);
3224 WARN_ON(dev_priv->rps.pm_iir); 3244 WARN_ON(dev_priv->rps.pm_iir);
3225 snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 3245 snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
3226 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events); 3246 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
3227 spin_unlock_irq(&dev_priv->irq_lock); 3247 spin_unlock_irq(&dev_priv->irq_lock);
3228
3229 /* only unmask PM interrupts we need. Mask all others. */
3230 enabled_intrs = dev_priv->pm_rps_events;
3231
3232 /* IVB and SNB hard hangs on looping batchbuffer
3233 * if GEN6_PM_UP_EI_EXPIRED is masked.
3234 */
3235 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
3236 enabled_intrs |= GEN6_PM_RP_UP_EI_EXPIRED;
3237
3238 I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
3239} 3248}
3240 3249
3241static void gen8_enable_rps(struct drm_device *dev) 3250static void gen8_enable_rps(struct drm_device *dev)