author	Chris Wilson <chris@chris-wilson.co.uk>	2015-04-07 11:20:32 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2015-04-10 02:56:02 -0400
commit	1854d5ca0dd7a9fc11243ff220a3e93fce2b4d3e (patch)
tree	6a83912410f343ae5abd1a03aa37a454b9ca2a24 /drivers/gpu/drm/i915/intel_pm.c
parent	6ad790c0f5ac55fd13f322c23519f0d6f0721864 (diff)
drm/i915: Deminish contribution of wait-boosting from clients
With boosting for missed pageflips, we have a much stronger indication of when we need to (temporarily) boost GPU frequency to ensure smooth delivery of frames. So now only allow each client to perform one RPS boost in each period of GPU activity due to stalling on results.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Deepak S <deepak.s@linux.intel.com>
Reviewed-by: Deepak S <deepak.s@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
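The gating the commit message describes amounts to a per-client "already boosted" marker that is consumed by the first boost and re-armed only when the GPU goes idle; in the patch itself that marker is list membership on dev_priv->rps.clients. A minimal, illustrative sketch of the same pattern follows. The names here (struct gpu, struct client, client_boost, gpu_idle) are hypothetical and are not the driver's; they are shown only to make the one-boost-per-busy-period rule concrete.

/*
 * Illustrative sketch only: one RPS boost per client per busy period.
 * These types and helpers are hypothetical and do not exist in i915;
 * the real patch uses list membership on dev_priv->rps.clients as the
 * "already boosted" marker instead of a bool.
 */
#include <stdbool.h>
#include <stddef.h>

struct gpu {
	bool busy;
	unsigned int cur_freq, max_freq;
};

struct client {
	bool boosted_this_period;	/* consumed by the first boost */
	unsigned int rps_boosts;	/* per-client statistic, as in the patch */
};

/* Boost only if this client has not boosted since the GPU last went idle. */
static void client_boost(struct gpu *gpu, struct client *client)
{
	if (gpu->busy &&
	    gpu->cur_freq < gpu->max_freq &&
	    (client == NULL || !client->boosted_this_period)) {
		gpu->cur_freq = gpu->max_freq;
		if (client != NULL) {
			client->boosted_this_period = true;
			client->rps_boosts++;
		}
	}
}

/* On idle, every client regains its single boost for the next busy period. */
static void gpu_idle(struct client *clients, size_t n)
{
	for (size_t i = 0; i < n; i++)
		clients[i].boosted_this_period = false;
}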
Diffstat (limited to 'drivers/gpu/drm/i915/intel_pm.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_pm.c	18
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index acf1a318fda9..1ab9e897994a 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4091,10 +4091,14 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
 		dev_priv->rps.last_adj = 0;
 		I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
 	}
+
+	while (!list_empty(&dev_priv->rps.clients))
+		list_del_init(dev_priv->rps.clients.next);
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
-void gen6_rps_boost(struct drm_i915_private *dev_priv)
+void gen6_rps_boost(struct drm_i915_private *dev_priv,
+		    struct drm_i915_file_private *file_priv)
 {
 	u32 val;
 
@@ -4102,9 +4106,16 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv)
 	val = dev_priv->rps.max_freq_softlimit;
 	if (dev_priv->rps.enabled &&
 	    dev_priv->mm.busy &&
-	    dev_priv->rps.cur_freq < val) {
+	    dev_priv->rps.cur_freq < val &&
+	    (file_priv == NULL || list_empty(&file_priv->rps_boost))) {
 		intel_set_rps(dev_priv->dev, val);
 		dev_priv->rps.last_adj = 0;
+
+		if (file_priv != NULL) {
+			list_add(&file_priv->rps_boost, &dev_priv->rps.clients);
+			file_priv->rps_boosts++;
+		} else
+			dev_priv->rps.boosts++;
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
@@ -6782,7 +6793,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
 	struct request_boost *boost = container_of(work, struct request_boost, work);
 
 	if (!i915_gem_request_completed(boost->rq, true))
-		gen6_rps_boost(to_i915(boost->rq->ring->dev));
+		gen6_rps_boost(to_i915(boost->rq->ring->dev), NULL);
 
 	i915_gem_request_unreference__unlocked(boost->rq);
 	kfree(boost);
@@ -6815,6 +6826,7 @@ void intel_pm_setup(struct drm_device *dev)
 
 	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
 			  intel_gen6_powersave_work);
+	INIT_LIST_HEAD(&dev_priv->rps.clients);
 
 	dev_priv->pm.suspended = false;
 }
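Read together, the hunks above give the new dev_priv->rps.clients list a simple lifecycle; the condensed view below is assembled from the diff for orientation and is not a verbatim excerpt:

	intel_pm_setup():  INIT_LIST_HEAD(&dev_priv->rps.clients)           /* list starts empty */
	gen6_rps_boost():  if (file_priv && list_empty(&file_priv->rps_boost))
	                           list_add(&file_priv->rps_boost, &dev_priv->rps.clients)
	                   /* a client already on the list is skipped */
	gen6_rps_idle():   list_del_init() each entry on rps.clients         /* re-arm all clients */

The diffstat is limited to intel_pm.c, so the call sites that pass a non-NULL file_priv are not shown here; the only caller visible in this diff, __intel_rps_boost_work(), passes NULL and therefore falls back to the unthrottled dev_priv->rps.boosts accounting.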