aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/i915/i915_irq.c
diff options
context:
space:
mode:
authorImre Deak <imre.deak@intel.com>2014-11-19 08:30:04 -0500
committerDaniel Vetter <daniel.vetter@ffwll.ch>2014-11-19 09:03:23 -0500
commitd4d70aa5960a7fbf3d887663f144c324a10619ba (patch)
tree834e941576feea0a00a2fc13a927c69a66dbe621 /drivers/gpu/drm/i915/i915_irq.c
parent3cc134e3ee09055d5a87193fc7eb0ecf4a59eaa1 (diff)
drm/i915: sanitize rps irq disabling
When disabling the RPS interrupts there is a tricky dependency between the thread disabling the interrupts, the RPS interrupt handler and the corresponding RPS work. The RPS work can reenable the interrupts, so there is no straightforward order in the disabling thread to (1) make sure that any RPS work is flushed and to (2) disable all RPS interrupts. Currently this is solved by masking the interrupts using two separate mask registers (first level display IMR and PM IMR) and doing the disabling when all first level interrupts are disabled. This works, but the requirement to run with all first level interrupts disabled is unnecessary making the suspend / unload time ordering of RPS disabling wrt. other uninitialization steps difficult and error prone. Removing this restriction allows us to disable RPS early during suspend / unload and forget about it for the rest of the sequence. By adding a more explicit method for avoiding the above race, it also becomes easier to prove its correctness. Finally currently we can hit the WARN in snb_update_pm_irq(), when a final RPS work runs with the first level interrupts already disabled. This won't lead to any problem (due to the separate interrupt masks), but with the change in this and the next patch we can get rid of the WARN, while leaving it in place for other scenarios. To address the above points, add a new RPS interrupts_enabled flag and use this during RPS disabling to avoid requeuing the RPS work and reenabling of the RPS interrupts. Since the interrupt disabling happens now in intel_suspend_gt_powersave(), we will disable RPS interrupts explicitly during suspend (and not just through the first level mask), but there is no problem doing so, it's also more consistent and allows us to unify more of the RPS disabling during suspend and unload time in the next patch. 
v2/v3: - rebase on patch "drm/i915: move rps irq disable one level up" in the patchset Signed-off-by: Imre Deak <imre.deak@intel.com> Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/i915_irq.c')
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c23
1 files changed, 16 insertions, 7 deletions
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 56b30534176a..283756fe48d3 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -274,6 +274,7 @@ void gen6_enable_rps_interrupts(struct drm_device *dev)
274 spin_lock_irq(&dev_priv->irq_lock); 274 spin_lock_irq(&dev_priv->irq_lock);
275 WARN_ON(dev_priv->rps.pm_iir); 275 WARN_ON(dev_priv->rps.pm_iir);
276 WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); 276 WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
277 dev_priv->rps.interrupts_enabled = true;
277 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 278 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
278 spin_unlock_irq(&dev_priv->irq_lock); 279 spin_unlock_irq(&dev_priv->irq_lock);
279} 280}
@@ -282,14 +283,16 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
282{ 283{
283 struct drm_i915_private *dev_priv = dev->dev_private; 284 struct drm_i915_private *dev_priv = dev->dev_private;
284 285
286 spin_lock_irq(&dev_priv->irq_lock);
287 dev_priv->rps.interrupts_enabled = false;
288 spin_unlock_irq(&dev_priv->irq_lock);
289
290 cancel_work_sync(&dev_priv->rps.work);
291
285 I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ? 292 I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
286 ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0); 293 ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);
287 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) & 294 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
288 ~dev_priv->pm_rps_events); 295 ~dev_priv->pm_rps_events);
289 /* Complete PM interrupt masking here doesn't race with the rps work
290 * item again unmasking PM interrupts because that is using a different
291 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
292 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
293 296
294 spin_lock_irq(&dev_priv->irq_lock); 297 spin_lock_irq(&dev_priv->irq_lock);
295 dev_priv->rps.pm_iir = 0; 298 dev_priv->rps.pm_iir = 0;
@@ -1135,6 +1138,11 @@ static void gen6_pm_rps_work(struct work_struct *work)
1135 int new_delay, adj; 1138 int new_delay, adj;
1136 1139
1137 spin_lock_irq(&dev_priv->irq_lock); 1140 spin_lock_irq(&dev_priv->irq_lock);
1141 /* Speed up work cancelation during disabling rps interrupts. */
1142 if (!dev_priv->rps.interrupts_enabled) {
1143 spin_unlock_irq(&dev_priv->irq_lock);
1144 return;
1145 }
1138 pm_iir = dev_priv->rps.pm_iir; 1146 pm_iir = dev_priv->rps.pm_iir;
1139 dev_priv->rps.pm_iir = 0; 1147 dev_priv->rps.pm_iir = 0;
1140 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ 1148 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
@@ -1708,11 +1716,12 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1708 1716
1709 if (pm_iir & dev_priv->pm_rps_events) { 1717 if (pm_iir & dev_priv->pm_rps_events) {
1710 spin_lock(&dev_priv->irq_lock); 1718 spin_lock(&dev_priv->irq_lock);
1711 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1712 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1719 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1720 if (dev_priv->rps.interrupts_enabled) {
1721 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1722 queue_work(dev_priv->wq, &dev_priv->rps.work);
1723 }
1713 spin_unlock(&dev_priv->irq_lock); 1724 spin_unlock(&dev_priv->irq_lock);
1714
1715 queue_work(dev_priv->wq, &dev_priv->rps.work);
1716 } 1725 }
1717 1726
1718 if (INTEL_INFO(dev_priv)->gen >= 8) 1727 if (INTEL_INFO(dev_priv)->gen >= 8)