aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/i915/intel_ringbuffer.c
diff options
context:
space:
mode:
authorDaniel Vetter <daniel.vetter@ffwll.ch>2013-07-04 17:35:28 -0400
committerDaniel Vetter <daniel.vetter@ffwll.ch>2013-07-11 08:36:43 -0400
commit59cdb63d529c81fc8ac0620ad50f29d5fb4411c9 (patch)
tree18e20e7e3a863d18ca30c0fce9853657fb86390e /drivers/gpu/drm/i915/intel_ringbuffer.c
parent2adbee62e00d869a30cb93ea2269e5ea26a9bbc4 (diff)
drm/i915: kill dev_priv->rps.lock
Now that the rps interrupt locking isn't clearly separated (at least conceptually) from all the other interrupt locking having a different lock stopped making sense: It protects much more than just the rps workqueue it started out with. But with the addition of VECS the separation started to blur and resulted in some more complex locking for the ring interrupt refcount. With this we can (again) unify the ringbuffer irq refcounts without causing a massive confusion, but that's for the next patch. v2: Explain better why the rps.lock once made sense and why no longer, requested by Ben. Reviewed-by: Ben Widawsky <ben@bwidawsk.net> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c8
1 files changed, 4 insertions, 4 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 54495df2403e..33a74a803008 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1055,14 +1055,14 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring)
1055 if (!dev->irq_enabled) 1055 if (!dev->irq_enabled)
1056 return false; 1056 return false;
1057 1057
1058 spin_lock_irqsave(&dev_priv->rps.lock, flags); 1058 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1059 if (ring->irq_refcount.pm++ == 0) { 1059 if (ring->irq_refcount.pm++ == 0) {
1060 u32 pm_imr = I915_READ(GEN6_PMIMR); 1060 u32 pm_imr = I915_READ(GEN6_PMIMR);
1061 I915_WRITE_IMR(ring, ~ring->irq_enable_mask); 1061 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1062 I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask); 1062 I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask);
1063 POSTING_READ(GEN6_PMIMR); 1063 POSTING_READ(GEN6_PMIMR);
1064 } 1064 }
1065 spin_unlock_irqrestore(&dev_priv->rps.lock, flags); 1065 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1066 1066
1067 return true; 1067 return true;
1068} 1068}
@@ -1077,14 +1077,14 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
1077 if (!dev->irq_enabled) 1077 if (!dev->irq_enabled)
1078 return; 1078 return;
1079 1079
1080 spin_lock_irqsave(&dev_priv->rps.lock, flags); 1080 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1081 if (--ring->irq_refcount.pm == 0) { 1081 if (--ring->irq_refcount.pm == 0) {
1082 u32 pm_imr = I915_READ(GEN6_PMIMR); 1082 u32 pm_imr = I915_READ(GEN6_PMIMR);
1083 I915_WRITE_IMR(ring, ~0); 1083 I915_WRITE_IMR(ring, ~0);
1084 I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask); 1084 I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask);
1085 POSTING_READ(GEN6_PMIMR); 1085 POSTING_READ(GEN6_PMIMR);
1086 } 1086 }
1087 spin_unlock_irqrestore(&dev_priv->rps.lock, flags); 1087 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1088} 1088}
1089 1089
1090static int 1090static int