author    Daniel Vetter <daniel.vetter@ffwll.ch>  2013-07-04 17:35:29 -0400
committer Daniel Vetter <daniel.vetter@ffwll.ch>  2013-07-11 08:36:49 -0400
commit    c7113cc35f59b46b301367b947c4f71ac8f0d5bb (patch)
tree      30a5b3863a87dac9b5552742516c0fbdfec27127 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent    59cdb63d529c81fc8ac0620ad50f29d5fb4411c9 (diff)
drm/i915: unify ring irq refcounts (again)
With the simplified locking there's no reason any more to keep the
refcounts separate.
v2: Re-add the lost comment that ring->irq_refcount is protected by
dev_priv->irq_lock.
Reviewed-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
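
All six get/put pairs touched below follow the same enable-on-first-get /
disable-on-last-put discipline, with the refcount guarded by
dev_priv->irq_lock. A minimal, self-contained C sketch of that pattern
(the ring struct, function names, and the pthread mutex standing in for
the kernel spinlock are illustrative, not the driver's actual API):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins: the real driver keeps irq_refcount in
     * struct intel_ring_buffer and the lock in dev_priv->irq_lock. */
    struct ring {
            unsigned int irq_refcount;      /* protected by irq_lock */
            uint32_t irq_enable_mask;
    };

    static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint32_t irq_mask = ~0u;         /* all sources masked */

    static void ring_get_irq(struct ring *ring)
    {
            pthread_mutex_lock(&irq_lock);
            if (ring->irq_refcount++ == 0)  /* first user: unmask */
                    irq_mask &= ~ring->irq_enable_mask;
            pthread_mutex_unlock(&irq_lock);
    }

    static void ring_put_irq(struct ring *ring)
    {
            pthread_mutex_lock(&irq_lock);
            if (--ring->irq_refcount == 0)  /* last user: mask again */
                    irq_mask |= ring->irq_enable_mask;
            pthread_mutex_unlock(&irq_lock);
    }

    int main(void)
    {
            struct ring rcs = { .irq_refcount = 0, .irq_enable_mask = 1u };

            ring_get_irq(&rcs);             /* unmasks bit 0 */
            ring_get_irq(&rcs);             /* refcount 2, no change */
            ring_put_irq(&rcs);             /* refcount 1, still unmasked */
            ring_put_irq(&rcs);             /* refcount 0, masked again */
            printf("final mask: %#x\n", irq_mask);
            return 0;
    }

Before this patch the counter was split into irq_refcount.gt (the GT
rings) and irq_refcount.pm (the Haswell vebox, whose interrupt is masked
via GEN6_PMIMR); once every path takes the same dev_priv->irq_lock, a
single counter covers both.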
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
 drivers/gpu/drm/i915/intel_ringbuffer.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 33a74a803008..23ffe1d06220 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -821,7 +821,7 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
                 return false;
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
-        if (ring->irq_refcount.gt++ == 0) {
+        if (ring->irq_refcount++ == 0) {
                 dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
                 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
                 POSTING_READ(GTIMR);
@@ -839,7 +839,7 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
         unsigned long flags;
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
-        if (--ring->irq_refcount.gt == 0) {
+        if (--ring->irq_refcount == 0) {
                 dev_priv->gt_irq_mask |= ring->irq_enable_mask;
                 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
                 POSTING_READ(GTIMR);
@@ -858,7 +858,7 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
                 return false;
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
-        if (ring->irq_refcount.gt++ == 0) {
+        if (ring->irq_refcount++ == 0) {
                 dev_priv->irq_mask &= ~ring->irq_enable_mask;
                 I915_WRITE(IMR, dev_priv->irq_mask);
                 POSTING_READ(IMR);
@@ -876,7 +876,7 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
         unsigned long flags;
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
-        if (--ring->irq_refcount.gt == 0) {
+        if (--ring->irq_refcount == 0) {
                 dev_priv->irq_mask |= ring->irq_enable_mask;
                 I915_WRITE(IMR, dev_priv->irq_mask);
                 POSTING_READ(IMR);
@@ -895,7 +895,7 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)
                 return false;
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
-        if (ring->irq_refcount.gt++ == 0) {
+        if (ring->irq_refcount++ == 0) {
                 dev_priv->irq_mask &= ~ring->irq_enable_mask;
                 I915_WRITE16(IMR, dev_priv->irq_mask);
                 POSTING_READ16(IMR);
@@ -913,7 +913,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
         unsigned long flags;
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
-        if (--ring->irq_refcount.gt == 0) {
+        if (--ring->irq_refcount == 0) {
                 dev_priv->irq_mask |= ring->irq_enable_mask;
                 I915_WRITE16(IMR, dev_priv->irq_mask);
                 POSTING_READ16(IMR);
@@ -1006,7 +1006,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
         gen6_gt_force_wake_get(dev_priv);
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
-        if (ring->irq_refcount.gt++ == 0) {
+        if (ring->irq_refcount++ == 0) {
                 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
                         I915_WRITE_IMR(ring,
                                        ~(ring->irq_enable_mask |
@@ -1030,7 +1030,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
         unsigned long flags;
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
-        if (--ring->irq_refcount.gt == 0) {
+        if (--ring->irq_refcount == 0) {
                 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
                         I915_WRITE_IMR(ring,
                                        ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
@@ -1056,7 +1056,7 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring)
                 return false;
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
-        if (ring->irq_refcount.pm++ == 0) {
+        if (ring->irq_refcount++ == 0) {
                 u32 pm_imr = I915_READ(GEN6_PMIMR);
                 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
                 I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask);
@@ -1078,7 +1078,7 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
                 return;
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
-        if (--ring->irq_refcount.pm == 0) {
+        if (--ring->irq_refcount == 0) {
                 u32 pm_imr = I915_READ(GEN6_PMIMR);
                 I915_WRITE_IMR(ring, ~0);
                 I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask);
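
After unification, the refcount handling in all six get/put pairs is
identical; the only per-generation difference left in these paths is
which mask register gets written: GTIMR on gen5, the global IMR on i9xx
(16-bit on i8xx), the per-ring IMR on gen6, and the per-ring IMR plus
GEN6_PMIMR on the Haswell vebox.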