path: root/drivers/gpu/drm/i915/intel_ringbuffer.c
author	Ben Widawsky <ben@bwidawsk.net>	2013-05-28 22:22:28 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2013-05-31 14:54:18 -0400
commit	aeb0659338793746b8a4e482fa588ba1dd9ee559 (patch)
tree	6a486fddb36824e36e38179040412974bac3fe89 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent	4848405cced3b46f4ec7d404b8ed5873171ae10a (diff)
drm/i915: Convert irq_refounct to struct
It's overkill on older gens, but it's useful for newer gens.

Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
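The struct itself is declared in intel_ringbuffer.h, which this diffstat does not cover; the hunks below only show the .gt member being used. A minimal sketch of what the converted field looks like, assuming the struct simply wraps the old counter so that newer gens can grow additional per-source counts later:

/* Sketch only -- the real declaration lives in intel_ringbuffer.h and is not
 * part of this diffstat; anything beyond the .gt member is an assumption. */
typedef unsigned int u32;	/* kernel typedef, repeated to keep the sketch standalone */

struct intel_ring_buffer {
	/* ... other members elided ... */

	/* Was: u32 irq_refcount;
	 * Now a struct, so newer gens can track further interrupt sources
	 * alongside the GT count. */
	struct {
		u32 gt;		/* protected by dev_priv->irq_lock */
	} irq_refcount;
};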
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 89dfc63677ad..c7a89bb051a0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -795,7 +795,7 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
+	if (ring->irq_refcount.gt++ == 0) {
 		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
@@ -813,7 +813,7 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
+	if (--ring->irq_refcount.gt == 0) {
 		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
@@ -832,7 +832,7 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
+	if (ring->irq_refcount.gt++ == 0) {
 		dev_priv->irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
@@ -850,7 +850,7 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
+	if (--ring->irq_refcount.gt == 0) {
 		dev_priv->irq_mask |= ring->irq_enable_mask;
 		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
@@ -869,7 +869,7 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
+	if (ring->irq_refcount.gt++ == 0) {
 		dev_priv->irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE16(IMR, dev_priv->irq_mask);
 		POSTING_READ16(IMR);
@@ -887,7 +887,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
+	if (--ring->irq_refcount.gt == 0) {
 		dev_priv->irq_mask |= ring->irq_enable_mask;
 		I915_WRITE16(IMR, dev_priv->irq_mask);
 		POSTING_READ16(IMR);
@@ -980,7 +980,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 	gen6_gt_force_wake_get(dev_priv);
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
+	if (ring->irq_refcount.gt++ == 0) {
 		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
 						GEN6_RENDER_L3_PARITY_ERROR));
@@ -1003,7 +1003,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
+	if (--ring->irq_refcount.gt == 0) {
 		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
 		else
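For context, this patch does not change the pattern the hooks above implement: the ring's interrupt source is unmasked only on the 0 -> 1 transition of the per-ring counter and masked again on the 1 -> 0 transition, with the counter now living in the .gt member. A standalone sketch of that pattern, with kernel types, registers and locking stubbed out (all names here are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

struct ring {
	struct {
		uint32_t gt;	/* GT interrupt users; in the driver, protected by dev_priv->irq_lock */
	} irq_refcount;
	uint32_t irq_enable_mask;
};

/* Unmask the ring's interrupt source only for the first user (0 -> 1). */
static void ring_get_irq(struct ring *ring, uint32_t *imr)
{
	if (ring->irq_refcount.gt++ == 0)
		*imr &= ~ring->irq_enable_mask;	/* stands in for I915_WRITE(GTIMR/IMR, ...) */
}

/* Mask it again only when the last user drops its reference (1 -> 0). */
static void ring_put_irq(struct ring *ring, uint32_t *imr)
{
	if (--ring->irq_refcount.gt == 0)
		*imr |= ring->irq_enable_mask;
}

int main(void)
{
	struct ring r = { .irq_enable_mask = 0x1 };
	uint32_t imr = ~0u;		/* everything masked */

	ring_get_irq(&r, &imr);		/* first user: bit 0 unmasked */
	ring_get_irq(&r, &imr);		/* second user: no register change */
	ring_put_irq(&r, &imr);
	ring_put_irq(&r, &imr);		/* last user gone: bit 0 masked again */
	printf("final mask: 0x%08x\n", imr);
	return 0;
}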