path: root/drivers/gpu
author	Chris Wilson <chris@chris-wilson.co.uk>	2011-01-04 17:22:56 -0500
committer	Chris Wilson <chris@chris-wilson.co.uk>	2011-01-11 15:43:57 -0500
commit	01a03331e5fe91861937f8b8e72c259f5e9eae67 (patch)
tree	04907482e03da7bafae301778104a13610a38076 /drivers/gpu
parent	9862e600cef87de0e301bad7d1435b87e03ea84d (diff)
drm/i915/ringbuffer: Simplify the ring irq refcounting
... and move it under the spinlock to gain the appropriate memory barriers.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=32752
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
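For illustration, the locking pattern the patch adopts can be sketched outside the kernel: a plain reference count that is only read and written while holding the same lock that guards the enable/disable state, so the lock's acquire/release ordering supplies the memory barriers the message refers to. The sketch below is a minimal stand-alone model, not i915 code: fake_ring, fake_ring_get_irq and the pthread mutex are made-up stand-ins for the ring, its irq_get/irq_put hooks and dev_priv->irq_lock.

/* Stand-alone sketch (assumed names, not i915 code): the refcount and the
 * enable/disable side effect are both manipulated under one lock, so no
 * separate atomic or explicit barrier is needed. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_ring {
	pthread_mutex_t lock;      /* stand-in for dev_priv->irq_lock */
	unsigned int irq_refcount; /* plain counter, only touched under lock */
	bool irq_enabled;          /* stand-in for the hardware enable bit */
};

static void fake_ring_get_irq(struct fake_ring *ring)
{
	pthread_mutex_lock(&ring->lock);
	if (ring->irq_refcount++ == 0)   /* first user enables */
		ring->irq_enabled = true;
	pthread_mutex_unlock(&ring->lock);
}

static void fake_ring_put_irq(struct fake_ring *ring)
{
	pthread_mutex_lock(&ring->lock);
	if (--ring->irq_refcount == 0)   /* last user disables */
		ring->irq_enabled = false;
	pthread_mutex_unlock(&ring->lock);
}

int main(void)
{
	struct fake_ring ring = { .lock = PTHREAD_MUTEX_INITIALIZER };

	fake_ring_get_irq(&ring);
	fake_ring_get_irq(&ring);
	fake_ring_put_irq(&ring);
	printf("refcount=%u enabled=%d\n", ring.irq_refcount, ring.irq_enabled);
	fake_ring_put_irq(&ring);
	printf("refcount=%u enabled=%d\n", ring.irq_refcount, ring.irq_enabled);
	return 0;
}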
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	62
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.h	2
2 files changed, 25 insertions, 39 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 3bff7fb72341..13cad981713b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -521,22 +521,20 @@ static bool
 render_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	if (!dev->irq_enabled)
 		return false;
 
-	if (atomic_inc_return(&ring->irq_refcount) == 1) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	spin_lock(&dev_priv->irq_lock);
+	if (ring->irq_refcount++ == 0) {
 		if (HAS_PCH_SPLIT(dev))
 			ironlake_enable_irq(dev_priv,
 					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
 		else
 			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
+	spin_unlock(&dev_priv->irq_lock);
 
 	return true;
 }
@@ -545,20 +543,18 @@ static void
 render_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (atomic_dec_and_test(&ring->irq_refcount)) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	spin_lock(&dev_priv->irq_lock);
+	if (--ring->irq_refcount == 0) {
 		if (HAS_PCH_SPLIT(dev))
 			ironlake_disable_irq(dev_priv,
 					     GT_USER_INTERRUPT |
 					     GT_PIPE_NOTIFY);
 		else
 			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
+	spin_unlock(&dev_priv->irq_lock);
 }
 
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@@ -619,18 +615,15 @@ static bool
 ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	if (!dev->irq_enabled)
 		return false;
 
-	if (atomic_inc_return(&ring->irq_refcount) == 1) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	spin_lock(&dev_priv->irq_lock);
+	if (ring->irq_refcount++ == 0)
 		ironlake_enable_irq(dev_priv, flag);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-	}
+	spin_unlock(&dev_priv->irq_lock);
 
 	return true;
 }
@@ -639,35 +632,30 @@ static void
 ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (atomic_dec_and_test(&ring->irq_refcount)) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	spin_lock(&dev_priv->irq_lock);
+	if (--ring->irq_refcount == 0)
 		ironlake_disable_irq(dev_priv, flag);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-	}
+	spin_unlock(&dev_priv->irq_lock);
 }
 
 static bool
 gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	if (!dev->irq_enabled)
 		return false;
 
-	if (atomic_inc_return(&ring->irq_refcount) == 1) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	spin_lock(&dev_priv->irq_lock);
+	if (ring->irq_refcount++ == 0) {
 		ring->irq_mask &= ~rflag;
 		I915_WRITE_IMR(ring, ring->irq_mask);
 		ironlake_enable_irq(dev_priv, gflag);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
+	spin_unlock(&dev_priv->irq_lock);
 
 	return true;
 }
@@ -676,17 +664,15 @@ static void
 gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (atomic_dec_and_test(&ring->irq_refcount)) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	spin_lock(&dev_priv->irq_lock);
+	if (--ring->irq_refcount == 0) {
 		ring->irq_mask |= rflag;
 		I915_WRITE_IMR(ring, ring->irq_mask);
 		ironlake_disable_irq(dev_priv, gflag);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
+	spin_unlock(&dev_priv->irq_lock);
 }
 
 static bool
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 9b134b8643cb..6b1d9a5a7d07 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -55,11 +55,11 @@ struct intel_ring_buffer {
 	int		effective_size;
 	struct intel_hw_status_page status_page;
 
+	u32		irq_refcount;
 	u32		irq_mask;
 	u32		irq_seqno;		/* last seq seem at irq time */
 	u32		waiting_seqno;
 	u32		sync_seqno[I915_NUM_RINGS-1];
-	atomic_t	irq_refcount;
 	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
 	void		(*irq_put)(struct intel_ring_buffer *ring);
 