author     Chris Wilson <chris@chris-wilson.co.uk>    2011-01-05 05:32:24 -0500
committer  Chris Wilson <chris@chris-wilson.co.uk>    2011-01-11 15:43:58 -0500
commit     0dc79fb2a36efcadbb39bd8b28933d8aa40408b1
tree       cc82a951b6608d0be2b224cc1e75cbe6340b8aac
parent     01a03331e5fe91861937f8b8e72c259f5e9eae67
drm/i915: Make the ring IMR handling private
As the IMR for the USER interrupts is not modified elsewhere, we can
separate the spinlock used for it from that of hpd and pipestats.
Those two IMRs are manipulated under an IRQ and so need heavier locking.
Reported-and-tested-by: Alexey Fisher <bug-track@fisher-privat.net>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
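
For context, here is a minimal sketch of the locking split described above, using hypothetical example_* names rather than the real i915 structures: the ring's USER-interrupt IMR is only touched from process context, so a private per-ring spinlock taken with a plain spin_lock() is enough, while the hpd and pipestat IMRs are also written from the interrupt handler and therefore stay under the device-wide lock with interrupts disabled.

#include <linux/spinlock.h>
#include <linux/types.h>

struct example_ring {
        spinlock_t irq_lock;    /* private: never taken from the IRQ handler */
        u32 irq_refcount;
        u32 irq_mask;
};

struct example_dev_priv {
        spinlock_t irq_lock;    /* shared with the IRQ handler (hpd, pipestat) */
        u32 pipestat_enable;
};

/* Ring IMR: process context only, so no need to disable interrupts. */
static void example_ring_enable_user_irq(struct example_ring *ring, u32 rflag)
{
        spin_lock(&ring->irq_lock);
        if (ring->irq_refcount++ == 0)
                ring->irq_mask &= ~rflag;       /* unmask the ring's interrupt */
        spin_unlock(&ring->irq_lock);
}

/*
 * Pipestat/hpd state: also modified by the interrupt handler, so the lock
 * must be taken with interrupts disabled to avoid deadlocking against it.
 */
static void example_enable_pipestat(struct example_dev_priv *dev_priv, u32 flag)
{
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        dev_priv->pipestat_enable |= flag;
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
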
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h |  1
2 files changed, 15 insertions, 12 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 13cad981713b..03e337072517 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -526,7 +526,7 @@ render_ring_get_irq(struct intel_ring_buffer *ring)
         if (!dev->irq_enabled)
                 return false;
 
-        spin_lock(&dev_priv->irq_lock);
+        spin_lock(&ring->irq_lock);
         if (ring->irq_refcount++ == 0) {
                 if (HAS_PCH_SPLIT(dev))
                         ironlake_enable_irq(dev_priv,
@@ -534,7 +534,7 @@ render_ring_get_irq(struct intel_ring_buffer *ring)
                 else
                         i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
         }
-        spin_unlock(&dev_priv->irq_lock);
+        spin_unlock(&ring->irq_lock);
 
         return true;
 }
@@ -545,7 +545,7 @@ render_ring_put_irq(struct intel_ring_buffer *ring)
         struct drm_device *dev = ring->dev;
         drm_i915_private_t *dev_priv = dev->dev_private;
 
-        spin_lock(&dev_priv->irq_lock);
+        spin_lock(&ring->irq_lock);
         if (--ring->irq_refcount == 0) {
                 if (HAS_PCH_SPLIT(dev))
                         ironlake_disable_irq(dev_priv,
@@ -554,7 +554,7 @@ render_ring_put_irq(struct intel_ring_buffer *ring)
                 else
                         i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
         }
-        spin_unlock(&dev_priv->irq_lock);
+        spin_unlock(&ring->irq_lock);
 }
 
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@@ -620,10 +620,10 @@ ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
         if (!dev->irq_enabled)
                 return false;
 
-        spin_lock(&dev_priv->irq_lock);
+        spin_lock(&ring->irq_lock);
         if (ring->irq_refcount++ == 0)
                 ironlake_enable_irq(dev_priv, flag);
-        spin_unlock(&dev_priv->irq_lock);
+        spin_unlock(&ring->irq_lock);
 
         return true;
 }
@@ -634,10 +634,10 @@ ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
         struct drm_device *dev = ring->dev;
         drm_i915_private_t *dev_priv = dev->dev_private;
 
-        spin_lock(&dev_priv->irq_lock);
+        spin_lock(&ring->irq_lock);
         if (--ring->irq_refcount == 0)
                 ironlake_disable_irq(dev_priv, flag);
-        spin_unlock(&dev_priv->irq_lock);
+        spin_unlock(&ring->irq_lock);
 }
 
 static bool
@@ -649,13 +649,13 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
         if (!dev->irq_enabled)
                 return false;
 
-        spin_lock(&dev_priv->irq_lock);
+        spin_lock(&ring->irq_lock);
         if (ring->irq_refcount++ == 0) {
                 ring->irq_mask &= ~rflag;
                 I915_WRITE_IMR(ring, ring->irq_mask);
                 ironlake_enable_irq(dev_priv, gflag);
         }
-        spin_unlock(&dev_priv->irq_lock);
+        spin_unlock(&ring->irq_lock);
 
         return true;
 }
@@ -666,13 +666,13 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
         struct drm_device *dev = ring->dev;
         drm_i915_private_t *dev_priv = dev->dev_private;
 
-        spin_lock(&dev_priv->irq_lock);
+        spin_lock(&ring->irq_lock);
         if (--ring->irq_refcount == 0) {
                 ring->irq_mask |= rflag;
                 I915_WRITE_IMR(ring, ring->irq_mask);
                 ironlake_disable_irq(dev_priv, gflag);
         }
-        spin_unlock(&dev_priv->irq_lock);
+        spin_unlock(&ring->irq_lock);
 }
 
 static bool
@@ -814,6 +814,8 @@ int intel_init_ring_buffer(struct drm_device *dev,
         INIT_LIST_HEAD(&ring->active_list);
         INIT_LIST_HEAD(&ring->request_list);
         INIT_LIST_HEAD(&ring->gpu_write_list);
+
+        spin_lock_init(&ring->irq_lock);
         ring->irq_mask = ~0;
 
         if (I915_NEED_GFX_HWS(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6b1d9a5a7d07..be9087e4c9be 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -55,6 +55,7 @@ struct intel_ring_buffer {
         int effective_size;
         struct intel_hw_status_page status_page;
 
+        spinlock_t irq_lock;
         u32 irq_refcount;
         u32 irq_mask;
         u32 irq_seqno;          /* last seq seem at irq time */