Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c | 50
1 file changed, 23 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 3971b5e6ad60..95c4b1429935 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -621,7 +621,7 @@ ring_add_request(struct intel_ring_buffer *ring,
 }
 
 static bool
-ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
+gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -630,71 +630,67 @@ ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
 		return false;
 
 	spin_lock(&ring->irq_lock);
-	if (ring->irq_refcount++ == 0)
-		ironlake_enable_irq(dev_priv, flag);
+	if (ring->irq_refcount++ == 0) {
+		ring->irq_mask &= ~rflag;
+		I915_WRITE_IMR(ring, ring->irq_mask);
+		ironlake_enable_irq(dev_priv, gflag);
+	}
 	spin_unlock(&ring->irq_lock);
 
 	return true;
 }
 
 static void
-ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
+gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	spin_lock(&ring->irq_lock);
-	if (--ring->irq_refcount == 0)
-		ironlake_disable_irq(dev_priv, flag);
+	if (--ring->irq_refcount == 0) {
+		ring->irq_mask |= rflag;
+		I915_WRITE_IMR(ring, ring->irq_mask);
+		ironlake_disable_irq(dev_priv, gflag);
+	}
 	spin_unlock(&ring->irq_lock);
 }
 
 static bool
-gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+bsd_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	if (!dev->irq_enabled)
 		return false;
 
 	spin_lock(&ring->irq_lock);
 	if (ring->irq_refcount++ == 0) {
-		ring->irq_mask &= ~rflag;
-		I915_WRITE_IMR(ring, ring->irq_mask);
-		ironlake_enable_irq(dev_priv, gflag);
+		if (IS_G4X(dev))
+			i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
+		else
+			ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
 	}
 	spin_unlock(&ring->irq_lock);
 
 	return true;
 }
-
 static void
-gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+bsd_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	spin_lock(&ring->irq_lock);
 	if (--ring->irq_refcount == 0) {
-		ring->irq_mask |= rflag;
-		I915_WRITE_IMR(ring, ring->irq_mask);
-		ironlake_disable_irq(dev_priv, gflag);
+		if (IS_G4X(dev))
+			i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
+		else
+			ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
 	}
 	spin_unlock(&ring->irq_lock);
 }
 
-static bool
-bsd_ring_get_irq(struct intel_ring_buffer *ring)
-{
-	return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
-}
-static void
-bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
-	ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
-}
-
 static int
 ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 {
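
Note on the pattern above: both the old and new helpers implement a refcounted IRQ get/put. Only the 0 -> 1 transition of ring->irq_refcount unmasks the interrupt, and only the 1 -> 0 transition masks it again, all under ring->irq_lock, so several waiters can share one hardware enable. The gen6 variant masks at two levels: a per-ring mask written with I915_WRITE_IMR() (selected by rflag) and the shared GT enable toggled with ironlake_enable_irq()/ironlake_disable_irq() (selected by gflag). What follows is a minimal userspace C sketch of that idea with invented names throughout (a pthread mutex standing in for the spinlock, printf stubs for the register writes); it illustrates the pattern only and is not the driver code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the hardware accesses: write_imr() plays
 * the role of I915_WRITE_IMR(), hw_enable()/hw_disable() the role of
 * ironlake_enable_irq()/ironlake_disable_irq(). */
static void write_imr(unsigned int val)    { printf("IMR <- 0x%08x\n", val); }
static void hw_enable(unsigned int gflag)  { printf("GT enable 0x%08x\n", gflag); }
static void hw_disable(unsigned int gflag) { printf("GT disable 0x%08x\n", gflag); }

/* Mirrors of the intel_ring_buffer fields the patch uses; a pthread
 * mutex stands in for the kernel spinlock ring->irq_lock. */
static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static int irq_refcount;
static unsigned int irq_mask = ~0u;	/* all sources masked initially */

/* First reference unmasks the source in the ring-local mask and then
 * enables it globally; later callers only bump the count. */
static bool example_ring_get_irq(unsigned int gflag, unsigned int rflag)
{
	pthread_mutex_lock(&irq_lock);
	if (irq_refcount++ == 0) {
		irq_mask &= ~rflag;	/* clear mask bit = unmask source */
		write_imr(irq_mask);
		hw_enable(gflag);
	}
	pthread_mutex_unlock(&irq_lock);
	return true;
}

/* Dropping the last reference masks the source again. */
static void example_ring_put_irq(unsigned int gflag, unsigned int rflag)
{
	pthread_mutex_lock(&irq_lock);
	if (--irq_refcount == 0) {
		irq_mask |= rflag;	/* set mask bit = mask source */
		write_imr(irq_mask);
		hw_disable(gflag);
	}
	pthread_mutex_unlock(&irq_lock);
}

int main(void)
{
	enum { GFLAG = 1u << 12, RFLAG = 1u << 0 };	/* arbitrary bits */

	example_ring_get_irq(GFLAG, RFLAG);	/* first waiter: hw touched */
	example_ring_get_irq(GFLAG, RFLAG);	/* second waiter: count only */
	example_ring_put_irq(GFLAG, RFLAG);	/* count only */
	example_ring_put_irq(GFLAG, RFLAG);	/* last waiter: hw masked */
	return 0;
}

Built with `cc -pthread sketch.c`, only the first get and the last put print anything; that is the behaviour the refcount buys the driver, where repeating the MMIO writes for every waiter would be wasted work. The two flags of gen6_ring_get_irq() are also visible here: rflag selects the bit in the ring-local mask, while gflag is the ring's bit in the shared GT enable register.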