Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
 drivers/gpu/drm/i915/intel_ringbuffer.c | 55 ++++++++++++++++++++++++++-----------------------------
 1 file changed, 26 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 77e729d4e4f0..536191540b03 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -301,7 +301,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 
 	I915_WRITE_CTL(ring,
 			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
-			| RING_REPORT_64K | RING_VALID);
+			| RING_VALID);
 
 	/* If the head is still not zero, the ring is dead */
 	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
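The hunk above stops setting RING_REPORT_64K, the control-register bit that asks the hardware to auto-report the ring's HEAD pointer into the status page every 64K; the only consumer of that autoreport, a fastpath in intel_wait_ring_buffer(), is removed later in this patch. For readers unfamiliar with the register layout, here is a minimal user-space sketch of how the RING_CTL value is composed. The bit values follow i915_reg.h of this era but are reproduced from memory, and the ring size is made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE       4096u
    #define RING_NR_PAGES   0x001ff000u  /* bits 20:12 encode (pages - 1) */
    #define RING_REPORT_64K 0x00000002u  /* autoreport HEAD every 64K; no longer set */
    #define RING_VALID      0x00000001u  /* ring enabled */

    int main(void)
    {
            uint32_t size = 128 * 1024;  /* hypothetical 128 KiB ring */
            uint32_t ctl = ((size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;

            printf("RING_CTL = 0x%08x\n", (unsigned)ctl);
            return 0;
    }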
@@ -636,6 +636,19 @@ render_ring_add_request(struct intel_ring_buffer *ring,
 }
 
 static u32
+gen6_ring_get_seqno(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+
+	/* Workaround to force correct ordering between irq and seqno writes on
+	 * ivb (and maybe also on snb) by reading from a CS register (like
+	 * ACTHD) before reading the status page. */
+	if (IS_GEN7(dev))
+		intel_ring_get_active_head(ring);
+	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static u32
 ring_get_seqno(struct intel_ring_buffer *ring)
 {
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
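The new gen6_ring_get_seqno() papers over an ordering problem on Ivybridge: the seqno written to the status page can lag the interrupt, so reading back any command-streamer register (ACTHD is convenient) is used to flush the engine's in-flight writes before the status page is trusted. A self-contained C model of that shape, with hypothetical register pointers standing in for the MMIO mappings:

    #include <stdint.h>

    #define HWS_INDEX 0x20  /* mirrors I915_GEM_HWS_INDEX */

    static inline uint32_t mmio_read32(const volatile uint32_t *reg)
    {
            return *reg;    /* a real driver would use readl() */
    }

    /* Model of the workaround: a read from the same engine stalls until its
     * in-flight writes are visible, so the status-page read that follows
     * sees a seqno at least as new as the one that raised the irq. */
    uint32_t get_seqno(const volatile uint32_t *acthd,
                       const volatile uint32_t *status_page, int is_gen7)
    {
            if (is_gen7)
                    (void)mmio_read32(acthd);  /* posting read; value discarded */
            return status_page[HWS_INDEX];
    }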
@@ -792,17 +805,6 @@ ring_add_request(struct intel_ring_buffer *ring,
 }
 
 static bool
-gen7_blt_ring_get_irq(struct intel_ring_buffer *ring)
-{
-	/* The BLT ring on IVB appears to have broken synchronization
-	 * between the seqno write and the interrupt, so that the
-	 * interrupt appears first. Returning false here makes
-	 * i915_wait_request() do a polling loop, instead.
-	 */
-	return false;
-}
-
-static bool
 gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 {
 	struct drm_device *dev = ring->dev;
@@ -811,6 +813,12 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 	if (!dev->irq_enabled)
 		return false;
 
+	/* It looks like we need to prevent the gt from suspending while waiting
+	 * for a notify irq, otherwise irqs seem to get lost on at least the
+	 * blt/bsd rings on ivb. */
+	if (IS_GEN7(dev))
+		gen6_gt_force_wake_get(dev_priv);
+
 	spin_lock(&ring->irq_lock);
 	if (ring->irq_refcount++ == 0) {
 		ring->irq_mask &= ~rflag;
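The forcewake get added above, paired with the put in the next hunk, keeps the GT power well awake for the whole time an interrupt is outstanding; this replaces the gen7_blt_ring_get_irq() polling fallback deleted earlier, attacking the lost-irq symptom at its suspected source. gen6_gt_force_wake_get()/put() behave like a refcounted keep-awake handle. A rough user-space model of that pattern only (the actual i915 implementation differs; a pthread mutex stands in for the driver's locking and callbacks for the register pokes):

    #include <pthread.h>

    struct forcewake {
            pthread_mutex_t lock;
            int count;
            void (*hw_get)(void);   /* poke the GT awake */
            void (*hw_put)(void);   /* allow it to sleep again */
    };

    void force_wake_get(struct forcewake *fw)
    {
            pthread_mutex_lock(&fw->lock);
            if (fw->count++ == 0)
                    fw->hw_get();   /* first user powers up */
            pthread_mutex_unlock(&fw->lock);
    }

    void force_wake_put(struct forcewake *fw)
    {
            pthread_mutex_lock(&fw->lock);
            if (--fw->count == 0)
                    fw->hw_put();   /* last user releases */
            pthread_mutex_unlock(&fw->lock);
    }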
@@ -835,6 +843,9 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 		ironlake_disable_irq(dev_priv, gflag);
 	}
 	spin_unlock(&ring->irq_lock);
+
+	if (IS_GEN7(dev))
+		gen6_gt_force_wake_put(dev_priv);
 }
 
 static bool
@@ -1121,18 +1132,6 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long end;
-	u32 head;
-
-	/* If the reported head position has wrapped or hasn't advanced,
-	 * fallback to the slow and accurate path.
-	 */
-	head = intel_read_status_page(ring, 4);
-	if (head > ring->head) {
-		ring->head = head;
-		ring->space = ring_space(ring);
-		if (ring->space >= n)
-			return 0;
-	}
 
 	trace_i915_ring_wait_begin(ring);
 	if (drm_core_check_feature(dev, DRIVER_GEM))
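The deleted block was a fastpath that trusted the HEAD value the hardware auto-reported into dword 4 of the status page, the mechanism enabled by RING_REPORT_64K that the first hunk also drops: if the reported head had advanced, free space could be recomputed without touching ring registers. With autoreporting gone, intel_wait_ring_buffer() always takes the polled path. For reference, the free-space computation the fastpath fed into looks roughly like this sketch; the 8-byte headroom mirrors ring_space() in this era of the driver, though the signature here is simplified for illustration:

    /* Free bytes in a circular ring, keeping 8 bytes of headroom so the
     * tail never catches up to the head exactly (which would read as an
     * empty ring). */
    static int ring_space(unsigned int head, unsigned int tail,
                          unsigned int size)
    {
            int space = (int)(head - (tail + 8));

            if (space < 0)
                    space += (int)size;
            return space;
    }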
@@ -1341,7 +1340,7 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
 	.write_tail = gen6_bsd_ring_write_tail,
 	.flush = gen6_ring_flush,
 	.add_request = gen6_add_request,
-	.get_seqno = ring_get_seqno,
+	.get_seqno = gen6_ring_get_seqno,
 	.irq_get = gen6_bsd_ring_get_irq,
 	.irq_put = gen6_bsd_ring_put_irq,
 	.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
@@ -1476,7 +1475,7 @@ static const struct intel_ring_buffer gen6_blt_ring = {
 	.write_tail = ring_write_tail,
 	.flush = blt_ring_flush,
 	.add_request = gen6_add_request,
-	.get_seqno = ring_get_seqno,
+	.get_seqno = gen6_ring_get_seqno,
 	.irq_get = blt_ring_get_irq,
 	.irq_put = blt_ring_put_irq,
 	.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
@@ -1499,6 +1498,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->flush = gen6_render_ring_flush;
 		ring->irq_get = gen6_render_ring_get_irq;
 		ring->irq_put = gen6_render_ring_put_irq;
+		ring->get_seqno = gen6_ring_get_seqno;
 	} else if (IS_GEN5(dev)) {
 		ring->add_request = pc_render_add_request;
 		ring->get_seqno = pc_render_get_seqno;
@@ -1577,8 +1577,5 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 
 	*ring = gen6_blt_ring;
 
-	if (IS_GEN7(dev))
-		ring->irq_get = gen7_blt_ring_get_irq;
-
 	return intel_init_ring_buffer(dev, ring);
 }