Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
 drivers/gpu/drm/i915/intel_ringbuffer.c | 43 ++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 40 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ca70e2f10445..1ab842c6032e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -414,6 +414,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 			return ret;
 	}
 
+	if (INTEL_INFO(dev)->gen >= 6) {
+		I915_WRITE(INSTPM,
+			   INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
+	}
+
 	return ret;
 }
 
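The INSTPM write above uses the gen6+ masked-register convention: the top 16 bits of the written value are a write-enable mask for the bottom 16 bits, so a single write flips just the selected bit with no read-modify-write cycle. A minimal sketch of that pattern (the MASKED_BIT_* helper names are illustrative; this tree open-codes the shift, and later trees spell them _MASKED_BIT_ENABLE/_MASKED_BIT_DISABLE):

	/* Gen6+ masked registers: bits 31:16 select which of bits 15:0
	 * the hardware actually latches on this write. */
	#define MASKED_BIT_ENABLE(b)	(((b) << 16) | (b))
	#define MASKED_BIT_DISABLE(b)	((b) << 16)

	/* Equivalent to the write added above: */
	I915_WRITE(INSTPM, MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));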
@@ -631,6 +636,19 @@ render_ring_add_request(struct intel_ring_buffer *ring,
 }
 
 static u32
+gen6_ring_get_seqno(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+
+	/* Workaround to force correct ordering between irq and seqno writes on
+	 * ivb (and maybe also on snb) by reading from a CS register (like
+	 * ACTHD) before reading the status page. */
+	if (IS_GEN7(dev))
+		intel_ring_get_active_head(ring);
+	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static u32
 ring_get_seqno(struct intel_ring_buffer *ring)
 {
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
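The new gen6_ring_get_seqno() relies on a posting read: fetching a command-streamer register such as ACTHD before touching the status page forces the seqno write that accompanied the interrupt to become visible first. A hedged sketch of how a caller consumes it, using the wraparound-safe comparison that i915_seqno_passed() in i915_drv.h performs (the seqno_passed() wrapper here is hypothetical):

	/* Compare hardware progress against a target seqno; the signed cast
	 * of the unsigned difference keeps this correct across u32 wrap. */
	static bool seqno_passed(struct intel_ring_buffer *ring, u32 seqno)
	{
		u32 hw = ring->get_seqno(ring);	/* gen7: does the ACTHD read */

		return (int32_t)(hw - seqno) >= 0;
	}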
@@ -795,6 +813,12 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 	if (!dev->irq_enabled)
 		return false;
 
+	/* It looks like we need to prevent the gt from suspending while waiting
+	 * for a notify irq, otherwise irqs seem to get lost on at least the
+	 * blt/bsd rings on ivb. */
+	if (IS_GEN7(dev))
+		gen6_gt_force_wake_get(dev_priv);
+
 	spin_lock(&ring->irq_lock);
 	if (ring->irq_refcount++ == 0) {
 		ring->irq_mask &= ~rflag;
@@ -819,6 +843,9 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 		ironlake_disable_irq(dev_priv, gflag);
 	}
 	spin_unlock(&ring->irq_lock);
+
+	if (IS_GEN7(dev))
+		gen6_gt_force_wake_put(dev_priv);
 }
 
 static bool
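The force-wake get/put pair added in this and the previous hunk keeps the GT out of its power-save state for as long as at least one irq waiter needs it, so the gen7 path nests safely under other forcewake users. A simplified sketch of that refcounting, with the hardware ack handshake elided and the counter field name assumed for illustration:

	static void force_wake_get(struct drm_i915_private *dev_priv)
	{
		/* First user wakes the GT and waits for the hardware ack. */
		if (dev_priv->forcewake_count++ == 0)
			__gen6_gt_force_wake_get(dev_priv);
	}

	static void force_wake_put(struct drm_i915_private *dev_priv)
	{
		/* Last user lets the GT drop back into power saving. */
		if (--dev_priv->forcewake_count == 0)
			__gen6_gt_force_wake_put(dev_priv);
	}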
@@ -1119,7 +1146,16 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 	}
 
 	trace_i915_ring_wait_begin(ring);
-	end = jiffies + 3 * HZ;
+	if (drm_core_check_feature(dev, DRIVER_GEM))
+		/* With GEM the hangcheck timer should kick us out of the loop;
+		 * leaving it early runs the risk of corrupting GEM state (due
+		 * to running on almost untested codepaths). But on resume
+		 * timers don't work yet, so prevent a complete hang in that
+		 * case by choosing an insanely large timeout. */
+		end = jiffies + 60 * HZ;
+	else
+		end = jiffies + 3 * HZ;
+
 	do {
 		ring->head = I915_READ_HEAD(ring);
 		ring->space = ring_space(ring);
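The longer timeout only changes when the busy-wait gives up; the loop itself still polls the ring registers once per millisecond until enough space frees up or the deadline passes. A condensed sketch of the loop's shape, assuming the surrounding function body of this era (time_after() is the wrap-safe jiffies comparison from <linux/jiffies.h>):

	do {
		ring->head = I915_READ_HEAD(ring);
		ring->space = ring_space(ring);
		if (ring->space >= n)
			return 0;		/* enough room freed up */

		msleep(1);
		if (atomic_read(&dev_priv->mm.wedged))
			return -EAGAIN;		/* a GPU hang was declared */
	} while (!time_after(jiffies, end));
	return -EBUSY;				/* deadline passed */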
@@ -1316,7 +1352,7 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
 	.write_tail = gen6_bsd_ring_write_tail,
 	.flush = gen6_ring_flush,
 	.add_request = gen6_add_request,
-	.get_seqno = ring_get_seqno,
+	.get_seqno = gen6_ring_get_seqno,
 	.irq_get = gen6_bsd_ring_get_irq,
 	.irq_put = gen6_bsd_ring_put_irq,
 	.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
@@ -1451,7 +1487,7 @@ static const struct intel_ring_buffer gen6_blt_ring = {
 	.write_tail = ring_write_tail,
 	.flush = blt_ring_flush,
 	.add_request = gen6_add_request,
-	.get_seqno = ring_get_seqno,
+	.get_seqno = gen6_ring_get_seqno,
 	.irq_get = blt_ring_get_irq,
 	.irq_put = blt_ring_put_irq,
 	.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
@@ -1474,6 +1510,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->flush = gen6_render_ring_flush;
 		ring->irq_get = gen6_render_ring_get_irq;
 		ring->irq_put = gen6_render_ring_put_irq;
+		ring->get_seqno = gen6_ring_get_seqno;
 	} else if (IS_GEN5(dev)) {
 		ring->add_request = pc_render_add_request;
 		ring->get_seqno = pc_render_get_seqno;
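With this last hunk all three gen6+ rings (render here, bsd and blt in the two hunks above) resolve seqno reads through gen6_ring_get_seqno, so the ivb ordering workaround applies uniformly to any caller that goes through the per-ring hook rather than reading the status page directly:

	/* Callers never touch the status page themselves; the vfunc hides
	 * generation-specific workarounds such as the gen7 ACTHD read. */
	u32 seqno = ring->get_seqno(ring);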