Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	275
1 file changed, 223 insertions, 52 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 460ee1026fca..b620337e6d67 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -41,6 +41,16 @@ static inline int ring_space(struct intel_ring_buffer *ring)
 	return space;
 }
 
+void __intel_ring_advance(struct intel_ring_buffer *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+	ring->tail &= ring->size - 1;
+	if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
+		return;
+	ring->write_tail(ring, ring->tail);
+}
+
 static int
 gen2_render_ring_flush(struct intel_ring_buffer *ring,
 		       u32 invalidate_domains,
@@ -350,6 +360,47 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
 	return 0;
 }
 
+static int
+gen8_render_ring_flush(struct intel_ring_buffer *ring,
+		       u32 invalidate_domains, u32 flush_domains)
+{
+	u32 flags = 0;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
+	int ret;
+
+	flags |= PIPE_CONTROL_CS_STALL;
+
+	if (flush_domains) {
+		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+	}
+	if (invalidate_domains) {
+		flags |= PIPE_CONTROL_TLB_INVALIDATE;
+		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_QW_WRITE;
+		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+	}
+
+	ret = intel_ring_begin(ring, 6);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+
+	return 0;
+
+}
+
 static void ring_write_tail(struct intel_ring_buffer *ring,
 			    u32 value)
 {
@@ -385,8 +436,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 	int ret = 0;
 	u32 head;
 
-	if (HAS_FORCE_WAKE(dev))
-		gen6_gt_force_wake_get(dev_priv);
+	gen6_gt_force_wake_get(dev_priv);
 
 	if (I915_NEED_GFX_HWS(dev))
 		intel_ring_setup_status_page(ring);
@@ -459,8 +509,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
 
 out:
-	if (HAS_FORCE_WAKE(dev))
-		gen6_gt_force_wake_put(dev_priv);
+	gen6_gt_force_wake_put(dev_priv);
 
 	return ret;
 }
@@ -559,8 +608,8 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 	if (INTEL_INFO(dev)->gen >= 6)
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-	if (HAS_L3_GPU_CACHE(dev))
-		I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+	if (HAS_L3_DPF(dev))
+		I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
 
 	return ret;
 }
@@ -593,7 +642,7 @@ update_mboxes(struct intel_ring_buffer *ring,
 #define MBOX_UPDATE_DWORDS 4
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 	intel_ring_emit(ring, mmio_offset);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, MI_NOOP);
 }
 
@@ -629,9 +678,9 @@ gen6_add_request(struct intel_ring_buffer *ring)
 
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	intel_ring_advance(ring);
+	__intel_ring_advance(ring);
 
 	return 0;
 }
@@ -723,7 +772,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
 	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, 0);
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
 	scratch_addr += 128; /* write to separate cachelines */
@@ -742,9 +791,9 @@ pc_render_add_request(struct intel_ring_buffer *ring)
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 			PIPE_CONTROL_NOTIFY);
 	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	__intel_ring_advance(ring);
 
 	return 0;
 }
@@ -963,9 +1012,9 @@ i9xx_add_request(struct intel_ring_buffer *ring)
 
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	intel_ring_advance(ring);
+	__intel_ring_advance(ring);
 
 	return 0;
 }
@@ -987,10 +1036,10 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
-		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
+		if (HAS_L3_DPF(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring,
 				       ~(ring->irq_enable_mask |
-					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
+					 GT_PARITY_ERROR(dev)));
 		else
 			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
 		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
@@ -1009,9 +1058,8 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
-		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
-			I915_WRITE_IMR(ring,
-				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+		if (HAS_L3_DPF(dev) && ring->id == RCS)
+			I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
 		else
 			I915_WRITE_IMR(ring, ~0);
 		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
@@ -1059,6 +1107,52 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
+static bool
+gen8_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (ring->irq_refcount++ == 0) {
+		if (HAS_L3_DPF(dev) && ring->id == RCS) {
+			I915_WRITE_IMR(ring,
+				       ~(ring->irq_enable_mask |
+					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
+		} else {
+			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+		}
+		POSTING_READ(RING_IMR(ring->mmio_base));
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	return true;
+}
+
+static void
+gen8_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (--ring->irq_refcount == 0) {
+		if (HAS_L3_DPF(dev) && ring->id == RCS) {
+			I915_WRITE_IMR(ring,
+				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+		} else {
+			I915_WRITE_IMR(ring, ~0);
+		}
+		POSTING_READ(RING_IMR(ring->mmio_base));
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
 static int
 i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
 			 u32 offset, u32 length,
@@ -1317,7 +1411,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 	/* Disable the ring buffer. The ring must be idle at this point */
 	dev_priv = ring->dev->dev_private;
 	ret = intel_ring_idle(ring);
-	if (ret)
+	if (ret && !i915_reset_in_progress(&dev_priv->gpu_error))
 		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
 			  ring->name, ret);
 
@@ -1328,6 +1422,8 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 	i915_gem_object_unpin(ring->obj);
 	drm_gem_object_unreference(&ring->obj->base);
 	ring->obj = NULL;
+	ring->preallocated_lazy_request = NULL;
+	ring->outstanding_lazy_seqno = 0;
 
 	if (ring->cleanup)
 		ring->cleanup(ring);
@@ -1414,6 +1510,9 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
 		if (ret != -ENOSPC)
 			return ret;
 
+	/* force the tail write in case we have been skipping them */
+	__intel_ring_advance(ring);
+
 	trace_i915_ring_wait_begin(ring);
 	/* With GEM the hangcheck timer should kick us out of the loop,
 	 * leaving it early runs the risk of corrupting GEM state (due
@@ -1475,7 +1574,7 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
 	int ret;
 
 	/* We need to add any requests required to flush the objects and ring */
-	if (ring->outstanding_lazy_request) {
+	if (ring->outstanding_lazy_seqno) {
 		ret = i915_add_request(ring, NULL);
 		if (ret)
 			return ret;
@@ -1495,10 +1594,20 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
 static int
 intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
 {
-	if (ring->outstanding_lazy_request)
+	if (ring->outstanding_lazy_seqno)
 		return 0;
 
-	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
+	if (ring->preallocated_lazy_request == NULL) {
+		struct drm_i915_gem_request *request;
+
+		request = kmalloc(sizeof(*request), GFP_KERNEL);
+		if (request == NULL)
+			return -ENOMEM;
+
+		ring->preallocated_lazy_request = request;
+	}
+
+	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
 }
 
 static int __intel_ring_begin(struct intel_ring_buffer *ring,
@@ -1545,7 +1654,7 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
-	BUG_ON(ring->outstanding_lazy_request);
+	BUG_ON(ring->outstanding_lazy_seqno);
 
 	if (INTEL_INFO(ring->dev)->gen >= 6) {
 		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
@@ -1558,17 +1667,6 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
 	ring->hangcheck.seqno = seqno;
 }
 
-void intel_ring_advance(struct intel_ring_buffer *ring)
-{
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
-	ring->tail &= ring->size - 1;
-	if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
-		return;
-	ring->write_tail(ring, ring->tail);
-}
-
-
 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
 				     u32 value)
 {
@@ -1613,6 +1711,8 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
 		return ret;
 
 	cmd = MI_FLUSH_DW;
+	if (INTEL_INFO(ring->dev)->gen >= 8)
+		cmd += 1;
 	/*
 	 * Bspec vol 1c.5 - video engine command streamer:
 	 * "If ENABLED, all TLBs will be invalidated once the flush
@@ -1624,9 +1724,38 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
 		MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
 	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	if (INTEL_INFO(ring->dev)->gen >= 8) {
+		intel_ring_emit(ring, 0); /* upper addr */
+		intel_ring_emit(ring, 0); /* value */
+	} else {
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, MI_NOOP);
+	}
+	intel_ring_advance(ring);
+	return 0;
+}
+
+static int
+gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+			      u32 offset, u32 len,
+			      unsigned flags)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
+		!(flags & I915_DISPATCH_SECURE);
+	int ret;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	/* FIXME(BDW): Address space and security selectors. */
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
+	intel_ring_emit(ring, offset);
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
+
 	return 0;
 }
 
@@ -1686,6 +1815,8 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
 		return ret;
 
 	cmd = MI_FLUSH_DW;
+	if (INTEL_INFO(ring->dev)->gen >= 8)
+		cmd += 1;
 	/*
 	 * Bspec vol 1c.3 - blitter engine command streamer:
 	 * "If ENABLED, all TLBs will be invalidated once the flush
@@ -1697,8 +1828,13 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
 			MI_FLUSH_DW_OP_STOREDW;
 	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, MI_NOOP);
+	if (INTEL_INFO(ring->dev)->gen >= 8) {
+		intel_ring_emit(ring, 0); /* upper addr */
+		intel_ring_emit(ring, 0); /* value */
+	} else {
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, MI_NOOP);
+	}
 	intel_ring_advance(ring);
 
 	if (IS_GEN7(dev) && flush)
@@ -1721,8 +1857,14 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 			ring->flush = gen7_render_ring_flush;
 		if (INTEL_INFO(dev)->gen == 6)
 			ring->flush = gen6_render_ring_flush;
-		ring->irq_get = gen6_ring_get_irq;
-		ring->irq_put = gen6_ring_put_irq;
+		if (INTEL_INFO(dev)->gen >= 8) {
+			ring->flush = gen8_render_ring_flush;
+			ring->irq_get = gen8_ring_get_irq;
+			ring->irq_put = gen8_ring_put_irq;
+		} else {
+			ring->irq_get = gen6_ring_get_irq;
+			ring->irq_put = gen6_ring_put_irq;
+		}
 		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 		ring->get_seqno = gen6_ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
@@ -1764,6 +1906,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	ring->write_tail = ring_write_tail;
 	if (IS_HASWELL(dev))
 		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+	else if (IS_GEN8(dev))
+		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
 	else if (INTEL_INFO(dev)->gen >= 6)
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 	else if (INTEL_INFO(dev)->gen >= 4)
@@ -1877,7 +2021,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 	ring->id = VCS;
 
 	ring->write_tail = ring_write_tail;
-	if (IS_GEN6(dev) || IS_GEN7(dev)) {
+	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->mmio_base = GEN6_BSD_RING_BASE;
 		/* gen6 bsd needs a special wa for tail updates */
 		if (IS_GEN6(dev))
@@ -1886,10 +2030,20 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		ring->add_request = gen6_add_request;
 		ring->get_seqno = gen6_ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
-		ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
-		ring->irq_get = gen6_ring_get_irq;
-		ring->irq_put = gen6_ring_put_irq;
-		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+		if (INTEL_INFO(dev)->gen >= 8) {
+			ring->irq_enable_mask =
+				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
+			ring->irq_get = gen8_ring_get_irq;
+			ring->irq_put = gen8_ring_put_irq;
+			ring->dispatch_execbuffer =
+				gen8_ring_dispatch_execbuffer;
+		} else {
+			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+			ring->irq_get = gen6_ring_get_irq;
+			ring->irq_put = gen6_ring_put_irq;
+			ring->dispatch_execbuffer =
+				gen6_ring_dispatch_execbuffer;
+		}
 		ring->sync_to = gen6_ring_sync;
 		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
 		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
@@ -1935,10 +2089,18 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
-	ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
-	ring->irq_get = gen6_ring_get_irq;
-	ring->irq_put = gen6_ring_put_irq;
-	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+	if (INTEL_INFO(dev)->gen >= 8) {
+		ring->irq_enable_mask =
+			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
+		ring->irq_get = gen8_ring_get_irq;
+		ring->irq_put = gen8_ring_put_irq;
+		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+	} else {
+		ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
+		ring->irq_get = gen6_ring_get_irq;
+		ring->irq_put = gen6_ring_put_irq;
+		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+	}
 	ring->sync_to = gen6_ring_sync;
 	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
 	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
@@ -1967,10 +2129,19 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
-	ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
-	ring->irq_get = hsw_vebox_get_irq;
-	ring->irq_put = hsw_vebox_put_irq;
-	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		ring->irq_enable_mask =
+			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
+		ring->irq_get = gen8_ring_get_irq;
+		ring->irq_put = gen8_ring_put_irq;
+		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+	} else {
+		ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
+		ring->irq_get = hsw_vebox_get_irq;
+		ring->irq_put = hsw_vebox_put_irq;
+		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+	}
 	ring->sync_to = gen6_ring_sync;
 	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
 	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;