Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
 -rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 194
 1 file changed, 177 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 2dec134f75eb..b620337e6d67 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -360,6 +360,47 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
 	return 0;
 }
 
+static int
+gen8_render_ring_flush(struct intel_ring_buffer *ring,
+		       u32 invalidate_domains, u32 flush_domains)
+{
+	u32 flags = 0;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
+	int ret;
+
+	flags |= PIPE_CONTROL_CS_STALL;
+
+	if (flush_domains) {
+		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+	}
+	if (invalidate_domains) {
+		flags |= PIPE_CONTROL_TLB_INVALIDATE;
+		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_QW_WRITE;
+		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+	}
+
+	ret = intel_ring_begin(ring, 6);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+
+	return 0;
+
+}
+
 static void ring_write_tail(struct intel_ring_buffer *ring,
 			    u32 value)
 {
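As a reading aid for the hunk above, here is a rough map of the six dwords gen8_render_ring_flush emits; the per-dword meanings are inferred from the emit sequence and the flag names, not quoted from Bspec:

/* Sketch (inferred, not authoritative) of the gen8 PIPE_CONTROL above:
 *   DW0: GFX_OP_PIPE_CONTROL(6)  header; length field covers 6 dwords
 *   DW1: flags                   CS stall + cache flush/invalidate bits
 *   DW2: scratch_addr            post-sync write address (low 32 bits)
 *   DW3: 0                       address high bits, unused here
 *   DW4: 0                       post-sync immediate data (low)
 *   DW5: 0                       post-sync immediate data (high)
 * The per-ring scratch page gives PIPE_CONTROL_QW_WRITE a harmless
 * place to land its qword write.
 */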
@@ -1066,6 +1107,52 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
+static bool
+gen8_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (ring->irq_refcount++ == 0) {
+		if (HAS_L3_DPF(dev) && ring->id == RCS) {
+			I915_WRITE_IMR(ring,
+				       ~(ring->irq_enable_mask |
+					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
+		} else {
+			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+		}
+		POSTING_READ(RING_IMR(ring->mmio_base));
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	return true;
+}
+
+static void
+gen8_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (--ring->irq_refcount == 0) {
+		if (HAS_L3_DPF(dev) && ring->id == RCS) {
+			I915_WRITE_IMR(ring,
+				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+		} else {
+			I915_WRITE_IMR(ring, ~0);
+		}
+		POSTING_READ(RING_IMR(ring->mmio_base));
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
 static int
 i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
 			 u32 offset, u32 length,
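The get/put pair above follows the usual i915 refcounted-IMR pattern: hardware is touched only on the 0 -> 1 and 1 -> 0 transitions, under dev_priv->irq_lock, with a POSTING_READ to flush the IMR write. A minimal standalone sketch of that pattern (all names below are hypothetical, not part of this patch):

static unsigned int irq_refcount;	/* per-ring in the real driver */

static bool example_get_irq(void)
{
	/* Unmask in hardware only on the 0 -> 1 transition. */
	if (irq_refcount++ == 0)
		unmask_user_interrupt();	/* hypothetical helper */
	return true;
}

static void example_put_irq(void)
{
	/* Mask again only when the last holder drops its reference. */
	if (--irq_refcount == 0)
		mask_all_interrupts();		/* hypothetical helper */
}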
@@ -1624,6 +1711,8 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
 		return ret;
 
 	cmd = MI_FLUSH_DW;
+	if (INTEL_INFO(ring->dev)->gen >= 8)
+		cmd += 1;
 	/*
 	 * Bspec vol 1c.5 - video engine command streamer:
 	 * "If ENABLED, all TLBs will be invalidated once the flush
@@ -1635,9 +1724,38 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
 		MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
 	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	if (INTEL_INFO(ring->dev)->gen >= 8) {
+		intel_ring_emit(ring, 0); /* upper addr */
+		intel_ring_emit(ring, 0); /* value */
+	} else {
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, MI_NOOP);
+	}
+	intel_ring_advance(ring);
+	return 0;
+}
+
+static int
+gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+			      u32 offset, u32 len,
+			      unsigned flags)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
+		!(flags & I915_DISPATCH_SECURE);
+	int ret;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	/* FIXME(BDW): Address space and security selectors. */
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
+	intel_ring_emit(ring, offset);
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
+
 	return 0;
 }
 
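The "cmd += 1" in both flush functions is a length-field bump: as I read the MI command encoding, the low bits of the header give the command length (total dwords minus two), and on gen8 the MI_FLUSH_DW post-sync address grows to 64 bits. The two layouts implied by the emit sequences above:

/* Layouts implied by the emits above (the gen8 reading is an inference):
 *   gen6/7: MI_FLUSH_DW     | addr      | value     | MI_NOOP (qword pad)
 *   gen8+:  MI_FLUSH_DW + 1 | addr (lo) | addr (hi) | value
 * Growing the header's length field by one covers the extra address
 * dword, so the trailing MI_NOOP pad is no longer needed.
 */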
@@ -1697,6 +1815,8 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
 		return ret;
 
 	cmd = MI_FLUSH_DW;
+	if (INTEL_INFO(ring->dev)->gen >= 8)
+		cmd += 1;
 	/*
 	 * Bspec vol 1c.3 - blitter engine command streamer:
 	 * "If ENABLED, all TLBs will be invalidated once the flush
@@ -1708,8 +1828,13 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
 		MI_FLUSH_DW_OP_STOREDW;
 	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, MI_NOOP);
+	if (INTEL_INFO(ring->dev)->gen >= 8) {
+		intel_ring_emit(ring, 0); /* upper addr */
+		intel_ring_emit(ring, 0); /* value */
+	} else {
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, MI_NOOP);
+	}
 	intel_ring_advance(ring);
 
 	if (IS_GEN7(dev) && flush)
@@ -1732,8 +1857,14 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->flush = gen7_render_ring_flush;
 		if (INTEL_INFO(dev)->gen == 6)
 			ring->flush = gen6_render_ring_flush;
-		ring->irq_get = gen6_ring_get_irq;
-		ring->irq_put = gen6_ring_put_irq;
+		if (INTEL_INFO(dev)->gen >= 8) {
+			ring->flush = gen8_render_ring_flush;
+			ring->irq_get = gen8_ring_get_irq;
+			ring->irq_put = gen8_ring_put_irq;
+		} else {
+			ring->irq_get = gen6_ring_get_irq;
+			ring->irq_put = gen6_ring_put_irq;
+		}
 		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 		ring->get_seqno = gen6_ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
@@ -1775,6 +1906,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	ring->write_tail = ring_write_tail;
 	if (IS_HASWELL(dev))
 		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+	else if (IS_GEN8(dev))
+		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
 	else if (INTEL_INFO(dev)->gen >= 6)
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 	else if (INTEL_INFO(dev)->gen >= 4)
@@ -1888,7 +2021,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 	ring->id = VCS;
 
 	ring->write_tail = ring_write_tail;
-	if (IS_GEN6(dev) || IS_GEN7(dev)) {
+	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->mmio_base = GEN6_BSD_RING_BASE;
 		/* gen6 bsd needs a special wa for tail updates */
 		if (IS_GEN6(dev))
@@ -1897,10 +2030,20 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		ring->add_request = gen6_add_request;
 		ring->get_seqno = gen6_ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
-		ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
-		ring->irq_get = gen6_ring_get_irq;
-		ring->irq_put = gen6_ring_put_irq;
-		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+		if (INTEL_INFO(dev)->gen >= 8) {
+			ring->irq_enable_mask =
+				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
+			ring->irq_get = gen8_ring_get_irq;
+			ring->irq_put = gen8_ring_put_irq;
+			ring->dispatch_execbuffer =
+				gen8_ring_dispatch_execbuffer;
+		} else {
+			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+			ring->irq_get = gen6_ring_get_irq;
+			ring->irq_put = gen6_ring_put_irq;
+			ring->dispatch_execbuffer =
+				gen6_ring_dispatch_execbuffer;
+		}
 		ring->sync_to = gen6_ring_sync;
 		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
 		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
@@ -1946,10 +2089,18 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
-	ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
-	ring->irq_get = gen6_ring_get_irq;
-	ring->irq_put = gen6_ring_put_irq;
-	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+	if (INTEL_INFO(dev)->gen >= 8) {
+		ring->irq_enable_mask =
+			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
+		ring->irq_get = gen8_ring_get_irq;
+		ring->irq_put = gen8_ring_put_irq;
+		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+	} else {
+		ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
+		ring->irq_get = gen6_ring_get_irq;
+		ring->irq_put = gen6_ring_put_irq;
+		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+	}
 	ring->sync_to = gen6_ring_sync;
 	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
 	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
@@ -1978,10 +2129,19 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
-	ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
-	ring->irq_get = hsw_vebox_get_irq;
-	ring->irq_put = hsw_vebox_put_irq;
-	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		ring->irq_enable_mask =
+			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
+		ring->irq_get = gen8_ring_get_irq;
+		ring->irq_put = gen8_ring_put_irq;
+		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+	} else {
+		ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
+		ring->irq_get = hsw_vebox_get_irq;
+		ring->irq_put = hsw_vebox_put_irq;
+		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+	}
 	ring->sync_to = gen6_ring_sync;
 	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
 	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
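One closing note on the dispatch path that all four rings now share on gen8: gen8_ring_dispatch_execbuffer selects the batch's address space with the (ppgtt<<8) bit. A hedged reading of the start command it builds:

/* MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8):
 *   bit 8 is set whenever an aliasing PPGTT exists and the dispatch
 *   is not I915_DISPATCH_SECURE, i.e. the batch is fetched through
 *   the per-process GTT instead of the global GTT. The next two
 *   dwords carry the batch offset (only the low 32 bits are used so
 *   far); the FIXME in the patch marks the address-space and
 *   security selectors as provisional.
 */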