Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
 drivers/gpu/drm/i915/intel_ringbuffer.c | 195 ++++++++++++++++++++++++++++---
 1 file changed, 178 insertions(+), 17 deletions(-)
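In summary: the patch adds a gen8 (Broadwell) variant of the render-ring PIPE_CONTROL flush, gen8 ring interrupt get/put helpers, a gen8 MI_BATCH_BUFFER_START dispatch function, widens the MI_FLUSH_DW emission on the BSD and blitter rings for 64-bit post-sync addresses, and wires the new paths into the per-ring init functions for gen >= 8.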
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 2dec134f75eb..c2f09d456300 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -360,6 +360,47 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
         return 0;
 }
 
+static int
+gen8_render_ring_flush(struct intel_ring_buffer *ring,
+                       u32 invalidate_domains, u32 flush_domains)
+{
+        u32 flags = 0;
+        u32 scratch_addr = ring->scratch.gtt_offset + 128;
+        int ret;
+
+        flags |= PIPE_CONTROL_CS_STALL;
+
+        if (flush_domains) {
+                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+        }
+        if (invalidate_domains) {
+                flags |= PIPE_CONTROL_TLB_INVALIDATE;
+                flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+                flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+                flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+                flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+                flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+                flags |= PIPE_CONTROL_QW_WRITE;
+                flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+        }
+
+        ret = intel_ring_begin(ring, 6);
+        if (ret)
+                return ret;
+
+        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+        intel_ring_emit(ring, flags);
+        intel_ring_emit(ring, scratch_addr);
+        intel_ring_emit(ring, 0);
+        intel_ring_emit(ring, 0);
+        intel_ring_emit(ring, 0);
+        intel_ring_advance(ring);
+
+        return 0;
+}
+
 static void ring_write_tail(struct intel_ring_buffer *ring,
                             u32 value)
 {
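The new flush mirrors gen7_render_ring_flush: a single 6-dword PIPE_CONTROL that always requests a CS stall, adds render-target and depth-cache flushes for flush_domains, and for invalidate_domains sets the full set of TLB/cache invalidations plus a qword post-sync write into the global GTT. As on gen6/gen7, the post-sync write lands 128 bytes into the ring's scratch page.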
@@ -924,6 +965,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
         } else if (IS_GEN6(ring->dev)) {
                 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
         } else {
+                /* XXX: gen8 returns to sanity */
                 mmio = RING_HWS_PGA(ring->mmio_base);
         }
 
@@ -1066,6 +1108,52 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
+static bool
+gen8_ring_get_irq(struct intel_ring_buffer *ring)
+{
+        struct drm_device *dev = ring->dev;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        unsigned long flags;
+
+        if (!dev->irq_enabled)
+                return false;
+
+        spin_lock_irqsave(&dev_priv->irq_lock, flags);
+        if (ring->irq_refcount++ == 0) {
+                if (HAS_L3_DPF(dev) && ring->id == RCS) {
+                        I915_WRITE_IMR(ring,
+                                       ~(ring->irq_enable_mask |
+                                         GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
+                } else {
+                        I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+                }
+                POSTING_READ(RING_IMR(ring->mmio_base));
+        }
+        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+        return true;
+}
+
+static void
+gen8_ring_put_irq(struct intel_ring_buffer *ring)
+{
+        struct drm_device *dev = ring->dev;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        unsigned long flags;
+
+        spin_lock_irqsave(&dev_priv->irq_lock, flags);
+        if (--ring->irq_refcount == 0) {
+                if (HAS_L3_DPF(dev) && ring->id == RCS) {
+                        I915_WRITE_IMR(ring,
+                                       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+                } else {
+                        I915_WRITE_IMR(ring, ~0);
+                }
+                POSTING_READ(RING_IMR(ring->mmio_base));
+        }
+        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
 static int
 i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
                          u32 offset, u32 length,
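These follow the gen6 get/put pattern: the first reference unmasks the ring's interrupts in IMR (keeping L3 parity error reporting enabled on the render ring when HAS_L3_DPF), the last reference masks everything again, and POSTING_READ flushes the register write. Unlike gen6, there is no shared GT IMR to update here; gen8 masking is handled per engine through RING_IMR.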
@@ -1624,6 +1712,8 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
                 return ret;
 
         cmd = MI_FLUSH_DW;
+        if (INTEL_INFO(ring->dev)->gen >= 8)
+                cmd += 1;
         /*
          * Bspec vol 1c.5 - video engine command streamer:
          * "If ENABLED, all TLBs will be invalidated once the flush
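The bare `cmd += 1` works because MI_FLUSH_DW encodes its dword length (minus the standard bias) in the low bits of the header; on gen8 the post-sync address is 64 bits wide, so the packet grows by one dword and the embedded length field must grow with it. Roughly, assuming the usual MI command encoding:

        gen6/7: MI_FLUSH_DW | flags,       addr | MI_FLUSH_DW_USE_GTT, 0                (+ MI_NOOP pad)
        gen8:   (MI_FLUSH_DW + 1) | flags, addr | MI_FLUSH_DW_USE_GTT, 0 /* addr hi */, 0 /* value */

The second half of that change is in the next hunk, where the emitted tail switches on the generation.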
@@ -1635,9 +1725,38 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
                 MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
         intel_ring_emit(ring, cmd);
         intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+        if (INTEL_INFO(ring->dev)->gen >= 8) {
+                intel_ring_emit(ring, 0); /* upper addr */
+                intel_ring_emit(ring, 0); /* value */
+        } else {
+                intel_ring_emit(ring, 0);
+                intel_ring_emit(ring, MI_NOOP);
+        }
+        intel_ring_advance(ring);
+        return 0;
+}
+
+static int
+gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+                              u32 offset, u32 len,
+                              unsigned flags)
+{
+        struct drm_i915_private *dev_priv = ring->dev->dev_private;
+        bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
+                !(flags & I915_DISPATCH_SECURE);
+        int ret;
+
+        ret = intel_ring_begin(ring, 4);
+        if (ret)
+                return ret;
+
+        /* FIXME(BDW): Address space and security selectors. */
+        intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
+        intel_ring_emit(ring, offset);
         intel_ring_emit(ring, 0);
         intel_ring_emit(ring, MI_NOOP);
         intel_ring_advance(ring);
+
         return 0;
 }
 
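Note how the hunk splices the new dispatch function in front of the old flush tail: the `intel_ring_emit(ring, 0)` / `MI_NOOP` / `intel_ring_advance` context lines become the end of gen8_ring_dispatch_execbuffer. The resulting 4-dword packet, annotated (a reading of the code above, not an addition to it):

        intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8)); /* header; bit 8 selects PPGTT */
        intel_ring_emit(ring, offset);   /* batch address, low 32 bits */
        intel_ring_emit(ring, 0);        /* batch address, upper bits, left zero pending the FIXME */
        intel_ring_emit(ring, MI_NOOP);  /* pad to an even dword count */

PPGTT dispatch is skipped for I915_DISPATCH_SECURE batches, which must run from the global GTT.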
@@ -1697,6 +1816,8 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
                 return ret;
 
         cmd = MI_FLUSH_DW;
+        if (INTEL_INFO(ring->dev)->gen >= 8)
+                cmd += 1;
         /*
          * Bspec vol 1c.3 - blitter engine command streamer:
          * "If ENABLED, all TLBs will be invalidated once the flush
@@ -1708,8 +1829,13 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
                 MI_FLUSH_DW_OP_STOREDW;
         intel_ring_emit(ring, cmd);
         intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-        intel_ring_emit(ring, 0);
-        intel_ring_emit(ring, MI_NOOP);
+        if (INTEL_INFO(ring->dev)->gen >= 8) {
+                intel_ring_emit(ring, 0); /* upper addr */
+                intel_ring_emit(ring, 0); /* value */
+        } else {
+                intel_ring_emit(ring, 0);
+                intel_ring_emit(ring, MI_NOOP);
+        }
         intel_ring_advance(ring);
 
         if (IS_GEN7(dev) && flush)
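This is the blitter-side twin of the BSD change above: one extra header dword on gen >= 8, and an upper-address/value pair in place of the old zero/MI_NOOP tail.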
@@ -1732,8 +1858,14 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
                 ring->flush = gen7_render_ring_flush;
                 if (INTEL_INFO(dev)->gen == 6)
                         ring->flush = gen6_render_ring_flush;
-                ring->irq_get = gen6_ring_get_irq;
-                ring->irq_put = gen6_ring_put_irq;
+                if (INTEL_INFO(dev)->gen >= 8) {
+                        ring->flush = gen8_render_ring_flush;
+                        ring->irq_get = gen8_ring_get_irq;
+                        ring->irq_put = gen8_ring_put_irq;
+                } else {
+                        ring->irq_get = gen6_ring_get_irq;
+                        ring->irq_put = gen6_ring_put_irq;
+                }
                 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
                 ring->get_seqno = gen6_ring_get_seqno;
                 ring->set_seqno = ring_set_seqno;
@@ -1775,6 +1907,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
         ring->write_tail = ring_write_tail;
         if (IS_HASWELL(dev))
                 ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+        else if (IS_GEN8(dev))
+                ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
         else if (INTEL_INFO(dev)->gen >= 6)
                 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
         else if (INTEL_INFO(dev)->gen >= 4)
@@ -1888,7 +2022,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
         ring->id = VCS;
 
         ring->write_tail = ring_write_tail;
-        if (IS_GEN6(dev) || IS_GEN7(dev)) {
+        if (INTEL_INFO(dev)->gen >= 6) {
                 ring->mmio_base = GEN6_BSD_RING_BASE;
                 /* gen6 bsd needs a special wa for tail updates */
                 if (IS_GEN6(dev))
@@ -1897,10 +2031,20 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
                 ring->add_request = gen6_add_request;
                 ring->get_seqno = gen6_ring_get_seqno;
                 ring->set_seqno = ring_set_seqno;
-                ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
-                ring->irq_get = gen6_ring_get_irq;
-                ring->irq_put = gen6_ring_put_irq;
-                ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+                if (INTEL_INFO(dev)->gen >= 8) {
+                        ring->irq_enable_mask =
+                                GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
+                        ring->irq_get = gen8_ring_get_irq;
+                        ring->irq_put = gen8_ring_put_irq;
+                        ring->dispatch_execbuffer =
+                                gen8_ring_dispatch_execbuffer;
+                } else {
+                        ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+                        ring->irq_get = gen6_ring_get_irq;
+                        ring->irq_put = gen6_ring_put_irq;
+                        ring->dispatch_execbuffer =
+                                gen6_ring_dispatch_execbuffer;
+                }
                 ring->sync_to = gen6_ring_sync;
                 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
                 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
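The gen8 interrupt masks are built differently from gen6/7: instead of dedicated bits such as GT_BSD_USER_INTERRUPT, each engine's user-interrupt bit is the render bit shifted into that engine's field of the packed gen8 interrupt registers (GEN8_VCS1_IRQ_SHIFT here; GEN8_BCS_IRQ_SHIFT and GEN8_VECS_IRQ_SHIFT in the blitter and vebox hunks below).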
@@ -1946,10 +2090,18 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
         ring->add_request = gen6_add_request;
         ring->get_seqno = gen6_ring_get_seqno;
         ring->set_seqno = ring_set_seqno;
-        ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
-        ring->irq_get = gen6_ring_get_irq;
-        ring->irq_put = gen6_ring_put_irq;
-        ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+        if (INTEL_INFO(dev)->gen >= 8) {
+                ring->irq_enable_mask =
+                        GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
+                ring->irq_get = gen8_ring_get_irq;
+                ring->irq_put = gen8_ring_put_irq;
+                ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+        } else {
+                ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
+                ring->irq_get = gen6_ring_get_irq;
+                ring->irq_put = gen6_ring_put_irq;
+                ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+        }
         ring->sync_to = gen6_ring_sync;
         ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
         ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
@@ -1978,10 +2130,19 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
         ring->add_request = gen6_add_request;
         ring->get_seqno = gen6_ring_get_seqno;
         ring->set_seqno = ring_set_seqno;
-        ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
-        ring->irq_get = hsw_vebox_get_irq;
-        ring->irq_put = hsw_vebox_put_irq;
-        ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+
+        if (INTEL_INFO(dev)->gen >= 8) {
+                ring->irq_enable_mask =
+                        GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
+                ring->irq_get = gen8_ring_get_irq;
+                ring->irq_put = gen8_ring_put_irq;
+                ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+        } else {
+                ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
+                ring->irq_get = hsw_vebox_get_irq;
+                ring->irq_put = hsw_vebox_put_irq;
+                ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+        }
         ring->sync_to = gen6_ring_sync;
         ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
         ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;