author	Dave Airlie <airlied@redhat.com>	2013-11-10 03:35:33 -0500
committer	Dave Airlie <airlied@redhat.com>	2013-11-10 03:35:33 -0500
commit	ab0169bb5cc4a5c86756dde662087f9d12302eb0
tree	495e668337410f6763480ea1f010213f6399e38c /drivers/gpu/drm/i915/intel_ringbuffer.c
parent	8d0a2215931f1ffd77aef65cae2c0becc3f5d560
parent	13b3a0a77625c09c84825ef6ba81d957ec207841
Merge tag 'bdw-stage1-2013-11-08-v2' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
So here's the Broadwell pull request. From a kernel driver pov there's
two areas with big changes in Broadwell:

- Completely new enumerated interrupt bits. On the plus side it now
  looks fairly uniform and sane.
- Completely new pagetable layout.

To ensure minimal impact on existing platforms we've refactored both
the irq and low-level gtt handling code a lot in anticipation of the
bdw push. So now bdw enabling in these areas just plugs in a bunch of
vfuncs. Otherwise it's all fairly harmless adjusting of switch cases
and if-ladders to shovel bdw into the right blocks, so the impact on
existing platforms is minimized. I've also merged the bdw-stage1 branch
into our -nightly integration branch for the past week to make sure we
don't break anything.

Note that there's still quite a flurry of patches floating around, but
I've figured I'll push this out. I plan to keep the bdw fixes separate
from my usual -fixes stream so that you can reject them easily in case
it still looks like too much churn. Also, bdw is for now hidden behind
the preliminary hw enabling module option, so there's no real pressure
to get follow-up patches all into 3.13.

* tag 'bdw-stage1-2013-11-08-v2' of git://people.freedesktop.org/~danvet/drm-intel: (75 commits)
  drm/i915: Mask the vblank interrupt on bdw by default
  drm/i915: Wire up cpu fifo underrun reporting support for bdw
  drm/i915: Optimize gen8_enable|disable_vblank functions
  drm/i915: Wire up pipe CRC support for bdw
  drm/i915: Wire up PCH interrupts for bdw
  drm/i915: Wire up port A aux channel
  drm/i915: Fix up the bdw pipe interrupt enable lists
  drm/i915: Optimize pipe irq handling on bdw
  drm/i915/bdw: Take render error interrupt out of the mask
  drm/i915/bdw: Add BDW PCH check first
  drm/i915: Use hsw_crt_get_config on BDW
  drm/i915/bdw: Change dp aux timeout to 600us on DDIA
  drm/i915/bdw: Enable trickle feed on Broadwell
  drm/i915/bdw: WaSingleSubspanDispatchOnAALinesAndPoints
  drm/i915/bdw: conservative SBE VUE cache mode
  drm/i915/bdw: Limit SDE poly depth FIFO to 2
  drm/i915/bdw: Sampler power bypass disable
  drm/i915/bdw: Disable centroid pixel perf optimization
  drm/i915/bdw: BWGTLB clock gate disable
  drm/i915/bdw: Implement edp PSR workarounds
  ...
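[Editor's note] To make the "plugs in a bunch of vfuncs" remark concrete, here is a
minimal, self-contained C sketch of that pattern. All names in it (struct engine,
engine_init, the gen6_*/gen8_* stubs) are hypothetical stand-ins invented for
illustration only; the real driver instead fills the equivalent members of
struct intel_ring_buffer (irq_get, irq_put, flush, dispatch_execbuffer), exactly
as the diff below does.

	/*
	 * Minimal sketch of per-generation vfunc plugging, assuming
	 * invented names; not the actual i915 data structures.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct engine {
		const char *name;
		int gen;				/* hardware generation */
		bool (*irq_get)(struct engine *e);	/* per-gen vfuncs */
		void (*irq_put)(struct engine *e);
		int  (*flush)(struct engine *e);
	};

	/* Hypothetical gen6 and gen8 implementations. */
	static bool gen6_irq_get(struct engine *e) { printf("%s: gen6 irq_get\n", e->name); return true; }
	static void gen6_irq_put(struct engine *e) { printf("%s: gen6 irq_put\n", e->name); }
	static int  gen6_flush(struct engine *e)   { printf("%s: gen6 flush\n", e->name); return 0; }

	static bool gen8_irq_get(struct engine *e) { printf("%s: gen8 irq_get\n", e->name); return true; }
	static void gen8_irq_put(struct engine *e) { printf("%s: gen8 irq_put\n", e->name); }
	static int  gen8_flush(struct engine *e)   { printf("%s: gen8 flush\n", e->name); return 0; }

	/* New-platform enabling is just one more branch that swaps the vfuncs. */
	static void engine_init(struct engine *e)
	{
		if (e->gen >= 8) {
			e->irq_get = gen8_irq_get;
			e->irq_put = gen8_irq_put;
			e->flush   = gen8_flush;
		} else {
			e->irq_get = gen6_irq_get;
			e->irq_put = gen6_irq_put;
			e->flush   = gen6_flush;
		}
	}

	int main(void)
	{
		struct engine render = { .name = "render", .gen = 8 };

		engine_init(&render);
		/* Callers stay generation-agnostic from here on. */
		if (render.irq_get(&render)) {
			render.flush(&render);
			render.irq_put(&render);
		}
		return 0;
	}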
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	194
1 file changed, 177 insertions, 17 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 2dec134f75eb..b620337e6d67 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -360,6 +360,47 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
 	return 0;
 }
 
+static int
+gen8_render_ring_flush(struct intel_ring_buffer *ring,
+		       u32 invalidate_domains, u32 flush_domains)
+{
+	u32 flags = 0;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
+	int ret;
+
+	flags |= PIPE_CONTROL_CS_STALL;
+
+	if (flush_domains) {
+		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+	}
+	if (invalidate_domains) {
+		flags |= PIPE_CONTROL_TLB_INVALIDATE;
+		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_QW_WRITE;
+		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+	}
+
+	ret = intel_ring_begin(ring, 6);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+
+	return 0;
+
+}
+
 static void ring_write_tail(struct intel_ring_buffer *ring,
 			    u32 value)
 {
@@ -1066,6 +1107,52 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
+static bool
+gen8_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (ring->irq_refcount++ == 0) {
+		if (HAS_L3_DPF(dev) && ring->id == RCS) {
+			I915_WRITE_IMR(ring,
+				       ~(ring->irq_enable_mask |
+					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
+		} else {
+			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+		}
+		POSTING_READ(RING_IMR(ring->mmio_base));
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	return true;
+}
+
+static void
+gen8_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (--ring->irq_refcount == 0) {
+		if (HAS_L3_DPF(dev) && ring->id == RCS) {
+			I915_WRITE_IMR(ring,
+				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+		} else {
+			I915_WRITE_IMR(ring, ~0);
+		}
+		POSTING_READ(RING_IMR(ring->mmio_base));
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
 static int
 i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
 			 u32 offset, u32 length,
@@ -1624,6 +1711,8 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
 		return ret;
 
 	cmd = MI_FLUSH_DW;
+	if (INTEL_INFO(ring->dev)->gen >= 8)
+		cmd += 1;
 	/*
 	 * Bspec vol 1c.5 - video engine command streamer:
 	 * "If ENABLED, all TLBs will be invalidated once the flush
@@ -1635,9 +1724,38 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
 		MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
 	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	if (INTEL_INFO(ring->dev)->gen >= 8) {
+		intel_ring_emit(ring, 0); /* upper addr */
+		intel_ring_emit(ring, 0); /* value */
+	} else {
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, MI_NOOP);
+	}
+	intel_ring_advance(ring);
+	return 0;
+}
+
+static int
+gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+			      u32 offset, u32 len,
+			      unsigned flags)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
+		!(flags & I915_DISPATCH_SECURE);
+	int ret;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	/* FIXME(BDW): Address space and security selectors. */
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
+	intel_ring_emit(ring, offset);
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
+
 	return 0;
 }
 
@@ -1697,6 +1815,8 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
 		return ret;
 
 	cmd = MI_FLUSH_DW;
+	if (INTEL_INFO(ring->dev)->gen >= 8)
+		cmd += 1;
 	/*
 	 * Bspec vol 1c.3 - blitter engine command streamer:
 	 * "If ENABLED, all TLBs will be invalidated once the flush
@@ -1708,8 +1828,13 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
 		MI_FLUSH_DW_OP_STOREDW;
 	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, MI_NOOP);
+	if (INTEL_INFO(ring->dev)->gen >= 8) {
+		intel_ring_emit(ring, 0); /* upper addr */
+		intel_ring_emit(ring, 0); /* value */
+	} else {
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, MI_NOOP);
+	}
 	intel_ring_advance(ring);
 
 	if (IS_GEN7(dev) && flush)
@@ -1732,8 +1857,14 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->flush = gen7_render_ring_flush;
 		if (INTEL_INFO(dev)->gen == 6)
 			ring->flush = gen6_render_ring_flush;
-		ring->irq_get = gen6_ring_get_irq;
-		ring->irq_put = gen6_ring_put_irq;
+		if (INTEL_INFO(dev)->gen >= 8) {
+			ring->flush = gen8_render_ring_flush;
+			ring->irq_get = gen8_ring_get_irq;
+			ring->irq_put = gen8_ring_put_irq;
+		} else {
+			ring->irq_get = gen6_ring_get_irq;
+			ring->irq_put = gen6_ring_put_irq;
+		}
 		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 		ring->get_seqno = gen6_ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
@@ -1775,6 +1906,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	ring->write_tail = ring_write_tail;
 	if (IS_HASWELL(dev))
 		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+	else if (IS_GEN8(dev))
+		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
 	else if (INTEL_INFO(dev)->gen >= 6)
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 	else if (INTEL_INFO(dev)->gen >= 4)
@@ -1888,7 +2021,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 	ring->id = VCS;
 
 	ring->write_tail = ring_write_tail;
-	if (IS_GEN6(dev) || IS_GEN7(dev)) {
+	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->mmio_base = GEN6_BSD_RING_BASE;
 		/* gen6 bsd needs a special wa for tail updates */
 		if (IS_GEN6(dev))
@@ -1897,10 +2030,20 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		ring->add_request = gen6_add_request;
 		ring->get_seqno = gen6_ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
-		ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
-		ring->irq_get = gen6_ring_get_irq;
-		ring->irq_put = gen6_ring_put_irq;
-		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+		if (INTEL_INFO(dev)->gen >= 8) {
+			ring->irq_enable_mask =
+				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
+			ring->irq_get = gen8_ring_get_irq;
+			ring->irq_put = gen8_ring_put_irq;
+			ring->dispatch_execbuffer =
+				gen8_ring_dispatch_execbuffer;
+		} else {
+			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+			ring->irq_get = gen6_ring_get_irq;
+			ring->irq_put = gen6_ring_put_irq;
+			ring->dispatch_execbuffer =
+				gen6_ring_dispatch_execbuffer;
+		}
 		ring->sync_to = gen6_ring_sync;
 		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
 		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
@@ -1946,10 +2089,18 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
-	ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
-	ring->irq_get = gen6_ring_get_irq;
-	ring->irq_put = gen6_ring_put_irq;
-	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+	if (INTEL_INFO(dev)->gen >= 8) {
+		ring->irq_enable_mask =
+			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
+		ring->irq_get = gen8_ring_get_irq;
+		ring->irq_put = gen8_ring_put_irq;
+		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+	} else {
+		ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
+		ring->irq_get = gen6_ring_get_irq;
+		ring->irq_put = gen6_ring_put_irq;
+		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+	}
 	ring->sync_to = gen6_ring_sync;
 	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
 	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
@@ -1978,10 +2129,19 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
-	ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
-	ring->irq_get = hsw_vebox_get_irq;
-	ring->irq_put = hsw_vebox_put_irq;
-	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		ring->irq_enable_mask =
+			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
+		ring->irq_get = gen8_ring_get_irq;
+		ring->irq_put = gen8_ring_put_irq;
+		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+	} else {
+		ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
+		ring->irq_get = hsw_vebox_get_irq;
+		ring->irq_put = hsw_vebox_put_irq;
+		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+	}
 	ring->sync_to = gen6_ring_sync;
 	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
 	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;