author    Ben Widawsky <benjamin.widawsky@intel.com>    2013-11-03 00:07:12 -0400
committer Daniel Vetter <daniel.vetter@ffwll.ch>        2013-11-08 12:09:41 -0500
commit    1c7a0623c795b35349d8f19cd8e8a19ac5783008 (patch)
tree      cac3ee4ecbc7c5b4225a9239bfab791a3fa9fb74 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent    3c94ceeee27b77fa0fe59844ec6c11e4db189d00 (diff)
drm/i915/bdw: dispatch updates (64b related)
The command to emit batch buffers has changed to address 48b addresses. It seemed reasonable that we could still use the old instruction where emitting 0 for length would do the right thing, but it seems to bother the simulator when the code does that. Now the second dword in the command has the upper 16b of the address of the batchbuffer.

v2: Remove duplicated vfunc assignment.
v3: Squash in VECS support changes from Zhao Yakui <yakui.zhao@intel.com>
v4: Make checkpatch happy.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net> (v2)
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
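For reference, the gen8 MI_BATCH_BUFFER_START carries the batch address in two dwords: bits 31:0 in the first address dword and the upper 16 bits (47:32) in the second, which is why the function added below emits the offset followed by a high dword that is still zero. A minimal sketch of that packing, assuming the kernel's lower_32_bits()/upper_32_bits() helpers; pack_bb_start_gen8() is an illustrative name, not a function in this patch:

	/*
	 * Sketch only: splitting a 48-bit batch address across the two
	 * address dwords of gen8 MI_BATCH_BUFFER_START. The patch below
	 * still emits 0 for the high dword because current offsets fit
	 * in 32 bits.
	 */
	static void pack_bb_start_gen8(u32 *cs, u64 offset)
	{
		cs[0] = MI_BATCH_BUFFER_START_GEN8;	/* command header */
		cs[1] = lower_32_bits(offset);		/* address bits 31:0 */
		cs[2] = upper_32_bits(offset) & 0xffff;	/* address bits 47:32 */
		cs[3] = MI_NOOP;			/* pad to even length */
	}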
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--    drivers/gpu/drm/i915/intel_ringbuffer.c    34
1 file changed, 31 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 2fda12607b78..7070d734b84b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1688,6 +1688,27 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
 }
 
 static int
+gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+			      u32 offset, u32 len,
+			      unsigned flags)
+{
+	int ret;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	/* FIXME(BDW): Address space and security selectors. */
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8);
+	intel_ring_emit(ring, offset);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
 hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 			     u32 offset, u32 len,
 			     unsigned flags)
@@ -1826,6 +1847,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	ring->write_tail = ring_write_tail;
 	if (IS_HASWELL(dev))
 		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+	else if (IS_GEN8(dev))
+		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
 	else if (INTEL_INFO(dev)->gen >= 6)
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 	else if (INTEL_INFO(dev)->gen >= 4)
@@ -1953,12 +1976,15 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
 			ring->irq_get = gen8_ring_get_irq;
 			ring->irq_put = gen8_ring_put_irq;
+			ring->dispatch_execbuffer =
+				gen8_ring_dispatch_execbuffer;
 		} else {
 			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
 			ring->irq_get = gen6_ring_get_irq;
 			ring->irq_put = gen6_ring_put_irq;
+			ring->dispatch_execbuffer =
+				gen6_ring_dispatch_execbuffer;
 		}
-		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 		ring->sync_to = gen6_ring_sync;
 		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
 		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
@@ -2009,12 +2035,13 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
 		ring->irq_get = gen8_ring_get_irq;
 		ring->irq_put = gen8_ring_put_irq;
+		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
 	} else {
 		ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
 		ring->irq_get = gen6_ring_get_irq;
 		ring->irq_put = gen6_ring_put_irq;
+		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 	}
-	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 	ring->sync_to = gen6_ring_sync;
 	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
 	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
@@ -2043,7 +2070,6 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
-	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 
 	if (INTEL_INFO(dev)->gen >= 8) {
 		ring->irq_enable_mask =
@@ -2051,10 +2077,12 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 			GT_RENDER_CS_MASTER_ERROR_INTERRUPT;
 		ring->irq_get = gen8_ring_get_irq;
 		ring->irq_put = gen8_ring_put_irq;
+		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
 	} else {
 		ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
 		ring->irq_get = hsw_vebox_get_irq;
 		ring->irq_put = hsw_vebox_put_irq;
+		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 	}
 	ring->sync_to = gen6_ring_sync;
 	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;