aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/i915/intel_ringbuffer.c
diff options
context:
space:
mode:
authorBen Widawsky <benjamin.widawsky@intel.com>2013-11-03 00:07:09 -0400
committerDaniel Vetter <daniel.vetter@ffwll.ch>2013-11-08 12:09:39 -0500
commitabd58f0175915bed644aa67c8f69dc571b8280e0 (patch)
tree28d4ee3ac78111b72ced12f285ddac1441e236db /drivers/gpu/drm/i915/intel_ringbuffer.c
parent9459d252378aea80d28dc12bfec9a0d31b2a61bf (diff)
drm/i915/bdw: Implement interrupt changes
The interrupt handling implementation remains the same as previous generations with the 4 types of registers, status, identity, mask, and enable. However the layout of where the bits go has changed entirely. To address these changes, all of the interrupt vfuncs needed special gen8 code. The way it works is there is a top level status register now which informs the interrupt service routine which unit caused the interrupt, and therefore which interrupt registers to read to process the interrupt. For display the division is quite logical, a set of interrupt registers for each pipe, and in addition to those, a set each for "misc" and port. For GT the things get a bit hairy, as seen by the code. Each of the GT units has its own bits defined. They all look *very similar* and reside in 16 bits of a GT register. As an example, RCS and BCS share register 0. To compact the code a bit, at a slight expense to complexity, this is exactly how the code works as well. 2 structures are added to the ring buffer so that our ring buffer interrupt handling code knows which ring shares the interrupt registers, and a shift value (ie. the top or bottom 16 bits of the register). The above allows us to keep the interrupt register caching scheme, the per interrupt enables, and the code to mask and unmask interrupts relatively clean (again at the cost of some more complexity). Most of the GT units mentioned above are command streamers, and so the symmetry should work quite well for even the yet to be implemented rings which Broadwell adds. v2: Fixes up a couple of bugs, and is more verbose about errors in the Broadwell interrupt handler. v3: fix DE_MISC IER offset v4: Simplify interrupts: I totally misread the docs the first time I implemented interrupts, and so this should greatly simplify the mess. Unlike GEN6, we never touch the regular mask registers in irq_get/put. v5: Rebased on top of recent pch hotplug setup changes. v6: Fixup on top of moving num_pipes to intel_info. 
v7: Rebased on top of Egbert Eich's hpd irq handling rework. Also wired up ibx_hpd_irq_setup for gen8. v8: Rebase on top of Jani's asle handling rework. v9: Rebase on top of Ben's VECS enabling for Haswell, where he unfortunately went OCD on the gt irq #defines. Note that they're still not yet fully consistent: - Used the GT_RENDER_ #defines + bdw shifts. - Dropped the shift from the L3_PARITY stuff, seemed clearer. - s/irq_refcount/irq_refcount.gt/ v10: Squash in VECS enabling patches and the gen8_gt_irq_handler refactoring from Zhao Yakui <yakui.zhao@intel.com> v11: Rebase on top of the interrupt cleanups in upstream. v12: Rebase on top of Ben's DPF changes in upstream. v13: Drop bdw from the HAS_L3_DPF feature flag for now, it's unclear what exactly needs to be done. Requested by Ben. v14: Fix the patch. - Drop the mask of reserved bits and assorted logic, it doesn't match the spec. - Do the posting read unconditionally instead of commenting it out. - Add a GEN8_MASTER_IRQ_CONTROL definition and use it. - Fix up the GEN8_PIPE interrupt defines and give the GEN8_ prefixes - we actually will need to use them. - Enclose macros in do {} while (0) (checkpatch). - Clear DE_MISC interrupt bits only after having processed them. - Fix whitespace fail (checkpatch). - Fix overly long lines where appropriate (checkpatch). - Don't use typedef'ed private_t (maintainer-scripts). - Align the function parameter list correctly. Signed-off-by: Ben Widawsky <ben@bwidawsk.net> (v4) Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch> bikeshed
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c96
1 files changed, 85 insertions, 11 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 2dec134f75eb..2fda12607b78 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1066,6 +1066,52 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
1066 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1066 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1067} 1067}
1068 1068
/*
 * gen8_ring_get_irq - refcounted enable of user interrupts for a ring (gen8).
 *
 * Returns false when device interrupts are disabled.  Only the first
 * get (irq_refcount 0 -> 1) actually writes the ring's IMR to unmask
 * irq_enable_mask; per the commit message, unlike gen6 this never
 * touches the regular GT mask registers in get/put.
 */
1069static bool
1070gen8_ring_get_irq(struct intel_ring_buffer *ring)
1071{
1072 struct drm_device *dev = ring->dev;
1073 struct drm_i915_private *dev_priv = dev->dev_private;
1074 unsigned long flags;
1075
1076 if (!dev->irq_enabled)
1077 return false;
1078
/* irq_lock protects the refcount and the IMR write below. */
1079 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1080 if (ring->irq_refcount++ == 0) {
/* On the render ring, also keep L3 parity errors unmasked when DPF is present. */
1081 if (HAS_L3_DPF(dev) && ring->id == RCS) {
1082 I915_WRITE_IMR(ring,
1083 ~(ring->irq_enable_mask |
1084 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
1085 } else {
1086 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1087 }
/* Posting read flushes the IMR write to hardware before returning. */
1088 POSTING_READ(RING_IMR(ring->mmio_base));
1089 }
1090 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1091
1092 return true;
1093}
1094
/*
 * gen8_ring_put_irq - refcounted disable of user interrupts for a ring (gen8).
 *
 * Mirror of gen8_ring_get_irq: only the last put (irq_refcount 1 -> 0)
 * writes the ring's IMR, masking everything (~0) — except on the render
 * ring with DPF, where L3 parity errors are left unmasked.
 */
1095static void
1096gen8_ring_put_irq(struct intel_ring_buffer *ring)
1097{
1098 struct drm_device *dev = ring->dev;
1099 struct drm_i915_private *dev_priv = dev->dev_private;
1100 unsigned long flags;
1101
/* irq_lock protects the refcount and the IMR write below. */
1102 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1103 if (--ring->irq_refcount == 0) {
1104 if (HAS_L3_DPF(dev) && ring->id == RCS) {
/* Keep L3 parity error reporting live even with user irqs off. */
1105 I915_WRITE_IMR(ring,
1106 ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
1107 } else {
1108 I915_WRITE_IMR(ring, ~0);
1109 }
/* Posting read flushes the IMR write to hardware before returning. */
1110 POSTING_READ(RING_IMR(ring->mmio_base));
1111 }
1112 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1113}
1114
1069static int 1115static int
1070i965_dispatch_execbuffer(struct intel_ring_buffer *ring, 1116i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
1071 u32 offset, u32 length, 1117 u32 offset, u32 length,
@@ -1732,8 +1778,13 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1732 ring->flush = gen7_render_ring_flush; 1778 ring->flush = gen7_render_ring_flush;
1733 if (INTEL_INFO(dev)->gen == 6) 1779 if (INTEL_INFO(dev)->gen == 6)
1734 ring->flush = gen6_render_ring_flush; 1780 ring->flush = gen6_render_ring_flush;
1735 ring->irq_get = gen6_ring_get_irq; 1781 if (INTEL_INFO(dev)->gen >= 8) {
1736 ring->irq_put = gen6_ring_put_irq; 1782 ring->irq_get = gen8_ring_get_irq;
1783 ring->irq_put = gen8_ring_put_irq;
1784 } else {
1785 ring->irq_get = gen6_ring_get_irq;
1786 ring->irq_put = gen6_ring_put_irq;
1787 }
1737 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT; 1788 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
1738 ring->get_seqno = gen6_ring_get_seqno; 1789 ring->get_seqno = gen6_ring_get_seqno;
1739 ring->set_seqno = ring_set_seqno; 1790 ring->set_seqno = ring_set_seqno;
@@ -1897,9 +1948,16 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
1897 ring->add_request = gen6_add_request; 1948 ring->add_request = gen6_add_request;
1898 ring->get_seqno = gen6_ring_get_seqno; 1949 ring->get_seqno = gen6_ring_get_seqno;
1899 ring->set_seqno = ring_set_seqno; 1950 ring->set_seqno = ring_set_seqno;
1900 ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; 1951 if (INTEL_INFO(dev)->gen >= 8) {
1901 ring->irq_get = gen6_ring_get_irq; 1952 ring->irq_enable_mask =
1902 ring->irq_put = gen6_ring_put_irq; 1953 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
1954 ring->irq_get = gen8_ring_get_irq;
1955 ring->irq_put = gen8_ring_put_irq;
1956 } else {
1957 ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
1958 ring->irq_get = gen6_ring_get_irq;
1959 ring->irq_put = gen6_ring_put_irq;
1960 }
1903 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 1961 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1904 ring->sync_to = gen6_ring_sync; 1962 ring->sync_to = gen6_ring_sync;
1905 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR; 1963 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
@@ -1946,9 +2004,16 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
1946 ring->add_request = gen6_add_request; 2004 ring->add_request = gen6_add_request;
1947 ring->get_seqno = gen6_ring_get_seqno; 2005 ring->get_seqno = gen6_ring_get_seqno;
1948 ring->set_seqno = ring_set_seqno; 2006 ring->set_seqno = ring_set_seqno;
1949 ring->irq_enable_mask = GT_BLT_USER_INTERRUPT; 2007 if (INTEL_INFO(dev)->gen >= 8) {
1950 ring->irq_get = gen6_ring_get_irq; 2008 ring->irq_enable_mask =
1951 ring->irq_put = gen6_ring_put_irq; 2009 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
2010 ring->irq_get = gen8_ring_get_irq;
2011 ring->irq_put = gen8_ring_put_irq;
2012 } else {
2013 ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2014 ring->irq_get = gen6_ring_get_irq;
2015 ring->irq_put = gen6_ring_put_irq;
2016 }
1952 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 2017 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1953 ring->sync_to = gen6_ring_sync; 2018 ring->sync_to = gen6_ring_sync;
1954 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR; 2019 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
@@ -1978,10 +2043,19 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
1978 ring->add_request = gen6_add_request; 2043 ring->add_request = gen6_add_request;
1979 ring->get_seqno = gen6_ring_get_seqno; 2044 ring->get_seqno = gen6_ring_get_seqno;
1980 ring->set_seqno = ring_set_seqno; 2045 ring->set_seqno = ring_set_seqno;
1981 ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
1982 ring->irq_get = hsw_vebox_get_irq;
1983 ring->irq_put = hsw_vebox_put_irq;
1984 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 2046 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2047
2048 if (INTEL_INFO(dev)->gen >= 8) {
2049 ring->irq_enable_mask =
2050 (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT) |
2051 GT_RENDER_CS_MASTER_ERROR_INTERRUPT;
2052 ring->irq_get = gen8_ring_get_irq;
2053 ring->irq_put = gen8_ring_put_irq;
2054 } else {
2055 ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2056 ring->irq_get = hsw_vebox_get_irq;
2057 ring->irq_put = hsw_vebox_put_irq;
2058 }
1985 ring->sync_to = gen6_ring_sync; 2059 ring->sync_to = gen6_ring_sync;
1986 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER; 2060 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
1987 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV; 2061 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;