author     Ben Widawsky <ben@bwidawsk.net>        2013-05-28 22:22:29 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch> 2013-05-31 14:54:18 -0400
commit     cc609d5da5c78c92a2e2565604b2603a0965b494 (patch)
tree       492d38d51b7cdd4f120c56a1fd4033581e9605c5 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent     aeb0659338793746b8a4e482fa588ba1dd9ee559 (diff)
drm/i915: consolidate interrupt naming scheme
The motivation here is that we're going to add some new interrupt definitions and handling outside of the GT interrupts, which are all we've managed so far (with some RPS exceptions). By consolidating the names now we can make things a bit cleaner in the future: we don't need to define register names twice, and we can leverage the pretty decent overlap in HW registers since ILK.

To explain briefly what is in the comments: there are two sets of interrupt masking/enabling registers. At least so far, the definitions of the two sets overlap. The old code set up distinct names for the interrupts in each set, i.e. one for global and one for ring. This made things confusing when the wrong defines were used in the wrong places.

rebase: Modified VLV bits

v2: Renamed GT_RENDER_MASTER to GT_RENDER_CS_MASTER (Damien)

Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
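As a rough sketch of what the consolidation buys, the fragment below mirrors the gen6_ring_get_irq() pattern changed in this patch: one consolidated define, GT_RENDER_USER_INTERRUPT, serves both the per-ring IMR write and the shared GT mask in GTIMR, where the old scheme needed a "ring" name and a "global" name for the same HW bit. This is an illustrative sketch, not code from the patch; the helper name is made up and the surrounding driver context is assumed.

/* Illustrative sketch only -- not part of this patch.  With the
 * consolidated naming, a single define covers both register sets. */
static void example_unmask_render_user_irq(drm_i915_private_t *dev_priv,
                                           struct intel_ring_buffer *ring)
{
        /* Per-ring interrupt mask register (RING_IMR): unmask the bit. */
        I915_WRITE_IMR(ring, ~GT_RENDER_USER_INTERRUPT);

        /* Global GT interrupt mask (GTIMR): same define, same bit, where
         * the old code would have reached for GT_USER_INTERRUPT instead. */
        dev_priv->gt_irq_mask &= ~GT_RENDER_USER_INTERRUPT;
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        POSTING_READ(GTIMR);
}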
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  21 ++++++++++++---------
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c7a89bb051a0..5ab8cc2cafe2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -560,7 +560,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
 	if (HAS_L3_GPU_CACHE(dev))
-		I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
+		I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
 
 	return ret;
 }
@@ -982,8 +982,9 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount.gt++ == 0) {
 		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
-			I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
-					       GEN6_RENDER_L3_PARITY_ERROR));
+			I915_WRITE_IMR(ring,
+				       ~(ring->irq_enable_mask |
+					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
 		else
 			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
 		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
@@ -1005,7 +1006,8 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount.gt == 0) {
 		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
-			I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
+			I915_WRITE_IMR(ring,
+				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
 		else
 			I915_WRITE_IMR(ring, ~0);
 		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
@@ -1682,7 +1684,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->flush = gen6_render_ring_flush;
 		ring->irq_get = gen6_ring_get_irq;
 		ring->irq_put = gen6_ring_put_irq;
-		ring->irq_enable_mask = GT_USER_INTERRUPT;
+		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 		ring->get_seqno = gen6_ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
 		ring->sync_to = gen6_ring_sync;
@@ -1701,7 +1703,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->set_seqno = pc_render_set_seqno;
 		ring->irq_get = gen5_ring_get_irq;
 		ring->irq_put = gen5_ring_put_irq;
-		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
+		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
+					GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
 	} else {
 		ring->add_request = i9xx_add_request;
 		if (INTEL_INFO(dev)->gen < 4)
@@ -1843,7 +1846,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		ring->add_request = gen6_add_request;
 		ring->get_seqno = gen6_ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
-		ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
+		ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
 		ring->irq_get = gen6_ring_get_irq;
 		ring->irq_put = gen6_ring_put_irq;
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
@@ -1863,7 +1866,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		ring->get_seqno = ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
 		if (IS_GEN5(dev)) {
-			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+			ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
 			ring->irq_get = gen5_ring_get_irq;
 			ring->irq_put = gen5_ring_put_irq;
 		} else {
@@ -1892,7 +1895,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 		ring->add_request = gen6_add_request;
 		ring->get_seqno = gen6_ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
-		ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
+		ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
 		ring->irq_get = gen6_ring_get_irq;
 		ring->irq_put = gen6_ring_put_irq;
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
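For quick reference, the renames this patch applies in intel_ringbuffer.c, taken directly from the hunks above (old name on the left, consolidated name on the right):

    GEN6_RENDER_L3_PARITY_ERROR     ->  GT_RENDER_L3_PARITY_ERROR_INTERRUPT
    GT_USER_INTERRUPT               ->  GT_RENDER_USER_INTERRUPT
    GT_PIPE_NOTIFY                  ->  GT_RENDER_PIPECTL_NOTIFY_INTERRUPT
    GEN6_BSD_USER_INTERRUPT         ->  GT_BSD_USER_INTERRUPT
    GT_BSD_USER_INTERRUPT (gen5)    ->  ILK_BSD_USER_INTERRUPT
    GEN6_BLITTER_USER_INTERRUPT     ->  GT_BLT_USER_INTERRUPT

The old scheme's hazard is visible in the BSD entries: GT_BSD_USER_INTERRUPT used to be the gen5 (global) name while GEN6_BSD_USER_INTERRUPT was the ring name; after this patch GT_BSD_USER_INTERRUPT means the gen6+ bit and the ILK bit gets its own ILK_ prefix.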