author	Daniel Vetter <daniel.vetter@ffwll.ch>	2012-04-11 16:12:46 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2012-04-13 06:40:57 -0400
commit	6a848ccb800f5330ca368cd804795b7ce644e36c (patch)
tree	5d034820c235ca1e3be7ea83f52ca1aec3b5e9fd /drivers/gpu/drm/i915/intel_ringbuffer.c
parent	1500f7ea06858819abcf8eec8f952e2f9281c610 (diff)
drm/i915: rip out ring->irq_mask
We only ever enable/disable one interrupt (namely user_interrupts and pipe_notify), so we don't need to track the interrupt masking state. Also rename irq_enable to irq_enable_mask, now that it won't collide - beforehand both an irq_mask and an irq_enable_mask would have looked a bit strange.

Reviewed-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
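For orientation, here is a condensed sketch of the per-ring state this patch touches. This is an approximation pieced together from the diff below, not the full struct definition from intel_ringbuffer.h:

struct intel_ring_buffer {
	/* ... simplified sketch; see intel_ringbuffer.h for the real thing ... */
	wait_queue_head_t	irq_queue;
	spinlock_t		irq_lock;
	u32			irq_refcount;		/* nested irq_get()/irq_put() callers */
	u32			irq_enable_mask;	/* renamed from irq_enable: the one bit this ring toggles */
	/* u32 irq_mask; */				/* removed: a shadow of IMR, now derived on demand */
	/* ... */
};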
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
 drivers/gpu/drm/i915/intel_ringbuffer.c | 21 ++++++++-------------
 1 file changed, 8 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 467b3319e4bd..915aa07d5a61 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -798,7 +798,6 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 mask = ring->irq_enable;
 
 	if (!dev->irq_enabled)
 		return false;
@@ -810,9 +809,8 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 
 	spin_lock(&ring->irq_lock);
 	if (ring->irq_refcount++ == 0) {
-		ring->irq_mask &= ~mask;
-		I915_WRITE_IMR(ring, ring->irq_mask);
-		ironlake_enable_irq(dev_priv, mask);
+		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+		ironlake_enable_irq(dev_priv, ring->irq_enable_mask);
 	}
 	spin_unlock(&ring->irq_lock);
 
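The rewrite leans on the IMR semantics: a bit that is set in the ring's interrupt mask register disables that source, so the whole register value can be derived from the single bit this ring ever toggles. A sketch of the two states (the bit position is a hypothetical example, not the real register layout):

	/*
	 * IMR semantics: a 1 bit masks (disables) an interrupt source.
	 * If this ring's irq_enable_mask were bit 0 (hypothetical):
	 *
	 *   waiters present: IMR = ~irq_enable_mask = 0xfffffffe (only our bit live)
	 *   idle:            IMR = ~0               = 0xffffffff (everything masked)
	 */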
@@ -824,13 +822,11 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 mask = ring->irq_enable;
 
 	spin_lock(&ring->irq_lock);
 	if (--ring->irq_refcount == 0) {
-		ring->irq_mask |= mask;
-		I915_WRITE_IMR(ring, ring->irq_mask);
-		ironlake_disable_irq(dev_priv, mask);
+		I915_WRITE_IMR(ring, ~0);
+		ironlake_disable_irq(dev_priv, ring->irq_enable_mask);
 	}
 	spin_unlock(&ring->irq_lock);
 
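The put path is the mirror image: when the last reference is dropped, writing ~0 masks every source again, and there is no shadow copy left to keep in sync with the hardware. A standalone demonstration of the mask arithmetic (plain userspace C; RING_IRQ_BIT is a made-up stand-in for a real bit such as GT_USER_INTERRUPT):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for a real interrupt bit from i915_reg.h. */
#define RING_IRQ_BIT (1u << 0)

int main(void)
{
	/* What gen6_ring_get_irq() now writes: unmask just this ring's bit. */
	uint32_t imr_in_use = ~RING_IRQ_BIT;

	/* What gen6_ring_put_irq() now writes: mask every source. */
	uint32_t imr_idle = ~0u;

	printf("IMR with waiters: 0x%08x\n", imr_in_use);
	printf("IMR when idle:    0x%08x\n", imr_idle);
	return 0;
}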
@@ -1002,7 +998,6 @@ int intel_init_ring_buffer(struct drm_device *dev,
 
 	init_waitqueue_head(&ring->irq_queue);
 	spin_lock_init(&ring->irq_lock);
-	ring->irq_mask = ~0;
 
 	if (I915_NEED_GFX_HWS(dev)) {
 		ret = init_status_page(ring);
@@ -1380,7 +1375,7 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
 	.flush			= gen6_ring_flush,
 	.add_request		= gen6_add_request,
 	.get_seqno		= gen6_ring_get_seqno,
-	.irq_enable		= GEN6_BSD_USER_INTERRUPT,
+	.irq_enable_mask	= GEN6_BSD_USER_INTERRUPT,
 	.irq_get		= gen6_ring_get_irq,
 	.irq_put		= gen6_ring_put_irq,
 	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
@@ -1426,7 +1421,7 @@ static const struct intel_ring_buffer gen6_blt_ring = {
 	.get_seqno		= gen6_ring_get_seqno,
 	.irq_get		= gen6_ring_get_irq,
 	.irq_put		= gen6_ring_put_irq,
-	.irq_enable		= GEN6_BLITTER_USER_INTERRUPT,
+	.irq_enable_mask	= GEN6_BLITTER_USER_INTERRUPT,
 	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
 	.sync_to		= gen6_blt_ring_sync_to,
 	.semaphore_register	= {MI_SEMAPHORE_SYNC_BR,
@@ -1446,7 +1441,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->flush = gen6_render_ring_flush;
 		ring->irq_get = gen6_ring_get_irq;
 		ring->irq_put = gen6_ring_put_irq;
-		ring->irq_enable = GT_USER_INTERRUPT;
+		ring->irq_enable_mask = GT_USER_INTERRUPT;
 		ring->get_seqno = gen6_ring_get_seqno;
 	} else if (IS_GEN5(dev)) {
 		ring->add_request = pc_render_add_request;
@@ -1471,7 +1466,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 		ring->add_request = gen6_add_request;
 		ring->irq_get = gen6_ring_get_irq;
 		ring->irq_put = gen6_ring_put_irq;
-		ring->irq_enable = GT_USER_INTERRUPT;
+		ring->irq_enable_mask = GT_USER_INTERRUPT;
 	} else if (IS_GEN5(dev)) {
 		ring->add_request = pc_render_add_request;
 		ring->get_seqno = pc_render_get_seqno;