author     Daniel Vetter <daniel.vetter@ffwll.ch>  2012-04-11 16:12:54 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>  2012-04-13 06:51:37 -0400
commit     e48d86347c602c55159714f6ddcd88969a1b2f21
tree       5503e3946fa23e8b1d7e01bbeeb3742f8080dd27 /drivers/gpu/drm/i915
parent     e367031966c3546b213f6699b83669739cb6fb1d
drm/i915: split out the gen5 ring irq get/put functions
Now that the ring irq get/put functions are sensibly split up per
generation, we can nicely get rid of that ugly is_gen5 check.
Reviewed-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
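
The refactor follows the usual vtable-split pattern: instead of one
callback that re-checks the hardware generation on every invocation,
each generation gets its own irq_get/irq_put implementation, and the
choice is made exactly once when the ring is initialized. A minimal
stand-alone sketch of the idea, assuming simplified stand-in types
(struct ring, ring_init and the helpers below are illustrative, not the
driver's real definitions):

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative stand-in for struct intel_ring_buffer. */
	struct ring {
		int gen;				/* hardware generation */
		bool (*irq_get)(struct ring *ring);	/* chosen once at init */
		void (*irq_put)(struct ring *ring);
	};

	/* One straight-line implementation per generation, no runtime branch. */
	static bool gen5_irq_get(struct ring *ring)
	{
		(void)ring;
		puts("ironlake-style irq enable");
		return true;
	}

	static void gen5_irq_put(struct ring *ring)
	{
		(void)ring;
		puts("ironlake-style irq disable");
	}

	static bool i9xx_irq_get(struct ring *ring)
	{
		(void)ring;
		puts("i915-style irq enable");
		return true;
	}

	static void i9xx_irq_put(struct ring *ring)
	{
		(void)ring;
		puts("i915-style irq disable");
	}

	/* The generation check happens exactly once, here, instead of
	 * inside every irq_get/irq_put call. */
	static void ring_init(struct ring *ring, int gen)
	{
		ring->gen = gen;
		if (gen >= 5) {
			ring->irq_get = gen5_irq_get;
			ring->irq_put = gen5_irq_put;
		} else {
			ring->irq_get = i9xx_irq_get;
			ring->irq_put = i9xx_irq_put;
		}
	}

	int main(void)
	{
		struct ring r;

		ring_init(&r, 5);
		if (r.irq_get(&r))	/* calls the gen5 variant directly */
			r.irq_put(&r);
		return 0;
	}

The payoff is the same as in the patch below: the per-call branch
disappears and each callback body is straight-line code for exactly one
hardware path.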
Diffstat (limited to 'drivers/gpu/drm/i915')
 drivers/gpu/drm/i915/intel_ringbuffer.c | 66 ++++++++++++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 44 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index a09e8aa0db8d..7e1f2211ea2a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -645,6 +645,35 @@ i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
 }
 
 static bool
+gen5_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock(&ring->irq_lock);
+	if (ring->irq_refcount++ == 0)
+		ironlake_enable_irq(dev_priv, ring->irq_enable_mask);
+	spin_unlock(&ring->irq_lock);
+
+	return true;
+}
+
+static void
+gen5_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	spin_lock(&ring->irq_lock);
+	if (--ring->irq_refcount == 0)
+		ironlake_disable_irq(dev_priv, ring->irq_enable_mask);
+	spin_unlock(&ring->irq_lock);
+}
+
+static bool
 i9xx_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
@@ -654,13 +683,8 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
 		return false;
 
 	spin_lock(&ring->irq_lock);
-	if (ring->irq_refcount++ == 0) {
-		if (INTEL_INFO(dev)->gen >= 5)
-			ironlake_enable_irq(dev_priv,
-					    ring->irq_enable_mask);
-		else
-			i915_enable_irq(dev_priv, ring->irq_enable_mask);
-	}
+	if (ring->irq_refcount++ == 0)
+		i915_enable_irq(dev_priv, ring->irq_enable_mask);
 	spin_unlock(&ring->irq_lock);
 
 	return true;
@@ -673,13 +697,8 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	spin_lock(&ring->irq_lock);
-	if (--ring->irq_refcount == 0) {
-		if (INTEL_INFO(dev)->gen >= 5)
-			ironlake_disable_irq(dev_priv,
-					     ring->irq_enable_mask);
-		else
-			i915_disable_irq(dev_priv, ring->irq_enable_mask);
-	}
+	if (--ring->irq_refcount == 0)
+		i915_disable_irq(dev_priv, ring->irq_enable_mask);
 	spin_unlock(&ring->irq_lock);
 }
 
@@ -1301,8 +1320,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->add_request = pc_render_add_request;
 		ring->flush = render_ring_flush;
 		ring->get_seqno = pc_render_get_seqno;
-		ring->irq_get = i9xx_ring_get_irq;
-		ring->irq_put = i9xx_ring_put_irq;
+		ring->irq_get = gen5_ring_get_irq;
+		ring->irq_put = gen5_ring_put_irq;
 		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
 	} else {
 		ring->add_request = render_ring_add_request;
@@ -1342,8 +1361,8 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 		ring->add_request = pc_render_add_request;
 		ring->flush = render_ring_flush;
 		ring->get_seqno = pc_render_get_seqno;
-		ring->irq_get = i9xx_ring_get_irq;
-		ring->irq_put = i9xx_ring_put_irq;
+		ring->irq_get = gen5_ring_get_irq;
+		ring->irq_put = gen5_ring_put_irq;
 		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
 	} else {
 		ring->add_request = render_ring_add_request;
@@ -1418,12 +1437,15 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		ring->flush = bsd_ring_flush;
 		ring->add_request = ring_add_request;
 		ring->get_seqno = ring_get_seqno;
-		ring->irq_get = i9xx_ring_get_irq;
-		ring->irq_put = i9xx_ring_put_irq;
-		if (IS_GEN5(dev))
-			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
-		else
-			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
+		if (IS_GEN5(dev)) {
+			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+			ring->irq_get = gen5_ring_get_irq;
+			ring->irq_put = gen5_ring_put_irq;
+		} else {
+			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
+			ring->irq_get = i9xx_ring_get_irq;
+			ring->irq_put = i9xx_ring_put_irq;
+		}
 		ring->dispatch_execbuffer = ring_dispatch_execbuffer;
 	}
 	ring->init = init_ring_common;
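
Both the new gen5 variants and the slimmed-down i9xx variants keep the
same refcounting discipline: the hardware interrupt is enabled only on
the 0 -> 1 transition of irq_refcount and disabled only on the 1 -> 0
transition, with both transitions serialized by ring->irq_lock. A
minimal user-space sketch of that discipline, with a pthread mutex
standing in for the kernel spinlock (all names here are illustrative):

	#include <pthread.h>
	#include <stdio.h>

	/* Illustrative stand-in for the ring's irq bookkeeping. */
	struct irq_ref {
		pthread_mutex_t lock;	/* stands in for ring->irq_lock */
		int refcount;		/* stands in for ring->irq_refcount */
	};

	static void hw_enable_irq(void)  { puts("hw irq enabled"); }
	static void hw_disable_irq(void) { puts("hw irq disabled"); }

	/* Enable the hardware interrupt only for the first user (0 -> 1). */
	static void irq_get(struct irq_ref *r)
	{
		pthread_mutex_lock(&r->lock);
		if (r->refcount++ == 0)
			hw_enable_irq();
		pthread_mutex_unlock(&r->lock);
	}

	/* Disable it only when the last user drops out (1 -> 0). */
	static void irq_put(struct irq_ref *r)
	{
		pthread_mutex_lock(&r->lock);
		if (--r->refcount == 0)
			hw_disable_irq();
		pthread_mutex_unlock(&r->lock);
	}

	int main(void)
	{
		struct irq_ref r = { PTHREAD_MUTEX_INITIALIZER, 0 };

		irq_get(&r);	/* first user: enables the interrupt */
		irq_get(&r);	/* second user: no hardware access */
		irq_put(&r);	/* one user left: still enabled */
		irq_put(&r);	/* last user gone: disables the interrupt */
		return 0;
	}

Because the hardware is touched only on the boundary transitions,
any number of nested get/put pairs costs just a lock round-trip and a
counter update, which is what makes the shared helpers cheap enough to
call from every ring.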