path: root/drivers/gpu/drm/i915/intel_ringbuffer.c
author		Ben Widawsky <ben@bwidawsk.net>		2012-03-29 22:11:27 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2012-04-09 12:04:06 -0400
commit		25c063004a6d0048c4dece74db4da117b9ae623e (patch)
tree		ec4055630625a71272d46b5cc51192417a6d3249 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent		e2a1e2f0242c363ed80458282d67039c373fbb1f (diff)
drm/i915: open code gen6+ ring irqs
We can now open-code the get/put irq functions as they were just
abstracting single register definitions. It would be nice to merge this
in with the IRQ handling code... but that is too much work for me at
present. In addition I could probably collapse this into a lot of the
Ironlake stuff, but I don't think it's worth the potential regressions.

This patch itself should not affect functionality.

CC: Jesse Barnes <jbarnes@virtuousgeek.org>
Signed-off-by: Ben Widawsky <benjamin.widawsky@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
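The shape of the change is easy to model outside the driver: each per-ring
wrapper existed only to pass a constant interrupt mask into a shared helper,
so storing that mask in the ring structure (the new irq_enable field) lets
every gen6+ ring share one get/put pair. The following is a minimal,
self-contained sketch of that pattern, not the i915 code itself: the fake
register, the bare refcount and the "set bit = enabled" mask semantics are
illustrative stand-ins (the real functions take ring->irq_lock, check
dev->irq_enabled and update a hardware mask register whose bits work the
other way around).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the GT interrupt-enable state and the per-ring bits
 * (real driver: an interrupt mask register; here a plain variable). */
static uint32_t fake_gt_irq_enable;
#define GT_USER_INTERRUPT            (1u << 0)  /* render ring */
#define GEN6_BSD_USER_INTERRUPT      (1u << 1)  /* video ring  */
#define GEN6_BLITTER_USER_INTERRUPT  (1u << 2)  /* blitter     */

struct ring {
        const char *name;
        int irq_refcount;     /* how many users currently want the IRQ */
        uint32_t irq_enable;  /* per-ring mask, as added by the patch  */
};

/* One shared get/put pair for every ring: the mask is no longer a
 * parameter supplied by a per-ring wrapper, it is read from the ring. */
static bool ring_get_irq(struct ring *ring)
{
        if (ring->irq_refcount++ == 0)
                fake_gt_irq_enable |= ring->irq_enable;   /* first user: enable */
        return true;
}

static void ring_put_irq(struct ring *ring)
{
        if (--ring->irq_refcount == 0)
                fake_gt_irq_enable &= ~ring->irq_enable;  /* last user: disable */
}

int main(void)
{
        struct ring bsd = { .name = "bsd", .irq_enable = GEN6_BSD_USER_INTERRUPT };
        struct ring blt = { .name = "blt", .irq_enable = GEN6_BLITTER_USER_INTERRUPT };

        ring_get_irq(&bsd);
        ring_get_irq(&blt);
        printf("after two gets: 0x%x\n", fake_gt_irq_enable);  /* 0x6 */
        ring_put_irq(&bsd);
        printf("after bsd put:  0x%x\n", fake_gt_irq_enable);  /* 0x4 */
        ring_put_irq(&blt);
        printf("after blt put:  0x%x\n", fake_gt_irq_enable);  /* 0x0 */
        return 0;
}

Compiled standalone, the three prints report 0x6, 0x4 and 0x0: the
refcount-driven enable/disable behaviour that the shared helpers in the
patch preserve for each ring's own bit.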
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	62
1 file changed, 16 insertions, 46 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c7eea7fad16f..98ac5c0ca37a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -788,10 +788,11 @@ ring_add_request(struct intel_ring_buffer *ring,
 }
 
 static bool
-gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 mask)
+gen6_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 mask = ring->irq_enable;
 
 	if (!dev->irq_enabled)
 		return false;
@@ -813,10 +814,11 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 mask)
 }
 
 static void
-gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 mask)
+gen6_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 mask = ring->irq_enable;
 
 	spin_lock(&ring->irq_lock);
 	if (--ring->irq_refcount == 0) {
@@ -1373,30 +1375,6 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 	return 0;
 }
 
-static bool
-gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
-{
-	return gen6_ring_get_irq(ring, GT_USER_INTERRUPT);
-}
-
-static void
-gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
-{
-	return gen6_ring_put_irq(ring, GT_USER_INTERRUPT);
-}
-
-static bool
-gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
-{
-	return gen6_ring_get_irq(ring, GEN6_BSD_USER_INTERRUPT);
-}
-
-static void
-gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
-	return gen6_ring_put_irq(ring, GEN6_BSD_USER_INTERRUPT);
-}
-
 /* ring buffer for Video Codec for Gen6+ */
 static const struct intel_ring_buffer gen6_bsd_ring = {
 	.name			= "gen6 bsd ring",
@@ -1408,8 +1386,9 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
 	.flush			= gen6_ring_flush,
 	.add_request		= gen6_add_request,
 	.get_seqno		= gen6_ring_get_seqno,
-	.irq_get		= gen6_bsd_ring_get_irq,
-	.irq_put		= gen6_bsd_ring_put_irq,
+	.irq_enable		= GEN6_BSD_USER_INTERRUPT,
+	.irq_get		= gen6_ring_get_irq,
+	.irq_put		= gen6_ring_put_irq,
 	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
 	.sync_to		= gen6_bsd_ring_sync_to,
 	.semaphore_register	= {MI_SEMAPHORE_SYNC_VR,
@@ -1420,18 +1399,6 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
 
 /* Blitter support (SandyBridge+) */
 
-static bool
-blt_ring_get_irq(struct intel_ring_buffer *ring)
-{
-	return gen6_ring_get_irq(ring, GEN6_BLITTER_USER_INTERRUPT);
-}
-
-static void
-blt_ring_put_irq(struct intel_ring_buffer *ring)
-{
-	gen6_ring_put_irq(ring, GEN6_BLITTER_USER_INTERRUPT);
-}
-
 static int blt_ring_flush(struct intel_ring_buffer *ring,
 			  u32 invalidate, u32 flush)
 {
@@ -1463,8 +1430,9 @@ static const struct intel_ring_buffer gen6_blt_ring = {
 	.flush			= blt_ring_flush,
 	.add_request		= gen6_add_request,
 	.get_seqno		= gen6_ring_get_seqno,
-	.irq_get		= blt_ring_get_irq,
-	.irq_put		= blt_ring_put_irq,
+	.irq_get		= gen6_ring_get_irq,
+	.irq_put		= gen6_ring_put_irq,
+	.irq_enable		= GEN6_BLITTER_USER_INTERRUPT,
 	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
 	.sync_to		= gen6_blt_ring_sync_to,
 	.semaphore_register	= {MI_SEMAPHORE_SYNC_BR,
@@ -1482,8 +1450,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->add_request = gen6_add_request;
 		ring->flush = gen6_render_ring_flush;
-		ring->irq_get = gen6_render_ring_get_irq;
-		ring->irq_put = gen6_render_ring_put_irq;
+		ring->irq_get = gen6_ring_get_irq;
+		ring->irq_put = gen6_ring_put_irq;
+		ring->irq_enable = GT_USER_INTERRUPT;
 		ring->get_seqno = gen6_ring_get_seqno;
 	} else if (IS_GEN5(dev)) {
 		ring->add_request = pc_render_add_request;
@@ -1506,8 +1475,9 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	*ring = render_ring;
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->add_request = gen6_add_request;
-		ring->irq_get = gen6_render_ring_get_irq;
-		ring->irq_put = gen6_render_ring_put_irq;
+		ring->irq_get = gen6_ring_get_irq;
+		ring->irq_put = gen6_ring_put_irq;
+		ring->irq_enable = GT_USER_INTERRUPT;
 	} else if (IS_GEN5(dev)) {
 		ring->add_request = pc_render_add_request;
 		ring->get_seqno = pc_render_get_seqno;