aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/i915/intel_ringbuffer.c
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2012-04-12 05:27:01 -0400
committerDave Airlie <airlied@redhat.com>2012-04-12 05:27:01 -0400
commiteffbc4fd8e37e41d6f2bb6bcc611c14b4fbdcf9b (patch)
tree8bc2a6a2116f1031b0033bf1a8f9fbe92201c5c1 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent6a7068b4ef17dfb9de3191321f1adc91fa1659ca (diff)
parentec34a01de31128e5c08e5f05c47f4a787f45a33c (diff)
Merge branch 'drm-intel-next' of git://people.freedesktop.org/~danvet/drm-intel into drm-core-next
Daniel Vetter wrote: "First pull request for 3.5-next, slightly larger than usual because new things kept coming in since the last pull for 3.4. Highlights: - first batch of hw enablement for vlv (Jesse et al) and hsw (Eugeni). pci ids are not yet added, and there's still quite a few patches to merge (mostly modesetting). To make QA easier I've decided to merge this stuff in pieces. - loads of cleanups and prep patches spurred by the above. Especially vlv is a real frankenstein chip, but also hsw is stretching our driver's code design. Expect more to come in this area for 3.5. - more gmbus fixes, cleanups and improvements by Daniel Kurtz. Again, there are more patches needed (and some already queued up), but I wanted to split this a bit for better testing. - pwrite/pread rework and retuning. This series has been in the works for a few months already and a lot of i-g-t tests have been created for it. Now it's finally ready to be merged. Note that one patch in this series touches include/pagemap.h, that patch is acked-by akpm. - reduce mappable pressure and relocation throughput improvements from Chris. - mmap offset exhaustion mitigation by Chris Wilson. - a start at figuring out which codepaths in our messy dri1/ums+gem/kms driver we actually need to support by bailing out of unsupported cases. The driver now refuses to load without kms on gen6+ and disallows a few ioctls that userspace never used in certain cases. More of this will definitely come. - More decoupling of global gtt and ppgtt. - Improved dual-link lvds detection by Takashi Iwai. - Shut up the compiler plus fix the fallout (Ben) - Inverted panel brightness handling (mostly Acer manages to break things in this way). - Small fixlets and adjustments and some minor things to help debugging.
Regression-wise QA reported quite a few issues on ivb, but all of them turned out to be hw stability issues which are already fixed in drm-intel-fixes (QA runs the nightly regression tests on -next alone, without -fixes automatically merged in). There's still one issue open on snb, it looks like occlusion query writes are not quite as cache coherent as we've expected. With some of the pwrite adjustments we can now reliably hit this. Kernel workaround for it is in the works." * 'drm-intel-next' of git://people.freedesktop.org/~danvet/drm-intel: (101 commits) drm/i915: VCS is not the last ring drm/i915: Add a dual link lvds quirk for MacBook Pro 8,2 drm/i915: make quirks more verbose drm/i915: dump the DMA fetch addr register on pre-gen6 drm/i915/sdvo: Include YRPB as an additional TV output type drm/i915: disallow gem init ioctl on ilk drm/i915: refuse to load on gen6+ without kms drm/i915: extract gt interrupt handler drm/i915: use render gen to switch ring irq functions drm/i915: rip out old HWSTAM missed irq WA for vlv drm/i915: open code gen6+ ring irqs drm/i915: ring irq cleanups drm/i915: add SFUSE_STRAP registers for digital port detection drm/i915: add WM_LINETIME registers drm/i915: add WRPLL clocks drm/i915: add LCPLL control registers drm/i915: add SSC offsets for SBI access drm/i915: add port clock selection support for HSW drm/i915: add S PLL control drm/i915: add PIXCLK_GATE register ... Conflicts: drivers/char/agp/intel-agp.h drivers/char/agp/intel-gtt.c drivers/gpu/drm/i915/i915_debugfs.c
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c92
1 files changed, 25 insertions, 67 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e25581a9f60f..dfdb613752c5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -290,9 +290,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
290 | RING_VALID); 290 | RING_VALID);
291 291
292 /* If the head is still not zero, the ring is dead */ 292 /* If the head is still not zero, the ring is dead */
293 if ((I915_READ_CTL(ring) & RING_VALID) == 0 || 293 if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
294 I915_READ_START(ring) != obj->gtt_offset || 294 I915_READ_START(ring) == obj->gtt_offset &&
295 (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) { 295 (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
296 DRM_ERROR("%s initialization failed " 296 DRM_ERROR("%s initialization failed "
297 "ctl %08x head %08x tail %08x start %08x\n", 297 "ctl %08x head %08x tail %08x start %08x\n",
298 ring->name, 298 ring->name,
@@ -687,7 +687,7 @@ render_ring_get_irq(struct intel_ring_buffer *ring)
687 687
688 spin_lock(&ring->irq_lock); 688 spin_lock(&ring->irq_lock);
689 if (ring->irq_refcount++ == 0) { 689 if (ring->irq_refcount++ == 0) {
690 if (HAS_PCH_SPLIT(dev)) 690 if (INTEL_INFO(dev)->gen >= 5)
691 ironlake_enable_irq(dev_priv, 691 ironlake_enable_irq(dev_priv,
692 GT_PIPE_NOTIFY | GT_USER_INTERRUPT); 692 GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
693 else 693 else
@@ -706,7 +706,7 @@ render_ring_put_irq(struct intel_ring_buffer *ring)
706 706
707 spin_lock(&ring->irq_lock); 707 spin_lock(&ring->irq_lock);
708 if (--ring->irq_refcount == 0) { 708 if (--ring->irq_refcount == 0) {
709 if (HAS_PCH_SPLIT(dev)) 709 if (INTEL_INFO(dev)->gen >= 5)
710 ironlake_disable_irq(dev_priv, 710 ironlake_disable_irq(dev_priv,
711 GT_USER_INTERRUPT | 711 GT_USER_INTERRUPT |
712 GT_PIPE_NOTIFY); 712 GT_PIPE_NOTIFY);
@@ -788,10 +788,11 @@ ring_add_request(struct intel_ring_buffer *ring,
788} 788}
789 789
790static bool 790static bool
791gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) 791gen6_ring_get_irq(struct intel_ring_buffer *ring)
792{ 792{
793 struct drm_device *dev = ring->dev; 793 struct drm_device *dev = ring->dev;
794 drm_i915_private_t *dev_priv = dev->dev_private; 794 drm_i915_private_t *dev_priv = dev->dev_private;
795 u32 mask = ring->irq_enable;
795 796
796 if (!dev->irq_enabled) 797 if (!dev->irq_enabled)
797 return false; 798 return false;
@@ -803,9 +804,9 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
803 804
804 spin_lock(&ring->irq_lock); 805 spin_lock(&ring->irq_lock);
805 if (ring->irq_refcount++ == 0) { 806 if (ring->irq_refcount++ == 0) {
806 ring->irq_mask &= ~rflag; 807 ring->irq_mask &= ~mask;
807 I915_WRITE_IMR(ring, ring->irq_mask); 808 I915_WRITE_IMR(ring, ring->irq_mask);
808 ironlake_enable_irq(dev_priv, gflag); 809 ironlake_enable_irq(dev_priv, mask);
809 } 810 }
810 spin_unlock(&ring->irq_lock); 811 spin_unlock(&ring->irq_lock);
811 812
@@ -813,16 +814,17 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
813} 814}
814 815
815static void 816static void
816gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) 817gen6_ring_put_irq(struct intel_ring_buffer *ring)
817{ 818{
818 struct drm_device *dev = ring->dev; 819 struct drm_device *dev = ring->dev;
819 drm_i915_private_t *dev_priv = dev->dev_private; 820 drm_i915_private_t *dev_priv = dev->dev_private;
821 u32 mask = ring->irq_enable;
820 822
821 spin_lock(&ring->irq_lock); 823 spin_lock(&ring->irq_lock);
822 if (--ring->irq_refcount == 0) { 824 if (--ring->irq_refcount == 0) {
823 ring->irq_mask |= rflag; 825 ring->irq_mask |= mask;
824 I915_WRITE_IMR(ring, ring->irq_mask); 826 I915_WRITE_IMR(ring, ring->irq_mask);
825 ironlake_disable_irq(dev_priv, gflag); 827 ironlake_disable_irq(dev_priv, mask);
826 } 828 }
827 spin_unlock(&ring->irq_lock); 829 spin_unlock(&ring->irq_lock);
828 830
@@ -1361,38 +1363,6 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1361 return 0; 1363 return 0;
1362} 1364}
1363 1365
1364static bool
1365gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
1366{
1367 return gen6_ring_get_irq(ring,
1368 GT_USER_INTERRUPT,
1369 GEN6_RENDER_USER_INTERRUPT);
1370}
1371
1372static void
1373gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
1374{
1375 return gen6_ring_put_irq(ring,
1376 GT_USER_INTERRUPT,
1377 GEN6_RENDER_USER_INTERRUPT);
1378}
1379
1380static bool
1381gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
1382{
1383 return gen6_ring_get_irq(ring,
1384 GT_GEN6_BSD_USER_INTERRUPT,
1385 GEN6_BSD_USER_INTERRUPT);
1386}
1387
1388static void
1389gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
1390{
1391 return gen6_ring_put_irq(ring,
1392 GT_GEN6_BSD_USER_INTERRUPT,
1393 GEN6_BSD_USER_INTERRUPT);
1394}
1395
1396/* ring buffer for Video Codec for Gen6+ */ 1366/* ring buffer for Video Codec for Gen6+ */
1397static const struct intel_ring_buffer gen6_bsd_ring = { 1367static const struct intel_ring_buffer gen6_bsd_ring = {
1398 .name = "gen6 bsd ring", 1368 .name = "gen6 bsd ring",
@@ -1404,8 +1374,9 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
1404 .flush = gen6_ring_flush, 1374 .flush = gen6_ring_flush,
1405 .add_request = gen6_add_request, 1375 .add_request = gen6_add_request,
1406 .get_seqno = gen6_ring_get_seqno, 1376 .get_seqno = gen6_ring_get_seqno,
1407 .irq_get = gen6_bsd_ring_get_irq, 1377 .irq_enable = GEN6_BSD_USER_INTERRUPT,
1408 .irq_put = gen6_bsd_ring_put_irq, 1378 .irq_get = gen6_ring_get_irq,
1379 .irq_put = gen6_ring_put_irq,
1409 .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, 1380 .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
1410 .sync_to = gen6_bsd_ring_sync_to, 1381 .sync_to = gen6_bsd_ring_sync_to,
1411 .semaphore_register = {MI_SEMAPHORE_SYNC_VR, 1382 .semaphore_register = {MI_SEMAPHORE_SYNC_VR,
@@ -1416,22 +1387,6 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
1416 1387
1417/* Blitter support (SandyBridge+) */ 1388/* Blitter support (SandyBridge+) */
1418 1389
1419static bool
1420blt_ring_get_irq(struct intel_ring_buffer *ring)
1421{
1422 return gen6_ring_get_irq(ring,
1423 GT_BLT_USER_INTERRUPT,
1424 GEN6_BLITTER_USER_INTERRUPT);
1425}
1426
1427static void
1428blt_ring_put_irq(struct intel_ring_buffer *ring)
1429{
1430 gen6_ring_put_irq(ring,
1431 GT_BLT_USER_INTERRUPT,
1432 GEN6_BLITTER_USER_INTERRUPT);
1433}
1434
1435static int blt_ring_flush(struct intel_ring_buffer *ring, 1390static int blt_ring_flush(struct intel_ring_buffer *ring,
1436 u32 invalidate, u32 flush) 1391 u32 invalidate, u32 flush)
1437{ 1392{
@@ -1463,8 +1418,9 @@ static const struct intel_ring_buffer gen6_blt_ring = {
1463 .flush = blt_ring_flush, 1418 .flush = blt_ring_flush,
1464 .add_request = gen6_add_request, 1419 .add_request = gen6_add_request,
1465 .get_seqno = gen6_ring_get_seqno, 1420 .get_seqno = gen6_ring_get_seqno,
1466 .irq_get = blt_ring_get_irq, 1421 .irq_get = gen6_ring_get_irq,
1467 .irq_put = blt_ring_put_irq, 1422 .irq_put = gen6_ring_put_irq,
1423 .irq_enable = GEN6_BLITTER_USER_INTERRUPT,
1468 .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, 1424 .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
1469 .sync_to = gen6_blt_ring_sync_to, 1425 .sync_to = gen6_blt_ring_sync_to,
1470 .semaphore_register = {MI_SEMAPHORE_SYNC_BR, 1426 .semaphore_register = {MI_SEMAPHORE_SYNC_BR,
@@ -1482,8 +1438,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1482 if (INTEL_INFO(dev)->gen >= 6) { 1438 if (INTEL_INFO(dev)->gen >= 6) {
1483 ring->add_request = gen6_add_request; 1439 ring->add_request = gen6_add_request;
1484 ring->flush = gen6_render_ring_flush; 1440 ring->flush = gen6_render_ring_flush;
1485 ring->irq_get = gen6_render_ring_get_irq; 1441 ring->irq_get = gen6_ring_get_irq;
1486 ring->irq_put = gen6_render_ring_put_irq; 1442 ring->irq_put = gen6_ring_put_irq;
1443 ring->irq_enable = GT_USER_INTERRUPT;
1487 ring->get_seqno = gen6_ring_get_seqno; 1444 ring->get_seqno = gen6_ring_get_seqno;
1488 } else if (IS_GEN5(dev)) { 1445 } else if (IS_GEN5(dev)) {
1489 ring->add_request = pc_render_add_request; 1446 ring->add_request = pc_render_add_request;
@@ -1506,8 +1463,9 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1506 *ring = render_ring; 1463 *ring = render_ring;
1507 if (INTEL_INFO(dev)->gen >= 6) { 1464 if (INTEL_INFO(dev)->gen >= 6) {
1508 ring->add_request = gen6_add_request; 1465 ring->add_request = gen6_add_request;
1509 ring->irq_get = gen6_render_ring_get_irq; 1466 ring->irq_get = gen6_ring_get_irq;
1510 ring->irq_put = gen6_render_ring_put_irq; 1467 ring->irq_put = gen6_ring_put_irq;
1468 ring->irq_enable = GT_USER_INTERRUPT;
1511 } else if (IS_GEN5(dev)) { 1469 } else if (IS_GEN5(dev)) {
1512 ring->add_request = pc_render_add_request; 1470 ring->add_request = pc_render_add_request;
1513 ring->get_seqno = pc_render_get_seqno; 1471 ring->get_seqno = pc_render_get_seqno;