author     Daniel Vetter <daniel.vetter@ffwll.ch>  2012-02-10 10:52:55 -0500
committer  Daniel Vetter <daniel.vetter@ffwll.ch>  2012-02-10 11:14:49 -0500
commit     9edd576d89a5b6d3e136d7dcab654d887c0d25b7
tree       d19670de2256f8187321de3a41fa4a10d3c8e402 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent     e21af88d39796c907c38648c824be3d646ffbe35
parent     28a4d5675857f6386930a324317281cb8ed1e5d0
Merge remote-tracking branch 'airlied/drm-fixes' into drm-intel-next-queued
Back-merge from drm-fixes into drm-intel-next to sort out two things:

- interlaced support: -fixes contains a bugfix to correctly clear the
  interlaced configuration bits when the BIOS has set up an interlaced
  mode and we want to set up a progressive mode (current kernels don't
  support interlaced). The actual feature work for interlaced support
  depends upon (and conflicts with) this bugfix; a sketch of the
  bugfix's idea follows below the sign-off.

- forcewake voodoo to work around missed IRQ issues: -fixes only
  enabled this for Ivybridge, but recent bug reports indicate that we
  need it on Sandybridge, too, albeit in a slightly different flavour
  and with other fixes and reworks on top. Additionally, some forcewake
  cleanup patches heading to -next would conflict with current -fixes.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
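
As context for the first bullet: the idea of the -fixes bugfix is simply to
clear any interlace bits the BIOS left behind before programming a
progressive mode. A minimal sketch of that pattern, assuming hypothetical
mmio_read()/mmio_write() accessors and register names rather than the
driver's real ones:

	/* Sketch only: accessor and register names are illustrative. */
	uint32_t pipeconf = mmio_read(PIPECONF_REG);
	pipeconf &= ~PIPECONF_INTERLACE_MASK;  /* BIOS may have left these set */
	pipeconf |= PIPECONF_PROGRESSIVE;      /* explicitly select progressive */
	mmio_write(PIPECONF_REG, pipeconf);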
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')

 drivers/gpu/drm/i915/intel_ringbuffer.c | 41 +++++++++++++++++++++++++----------------
 1 file changed, 25 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6e80f8368355..4956f1bff522 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -634,6 +634,19 @@ render_ring_add_request(struct intel_ring_buffer *ring,
 }
 
 static u32
+gen6_ring_get_seqno(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+
+	/* Workaround to force correct ordering between irq and seqno writes on
+	 * ivb (and maybe also on snb) by reading from a CS register (like
+	 * ACTHD) before reading the status page. */
+	if (IS_GEN7(dev))
+		intel_ring_get_active_head(ring);
+	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static u32
 ring_get_seqno(struct intel_ring_buffer *ring)
 {
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
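
The workaround added above is essentially a posting read: fetching any
register that lives on the command streamer (ACTHD here) forces the pending
seqno write to the status page to complete before the CPU samples it. A
minimal sketch of the idea, assuming hypothetical mmio_read(), status_page
and index names rather than the driver's real accessors:

	#include <stdint.h>

	extern uint32_t mmio_read(uint32_t reg);     /* illustrative stand-in */
	extern volatile uint32_t status_page[];      /* illustrative stand-in */
	#define ACTHD_REG   0x2074  /* hypothetical register offset */
	#define SEQNO_INDEX 0x20    /* hypothetical status-page slot */

	static uint32_t sample_seqno(void)
	{
		(void)mmio_read(ACTHD_REG);      /* posting read orders the write... */
		return status_page[SEQNO_INDEX]; /* ...before this sample of it */
	}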
@@ -790,17 +803,6 @@ ring_add_request(struct intel_ring_buffer *ring,
 }
 
 static bool
-gen7_blt_ring_get_irq(struct intel_ring_buffer *ring)
-{
-	/* The BLT ring on IVB appears to have broken synchronization
-	 * between the seqno write and the interrupt, so that the
-	 * interrupt appears first. Returning false here makes
-	 * i915_wait_request() do a polling loop, instead.
-	 */
-	return false;
-}
-
-static bool
 gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 {
 	struct drm_device *dev = ring->dev;
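
The deleted gen7_blt_ring_get_irq() worked by refusing to enable the
interrupt at all, which pushed i915_wait_request() into polling the seqno
instead of sleeping; the forcewake hunks below replace that crutch. Roughly
the shape of the wait path it relied on, as a simplified sketch in which
seqno_passed() is an illustrative stand-in, not the driver's exact code:

	/* Sketch of the irq-or-poll wait; simplified and hypothetical. */
	static void wait_for_seqno(struct intel_ring_buffer *ring, u32 seqno)
	{
		if (ring->irq_get(ring)) {
			/* irq path: sleep until the seqno interrupt wakes us */
			wait_event(ring->irq_queue, seqno_passed(ring, seqno));
			ring->irq_put(ring);
		} else {
			/* irq_get() failed: busy-poll the status page instead */
			while (!seqno_passed(ring, seqno))
				cpu_relax();
		}
	}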
@@ -809,6 +811,12 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 	if (!dev->irq_enabled)
 		return false;
 
+	/* It looks like we need to prevent the gt from suspending while waiting
+	 * for a notify irq, otherwise irqs seem to get lost on at least the
+	 * blt/bsd rings on ivb. */
+	if (IS_GEN7(dev))
+		gen6_gt_force_wake_get(dev_priv);
+
 	spin_lock(&ring->irq_lock);
 	if (ring->irq_refcount++ == 0) {
 		ring->irq_mask &= ~rflag;
@@ -833,6 +841,9 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 		ironlake_disable_irq(dev_priv, gflag);
 	}
 	spin_unlock(&ring->irq_lock);
+
+	if (IS_GEN7(dev))
+		gen6_gt_force_wake_put(dev_priv);
 }
 
 static bool
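
These two hunks pair up: on gen7, every successful irq_get now takes a
forcewake reference and every irq_put drops it, so the GT cannot power down
while anyone is waiting on a notify interrupt. That only balances because
gen6_gt_force_wake_get()/_put() are reference counted; conceptually the
counting looks like this (a sketch with locking omitted, and mmio_write()
plus the register name as illustrative stand-ins):

	/* Conceptual sketch of refcounted forcewake; not the i915 code. */
	static int forcewake_count;

	static void force_wake_get(void)
	{
		if (forcewake_count++ == 0)
			mmio_write(FORCEWAKE_REG, 1);  /* keep the GT awake */
	}

	static void force_wake_put(void)
	{
		if (--forcewake_count == 0)
			mmio_write(FORCEWAKE_REG, 0);  /* allow it to sleep again */
	}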
@@ -1339,7 +1350,7 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
 	.write_tail = gen6_bsd_ring_write_tail,
 	.flush = gen6_ring_flush,
 	.add_request = gen6_add_request,
-	.get_seqno = ring_get_seqno,
+	.get_seqno = gen6_ring_get_seqno,
 	.irq_get = gen6_bsd_ring_get_irq,
 	.irq_put = gen6_bsd_ring_put_irq,
 	.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
@@ -1398,7 +1409,7 @@ static const struct intel_ring_buffer gen6_blt_ring = {
 	.write_tail = ring_write_tail,
 	.flush = blt_ring_flush,
 	.add_request = gen6_add_request,
-	.get_seqno = ring_get_seqno,
+	.get_seqno = gen6_ring_get_seqno,
 	.irq_get = blt_ring_get_irq,
 	.irq_put = blt_ring_put_irq,
 	.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
@@ -1420,6 +1431,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->flush = gen6_render_ring_flush;
 		ring->irq_get = gen6_render_ring_get_irq;
 		ring->irq_put = gen6_render_ring_put_irq;
+		ring->get_seqno = gen6_ring_get_seqno;
 	} else if (IS_GEN5(dev)) {
 		ring->add_request = pc_render_add_request;
 		ring->get_seqno = pc_render_get_seqno;
@@ -1498,8 +1510,5 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 
 	*ring = gen6_blt_ring;
 
-	if (IS_GEN7(dev))
-		ring->irq_get = gen7_blt_ring_get_irq;
-
 	return intel_init_ring_buffer(dev, ring);
 }