Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	75
1 file changed, 49 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c2f09d456300..31b36c5ac894 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -285,14 +285,16 @@ static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
 	if (!ring->fbc_dirty)
 		return 0;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(ring, 6);
 	if (ret)
 		return ret;
-	intel_ring_emit(ring, MI_NOOP);
 	/* WaFbcNukeOn3DBlt:ivb/hsw */
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 	intel_ring_emit(ring, MSG_FBC_REND_STATE);
 	intel_ring_emit(ring, value);
+	intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
+	intel_ring_emit(ring, MSG_FBC_REND_STATE);
+	intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
 	intel_ring_advance(ring);
 
 	ring->fbc_dirty = false;
@@ -354,7 +356,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
 	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);
 
-	if (flush_domains)
+	if (!invalidate_domains && flush_domains)
 		return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
 
 	return 0;
@@ -436,7 +438,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 	int ret = 0;
 	u32 head;
 
-	gen6_gt_force_wake_get(dev_priv);
+	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
 	if (I915_NEED_GFX_HWS(dev))
 		intel_ring_setup_status_page(ring);
@@ -509,7 +511,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
 
 out:
-	gen6_gt_force_wake_put(dev_priv);
+	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 
 	return ret;
 }
@@ -661,19 +663,22 @@ gen6_add_request(struct intel_ring_buffer *ring)
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *useless;
-	int i, ret;
+	int i, ret, num_dwords = 4;
 
-	ret = intel_ring_begin(ring, ((I915_NUM_RINGS-1) *
-				      MBOX_UPDATE_DWORDS) +
-				      4);
+	if (i915_semaphore_is_enabled(dev))
+		num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
+#undef MBOX_UPDATE_DWORDS
+
+	ret = intel_ring_begin(ring, num_dwords);
 	if (ret)
 		return ret;
-#undef MBOX_UPDATE_DWORDS
 
-	for_each_ring(useless, dev_priv, i) {
-		u32 mbox_reg = ring->signal_mbox[i];
-		if (mbox_reg != GEN6_NOSYNC)
-			update_mboxes(ring, mbox_reg);
+	if (i915_semaphore_is_enabled(dev)) {
+		for_each_ring(useless, dev_priv, i) {
+			u32 mbox_reg = ring->signal_mbox[i];
+			if (mbox_reg != GEN6_NOSYNC)
+				update_mboxes(ring, mbox_reg);
+		}
 	}
 
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
@@ -1030,11 +1035,6 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 	if (!dev->irq_enabled)
 		return false;
 
-	/* It looks like we need to prevent the gt from suspending while waiting
-	 * for an notifiy irq, otherwise irqs seem to get lost on at least the
-	 * blt/bsd rings on ivb. */
-	gen6_gt_force_wake_get(dev_priv);
-
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
 		if (HAS_L3_DPF(dev) && ring->id == RCS)
@@ -1066,8 +1066,6 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-	gen6_gt_force_wake_put(dev_priv);
 }
 
 static bool
@@ -1611,8 +1609,8 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
 	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
 }
 
-static int __intel_ring_begin(struct intel_ring_buffer *ring,
-			      int bytes)
+static int __intel_ring_prepare(struct intel_ring_buffer *ring,
+				int bytes)
 {
 	int ret;
 
@@ -1628,7 +1626,6 @@ static int __intel_ring_begin(struct intel_ring_buffer *ring,
 		return ret;
 	}
 
-	ring->space -= bytes;
 	return 0;
 }
 
@@ -1643,12 +1640,38 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
 	if (ret)
 		return ret;
 
+	ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
+	if (ret)
+		return ret;
+
 	/* Preallocate the olr before touching the ring */
 	ret = intel_ring_alloc_seqno(ring);
 	if (ret)
 		return ret;
 
-	return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
+	ring->space -= num_dwords * sizeof(uint32_t);
+	return 0;
+}
+
+/* Align the ring tail to a cacheline boundary */
+int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
+{
+	int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t);
+	int ret;
+
+	if (num_dwords == 0)
+		return 0;
+
+	ret = intel_ring_begin(ring, num_dwords);
+	if (ret)
+		return ret;
+
+	while (num_dwords--)
+		intel_ring_emit(ring, MI_NOOP);
+
+	intel_ring_advance(ring);
+
+	return 0;
 }
 
 void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
@@ -1838,7 +1861,7 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
 	}
 	intel_ring_advance(ring);
 
-	if (IS_GEN7(dev) && flush)
+	if (IS_GEN7(dev) && !invalidate && flush)
 		return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
 
 	return 0;