Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	41
1 file changed, 27 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 66a2b8b83972..513a0f4b469b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -49,7 +49,7 @@ static int __intel_ring_space(int head, int tail, int size)
 
 void intel_ring_update_space(struct intel_ring *ring)
 {
-	ring->space = __intel_ring_space(ring->head, ring->tail, ring->size);
+	ring->space = __intel_ring_space(ring->head, ring->emit, ring->size);
 }
 
 static int
@@ -774,8 +774,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
 
 	i915_gem_request_submit(request);
 
-	assert_ring_tail_valid(request->ring, request->tail);
-	I915_WRITE_TAIL(request->engine, request->tail);
+	I915_WRITE_TAIL(request->engine,
+			intel_ring_set_tail(request->ring, request->tail));
 }
 
 static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
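The intel_ring_set_tail() call introduced above is a helper that is not defined in this file. A minimal sketch of what it presumably looks like, assuming it is a static inline in intel_ringbuffer.h that folds in the assert_ring_tail_valid() check previously done inline in i9xx_submit_request():

	/* Sketch only; the real helper is assumed to live in intel_ringbuffer.h. */
	static inline u32
	intel_ring_set_tail(struct intel_ring *ring, u32 tail)
	{
		/* Record the last tail actually submitted to hardware,
		 * keeping the sanity check that used to sit in
		 * i9xx_submit_request().
		 */
		assert_ring_tail_valid(ring, tail);
		ring->tail = tail;
		return tail;
	}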
@@ -1316,11 +1316,23 @@ err:
 	return PTR_ERR(addr);
 }
 
+void intel_ring_reset(struct intel_ring *ring, u32 tail)
+{
+	GEM_BUG_ON(!list_empty(&ring->request_list));
+	ring->tail = tail;
+	ring->head = tail;
+	ring->emit = tail;
+	intel_ring_update_space(ring);
+}
+
 void intel_ring_unpin(struct intel_ring *ring)
 {
 	GEM_BUG_ON(!ring->vma);
 	GEM_BUG_ON(!ring->vaddr);
 
+	/* Discard any unused bytes beyond that submitted to hw. */
+	intel_ring_reset(ring, ring->tail);
+
 	if (i915_vma_is_map_and_fenceable(ring->vma))
 		i915_vma_unpin_iomap(ring->vma);
 	else
@@ -1562,8 +1574,9 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
+	/* Restart from the beginning of the rings for convenience */
 	for_each_engine(engine, dev_priv, id)
-		engine->buffer->head = engine->buffer->tail;
+		intel_ring_reset(engine->buffer, 0);
 }
 
 static int ring_request_alloc(struct drm_i915_gem_request *request)
@@ -1616,7 +1629,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 		unsigned space;
 
 		/* Would completion of this request free enough space? */
-		space = __intel_ring_space(target->postfix, ring->tail,
+		space = __intel_ring_space(target->postfix, ring->emit,
 					   ring->size);
 		if (space >= bytes)
 			break;
@@ -1641,8 +1654,8 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
 	struct intel_ring *ring = req->ring;
-	int remain_actual = ring->size - ring->tail;
-	int remain_usable = ring->effective_size - ring->tail;
+	int remain_actual = ring->size - ring->emit;
+	int remain_usable = ring->effective_size - ring->emit;
 	int bytes = num_dwords * sizeof(u32);
 	int total_bytes, wait_bytes;
 	bool need_wrap = false;
@@ -1678,17 +1691,17 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 
 	if (unlikely(need_wrap)) {
 		GEM_BUG_ON(remain_actual > ring->space);
-		GEM_BUG_ON(ring->tail + remain_actual > ring->size);
+		GEM_BUG_ON(ring->emit + remain_actual > ring->size);
 
 		/* Fill the tail with MI_NOOP */
-		memset(ring->vaddr + ring->tail, 0, remain_actual);
-		ring->tail = 0;
+		memset(ring->vaddr + ring->emit, 0, remain_actual);
+		ring->emit = 0;
 		ring->space -= remain_actual;
 	}
 
-	GEM_BUG_ON(ring->tail > ring->size - bytes);
-	cs = ring->vaddr + ring->tail;
-	ring->tail += bytes;
+	GEM_BUG_ON(ring->emit > ring->size - bytes);
+	cs = ring->vaddr + ring->emit;
+	ring->emit += bytes;
 	ring->space -= bytes;
 	GEM_BUG_ON(ring->space < 0);
 
@@ -1699,7 +1712,7 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
 	int num_dwords =
-		(req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+		(req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	u32 *cs;
 
 	if (num_dwords == 0)
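Taken together, these hunks split the software write offset out of ring->tail into a new ring->emit field: intel_ring_begin() and the free-space accounting now advance ring->emit, while ring->tail only records what has actually been submitted to the hardware TAIL register. For illustration, a standalone sketch of the circular-buffer space calculation that __intel_ring_space() (see the first hunk header) performs on these offsets; the body is an assumption based only on the signature shown above, not a copy of the kernel function:

	/* Standalone illustration of the assumed __intel_ring_space()
	 * semantics; compile with any C compiler, not kernel code.
	 */
	#include <stdio.h>

	#define CACHELINE_BYTES 64

	/* Free bytes between the write offset (now ring->emit) and the
	 * read offset (ring->head), reserving one cacheline so that
	 * head == emit always means "empty" rather than "full".
	 */
	static int ring_space(int head, int emit, int size)
	{
		int space = head - emit;

		if (space <= 0)
			space += size;
		return space - CACHELINE_BYTES;
	}

	int main(void)
	{
		/* e.g. a 16 KiB ring where the writer is 1 KiB ahead of the reader */
		printf("%d bytes free\n", ring_space(256, 1280, 16384));
		return 0;
	}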