aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/i915/intel_ringbuffer.c
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2017-11-15 10:12:04 -0500
committerChris Wilson <chris@chris-wilson.co.uk>2017-11-15 12:12:49 -0500
commitfd13821219dda093e402c5849e5d4525bb64b4f3 (patch)
treea562e3a4155bb2079002e2734f74f9ee7133e866 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent24fd018aaeaecdba79f85e2232ca37a412e2754b (diff)
drm/i915: Make request's wait-for-space explicit
At the start of building a request, we would wait for roughly enough space to fit the average request (to reduce the likelihood of having to wait and abort partway through request construction). To achieve this, we would try to begin a 0-length command packet; this just adds extra confusion, so make the wait-for-space explicit, as in the next patch we want to move it from the backend to the i915_gem_request_alloc() so it can ensure that the wait-for-space is the first operation in building a new request. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20171115151204.8105-2-chris@chris-wilson.co.uk
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c56
1 files changed, 36 insertions, 20 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 3321b801e77d..12e734b29463 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1578,7 +1578,7 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
1578 1578
1579static int ring_request_alloc(struct drm_i915_gem_request *request) 1579static int ring_request_alloc(struct drm_i915_gem_request *request)
1580{ 1580{
1581 u32 *cs; 1581 int ret;
1582 1582
1583 GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count); 1583 GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
1584 1584
@@ -1588,37 +1588,24 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
1588 */ 1588 */
1589 request->reserved_space += LEGACY_REQUEST_SIZE; 1589 request->reserved_space += LEGACY_REQUEST_SIZE;
1590 1590
1591 cs = intel_ring_begin(request, 0); 1591 ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
1592 if (IS_ERR(cs)) 1592 if (ret)
1593 return PTR_ERR(cs); 1593 return ret;
1594 1594
1595 request->reserved_space -= LEGACY_REQUEST_SIZE; 1595 request->reserved_space -= LEGACY_REQUEST_SIZE;
1596 return 0; 1596 return 0;
1597} 1597}
1598 1598
1599static noinline int wait_for_space(struct drm_i915_gem_request *req, 1599static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
1600 unsigned int bytes)
1601{ 1600{
1602 struct intel_ring *ring = req->ring;
1603 struct drm_i915_gem_request *target; 1601 struct drm_i915_gem_request *target;
1604 long timeout; 1602 long timeout;
1605 1603
1606 lockdep_assert_held(&req->i915->drm.struct_mutex); 1604 lockdep_assert_held(&ring->vma->vm->i915->drm.struct_mutex);
1607 1605
1608 if (intel_ring_update_space(ring) >= bytes) 1606 if (intel_ring_update_space(ring) >= bytes)
1609 return 0; 1607 return 0;
1610 1608
1611 /*
1612 * Space is reserved in the ringbuffer for finalising the request,
1613 * as that cannot be allowed to fail. During request finalisation,
1614 * reserved_space is set to 0 to stop the overallocation and the
1615 * assumption is that then we never need to wait (which has the
1616 * risk of failing with EINTR).
1617 *
1618 * See also i915_gem_request_alloc() and i915_add_request().
1619 */
1620 GEM_BUG_ON(!req->reserved_space);
1621
1622 list_for_each_entry(target, &ring->request_list, ring_link) { 1609 list_for_each_entry(target, &ring->request_list, ring_link) {
1623 /* Would completion of this request free enough space? */ 1610 /* Would completion of this request free enough space? */
1624 if (bytes <= __intel_ring_space(target->postfix, 1611 if (bytes <= __intel_ring_space(target->postfix,
@@ -1642,6 +1629,22 @@ static noinline int wait_for_space(struct drm_i915_gem_request *req,
1642 return 0; 1629 return 0;
1643} 1630}
1644 1631
1632int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes)
1633{
1634 GEM_BUG_ON(bytes > ring->effective_size);
1635 if (unlikely(bytes > ring->effective_size - ring->emit))
1636 bytes += ring->size - ring->emit;
1637
1638 if (unlikely(bytes > ring->space)) {
1639 int ret = wait_for_space(ring, bytes);
1640 if (unlikely(ret))
1641 return ret;
1642 }
1643
1644 GEM_BUG_ON(ring->space < bytes);
1645 return 0;
1646}
1647
1645u32 *intel_ring_begin(struct drm_i915_gem_request *req, 1648u32 *intel_ring_begin(struct drm_i915_gem_request *req,
1646 unsigned int num_dwords) 1649 unsigned int num_dwords)
1647{ 1650{
@@ -1681,7 +1684,20 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req,
1681 } 1684 }
1682 1685
1683 if (unlikely(total_bytes > ring->space)) { 1686 if (unlikely(total_bytes > ring->space)) {
1684 int ret = wait_for_space(req, total_bytes); 1687 int ret;
1688
1689 /*
1690 * Space is reserved in the ringbuffer for finalising the
1691 * request, as that cannot be allowed to fail. During request
1692 * finalisation, reserved_space is set to 0 to stop the
1693 * overallocation and the assumption is that then we never need
1694 * to wait (which has the risk of failing with EINTR).
1695 *
1696 * See also i915_gem_request_alloc() and i915_add_request().
1697 */
1698 GEM_BUG_ON(!req->reserved_space);
1699
1700 ret = wait_for_space(ring, total_bytes);
1685 if (unlikely(ret)) 1701 if (unlikely(ret))
1686 return ERR_PTR(ret); 1702 return ERR_PTR(ret);
1687 } 1703 }