aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2010-09-17 20:38:04 -0400
committerChris Wilson <chris@chris-wilson.co.uk>2010-09-21 06:19:50 -0400
commitb84d5f0c22914d37d709add54c66e741c404fa56 (patch)
treedd2013ad666d622cb6b5c007aef7a8b2a6a6165a /drivers
parent82690bba375586ab93d74265710c2fd5788c8178 (diff)
drm/i915: Inline i915_gem_ring_retire_request()
Change the semantics to retire any buffer older than the current seqno rather than repeatedly calling the function to retire the buffer at the head of the list matching the request seqno. Whilst this should have no semantic impact on the implementation, Daniel was wondering if there was a bug where we might miss a retirement and so end up with a continually growing active list. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c91
1 files changed, 38 insertions, 53 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 71a2723545b9..1c02798bb7e4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1668,47 +1668,6 @@ i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
1668} 1668}
1669 1669
1670/** 1670/**
1671 * Moves buffers associated only with the given active seqno from the active
1672 * to inactive list, potentially freeing them.
1673 */
1674static void
1675i915_gem_retire_request(struct drm_device *dev,
1676 struct drm_i915_gem_request *request)
1677{
1678 trace_i915_gem_request_retire(dev, request->seqno);
1679
1680 /* Move any buffers on the active list that are no longer referenced
1681 * by the ringbuffer to the flushing/inactive lists as appropriate.
1682 */
1683 while (!list_empty(&request->ring->active_list)) {
1684 struct drm_gem_object *obj;
1685 struct drm_i915_gem_object *obj_priv;
1686
1687 obj_priv = list_first_entry(&request->ring->active_list,
1688 struct drm_i915_gem_object,
1689 list);
1690 obj = &obj_priv->base;
1691
1692 /* If the seqno being retired doesn't match the oldest in the
1693 * list, then the oldest in the list must still be newer than
1694 * this seqno.
1695 */
1696 if (obj_priv->last_rendering_seqno != request->seqno)
1697 return;
1698
1699#if WATCH_LRU
1700 DRM_INFO("%s: retire %d moves to inactive list %p\n",
1701 __func__, request->seqno, obj);
1702#endif
1703
1704 if (obj->write_domain != 0)
1705 i915_gem_object_move_to_flushing(obj);
1706 else
1707 i915_gem_object_move_to_inactive(obj);
1708 }
1709}
1710
1711/**
1712 * Returns true if seq1 is later than seq2. 1671 * Returns true if seq1 is later than seq2.
1713 */ 1672 */
1714bool 1673bool
@@ -1733,36 +1692,62 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
1733{ 1692{
1734 drm_i915_private_t *dev_priv = dev->dev_private; 1693 drm_i915_private_t *dev_priv = dev->dev_private;
1735 uint32_t seqno; 1694 uint32_t seqno;
1695 bool wedged;
1736 1696
1737 if (!ring->status_page.page_addr 1697 if (!ring->status_page.page_addr ||
1738 || list_empty(&ring->request_list)) 1698 list_empty(&ring->request_list))
1739 return; 1699 return;
1740 1700
1741 seqno = i915_get_gem_seqno(dev, ring); 1701 seqno = i915_get_gem_seqno(dev, ring);
1702 wedged = atomic_read(&dev_priv->mm.wedged);
1742 1703
1743 while (!list_empty(&ring->request_list)) { 1704 while (!list_empty(&ring->request_list)) {
1744 struct drm_i915_gem_request *request; 1705 struct drm_i915_gem_request *request;
1745 uint32_t retiring_seqno;
1746 1706
1747 request = list_first_entry(&ring->request_list, 1707 request = list_first_entry(&ring->request_list,
1748 struct drm_i915_gem_request, 1708 struct drm_i915_gem_request,
1749 list); 1709 list);
1750 retiring_seqno = request->seqno;
1751 1710
1752 if (i915_seqno_passed(seqno, retiring_seqno) || 1711 if (!wedged && !i915_seqno_passed(seqno, request->seqno))
1753 atomic_read(&dev_priv->mm.wedged)) { 1712 break;
1754 i915_gem_retire_request(dev, request); 1713
1714 trace_i915_gem_request_retire(dev, request->seqno);
1715
1716 list_del(&request->list);
1717 list_del(&request->client_list);
1718 kfree(request);
1719 }
1720
1721 /* Move any buffers on the active list that are no longer referenced
1722 * by the ringbuffer to the flushing/inactive lists as appropriate.
1723 */
1724 while (!list_empty(&ring->active_list)) {
1725 struct drm_gem_object *obj;
1726 struct drm_i915_gem_object *obj_priv;
1727
1728 obj_priv = list_first_entry(&ring->active_list,
1729 struct drm_i915_gem_object,
1730 list);
1755 1731
1756 list_del(&request->list); 1732 if (!wedged &&
1757 list_del(&request->client_list); 1733 !i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
1758 kfree(request);
1759 } else
1760 break; 1734 break;
1735
1736 obj = &obj_priv->base;
1737
1738#if WATCH_LRU
1739 DRM_INFO("%s: retire %d moves to inactive list %p\n",
1740 __func__, request->seqno, obj);
1741#endif
1742
1743 if (obj->write_domain != 0)
1744 i915_gem_object_move_to_flushing(obj);
1745 else
1746 i915_gem_object_move_to_inactive(obj);
1761 } 1747 }
1762 1748
1763 if (unlikely (dev_priv->trace_irq_seqno && 1749 if (unlikely (dev_priv->trace_irq_seqno &&
1764 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { 1750 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
1765
1766 ring->user_irq_put(dev, ring); 1751 ring->user_irq_put(dev, ring);
1767 dev_priv->trace_irq_seqno = 0; 1752 dev_priv->trace_irq_seqno = 0;
1768 } 1753 }