author		Chris Wilson <chris@chris-wilson.co.uk>	2010-09-22 05:31:52 -0400
committer	Chris Wilson <chris@chris-wilson.co.uk>	2010-09-22 05:31:52 -0400
commit		dfaae392f4461785eb1c92aeaf2a1040b184edba (patch)
tree		2239f155fdbed50f82d218b3499d06fb7c68f288
parent		9e0ae53404700f1e4ae1f33b0ff92948ae0e509d (diff)
drm/i915: Clear the gpu_write_list on resetting write_domain upon hang
Otherwise we will hit a list handling assertion when moving the object
to the inactive list.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.c	16
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	3
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	51
3 files changed, 42 insertions(+), 28 deletions(-)
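To illustrate the failure mode described in the commit message, here is a minimal userspace sketch; it is not i915 code. The struct fake_obj type and the move_to_inactive() helper are hypothetical stand-ins for the kernel structures, and the list helpers are simplified copies of the kernel list API. The point it demonstrates is the one the patch fixes: clearing write_domain alone does not unlink the gpu_write_list node, so the object still looks as if it has a pending GPU write when it reaches the inactive list; list_del_init() on the node is what avoids the assertion.

/*
 * Illustrative sketch only: fake_obj and move_to_inactive() are
 * simplified stand-ins, not the i915 implementation.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static bool list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	INIT_LIST_HEAD(entry);
}

struct fake_obj {
	unsigned int write_domain;
	struct list_head gpu_write_list;	/* link into a per-device write list */
};

/* stand-in for the inactive-list transition: by the time an object gets
 * here it must no longer be queued for a GPU write */
static void move_to_inactive(struct fake_obj *obj)
{
	assert(list_empty(&obj->gpu_write_list));
}

int main(void)
{
	struct list_head gpu_write_list;
	struct fake_obj obj;

	INIT_LIST_HEAD(&gpu_write_list);
	INIT_LIST_HEAD(&obj.gpu_write_list);

	/* the object picks up a pending GPU write */
	obj.write_domain = 1;
	list_add_tail(&obj.gpu_write_list, &gpu_write_list);

	/* on a hang/reset: clearing the domain alone is not enough ... */
	obj.write_domain = 0;
	/* ... the node must be unlinked too, which is what the patch adds */
	list_del_init(&obj.gpu_write_list);

	move_to_inactive(&obj);	/* would assert without the list_del_init() */
	printf("object moved to inactive cleanly\n");
	return 0;
}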
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 4e83bb36888e..2184d29e7a9f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -395,21 +395,7 @@ int i915_reset(struct drm_device *dev, u8 flags)
 
 	mutex_lock(&dev->struct_mutex);
 
-	/*
-	 * Clear request list
-	 */
-	i915_gem_retire_requests(dev);
-
-	/* Remove anything from the flushing lists. The GPU cache is likely
-	 * to be lost on reset along with the data, so simply move the
-	 * lost bo to the inactive list.
-	 */
-	i915_gem_reset_flushing_list(dev);
-
-	/* Move everything out of the GPU domains to ensure we do any
-	 * necessary invalidation upon reuse.
-	 */
-	i915_gem_reset_inactive_gpu_domains(dev);
+	i915_gem_reset_lists(dev);
 
 	/*
 	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 12e9f853a5e9..5fec2ca619e8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1005,8 +1005,7 @@ int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
 int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
 				  bool interruptible);
 void i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_reset_flushing_list(struct drm_device *dev);
-void i915_gem_reset_inactive_gpu_domains(struct drm_device *dev);
+void i915_gem_reset_lists(struct drm_device *dev);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
 void i915_gem_flush_ring(struct drm_device *dev,
 			 struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 734cc08c3fdb..0ce28c71facc 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1682,27 +1682,60 @@ i915_get_gem_seqno(struct drm_device *dev,
 	return ring->get_gem_seqno(dev, ring);
 }
 
-void i915_gem_reset_flushing_list(struct drm_device *dev)
+static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
+				      struct intel_ring_buffer *ring)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	while (!list_empty(&ring->request_list)) {
+		struct drm_i915_gem_request *request;
 
-	while (!list_empty(&dev_priv->mm.flushing_list)) {
+		request = list_first_entry(&ring->request_list,
+					   struct drm_i915_gem_request,
+					   list);
+
+		list_del(&request->list);
+		list_del(&request->client_list);
+		kfree(request);
+	}
+
+	while (!list_empty(&ring->active_list)) {
 		struct drm_i915_gem_object *obj_priv;
 
-		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+		obj_priv = list_first_entry(&ring->active_list,
 					    struct drm_i915_gem_object,
 					    list);
 
 		obj_priv->base.write_domain = 0;
+		list_del_init(&obj_priv->gpu_write_list);
 		i915_gem_object_move_to_inactive(&obj_priv->base);
 	}
 }
 
-void i915_gem_reset_inactive_gpu_domains(struct drm_device *dev)
+void i915_gem_reset_lists(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv;
 
+	i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
+	if (HAS_BSD(dev))
+		i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
+
+	/* Remove anything from the flushing lists. The GPU cache is likely
+	 * to be lost on reset along with the data, so simply move the
+	 * lost bo to the inactive list.
+	 */
+	while (!list_empty(&dev_priv->mm.flushing_list)) {
+		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+					    struct drm_i915_gem_object,
+					    list);
+
+		obj_priv->base.write_domain = 0;
+		list_del_init(&obj_priv->gpu_write_list);
+		i915_gem_object_move_to_inactive(&obj_priv->base);
+	}
+
+	/* Move everything out of the GPU domains to ensure we do any
+	 * necessary invalidation upon reuse.
+	 */
 	list_for_each_entry(obj_priv,
 			    &dev_priv->mm.inactive_list,
 			    list)
@@ -1720,15 +1753,12 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t seqno;
-	bool wedged;
 
 	if (!ring->status_page.page_addr ||
 	    list_empty(&ring->request_list))
 		return;
 
 	seqno = i915_get_gem_seqno(dev, ring);
-	wedged = atomic_read(&dev_priv->mm.wedged);
-
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 
@@ -1736,7 +1766,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 					   struct drm_i915_gem_request,
 					   list);
 
-		if (!wedged && !i915_seqno_passed(seqno, request->seqno))
+		if (!i915_seqno_passed(seqno, request->seqno))
 			break;
 
 		trace_i915_gem_request_retire(dev, request->seqno);
@@ -1757,8 +1787,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 					    struct drm_i915_gem_object,
 					    list);
 
-		if (!wedged &&
-		    !i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
+		if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
 			break;
 
 		obj = &obj_priv->base;