author	Chris Wilson <chris@chris-wilson.co.uk>	2010-07-03 02:58:38 -0400
committer	Chris Wilson <chris@chris-wilson.co.uk>	2010-09-08 05:23:56 -0400
commit	de227ef0907258359d53e3e1530c1f3678eb2bb9 (patch)
tree	86610be283f35dd2913fb94aeaca021720e99b71 /drivers/gpu/drm/i915/i915_gem.c
parent	015b9c8ce50e5bfb7ea78613dcad4b30d1a0d9da (diff)
drm/i915: Kill the active list spinlock
This spinlock only served debugging purposes in a time when we could not be sure of the mutex ever being released upon a GPU hang. As we should now be able to rely on hangcheck to do that job for us (and error reporting should not itself require the struct mutex), we can kill this incomplete attempt at protection.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
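To illustrate the locking model the patch moves to (this is not i915 code, just a hedged userspace sketch with hypothetical names): a single outer mutex, standing in for struct_mutex, is assumed to be the only protection the active/inactive lists need, so the retirement walk no longer has to take, break, and re-take an inner spinlock or hold extra references while moving objects between lists.

/* Minimal userspace sketch (hypothetical names, not i915 code): one outer
 * mutex guards both lists, so nodes can be moved without an inner spinlock,
 * mirroring the pattern this patch applies by relying on struct_mutex alone.
 */
#include <pthread.h>
#include <stdio.h>

struct node {
	struct node *prev, *next;
	int seqno;
};

struct state {
	pthread_mutex_t lock;	/* plays the role of struct_mutex */
	struct node active;	/* list head: in-flight objects   */
	struct node inactive;	/* list head: retired objects     */
};

static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

static void list_move_tail(struct node *n, struct node *head)
{
	/* unlink from the current list */
	n->prev->next = n->next;
	n->next->prev = n->prev;
	/* insert just before the head, i.e. at the tail */
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* Retire every node up to 'seqno' under the one outer mutex for the whole
 * walk -- no lock breaking, no extra reference, no 'goto out'.
 */
static void retire_up_to(struct state *s, int seqno)
{
	pthread_mutex_lock(&s->lock);
	while (s->active.next != &s->active) {
		struct node *n = s->active.next;

		if (n->seqno > seqno)
			break;
		list_move_tail(n, &s->inactive);
	}
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct state s = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct node a = { .seqno = 1 }, b = { .seqno = 2 };

	list_init(&s.active);
	list_init(&s.inactive);
	list_init(&a);
	list_init(&b);
	list_move_tail(&a, &s.active);
	list_move_tail(&b, &s.active);

	retire_up_to(&s, 1);
	printf("inactive now holds seqno %d\n", s.inactive.next->seqno);
	return 0;
}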
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c | 35
1 file changed, 2 insertions(+), 33 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index afe4a9b0a03d..b6e4b60724ec 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1486,7 +1486,6 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj,
 			       struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = obj->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
 
@@ -1500,9 +1499,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj,
 	}
 
 	/* Move from whatever list we were on to the tail of execution. */
-	spin_lock(&dev_priv->mm.active_list_lock);
 	list_move_tail(&obj_priv->list, &ring->active_list);
-	spin_unlock(&dev_priv->mm.active_list_lock);
 	obj_priv->last_rendering_seqno = seqno;
 }
 
@@ -1676,14 +1673,11 @@ static void
 i915_gem_retire_request(struct drm_device *dev,
 			struct drm_i915_gem_request *request)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
 	trace_i915_gem_request_retire(dev, request->seqno);
 
 	/* Move any buffers on the active list that are no longer referenced
 	 * by the ringbuffer to the flushing/inactive lists as appropriate.
 	 */
-	spin_lock(&dev_priv->mm.active_list_lock);
 	while (!list_empty(&request->ring->active_list)) {
 		struct drm_gem_object *obj;
 		struct drm_i915_gem_object *obj_priv;
@@ -1698,7 +1692,7 @@ i915_gem_retire_request(struct drm_device *dev,
 		 * this seqno.
 		 */
 		if (obj_priv->last_rendering_seqno != request->seqno)
-			goto out;
+			return;
 
 #if WATCH_LRU
 		DRM_INFO("%s: retire %d moves to inactive list %p\n",
@@ -1707,22 +1701,9 @@ i915_gem_retire_request(struct drm_device *dev,
 
 		if (obj->write_domain != 0)
 			i915_gem_object_move_to_flushing(obj);
-		else {
-			/* Take a reference on the object so it won't be
-			 * freed while the spinlock is held.  The list
-			 * protection for this spinlock is safe when breaking
-			 * the lock like this since the next thing we do
-			 * is just get the head of the list again.
-			 */
-			drm_gem_object_reference(obj);
+		else
 			i915_gem_object_move_to_inactive(obj);
-			spin_unlock(&dev_priv->mm.active_list_lock);
-			drm_gem_object_unreference(obj);
-			spin_lock(&dev_priv->mm.active_list_lock);
-		}
 	}
-out:
-	spin_unlock(&dev_priv->mm.active_list_lock);
 }
 
 /**
@@ -1972,7 +1953,6 @@ int
 i915_gem_object_unbind(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int ret = 0;
 
@@ -2027,10 +2007,8 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 	}
 
 	/* Remove ourselves from the LRU list if present. */
-	spin_lock(&dev_priv->mm.active_list_lock);
 	if (!list_empty(&obj_priv->list))
 		list_del_init(&obj_priv->list);
-	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	if (i915_gem_object_is_purgeable(obj_priv))
 		i915_gem_object_truncate(obj);
@@ -2047,13 +2025,10 @@ i915_gpu_idle(struct drm_device *dev)
 	bool lists_empty;
 	int ret;
 
-	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
 		       list_empty(&dev_priv->render_ring.active_list) &&
 		       (!HAS_BSD(dev) ||
 			list_empty(&dev_priv->bsd_ring.active_list)));
-	spin_unlock(&dev_priv->mm.active_list_lock);
-
 	if (lists_empty)
 		return 0;
 
@@ -4550,11 +4525,8 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 		return ret;
 	}
 
-	spin_lock(&dev_priv->mm.active_list_lock);
 	BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
 	BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
-	spin_unlock(&dev_priv->mm.active_list_lock);
-
 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
 	BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
@@ -4606,7 +4578,6 @@ i915_gem_load(struct drm_device *dev)
 	int i;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	spin_lock_init(&dev_priv->mm.active_list_lock);
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
@@ -4862,12 +4833,10 @@ i915_gpu_is_active(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int lists_empty;
 
-	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
 		      list_empty(&dev_priv->render_ring.active_list);
 	if (HAS_BSD(dev))
 		lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
-	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	return !lists_empty;
 }