aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/i915/i915_gem.c
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2010-07-23 18:18:50 -0400
committerEric Anholt <eric@anholt.net>2010-08-01 22:53:24 -0400
commitbe72615bcf4d5b7b314d836c5e1b4baa4b65dad1 (patch)
tree1a16cfaf29843b54a4577120c3c2df9cfd548f60 /drivers/gpu/drm/i915/i915_gem.c
parentb09a1feca65764311f8a3e14befb52b98d705f0a (diff)
drm/i915: Repeat unbinding during free if interrupted (v6)
If during the freeing of an object the unbind is interrupted by a system call, which is quite possible if we have outstanding GPU writes that must be flushed, the unbind is silently aborted. This still leaves the AGP region and backing pages allocated, and perhaps more importantly, the object remains upon the various lists exposing us to memory corruption. I think this is the cause behind the use-after-free, such as Bug 15664 - Graphics hang and kernel backtrace when starting Azureus with Compiz enabled https://bugzilla.kernel.org/show_bug.cgi?id=15664 v2: Daniel Vetter reminded me that kernel space programming is never easy. We cannot simply spin to clear the pending signal and so must defer the freeing of the object until later. v3: Run from the top level retire requests. v4: Tested with P(return -ERESTARTSYS)=.5 from i915_gem_do_wait_request() v5: Rebase against Eric's for-linus tree. v6: Refactor, split and add a comment about avoiding unbounded recursion. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: Daniel Vetter <daniel@ffwll.ch> Signed-off-by: Eric Anholt <eric@anholt.net>
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c51
1 file changed, 41 insertions, 10 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 138ca6ea5ef7..f45f385c84cd 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -53,6 +53,7 @@ static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
53static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, 53static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
54 struct drm_i915_gem_pwrite *args, 54 struct drm_i915_gem_pwrite *args,
55 struct drm_file *file_priv); 55 struct drm_file *file_priv);
56static void i915_gem_free_object_tail(struct drm_gem_object *obj);
56 57
57static LIST_HEAD(shrink_list); 58static LIST_HEAD(shrink_list);
58static DEFINE_SPINLOCK(shrink_list_lock); 59static DEFINE_SPINLOCK(shrink_list_lock);
@@ -1755,6 +1756,20 @@ i915_gem_retire_requests(struct drm_device *dev)
1755{ 1756{
1756 drm_i915_private_t *dev_priv = dev->dev_private; 1757 drm_i915_private_t *dev_priv = dev->dev_private;
1757 1758
1759 if (!list_empty(&dev_priv->mm.deferred_free_list)) {
1760 struct drm_i915_gem_object *obj_priv, *tmp;
1761
1762 /* We must be careful that during unbind() we do not
1763 * accidentally infinitely recurse into retire requests.
1764 * Currently:
1765 * retire -> free -> unbind -> wait -> retire_ring
1766 */
1767 list_for_each_entry_safe(obj_priv, tmp,
1768 &dev_priv->mm.deferred_free_list,
1769 list)
1770 i915_gem_free_object_tail(&obj_priv->base);
1771 }
1772
1758 i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); 1773 i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
1759 if (HAS_BSD(dev)) 1774 if (HAS_BSD(dev))
1760 i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring); 1775 i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
@@ -4458,20 +4473,19 @@ int i915_gem_init_object(struct drm_gem_object *obj)
4458 return 0; 4473 return 0;
4459} 4474}
4460 4475
4461void i915_gem_free_object(struct drm_gem_object *obj) 4476static void i915_gem_free_object_tail(struct drm_gem_object *obj)
4462{ 4477{
4463 struct drm_device *dev = obj->dev; 4478 struct drm_device *dev = obj->dev;
4479 drm_i915_private_t *dev_priv = dev->dev_private;
4464 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 4480 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4481 int ret;
4465 4482
4466 trace_i915_gem_object_destroy(obj); 4483 ret = i915_gem_object_unbind(obj);
4467 4484 if (ret == -ERESTARTSYS) {
4468 while (obj_priv->pin_count > 0) 4485 list_move(&obj_priv->list,
4469 i915_gem_object_unpin(obj); 4486 &dev_priv->mm.deferred_free_list);
4470 4487 return;
4471 if (obj_priv->phys_obj) 4488 }
4472 i915_gem_detach_phys_object(dev, obj);
4473
4474 i915_gem_object_unbind(obj);
4475 4489
4476 if (obj_priv->mmap_offset) 4490 if (obj_priv->mmap_offset)
4477 i915_gem_free_mmap_offset(obj); 4491 i915_gem_free_mmap_offset(obj);
@@ -4483,6 +4497,22 @@ void i915_gem_free_object(struct drm_gem_object *obj)
4483 kfree(obj_priv); 4497 kfree(obj_priv);
4484} 4498}
4485 4499
4500void i915_gem_free_object(struct drm_gem_object *obj)
4501{
4502 struct drm_device *dev = obj->dev;
4503 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4504
4505 trace_i915_gem_object_destroy(obj);
4506
4507 while (obj_priv->pin_count > 0)
4508 i915_gem_object_unpin(obj);
4509
4510 if (obj_priv->phys_obj)
4511 i915_gem_detach_phys_object(dev, obj);
4512
4513 i915_gem_free_object_tail(obj);
4514}
4515
4486/** Unbinds all inactive objects. */ 4516/** Unbinds all inactive objects. */
4487static int 4517static int
4488i915_gem_evict_from_inactive_list(struct drm_device *dev) 4518i915_gem_evict_from_inactive_list(struct drm_device *dev)
@@ -4756,6 +4786,7 @@ i915_gem_load(struct drm_device *dev)
4756 INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); 4786 INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
4757 INIT_LIST_HEAD(&dev_priv->mm.inactive_list); 4787 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4758 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 4788 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4789 INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
4759 INIT_LIST_HEAD(&dev_priv->render_ring.active_list); 4790 INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
4760 INIT_LIST_HEAD(&dev_priv->render_ring.request_list); 4791 INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
4761 if (HAS_BSD(dev)) { 4792 if (HAS_BSD(dev)) {