author    Michał Winiarski <michal.winiarski@intel.com>  2015-02-03 09:48:17 -0500
committer Jani Nikula <jani.nikula@intel.com>            2015-02-05 09:31:30 -0500
commit    460822b0b1a77db859b0320469799fa4dbe4d367 (patch)
tree      3f2e09686dc7e70d82a75c4d475e84f944606ebe
parent    1293eaa3ebf92f146f366d9b678a07b8b3200ea1 (diff)
drm/i915: Prevent use-after-free in invalidate_range_start callback
It's possible for the invalidate_range_start mmu notifier callback to
race against userptr object release. If the gem object was released
prior to obtaining the spinlock in invalidate_range_start, we hit a
null pointer dereference.

Testcase: igt/gem_userptr_blits/stress-mm-invalidate-close
Testcase: igt/gem_userptr_blits/stress-mm-invalidate-close-overlap
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: stable@vger.kernel.org
[Jani: added code comment suggested by Chris]
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 20 ++++++++++++++++++--
1 file changed, 18 insertions(+), 2 deletions(-)
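The fix swaps an unconditional drm_gem_object_reference() for
kref_get_unless_zero(), which takes a reference only while the count is
still positive. Here is a minimal sketch of that pattern, deliberately
outside of i915: tracked_obj, table_lock, table_lookup_locked and
tracked_obj_release are hypothetical names invented for illustration,
not part of this patch.

#include <linux/kref.h>
#include <linux/spinlock.h>

struct tracked_obj {
        struct kref refcount;
        /* ... payload ... */
};

static DEFINE_SPINLOCK(table_lock);

/* Assumed helpers: a lookup that requires table_lock to be held, and
 * the kref release callback that eventually frees the object. */
struct tracked_obj *table_lookup_locked(unsigned long key);
void tracked_obj_release(struct kref *kref);

struct tracked_obj *table_get(unsigned long key)
{
        struct tracked_obj *obj;

        spin_lock(&table_lock);
        obj = table_lookup_locked(key);
        /*
         * The entry can still be visible here after the final
         * kref_put() has run, because unlinking happens later in the
         * teardown path. A plain kref_get() would resurrect an object
         * that is mid-free; kref_get_unless_zero() fails instead and
         * the caller treats the entry as already gone.
         */
        if (obj && !kref_get_unless_zero(&obj->refcount))
                obj = NULL;
        spin_unlock(&table_lock);

        return obj;
}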
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index d182058383a9..1719078c763a 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -113,7 +113,10 @@ restart:
 			continue;
 
 		obj = mo->obj;
-		drm_gem_object_reference(&obj->base);
+
+		if (!kref_get_unless_zero(&obj->base.refcount))
+			continue;
+
 		spin_unlock(&mn->lock);
 
 		cancel_userptr(obj);
@@ -149,7 +152,20 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 		it = interval_tree_iter_first(&mn->objects, start, end);
 		if (it != NULL) {
 			obj = container_of(it, struct i915_mmu_object, it)->obj;
-			drm_gem_object_reference(&obj->base);
+
+			/* The mmu_object is released late when destroying the
+			 * GEM object so it is entirely possible to gain a
+			 * reference on an object in the process of being freed
+			 * since our serialisation is via the spinlock and not
+			 * the struct_mutex - and consequently use it after it
+			 * is freed and then double free it.
+			 */
+			if (!kref_get_unless_zero(&obj->base.refcount)) {
+				spin_unlock(&mn->lock);
+				serial = 0;
+				continue;
+			}
+
 			serial = mn->serial;
 		}
 		spin_unlock(&mn->lock);
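Judging from the failure path in the second hunk (drop the lock, zero
serial, continue), the enclosing loop, which is not visible here,
appears to retry until its cached serial matches mn->serial, so a
failed reference acquisition forces a fresh walk of the interval tree
rather than touching a dying object. Continuing the hypothetical sketch
above, the release side shows why a zero refcount can coexist with a
live table entry in the first place (table_unlink_locked is another
invented helper, and the late unlink mirrors the patch's code comment,
not the actual i915 teardown path):

#include <linux/slab.h>

void table_unlink_locked(struct tracked_obj *obj);

void tracked_obj_release(struct kref *kref)
{
        struct tracked_obj *obj =
                container_of(kref, struct tracked_obj, refcount);

        /*
         * The entry is unlinked only here, after the refcount has
         * already hit zero, so a concurrent table_get() can observe it
         * in the window between the final kref_put() and this unlink.
         * That window is exactly what kref_get_unless_zero() guards
         * against on the lookup side.
         */
        spin_lock(&table_lock);
        table_unlink_locked(obj);
        spin_unlock(&table_lock);

        kfree(obj);
}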