author     Daniel Vetter <daniel.vetter@ffwll.ch>    2013-08-14 18:02:46 -0400
committer  Dave Airlie <airlied@redhat.com>          2013-08-20 22:58:17 -0400
commit     319c933c71f3dbdb2b3274d1634d3494c70efa06 (patch)
tree       21573ae28fb9b9c38dd974eebc80a69b9a7d70e1 /drivers/gpu/drm/drm_gem.c
parent     20228c447846da9399ead53fdbbc8ab69b47788a (diff)
drm/prime: proper locking+refcounting for obj->dma_buf link
The export dma-buf cache is semantically similar to a flink name. So
semantically it makes sense to treat it the same and remove the name
(i.e. the dma_buf pointer) and its references when the last gem handle
disappears.
Again we need to be careful, but doubly so: not only could someone
race an export against a gem close ioctl (so we need to recheck
obj->handle_count when assigning the new name), but multiple
exports can also race against each other. This is prevented by
holding the dev->object_name_lock across the entire section which
touches obj->dma_buf.
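
For context, the export-side recheck described above looks roughly like the
following. This is only a sketch of the export_and_register_object() helper
mentioned in the v2 note below; the real implementation lives in
drivers/gpu/drm/drm_prime.c, which is outside this diffstat, so the exact body
here is illustrative rather than a verbatim quote of the patch.

static struct dma_buf *export_and_register_object(struct drm_device *dev,
                                                  struct drm_gem_object *obj,
                                                  uint32_t flags)
{
        struct dma_buf *dmabuf;

        /* Caller holds dev->object_name_lock. */
        if (obj->handle_count == 0)
                /* Lost the race against the final gem close. */
                return ERR_PTR(-ENOENT);

        dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
        if (IS_ERR(dmabuf))
                return dmabuf;

        /*
         * This is the reference that drm_gem_object_exported_dma_buf_free()
         * drops again once the last gem handle disappears.
         */
        obj->dma_buf = dmabuf;
        get_dma_buf(obj->dma_buf);

        return dmabuf;
}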
With the new scheme we also need to reinstate the obj->dma_buf link at
import time (in case the only reference userspace has held in-between
was through the dma-buf fd and not through any native gem handle). For
simplicity we don't check whether it's a native object but
unconditionally set up that link - with the new scheme of removing the
obj->dma_buf reference when the last handle disappears we can do that.
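
As a rough sketch of that import-time fixup (again drm_prime.c territory; the
helper name drm_gem_relink_dma_buf() is made up for illustration, and the
actual patch presumably does this inline in the fd_to_handle path):

/* Called with dev->object_name_lock held, as described above. */
static void drm_gem_relink_dma_buf(struct drm_gem_object *obj,
                                   struct dma_buf *dma_buf)
{
        if (obj->dma_buf) {
                /* A previous export/import already set up the link. */
                WARN_ON(obj->dma_buf != dma_buf);
        } else {
                /* Reinstate the link; dropped again with the last handle. */
                obj->dma_buf = dma_buf;
                get_dma_buf(dma_buf);
        }
}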
To make it clear that this is not just for exported buffers anymore,
also rename it from export_dma_buf to dma_buf.
To make sure that no one can race an fd_to_handle or handle_to_fd with
gem_close, we use the same trick as in flink of extending the
dev->object_name_lock critical section. With this change we finally
have a guaranteed 1:1 relationship (at least for native objects)
between gem objects and dma-bufs, even accounting for races (which can
happen since the dma-buf itself holds a reference while in-flight).
This prevents igt/prime_self_import/export-vs-gem_close-race from
Oopsing the kernel. There is still a leak though since the per-file
priv dma-buf/handle cache handling is racy. That will be fixed in a
later patch.
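
The handle_to_fd side of that extended critical section then looks roughly
like this sketch (the wrapper name drm_gem_lookup_or_export() is invented for
illustration; the real code in drm_gem_prime_handle_to_fd() in drm_prime.c
also folds the per-file handle/fd bookkeeping into the same locked region):

static struct dma_buf *drm_gem_lookup_or_export(struct drm_device *dev,
                                                struct drm_gem_object *obj,
                                                uint32_t flags)
{
        struct dma_buf *dmabuf;

        mutex_lock(&dev->object_name_lock);
        if (obj->dma_buf) {
                /* Reuse the cached dma-buf, handing out a new reference. */
                get_dma_buf(obj->dma_buf);
                dmabuf = obj->dma_buf;
        } else {
                /* Rechecks obj->handle_count, see the sketch further up. */
                dmabuf = export_and_register_object(dev, obj, flags);
        }
        mutex_unlock(&dev->object_name_lock);

        return dmabuf;
}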
v2: Remove the bogus dma_buf_put from the export_and_register_object
failure path if we've raced with the handle count dropping to 0.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/drm_gem.c')
-rw-r--r--   drivers/gpu/drm/drm_gem.c   24
1 file changed, 21 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index d47aa774d64b..4b3c533be859 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -195,9 +195,14 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
 		drm_prime_remove_buf_handle(&filp->prime,
 				obj->import_attach->dmabuf);
 	}
-	if (obj->export_dma_buf) {
+
+	/*
+	 * Note: obj->dma_buf can't disappear as long as we still hold a
+	 * handle reference in obj->handle_count.
+	 */
+	if (obj->dma_buf) {
 		drm_prime_remove_buf_handle(&filp->prime,
-				obj->export_dma_buf);
+				obj->dma_buf);
 	}
 }
 
@@ -231,6 +236,15 @@ static void drm_gem_object_handle_free(struct drm_gem_object *obj)
 	}
 }
 
+static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
+{
+	/* Unbreak the reference cycle if we have an exported dma_buf. */
+	if (obj->dma_buf) {
+		dma_buf_put(obj->dma_buf);
+		obj->dma_buf = NULL;
+	}
+}
+
 static void
 drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
 {
@@ -244,8 +258,10 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
 	 */
 
 	mutex_lock(&obj->dev->object_name_lock);
-	if (--obj->handle_count == 0)
+	if (--obj->handle_count == 0) {
 		drm_gem_object_handle_free(obj);
+		drm_gem_object_exported_dma_buf_free(obj);
+	}
 	mutex_unlock(&obj->dev->object_name_lock);
 
 	drm_gem_object_unreference_unlocked(obj);
@@ -712,6 +728,8 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 void
 drm_gem_object_release(struct drm_gem_object *obj)
 {
+	WARN_ON(obj->dma_buf);
+
 	if (obj->filp)
 		fput(obj->filp);
 }