author     Daniel Vetter <daniel.vetter@ffwll.ch>    2013-08-14 18:02:45 -0400
committer  Dave Airlie <airlied@redhat.com>          2013-08-20 22:58:17 -0400
commit     20228c447846da9399ead53fdbbc8ab69b47788a
tree       6fe0e7e3b7f822206dde345848138234c7a19465  /drivers/gpu/drm/drm_gem.c
parent     cd4f013f3a4b6a55d484cc2e206dc08e055e5291
drm/gem: completely close gem_open vs. gem_close races
The gem flink name holds a reference onto the object itself, and this
self-reference would prevent an flink'ed object from ever being
freed. To break that loop we remove the flink name when the last
userspace handle disappears, i.e. when obj->handle_count reaches 0.
Now in gem_open we drop the dev->object_name_lock between the flink
name lookup and actually adding the handle. This means a concurrent
gem_close of the last handle could result in the flink name getting
reaped right in between, i.e.
Thread 1                        Thread 2

gem_open                        gem_close

flink -> obj lookup
                                handle_count drops to 0
                                remove flink name
create_handle
handle_count++
If someone now flinks this object again, we'll get a new flink name.
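For reference, the window corresponds to the old lookup path in
drm_gem_open_ioctl(), condensed here from the diff below with comments
added (a sketch, not the complete function):

        mutex_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, (int) args->name);
        if (obj)
                drm_gem_object_reference(obj);
        mutex_unlock(&dev->object_name_lock);   /* race window opens here */
        if (!obj)
                return -ENOENT;

        /* A concurrent gem_close can drop handle_count to 0 and reap the
         * flink name before the handle is created below. */
        ret = drm_gem_handle_create(file_priv, obj, &handle);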
We can close this race by removing the lock dropping and making the
entire lookup+handle_create sequence atomic. Unfortunately, to still be
able to share the handle_create logic, this requires a
handle_create_tail function which drops the lock - we can't hold the
object_name_lock while calling into a driver's ->gem_open callback.
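The resulting lock handoff in drm_gem_open_ioctl() then looks roughly
like this (again condensed from the diff below, comments added):

        mutex_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, (int) args->name);
        if (obj) {
                drm_gem_object_reference(obj);
        } else {
                mutex_unlock(&dev->object_name_lock);
                return -ENOENT;
        }

        /* Lookup and handle creation are now one critical section:
         * drm_gem_handle_create_tail() expects object_name_lock to be
         * held and drops it before calling into the driver's ->gem_open
         * callback. */
        ret = drm_gem_handle_create_tail(file_priv, obj, &handle);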
Note that for flink fixing this race isn't really important, since
racing gem_open against gem_close is clearly a userspace bug. And no
matter how the race ends, we won't leak any references.
But with dma-buf, where the userspace dma-buf fd itself is refcounted,
this is a valid sequence and hence we should fix it. Therefore this
patch here is just a warm-up exercise (and for consistency between
flink buffer sharing and dma-buf buffer sharing with self-imports).
Also note that this extension of the critical section in gem_open
protected by dev->object_name_lock only works because it's now a
mutex: A spinlock would conflict with the potential memory allocation
in idr_preload().
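Concretely, the ordering at the top of drm_gem_handle_create_tail() is
(condensed from the diff below, comments added):

        WARN_ON(!mutex_is_locked(&dev->object_name_lock));

        /* idr_preload() with GFP_KERNEL may sleep to allocate memory,
         * which is fine under a mutex but would not be under a spinlock.
         * The idr insertion itself then runs under the per-file
         * table_lock spinlock. */
        idr_preload(GFP_KERNEL);
        spin_lock(&file_priv->table_lock);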
This is exercised by igt/gem_flink_race/flink_name.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/drm_gem.c')
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 42
1 file changed, 31 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index adb9eda4fa1a..d47aa774d64b 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -308,23 +308,26 @@ int drm_gem_dumb_destroy(struct drm_file *file,
 EXPORT_SYMBOL(drm_gem_dumb_destroy);
 
 /**
- * Create a handle for this object. This adds a handle reference
- * to the object, which includes a regular reference count. Callers
- * will likely want to dereference the object afterwards.
+ * drm_gem_handle_create_tail - internal functions to create a handle
+ *
+ * This expects the dev->object_name_lock to be held already and will drop it
+ * before returning. Used to avoid races in establishing new handles when
+ * importing an object from either an flink name or a dma-buf.
  */
 int
-drm_gem_handle_create(struct drm_file *file_priv,
-                      struct drm_gem_object *obj,
-                      u32 *handlep)
+drm_gem_handle_create_tail(struct drm_file *file_priv,
+                           struct drm_gem_object *obj,
+                           u32 *handlep)
 {
         struct drm_device *dev = obj->dev;
         int ret;
 
+        WARN_ON(!mutex_is_locked(&dev->object_name_lock));
+
         /*
          * Get the user-visible handle using idr. Preload and perform
          * allocation under our spinlock.
          */
-        mutex_lock(&dev->object_name_lock);
         idr_preload(GFP_KERNEL);
         spin_lock(&file_priv->table_lock);
 
@@ -351,6 +354,21 @@ drm_gem_handle_create(struct drm_file *file_priv,
 
         return 0;
 }
+
+/**
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
+ */
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+                      struct drm_gem_object *obj,
+                      u32 *handlep)
+{
+        mutex_lock(&obj->dev->object_name_lock);
+
+        return drm_gem_handle_create_tail(file_priv, obj, handlep);
+}
 EXPORT_SYMBOL(drm_gem_handle_create);
 
 
@@ -627,13 +645,15 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
 
         mutex_lock(&dev->object_name_lock);
         obj = idr_find(&dev->object_name_idr, (int) args->name);
-        if (obj)
+        if (obj) {
                 drm_gem_object_reference(obj);
-        mutex_unlock(&dev->object_name_lock);
-        if (!obj)
+        } else {
+                mutex_unlock(&dev->object_name_lock);
                 return -ENOENT;
+        }
 
-        ret = drm_gem_handle_create(file_priv, obj, &handle);
+        /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+        ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
         drm_gem_object_unreference_unlocked(obj);
         if (ret)
                 return ret;