about summary refs log tree commit diff stats
path: root/drivers/gpu
diff options
context:
space:
mode:
authorRoland Dreier <rdreier@cisco.com>2009-02-06 20:48:09 -0500
committerDave Airlie <airlied@redhat.com>2009-02-19 21:21:10 -0500
commita35f2e2b83a789e189a501ebd722bc9a1310eb05 (patch)
treea98654866d9295266dcf5ca61740004358b585ba /drivers/gpu
parent96dec61d563fb8dff2c8427fdf85327a95b65c74 (diff)
drm/i915: Fix potential AB-BA deadlock in i915_gem_execbuffer()
Lockdep warns that i915_gem_execbuffer() can trigger a page fault (which takes mmap_sem) while holding dev->struct_mutex, while drm_vm_open() (which is called with mmap_sem already held) takes dev->struct_mutex. So this is a potential AB-BA deadlock. The way that i915_gem_execbuffer() triggers a page fault is by doing copy_to_user() when returning new buffer offsets back to userspace; however there is no reason to hold the struct_mutex when doing this copy, since what is being copied is the contents of an array private to i915_gem_execbuffer() anyway. So we can fix the potential deadlock (and get rid of the lockdep warning) by simply moving the copy_to_user() outside of where struct_mutex is held.
This fixes <http://bugzilla.kernel.org/show_bug.cgi?id=12491>.
Reported-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Tested-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Dave Airlie <airlied@linux.ie>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c21
1 file changed, 12 insertions, 9 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 55f4c060fa01..ff0d94dd3550 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2634,15 +2634,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2634 2634
2635 i915_verify_inactive(dev, __FILE__, __LINE__); 2635 i915_verify_inactive(dev, __FILE__, __LINE__);
2636 2636
2637 /* Copy the new buffer offsets back to the user's exec list. */
2638 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
2639 (uintptr_t) args->buffers_ptr,
2640 exec_list,
2641 sizeof(*exec_list) * args->buffer_count);
2642 if (ret)
2643 DRM_ERROR("failed to copy %d exec entries "
2644 "back to user (%d)\n",
2645 args->buffer_count, ret);
2646err: 2637err:
2647 for (i = 0; i < pinned; i++) 2638 for (i = 0; i < pinned; i++)
2648 i915_gem_object_unpin(object_list[i]); 2639 i915_gem_object_unpin(object_list[i]);
@@ -2652,6 +2643,18 @@ err:
2652 2643
2653 mutex_unlock(&dev->struct_mutex); 2644 mutex_unlock(&dev->struct_mutex);
2654 2645
2646 if (!ret) {
2647 /* Copy the new buffer offsets back to the user's exec list. */
2648 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
2649 (uintptr_t) args->buffers_ptr,
2650 exec_list,
2651 sizeof(*exec_list) * args->buffer_count);
2652 if (ret)
2653 DRM_ERROR("failed to copy %d exec entries "
2654 "back to user (%d)\n",
2655 args->buffer_count, ret);
2656 }
2657
2655pre_mutex_err: 2658pre_mutex_err:
2656 drm_free(object_list, sizeof(*object_list) * args->buffer_count, 2659 drm_free(object_list, sizeof(*object_list) * args->buffer_count,
2657 DRM_MEM_DRIVER); 2660 DRM_MEM_DRIVER);