author     Chris Wilson <chris@chris-wilson.co.uk>    2013-01-15 11:17:54 -0500
committer  Daniel Vetter <daniel.vetter@ffwll.ch>     2013-01-16 04:53:38 -0500
commit     262b6d363fcff16359c93bd58c297f961f6e6273
tree       5e1e3182442174e877aefc74241804065b396927
parent     f30d26e468322b50d5e376bec40658683aff8ece
drm/i915: Invalidate the relocation presumed_offsets along the slow path
In the slow path, we are forced to copy the relocations prior to
acquiring the struct mutex in order to handle pagefaults. We forgo
copying the new offsets back into the relocation entries in order to
prevent a recursive locking bug should we trigger a pagefault whilst
holding the mutex for the reservations of the execbuffer. Therefore, we
need to reset the presumed_offsets just in case the objects are rebound
back into their old locations after relocating for this execbuffer - if
that were to happen we would assume the relocations were valid and
leave the actual pointers to the kernel dangling, instant hang.

Fixes regression from
commit bcf50e2775bbc3101932d8e4ab8c7902aa4163b4
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Sun Nov 21 22:07:12 2010 +0000

    drm/i915: Handle pagefaults in execbuffer user relocations

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=55984
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@fwll.ch>
Cc: stable@vger.kernel.org
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
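For context on why an impossible value is the right sentinel: the relocation fast path skips an entry whenever its presumed_offset still matches the target object's current GTT offset, and (u64)-1 can never match a real binding, so every invalidated entry is forced through relocation processing on the next execbuffer. Below is a minimal sketch of that check, using simplified stand-in names (reloc_entry, relocation_still_valid) rather than the exact i915 structures and helpers.

#include <linux/types.h>	/* u64, bool */

/* Sketch only: stand-in for struct drm_i915_gem_relocation_entry. */
struct reloc_entry {
	u64 presumed_offset;	/* userspace's guess of the target's GTT offset */
};

/*
 * If the presumed offset still matches where the target object actually
 * sits, the kernel can skip rewriting the relocation.  Writing (u64)-1
 * into presumed_offset guarantees a mismatch, so the entry is processed
 * again on the next execbuffer instead of being trusted.
 */
static bool relocation_still_valid(const struct reloc_entry *reloc,
				   u64 current_gtt_offset)
{
	return reloc->presumed_offset == current_gtt_offset;
}

The hunk below writes that sentinel back to userspace with copy_to_user(), one entry at a time, so a fault on any individual write is reported as -EFAULT after retaking struct_mutex for the common error path.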
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d6a994a07393..26d08bb58218 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -539,6 +539,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	total = 0;
 	for (i = 0; i < count; i++) {
 		struct drm_i915_gem_relocation_entry __user *user_relocs;
+		u64 invalid_offset = (u64)-1;
+		int j;
 
 		user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
 
@@ -549,6 +551,25 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 			goto err;
 		}
 
+		/* As we do not update the known relocation offsets after
+		 * relocating (due to the complexities in lock handling),
+		 * we need to mark them as invalid now so that we force the
+		 * relocation processing next time. Just in case the target
+		 * object is evicted and then rebound into its old
+		 * presumed_offset before the next execbuffer - if that
+		 * happened we would make the mistake of assuming that the
+		 * relocations were valid.
+		 */
+		for (j = 0; j < exec[i].relocation_count; j++) {
+			if (copy_to_user(&user_relocs[j].presumed_offset,
+					 &invalid_offset,
+					 sizeof(invalid_offset))) {
+				ret = -EFAULT;
+				mutex_lock(&dev->struct_mutex);
+				goto err;
+			}
+		}
+
 		reloc_offset[i] = total;
 		total += exec[i].relocation_count;
 	}