about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2017-02-27 07:26:54 -0500
committerJani Nikula <jani.nikula@intel.com>2017-03-09 03:43:55 -0500
commit8c9923707f30ff56d9fd242053594b18f38d8036 (patch)
treee5ae68a285fa0107f4d9cc35da1b85257d819380
parentb717a0392530ae8da0da041abe5c3a6098b55660 (diff)
drm/i915: Remove the vma from the drm_mm if binding fails
As we track whether a vma has been inserted into the drm_mm using the
vma->flags, if we fail to bind the vma into the GTT we do not update
those bits and will attempt to reinsert the vma into the drm_mm on
future passes. To prevent that, we want to unwind i915_vma_insert()
if we fail in our attempt to bind.

Fixes: 59bfa1248e22 ("drm/i915: Start passing around i915_vma from execbuffer")
Testcase: igt/drv_selftest/live_gtt
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: <stable@vger.kernel.org> # v4.9+
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170227122654.27651-3-chris@chris-wilson.co.uk
(cherry picked from commit 31c7effa39f21f0fea1b3250ae9ff32b9c7e1ae5)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c57
1 file changed, 37 insertions, 20 deletions
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 155906e84812..df20e9bc1c0f 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -512,10 +512,36 @@ err_unpin:
512 return ret; 512 return ret;
513} 513}
514 514
515static void
516i915_vma_remove(struct i915_vma *vma)
517{
518 struct drm_i915_gem_object *obj = vma->obj;
519
520 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
521 GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
522
523 drm_mm_remove_node(&vma->node);
524 list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
525
526 /* Since the unbound list is global, only move to that list if
527 * no more VMAs exist.
528 */
529 if (--obj->bind_count == 0)
530 list_move_tail(&obj->global_link,
531 &to_i915(obj->base.dev)->mm.unbound_list);
532
533 /* And finally now the object is completely decoupled from this vma,
534 * we can drop its hold on the backing storage and allow it to be
535 * reaped by the shrinker.
536 */
537 i915_gem_object_unpin_pages(obj);
538 GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
539}
540
515int __i915_vma_do_pin(struct i915_vma *vma, 541int __i915_vma_do_pin(struct i915_vma *vma,
516 u64 size, u64 alignment, u64 flags) 542 u64 size, u64 alignment, u64 flags)
517{ 543{
518 unsigned int bound = vma->flags; 544 const unsigned int bound = vma->flags;
519 int ret; 545 int ret;
520 546
521 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 547 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
@@ -524,18 +550,18 @@ int __i915_vma_do_pin(struct i915_vma *vma,
524 550
525 if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) { 551 if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
526 ret = -EBUSY; 552 ret = -EBUSY;
527 goto err; 553 goto err_unpin;
528 } 554 }
529 555
530 if ((bound & I915_VMA_BIND_MASK) == 0) { 556 if ((bound & I915_VMA_BIND_MASK) == 0) {
531 ret = i915_vma_insert(vma, size, alignment, flags); 557 ret = i915_vma_insert(vma, size, alignment, flags);
532 if (ret) 558 if (ret)
533 goto err; 559 goto err_unpin;
534 } 560 }
535 561
536 ret = i915_vma_bind(vma, vma->obj->cache_level, flags); 562 ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
537 if (ret) 563 if (ret)
538 goto err; 564 goto err_remove;
539 565
540 if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND) 566 if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
541 __i915_vma_set_map_and_fenceable(vma); 567 __i915_vma_set_map_and_fenceable(vma);
@@ -544,7 +570,12 @@ int __i915_vma_do_pin(struct i915_vma *vma,
544 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); 570 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
545 return 0; 571 return 0;
546 572
547err: 573err_remove:
574 if ((bound & I915_VMA_BIND_MASK) == 0) {
575 GEM_BUG_ON(vma->pages);
576 i915_vma_remove(vma);
577 }
578err_unpin:
548 __i915_vma_unpin(vma); 579 __i915_vma_unpin(vma);
549 return ret; 580 return ret;
550} 581}
@@ -657,9 +688,6 @@ int i915_vma_unbind(struct i915_vma *vma)
657 } 688 }
658 vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND); 689 vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
659 690
660 drm_mm_remove_node(&vma->node);
661 list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
662
663 if (vma->pages != obj->mm.pages) { 691 if (vma->pages != obj->mm.pages) {
664 GEM_BUG_ON(!vma->pages); 692 GEM_BUG_ON(!vma->pages);
665 sg_free_table(vma->pages); 693 sg_free_table(vma->pages);
@@ -667,18 +695,7 @@ int i915_vma_unbind(struct i915_vma *vma)
667 } 695 }
668 vma->pages = NULL; 696 vma->pages = NULL;
669 697
670 /* Since the unbound list is global, only move to that list if 698 i915_vma_remove(vma);
671 * no more VMAs exist. */
672 if (--obj->bind_count == 0)
673 list_move_tail(&obj->global_link,
674 &to_i915(obj->base.dev)->mm.unbound_list);
675
676 /* And finally now the object is completely decoupled from this vma,
677 * we can drop its hold on the backing storage and allow it to be
678 * reaped by the shrinker.
679 */
680 i915_gem_object_unpin_pages(obj);
681 GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
682 699
683destroy: 700destroy:
684 if (unlikely(i915_vma_is_closed(vma))) 701 if (unlikely(i915_vma_is_closed(vma)))