path: root/drivers/gpu/drm/ttm/ttm_bo.c
author     Maarten Lankhorst <m.b.lankhorst@gmail.com>    2012-11-28 06:25:43 -0500
committer  Dave Airlie <airlied@redhat.com>               2012-12-10 05:21:22 -0500
commit     e7ab20197be3ee5fd75441e1cff0c7cdfea5bf1a (patch)
tree       7c43fd066b4fd7f4aef8a4fb7b1d3c47d831c422 /drivers/gpu/drm/ttm/ttm_bo.c
parent     2b7b3ad2fb8f904ae9ba7ca71323bc11c0978d91 (diff)
drm/ttm: cope with reserved buffers on lru list in ttm_mem_evict_first, v2
Replace the goto loop with a simple for-each loop, and only run the delayed destroy cleanup if we can reserve the buffer first. No race occurs, since the lru lock is never dropped anymore. An empty list and a list full of unreservable buffers both cause -EBUSY to be returned, which is identical to the previous situation, because previously buffers on the lru list were always guaranteed to be reservable.

This should work since currently ttm guarantees that items on the lru are always reservable, and reserving items blockingly while some bo is held is enough to run into a deadlock. Currently this is not a concern since removal from the lru list and reservation are always done atomically, but when this guarantee no longer holds, we have to handle this situation or end up with possible deadlocks.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
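For reference, a minimal sketch of the new scan described above (simplified from the hunk below; the eviction work, refcounting and delayed-destroy handling are elided, and the surrounding declarations are assumed from the enclosing function):

/* Walk the LRU under the lru_lock and stop at the first buffer that can be
 * reserved without blocking.  An empty list or a list full of unreservable
 * buffers leaves ret == -EBUSY, which is returned exactly as before. */
int ret = -EBUSY;

spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &man->lru, lru) {
	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
	if (!ret)
		break;		/* found a reservable buffer to evict */
}
if (ret) {
	spin_unlock(&glob->lru_lock);
	return ret;		/* nothing evictable right now */
}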
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 42
1 file changed, 11 insertions(+), 31 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 9a479885bf59..6059771d506e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -811,49 +811,29 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	struct ttm_buffer_object *bo;
-	int ret, put_count = 0;
+	int ret = -EBUSY, put_count;
 
-retry:
 	spin_lock(&glob->lru_lock);
-	if (list_empty(&man->lru)) {
+	list_for_each_entry(bo, &man->lru, lru) {
+		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+		if (!ret)
+			break;
+	}
+
+	if (ret) {
 		spin_unlock(&glob->lru_lock);
-		return -EBUSY;
+		return ret;
 	}
 
-	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
 	kref_get(&bo->list_kref);
 
 	if (!list_empty(&bo->ddestroy)) {
-		ret = ttm_bo_reserve_locked(bo, interruptible, no_wait_reserve, false, 0);
-		if (!ret)
-			ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
-							     no_wait_gpu);
-		else
-			spin_unlock(&glob->lru_lock);
-
+		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
+						     no_wait_gpu);
 		kref_put(&bo->list_kref, ttm_bo_release_list);
-
 		return ret;
 	}
 
-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-
-	if (unlikely(ret == -EBUSY)) {
-		spin_unlock(&glob->lru_lock);
-		if (likely(!no_wait_reserve))
-			ret = ttm_bo_wait_unreserved(bo, interruptible);
-
-		kref_put(&bo->list_kref, ttm_bo_release_list);
-
-		/**
-		 * We *need* to retry after releasing the lru lock.
-		 */
-
-		if (unlikely(ret != 0))
-			return ret;
-		goto retry;
-	}
-
 	put_count = ttm_bo_del_from_lru(bo);
 	spin_unlock(&glob->lru_lock);
 