author		Christian König <christian.koenig@amd.com>	2017-09-04 14:58:45 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2017-09-12 14:23:41 -0400
commit		378e2d5b504fe0231c557751e58b80fcf717cc20 (patch)
tree		43adcb89b887991c2837558a469bd6a4c0d0726f
parent		aa4ec7ce7ec52c7230cfa73b06d79288b45fe1c9 (diff)
drm/ttm: fix ttm_bo_cleanup_refs_or_queue once more
With shared reservation objects __ttm_bo_reserve() can easily fail even on
destroyed BOs. This prevents correct handling when we need to individualize
the reservation object.

Fix this by individualizing the object before even trying to reserve it.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c	32
1 file changed, 17 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 180ce6296416..bee77d31895b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -440,28 +440,29 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	struct ttm_bo_global *glob = bo->glob;
 	int ret;
 
+	ret = ttm_bo_individualize_resv(bo);
+	if (ret) {
+		/* Last resort, if we fail to allocate memory for the
+		 * fences block for the BO to become idle
+		 */
+		reservation_object_wait_timeout_rcu(bo->resv, true, false,
+						    30 * HZ);
+		spin_lock(&glob->lru_lock);
+		goto error;
+	}
+
 	spin_lock(&glob->lru_lock);
 	ret = __ttm_bo_reserve(bo, false, true, NULL);
-
 	if (!ret) {
-		if (!ttm_bo_wait(bo, false, true)) {
+		if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
 			ttm_bo_del_from_lru(bo);
 			spin_unlock(&glob->lru_lock);
+			if (bo->resv != &bo->ttm_resv)
+				reservation_object_unlock(&bo->ttm_resv);
 			ttm_bo_cleanup_memtype_use(bo);
-
 			return;
 		}
 
-		ret = ttm_bo_individualize_resv(bo);
-		if (ret) {
-			/* Last resort, if we fail to allocate memory for the
-			 * fences block for the BO to become idle and free it.
-			 */
-			spin_unlock(&glob->lru_lock);
-			ttm_bo_wait(bo, true, true);
-			ttm_bo_cleanup_memtype_use(bo);
-			return;
-		}
 		ttm_bo_flush_all_fences(bo);
 
 		/*
@@ -474,11 +475,12 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 		ttm_bo_add_to_lru(bo);
 	}
 
-		if (bo->resv != &bo->ttm_resv)
-			reservation_object_unlock(&bo->ttm_resv);
 		__ttm_bo_unreserve(bo);
 	}
+	if (bo->resv != &bo->ttm_resv)
+		reservation_object_unlock(&bo->ttm_resv);
 
+error:
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 	spin_unlock(&glob->lru_lock);
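
For reference, a condensed sketch of how the start of ttm_bo_cleanup_refs_or_queue() reads with this patch applied. This is an editorial summary assembled from the hunks above, not the literal file contents: the elided parts (marked "...") and the numbered comments are not in the kernel source, and the surrounding TTM declarations are assumed from this tree.

static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	/* (1) Individualize the possibly shared reservation object first,
	 *     before any attempt to reserve the BO.
	 */
	ret = ttm_bo_individualize_resv(bo);
	if (ret) {
		/* Allocation failed: wait for the BO to go idle, then queue it
		 * on the delayed-destroy list via the error label below.
		 */
		reservation_object_wait_timeout_rcu(bo->resv, true, false,
						    30 * HZ);
		spin_lock(&glob->lru_lock);
		goto error;
	}

	/* (2) Only now try to reserve; with shared reservation objects this
	 *     can fail even on destroyed BOs, which is now harmless because
	 *     individualization has already happened.
	 */
	spin_lock(&glob->lru_lock);
	ret = __ttm_bo_reserve(bo, false, true, NULL);
	if (!ret) {
		/* (3) If every fence on the individualized object has already
		 *     signaled, release the BO's memory immediately.
		 */
		if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
			ttm_bo_del_from_lru(bo);
			spin_unlock(&glob->lru_lock);
			if (bo->resv != &bo->ttm_resv)
				reservation_object_unlock(&bo->ttm_resv);
			ttm_bo_cleanup_memtype_use(bo);
			return;
		}
		/* ... otherwise flush fences, fix up the LRU and unreserve ... */
	}
	if (bo->resv != &bo->ttm_resv)
		reservation_object_unlock(&bo->ttm_resv);

error:
	/* (4) Queue the BO for delayed destruction. */
	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);
	/* ... (remainder of the function not shown in this diff) ... */
}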