 drivers/gpu/drm/ttm/ttm_bo.c | 54 +++++++++++++++++++++-------------------------------
 1 file changed, 23 insertions(+), 31 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index da3702135ada..8036b6e189ee 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -524,52 +524,44 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 {
         struct ttm_bo_global *glob = bdev->glob;
-        struct ttm_buffer_object *entry, *nentry;
-        struct list_head *list, *next;
-        int ret;
+        struct ttm_buffer_object *entry = NULL;
+        int ret = 0;
 
         spin_lock(&glob->lru_lock);
-        list_for_each_safe(list, next, &bdev->ddestroy) {
-                entry = list_entry(list, struct ttm_buffer_object, ddestroy);
-                nentry = NULL;
+        if (list_empty(&bdev->ddestroy))
+                goto out_unlock;
 
-                /*
-                 * Protect the next list entry from destruction while we
-                 * unlock the lru_lock.
-                 */
+        entry = list_first_entry(&bdev->ddestroy,
+                struct ttm_buffer_object, ddestroy);
+        kref_get(&entry->list_kref);
+
+        for (;;) {
+                struct ttm_buffer_object *nentry = NULL;
 
-                if (next != &bdev->ddestroy) {
-                        nentry = list_entry(next, struct ttm_buffer_object,
-                                            ddestroy);
+                if (entry->ddestroy.next != &bdev->ddestroy) {
+                        nentry = list_first_entry(&entry->ddestroy,
+                                struct ttm_buffer_object, ddestroy);
                         kref_get(&nentry->list_kref);
                 }
-                kref_get(&entry->list_kref);
 
                 spin_unlock(&glob->lru_lock);
                 ret = ttm_bo_cleanup_refs(entry, remove_all);
                 kref_put(&entry->list_kref, ttm_bo_release_list);
+                entry = nentry;
+
+                if (ret || !entry)
+                        goto out;
 
                 spin_lock(&glob->lru_lock);
-                if (nentry) {
-                        bool next_onlist = !list_empty(next);
-                        spin_unlock(&glob->lru_lock);
-                        kref_put(&nentry->list_kref, ttm_bo_release_list);
-                        spin_lock(&glob->lru_lock);
-                        /*
-                         * Someone might have raced us and removed the
-                         * next entry from the list. We don't bother restarting
-                         * list traversal.
-                         */
-
-                        if (!next_onlist)
-                                break;
-                }
-                if (ret)
+                if (list_empty(&entry->ddestroy))
                         break;
         }
-        ret = !list_empty(&bdev->ddestroy);
-        spin_unlock(&glob->lru_lock);
 
+out_unlock:
+        spin_unlock(&glob->lru_lock);
+out:
+        if (entry)
+                kref_put(&entry->list_kref, ttm_bo_release_list);
         return ret;
 }
 
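The rewritten loop leans on a common kernel idiom: before lru_lock is dropped, both the entry being processed and its successor are pinned with kref_get(), so neither can be freed while the lock is not held, and the walk resumes from the pinned successor once the lock is re-taken. Below is a minimal, self-contained userspace sketch of that idiom only, not TTM code: node, node_get, node_put, walk and list_lock are hypothetical stand-ins for the buffer object, list_kref, kref_get()/kref_put() and lru_lock, and the sketch does not model concurrent unlinking, which the kernel loop detects with the list_empty(&entry->ddestroy) check after re-locking.

/*
 * Minimal userspace sketch (not TTM code) of the "pin before unlock" list
 * walk used in the patch above.  All names here are hypothetical stand-ins.
 * Build with: cc -std=c99 -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *next;      /* singly linked, for brevity */
        int refcount;           /* protected by list_lock in this sketch */
        int id;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;       /* stand-in for bdev->ddestroy */

/* Take a reference; caller must hold list_lock. */
static void node_get(struct node *n)
{
        n->refcount++;
}

/* Drop a reference and free at zero; caller must NOT hold list_lock. */
static void node_put(struct node *n)
{
        pthread_mutex_lock(&list_lock);
        int zero = (--n->refcount == 0);
        pthread_mutex_unlock(&list_lock);
        if (zero) {
                printf("freeing node %d\n", n->id);
                free(n);
        }
}

/* Slow per-node work done with list_lock dropped, like ttm_bo_cleanup_refs(). */
static void process(struct node *n)
{
        printf("processing node %d\n", n->id);
}

/*
 * Walk the list without ever touching an unreferenced node while the lock
 * is dropped.  Unlike the kernel loop, this sketch does not re-check that
 * the entry is still on the list after re-locking.
 */
static void walk(void)
{
        pthread_mutex_lock(&list_lock);
        struct node *entry = head;
        if (!entry) {
                pthread_mutex_unlock(&list_lock);
                return;
        }
        node_get(entry);

        for (;;) {
                struct node *nentry = entry->next;      /* pin the successor... */
                if (nentry)
                        node_get(nentry);               /* ...before unlocking */
                pthread_mutex_unlock(&list_lock);

                process(entry);                         /* lock not held here */
                node_put(entry);                        /* may free the node */
                entry = nentry;
                if (!entry)
                        return;

                pthread_mutex_lock(&list_lock);         /* re-lock to read ->next */
        }
}

int main(void)
{
        /* Build a small list 2 -> 1 -> 0; each node starts with the list's own reference. */
        for (int i = 0; i < 3; i++) {
                struct node *n = calloc(1, sizeof(*n));
                n->id = i;
                n->refcount = 1;
                n->next = head;
                head = n;
        }
        walk();
        /* The list still holds its references here; a real teardown would drop them too. */
        return 0;
}

The point mirrored from the patch is that a reference to the next node is always taken before list_lock is released, and references are only dropped with the lock not held, so node_put() can safely take the lock itself.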