Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c         | 203
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_manager.c |  10
2 files changed, 113 insertions, 100 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 9f2eed520fc..a1cb783c713 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -434,132 +434,144 @@ out_err:
 }
 
 /**
- * Call bo::reserved and with the lru lock held.
+ * Call bo::reserved.
  * Will release GPU memory type usage on destruction.
- * This is the place to put in driver specific hooks.
- * Will release the bo::reserved lock and the
- * lru lock on exit.
+ * This is the place to put in driver specific hooks to release
+ * driver private resources.
+ * Will release the bo::reserved lock.
  */
 
 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 {
-	struct ttm_bo_global *glob = bo->glob;
-
 	if (bo->ttm) {
-
-		/**
-		 * Release the lru_lock, since we don't want to have
-		 * an atomic requirement on ttm_tt[unbind|destroy].
-		 */
-
-		spin_unlock(&glob->lru_lock);
 		ttm_tt_unbind(bo->ttm);
 		ttm_tt_destroy(bo->ttm);
 		bo->ttm = NULL;
-		spin_lock(&glob->lru_lock);
 	}
 
-	ttm_bo_mem_put_locked(bo, &bo->mem);
+	ttm_bo_mem_put(bo, &bo->mem);
 
 	atomic_set(&bo->reserved, 0);
 	wake_up_all(&bo->event_queue);
-	spin_unlock(&glob->lru_lock);
 }
 
-
-/**
- * If bo idle, remove from delayed- and lru lists, and unref.
- * If not idle, and already on delayed list, do nothing.
- * If not idle, and not on delayed list, put on delayed list,
- * up the list_kref and schedule a delayed list check.
- */
-
-static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
+static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
-	struct ttm_bo_driver *driver = bdev->driver;
+	struct ttm_bo_driver *driver;
+	void *sync_obj;
+	void *sync_obj_arg;
+	int put_count;
 	int ret;
 
 	spin_lock(&bo->lock);
-retry:
-	(void) ttm_bo_wait(bo, false, false, !remove_all);
-
+	(void) ttm_bo_wait(bo, false, false, true);
 	if (!bo->sync_obj) {
-		int put_count;
-
-		spin_unlock(&bo->lock);
 
 		spin_lock(&glob->lru_lock);
-		ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);
 
 		/**
-		 * Someone else has the object reserved. Bail and retry.
+		 * Lock inversion between bo::reserve and bo::lock here,
+		 * but that's OK, since we're only trylocking.
 		 */
 
-		if (unlikely(ret == -EBUSY)) {
-			spin_unlock(&glob->lru_lock);
-			spin_lock(&bo->lock);
-			goto requeue;
-		}
-
-		/**
-		 * We can re-check for sync object without taking
-		 * the bo::lock since setting the sync object requires
-		 * also bo::reserved. A busy object at this point may
-		 * be caused by another thread starting an accelerated
-		 * eviction.
-		 */
+		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
 
-		if (unlikely(bo->sync_obj)) {
-			atomic_set(&bo->reserved, 0);
-			wake_up_all(&bo->event_queue);
-			spin_unlock(&glob->lru_lock);
-			spin_lock(&bo->lock);
-			if (remove_all)
-				goto retry;
-			else
-				goto requeue;
-		}
+		if (unlikely(ret == -EBUSY))
+			goto queue;
 
+		spin_unlock(&bo->lock);
 		put_count = ttm_bo_del_from_lru(bo);
 
-		if (!list_empty(&bo->ddestroy)) {
-			list_del_init(&bo->ddestroy);
-			++put_count;
-		}
-
+		spin_unlock(&glob->lru_lock);
 		ttm_bo_cleanup_memtype_use(bo);
 
 		while (put_count--)
 			kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
-		return 0;
+		return;
+	} else {
+		spin_lock(&glob->lru_lock);
 	}
-requeue:
+queue:
+	sync_obj = bo->sync_obj;
+	sync_obj_arg = bo->sync_obj_arg;
+	driver = bdev->driver;
+
+	kref_get(&bo->list_kref);
+	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+	spin_unlock(&glob->lru_lock);
+	spin_unlock(&bo->lock);
+
+	if (sync_obj)
+		driver->sync_obj_flush(sync_obj, sync_obj_arg);
+	schedule_delayed_work(&bdev->wq,
+			      ((HZ / 100) < 1) ? 1 : HZ / 100);
+}
+
+/**
+ * function ttm_bo_cleanup_refs
+ * If bo idle, remove from delayed- and lru lists, and unref.
+ * If not idle, do nothing.
+ *
+ * @interruptible         Any sleeps should occur interruptibly.
+ * @no_wait_reserve       Never wait for reserve. Return -EBUSY instead.
+ * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
+ */
+
+static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
+			       bool interruptible,
+			       bool no_wait_reserve,
+			       bool no_wait_gpu)
+{
+	struct ttm_bo_global *glob = bo->glob;
+	int put_count;
+	int ret = 0;
+
+retry:
+	spin_lock(&bo->lock);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
+	spin_unlock(&bo->lock);
+
+	if (unlikely(ret != 0))
+		return ret;
+
 	spin_lock(&glob->lru_lock);
-	if (list_empty(&bo->ddestroy)) {
-		void *sync_obj = bo->sync_obj;
-		void *sync_obj_arg = bo->sync_obj_arg;
+	ret = ttm_bo_reserve_locked(bo, interruptible,
+				    no_wait_reserve, false, 0);
 
-		kref_get(&bo->list_kref);
-		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+	if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) {
 		spin_unlock(&glob->lru_lock);
-		spin_unlock(&bo->lock);
+		return ret;
+	}
 
-		if (sync_obj)
-			driver->sync_obj_flush(sync_obj, sync_obj_arg);
-		schedule_delayed_work(&bdev->wq,
-				      ((HZ / 100) < 1) ? 1 : HZ / 100);
-		ret = 0;
+	/**
+	 * We can re-check for sync object without taking
+	 * the bo::lock since setting the sync object requires
+	 * also bo::reserved. A busy object at this point may
+	 * be caused by another thread recently starting an accelerated
+	 * eviction.
+	 */
 
-	} else {
+	if (unlikely(bo->sync_obj)) {
+		atomic_set(&bo->reserved, 0);
+		wake_up_all(&bo->event_queue);
 		spin_unlock(&glob->lru_lock);
-		spin_unlock(&bo->lock);
-		ret = -EBUSY;
+		goto retry;
 	}
 
-	return ret;
+	put_count = ttm_bo_del_from_lru(bo);
+	list_del_init(&bo->ddestroy);
+	++put_count;
+
+	spin_unlock(&glob->lru_lock);
+	ttm_bo_cleanup_memtype_use(bo);
+
+	while (put_count--)
+		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+
+	return 0;
 }
 
 /**
@@ -591,7 +603,8 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 		}
 
 		spin_unlock(&glob->lru_lock);
-		ret = ttm_bo_cleanup_refs(entry, remove_all);
+		ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
+					  !remove_all);
 		kref_put(&entry->list_kref, ttm_bo_release_list);
 		entry = nentry;
 
@@ -634,7 +647,7 @@ static void ttm_bo_release(struct kref *kref)
 		bo->vm_node = NULL;
 	}
 	write_unlock(&bdev->vm_lock);
-	ttm_bo_cleanup_refs(bo, false);
+	ttm_bo_cleanup_refs_or_queue(bo);
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 	write_lock(&bdev->vm_lock);
 }
@@ -742,6 +755,18 @@ retry:
 	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
 	kref_get(&bo->list_kref);
 
+	if (!list_empty(&bo->ddestroy)) {
+		spin_unlock(&glob->lru_lock);
+		ret = ttm_bo_cleanup_refs(bo, interruptible,
+					  no_wait_reserve, no_wait_gpu);
+		kref_put(&bo->list_kref, ttm_bo_release_list);
+
+		if (likely(ret == 0 || ret == -ERESTARTSYS))
+			return ret;
+
+		goto retry;
+	}
+
 	ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
 
 	if (unlikely(ret == -EBUSY)) {
@@ -784,15 +809,6 @@ void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
 }
 EXPORT_SYMBOL(ttm_bo_mem_put);
 
-void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
-{
-	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
-
-	if (mem->mm_node)
-		(*man->func->put_node_locked)(man, mem);
-}
-EXPORT_SYMBOL(ttm_bo_mem_put_locked);
-
 /**
  * Repeatedly evict memory from the LRU for @mem_type until we create enough
  * space, or we've evicted everything and there isn't enough space.
@@ -1774,6 +1790,13 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 				      struct ttm_buffer_object, swap);
 		kref_get(&bo->list_kref);
 
+		if (!list_empty(&bo->ddestroy)) {
+			spin_unlock(&glob->lru_lock);
+			(void) ttm_bo_cleanup_refs(bo, false, false, false);
+			kref_put(&bo->list_kref, ttm_bo_release_list);
+			continue;
+		}
+
 		/**
 		 * Reserve buffer. Since we unlock while sleeping, we need
 		 * to re-check that nobody removed us from the swap-list while
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 35c97b20bda..7410c190c89 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -90,15 +90,6 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
 	}
 }
 
-static void ttm_bo_man_put_node_locked(struct ttm_mem_type_manager *man,
-				       struct ttm_mem_reg *mem)
-{
-	if (mem->mm_node) {
-		drm_mm_put_block(mem->mm_node);
-		mem->mm_node = NULL;
-	}
-}
-
 static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
 			   unsigned long p_size)
 {
@@ -152,7 +143,6 @@ const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
 	ttm_bo_man_takedown,
 	ttm_bo_man_get_node,
 	ttm_bo_man_put_node,
-	ttm_bo_man_put_node_locked,
 	ttm_bo_man_debug
 };
 EXPORT_SYMBOL(ttm_bo_manager_func);