Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 83
1 file changed, 71 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cb4cf7ef4d1e..db809e034cc4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -442,6 +442,43 @@ out_err:
 }
 
 /**
+ * Call with bo::reserved held and with the lru lock held.
+ * Will release GPU memory type usage on destruction.
+ * This is the place to put in driver specific hooks.
+ * Will release the bo::reserved lock and the
+ * lru lock on exit.
+ */
+
+static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
+{
+        struct ttm_bo_global *glob = bo->glob;
+
+        if (bo->ttm) {
+
+                /**
+                 * Release the lru_lock, since we don't want to have
+                 * an atomic requirement on ttm_tt[unbind|destroy].
+                 */
+
+                spin_unlock(&glob->lru_lock);
+                ttm_tt_unbind(bo->ttm);
+                ttm_tt_destroy(bo->ttm);
+                bo->ttm = NULL;
+                spin_lock(&glob->lru_lock);
+        }
+
+        if (bo->mem.mm_node) {
+                drm_mm_put_block(bo->mem.mm_node);
+                bo->mem.mm_node = NULL;
+        }
+
+        atomic_set(&bo->reserved, 0);
+        wake_up_all(&bo->event_queue);
+        spin_unlock(&glob->lru_lock);
+}
+
+
+/**
  * If bo idle, remove from delayed- and lru lists, and unref.
  * If not idle, and already on delayed list, do nothing.
  * If not idle, and not on delayed list, put on delayed list,
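The first hunk factors the teardown of a buffer object's memory-type usage into the new helper ttm_bo_cleanup_memtype_use(), with a deliberately asymmetric locking contract: the caller enters with bo::reserved and the global LRU lock held, and the helper releases both before returning, dropping the LRU lock temporarily around ttm_tt_unbind()/ttm_tt_destroy() so those calls need not run atomically. A minimal caller-side sketch of that contract follows; example_destroy_idle() is a hypothetical function, not part of the patch (it would have to live in ttm_bo.c, since the helper is static):

/* Hypothetical caller illustrating the locking contract; not in the patch. */
static void example_destroy_idle(struct ttm_buffer_object *bo)
{
        struct ttm_bo_global *glob = bo->glob;
        int ret;

        spin_lock(&glob->lru_lock);
        ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
        if (likely(ret == 0)) {
                /* Releases bo::reserved and glob->lru_lock on exit. */
                ttm_bo_cleanup_memtype_use(bo);
        } else {
                /* -EBUSY: someone else holds the reservation; back off. */
                spin_unlock(&glob->lru_lock);
        }
}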
@@ -456,6 +493,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
         int ret;
 
         spin_lock(&bo->lock);
+retry:
         (void) ttm_bo_wait(bo, false, false, !remove_all);
 
         if (!bo->sync_obj) {
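The second hunk adds only the retry label, but it turns the ttm_bo_wait() call at the top of ttm_bo_cleanup_refs() into the head of a loop. Because the wait is invoked with no_wait = !remove_all, the forced-destruction path (remove_all == true) blocks until the GPU is idle, and the goto retry in the next hunk sends it back here when a racing thread has made the object busy again. In outline (an illustrative condensation, not the patch text):

/* Condensed shape of ttm_bo_cleanup_refs() after this patch. */
spin_lock(&bo->lock);
retry:
(void) ttm_bo_wait(bo, false, false, !remove_all); /* blocks iff remove_all */

if (!bo->sync_obj) {
        /* reserve under the LRU lock and re-check sync_obj:
         *   still clean            -> ttm_bo_cleanup_memtype_use(), done
         *   busy again, remove_all -> goto retry (wait once more)
         *   contended or busy      -> goto requeue
         */
}
/* requeue: leave the bo on the delayed-destroy list for a later pass */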
@@ -464,31 +502,52 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
                 spin_unlock(&bo->lock);
 
                 spin_lock(&glob->lru_lock);
-                put_count = ttm_bo_del_from_lru(bo);
+                ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);
+
+                /**
+                 * Someone else has the object reserved. Bail and retry.
+                 */
 
-                ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
-                BUG_ON(ret);
-                if (bo->ttm)
-                        ttm_tt_unbind(bo->ttm);
+                if (unlikely(ret == -EBUSY)) {
+                        spin_unlock(&glob->lru_lock);
+                        spin_lock(&bo->lock);
+                        goto requeue;
+                }
+
+                /**
+                 * We can re-check for sync object without taking
+                 * the bo::lock since setting the sync object requires
+                 * also bo::reserved. A busy object at this point may
+                 * be caused by another thread starting an accelerated
+                 * eviction.
+                 */
+
+                if (unlikely(bo->sync_obj)) {
+                        atomic_set(&bo->reserved, 0);
+                        wake_up_all(&bo->event_queue);
+                        spin_unlock(&glob->lru_lock);
+                        spin_lock(&bo->lock);
+                        if (remove_all)
+                                goto retry;
+                        else
+                                goto requeue;
+                }
+
+                put_count = ttm_bo_del_from_lru(bo);
 
                 if (!list_empty(&bo->ddestroy)) {
                         list_del_init(&bo->ddestroy);
                         ++put_count;
                 }
-                if (bo->mem.mm_node) {
-                        drm_mm_put_block(bo->mem.mm_node);
-                        bo->mem.mm_node = NULL;
-                }
-                spin_unlock(&glob->lru_lock);
 
-                atomic_set(&bo->reserved, 0);
+                ttm_bo_cleanup_memtype_use(bo);
 
                 while (put_count--)
                         kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
                 return 0;
         }
-
+requeue:
         spin_lock(&glob->lru_lock);
         if (list_empty(&bo->ddestroy)) {
                 void *sync_obj = bo->sync_obj;
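The correctness of the third hunk rests on one TTM invariant: a sync object can only be attached to a buffer by a thread that holds bo::reserved. Once ttm_bo_reserve_locked() succeeds under the LRU lock, bo->sync_obj can therefore be re-checked without retaking bo->lock; a non-NULL value means another thread started an accelerated eviction in the window before the reservation was taken. The pattern in isolation, as a sketch (try_takedown() is a hypothetical name; the body mirrors the patch under that invariant):

/*
 * Sketch: reserve-then-recheck. Assumes the TTM rule that
 * bo->sync_obj is only (re)assigned while bo::reserved is held,
 * so the read below cannot race with a new assignment.
 */
static bool try_takedown(struct ttm_buffer_object *bo)
{
        struct ttm_bo_global *glob = bo->glob;

        spin_lock(&glob->lru_lock);
        if (ttm_bo_reserve_locked(bo, false, true, false, 0) != 0) {
                spin_unlock(&glob->lru_lock);
                return false;           /* contended: try again later */
        }
        if (bo->sync_obj) {             /* safe: we hold the reservation */
                atomic_set(&bo->reserved, 0);
                wake_up_all(&bo->event_queue);
                spin_unlock(&glob->lru_lock);
                return false;           /* became busy again */
        }
        ttm_bo_cleanup_memtype_use(bo); /* drops reservation + lru_lock */
        return true;
}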