author    Maarten Lankhorst <maarten.lankhorst@canonical.com>  2014-01-21 07:07:31 -0500
committer Maarten Lankhorst <maarten.lankhorst@canonical.com>  2014-09-01 04:16:43 -0400
commit    dd7cfd641228abb2669d8d047d5ec377b1835900 (patch)
tree      3011650dbd99b204025f65afceb92c40f09da5f9 /drivers/gpu/drm/ttm
parent    7040138ff85501931138970663a988f48c0666f0 (diff)
drm/ttm: kill fence_lock
No users are left, kill it off! :D
Conversion to the reservation API is next on the list; after that,
the functionality can be restored with RCU.
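
For reference, the pattern this kills is the locking dance around every
wait (a minimal sketch distilled from the hunks below):

    spin_lock(&bdev->fence_lock);
    ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
    spin_unlock(&bdev->fence_lock);

After this patch the reservation the caller already holds is what keeps
bo->sync_obj stable, so the plain call suffices:

    /* bo is reserved; no fence_lock needed around the wait */
    ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);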
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c           | 75
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c      |  5
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c        |  3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c |  2
4 files changed, 18 insertions(+), 67 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4f1bc948bda0..195386f16ca4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -415,24 +415,20 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
         spin_lock(&glob->lru_lock);
         ret = __ttm_bo_reserve(bo, false, true, false, NULL);
 
-        spin_lock(&bdev->fence_lock);
-        (void) ttm_bo_wait(bo, false, false, true);
-        if (!ret && !bo->sync_obj) {
-                spin_unlock(&bdev->fence_lock);
-                put_count = ttm_bo_del_from_lru(bo);
+        if (!ret) {
+                (void) ttm_bo_wait(bo, false, false, true);
 
-                spin_unlock(&glob->lru_lock);
-                ttm_bo_cleanup_memtype_use(bo);
+                if (!bo->sync_obj) {
+                        put_count = ttm_bo_del_from_lru(bo);
 
-                ttm_bo_list_ref_sub(bo, put_count, true);
+                        spin_unlock(&glob->lru_lock);
+                        ttm_bo_cleanup_memtype_use(bo);
 
-                return;
-        }
-        if (bo->sync_obj)
-                sync_obj = driver->sync_obj_ref(bo->sync_obj);
-        spin_unlock(&bdev->fence_lock);
+                        ttm_bo_list_ref_sub(bo, put_count, true);
 
-        if (!ret) {
+                        return;
+                }
+                sync_obj = driver->sync_obj_ref(bo->sync_obj);
 
                 /*
                  * Make NO_EVICT bos immediately available to
@@ -481,7 +477,6 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
         int put_count;
         int ret;
 
-        spin_lock(&bdev->fence_lock);
         ret = ttm_bo_wait(bo, false, false, true);
 
         if (ret && !no_wait_gpu) {
@@ -493,7 +488,6 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
          * no new sync objects can be attached.
          */
         sync_obj = driver->sync_obj_ref(bo->sync_obj);
-        spin_unlock(&bdev->fence_lock);
 
         __ttm_bo_unreserve(bo);
         spin_unlock(&glob->lru_lock);
@@ -523,11 +517,9 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
                  * remove sync_obj with ttm_bo_wait, the wait should be
                  * finished, and no new wait object should have been added.
                  */
-                spin_lock(&bdev->fence_lock);
                 ret = ttm_bo_wait(bo, false, false, true);
                 WARN_ON(ret);
         }
-        spin_unlock(&bdev->fence_lock);
 
         if (ret || unlikely(list_empty(&bo->ddestroy))) {
                 __ttm_bo_unreserve(bo);
@@ -665,9 +657,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
         struct ttm_placement placement;
         int ret = 0;
 
-        spin_lock(&bdev->fence_lock);
         ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-        spin_unlock(&bdev->fence_lock);
 
         if (unlikely(ret != 0)) {
                 if (ret != -ERESTARTSYS) {
@@ -958,7 +948,6 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 {
         int ret = 0;
         struct ttm_mem_reg mem;
-        struct ttm_bo_device *bdev = bo->bdev;
 
         lockdep_assert_held(&bo->resv->lock.base);
 
@@ -967,9 +956,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
          * Have the driver move function wait for idle when necessary,
          * instead of doing it here.
          */
-        spin_lock(&bdev->fence_lock);
         ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-        spin_unlock(&bdev->fence_lock);
         if (ret)
                 return ret;
         mem.num_pages = bo->num_pages;
@@ -1459,7 +1446,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
         bdev->glob = glob;
         bdev->need_dma32 = need_dma32;
         bdev->val_seq = 0;
-        spin_lock_init(&bdev->fence_lock);
         mutex_lock(&glob->device_list_mutex);
         list_add_tail(&bdev->device_list, &glob->device_list);
         mutex_unlock(&glob->device_list_mutex);
@@ -1517,7 +1503,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
                 bool lazy, bool interruptible, bool no_wait)
 {
         struct ttm_bo_driver *driver = bo->bdev->driver;
-        struct ttm_bo_device *bdev = bo->bdev;
         void *sync_obj;
         int ret = 0;
 
@@ -1526,53 +1511,33 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
         if (likely(bo->sync_obj == NULL))
                 return 0;
 
-        while (bo->sync_obj) {
-
+        if (bo->sync_obj) {
                 if (driver->sync_obj_signaled(bo->sync_obj)) {
-                        void *tmp_obj = bo->sync_obj;
-                        bo->sync_obj = NULL;
+                        driver->sync_obj_unref(&bo->sync_obj);
                         clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-                        spin_unlock(&bdev->fence_lock);
-                        driver->sync_obj_unref(&tmp_obj);
-                        spin_lock(&bdev->fence_lock);
-                        continue;
+                        return 0;
                 }
 
                 if (no_wait)
                         return -EBUSY;
 
                 sync_obj = driver->sync_obj_ref(bo->sync_obj);
-                spin_unlock(&bdev->fence_lock);
                 ret = driver->sync_obj_wait(sync_obj,
                                             lazy, interruptible);
-                if (unlikely(ret != 0)) {
-                        driver->sync_obj_unref(&sync_obj);
-                        spin_lock(&bdev->fence_lock);
-                        return ret;
-                }
-                spin_lock(&bdev->fence_lock);
-                if (likely(bo->sync_obj == sync_obj)) {
-                        void *tmp_obj = bo->sync_obj;
-                        bo->sync_obj = NULL;
+
+                if (likely(ret == 0)) {
                         clear_bit(TTM_BO_PRIV_FLAG_MOVING,
                                   &bo->priv_flags);
-                        spin_unlock(&bdev->fence_lock);
-                        driver->sync_obj_unref(&sync_obj);
-                        driver->sync_obj_unref(&tmp_obj);
-                        spin_lock(&bdev->fence_lock);
-                } else {
-                        spin_unlock(&bdev->fence_lock);
-                        driver->sync_obj_unref(&sync_obj);
-                        spin_lock(&bdev->fence_lock);
+                        driver->sync_obj_unref(&bo->sync_obj);
                 }
+                driver->sync_obj_unref(&sync_obj);
         }
-        return 0;
+        return ret;
 }
 EXPORT_SYMBOL(ttm_bo_wait);
 
 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 {
-        struct ttm_bo_device *bdev = bo->bdev;
         int ret = 0;
 
         /*
@@ -1582,9 +1547,7 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
         ret = ttm_bo_reserve(bo, true, no_wait, false, NULL);
         if (unlikely(ret != 0))
                 return ret;
-        spin_lock(&bdev->fence_lock);
         ret = ttm_bo_wait(bo, false, true, no_wait);
-        spin_unlock(&bdev->fence_lock);
         if (likely(ret == 0))
                 atomic_inc(&bo->cpu_writers);
         ttm_bo_unreserve(bo);
@@ -1641,9 +1604,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
          * Wait for GPU, then move to system cached.
          */
 
-        spin_lock(&bo->bdev->fence_lock);
         ret = ttm_bo_wait(bo, false, false, false);
-        spin_unlock(&bo->bdev->fence_lock);
 
         if (unlikely(ret != 0))
                 goto out;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 30e5d90cb7bc..495aebf0f9c3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -466,12 +466,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
         drm_vma_node_reset(&fbo->vma_node);
         atomic_set(&fbo->cpu_writers, 0);
 
-        spin_lock(&bdev->fence_lock);
         if (bo->sync_obj)
                 fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
         else
                 fbo->sync_obj = NULL;
-        spin_unlock(&bdev->fence_lock);
         kref_init(&fbo->list_kref);
         kref_init(&fbo->kref);
         fbo->destroy = &ttm_transfered_destroy;
@@ -657,7 +655,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
         struct ttm_buffer_object *ghost_obj;
         void *tmp_obj = NULL;
 
-        spin_lock(&bdev->fence_lock);
         if (bo->sync_obj) {
                 tmp_obj = bo->sync_obj;
                 bo->sync_obj = NULL;
@@ -665,7 +662,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
         bo->sync_obj = driver->sync_obj_ref(sync_obj);
         if (evict) {
                 ret = ttm_bo_wait(bo, false, false, false);
-                spin_unlock(&bdev->fence_lock);
                 if (tmp_obj)
                         driver->sync_obj_unref(&tmp_obj);
                 if (ret)
@@ -688,7 +684,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
          */
 
         set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-        spin_unlock(&bdev->fence_lock);
         if (tmp_obj)
                 driver->sync_obj_unref(&tmp_obj);
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 0ce48e5a9cb4..d05437f219e9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -45,10 +45,8 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
                                 struct vm_area_struct *vma,
                                 struct vm_fault *vmf)
 {
-        struct ttm_bo_device *bdev = bo->bdev;
         int ret = 0;
 
-        spin_lock(&bdev->fence_lock);
         if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
                 goto out_unlock;
 
@@ -82,7 +80,6 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
                 VM_FAULT_NOPAGE;
 
 out_unlock:
-        spin_unlock(&bdev->fence_lock);
         return ret;
 }
 
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index e8dac8758528..0fbbbbd67afc 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -217,7 +217,6 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
         glob = bo->glob;
 
         spin_lock(&glob->lru_lock);
-        spin_lock(&bdev->fence_lock);
 
         list_for_each_entry(entry, list, head) {
                 bo = entry->bo;
@@ -227,7 +226,6 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
                 __ttm_bo_unreserve(bo);
                 entry->reserved = false;
         }
-        spin_unlock(&bdev->fence_lock);
         spin_unlock(&glob->lru_lock);
         if (ticket)
                 ww_acquire_fini(ticket);