Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c            55
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c        7
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c          6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c   7
4 files changed, 38 insertions, 37 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 5d8750830dc3..d93c73b1c471 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -427,11 +427,9 @@ moved:
 	}
 
 	if (bo->mem.mm_node) {
-		spin_lock(&bo->lock);
 		bo->offset = (bo->mem.start << PAGE_SHIFT) +
 		    bdev->man[bo->mem.mem_type].gpu_offset;
 		bo->cur_placement = bo->mem.placement;
-		spin_unlock(&bo->lock);
 	} else
 		bo->offset = 0;
 
@@ -485,14 +483,14 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	int put_count;
 	int ret;
 
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	(void) ttm_bo_wait(bo, false, false, true);
 	if (!bo->sync_obj) {
 
 		spin_lock(&glob->lru_lock);
 
 		/**
-		 * Lock inversion between bo::reserve and bo::lock here,
+		 * Lock inversion between bo:reserve and bdev::fence_lock here,
 		 * but that's OK, since we're only trylocking.
 		 */
 
@@ -501,7 +499,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 		if (unlikely(ret == -EBUSY))
 			goto queue;
 
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		put_count = ttm_bo_del_from_lru(bo);
 
 		spin_unlock(&glob->lru_lock);
@@ -522,7 +520,7 @@ queue:
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 	spin_unlock(&glob->lru_lock);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 
 	if (sync_obj) {
 		driver->sync_obj_flush(sync_obj, sync_obj_arg);
@@ -547,14 +545,15 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 			       bool no_wait_reserve,
 			       bool no_wait_gpu)
 {
+	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
 	int put_count;
 	int ret = 0;
 
 retry:
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 
 	if (unlikely(ret != 0))
 		return ret;
@@ -707,9 +706,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	struct ttm_placement placement;
 	int ret = 0;
 
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 
 	if (unlikely(ret != 0)) {
 		if (ret != -ERESTARTSYS) {
@@ -1044,6 +1043,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 {
 	int ret = 0;
 	struct ttm_mem_reg mem;
+	struct ttm_bo_device *bdev = bo->bdev;
 
 	BUG_ON(!atomic_read(&bo->reserved));
 
@@ -1052,9 +1052,9 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	 * Have the driver move function wait for idle when necessary,
 	 * instead of doing it here.
 	 */
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 	if (ret)
 		return ret;
 	mem.num_pages = bo->num_pages;
@@ -1171,7 +1171,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	}
 	bo->destroy = destroy;
 
-	spin_lock_init(&bo->lock);
 	kref_init(&bo->kref);
 	kref_init(&bo->list_kref);
 	atomic_set(&bo->cpu_writers, 0);
@@ -1535,7 +1534,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	bdev->dev_mapping = NULL;
 	bdev->glob = glob;
 	bdev->need_dma32 = need_dma32;
-
+	spin_lock_init(&bdev->fence_lock);
 	mutex_lock(&glob->device_list_mutex);
 	list_add_tail(&bdev->device_list, &glob->device_list);
 	mutex_unlock(&glob->device_list_mutex);
@@ -1659,6 +1658,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 		bool lazy, bool interruptible, bool no_wait)
 {
 	struct ttm_bo_driver *driver = bo->bdev->driver;
+	struct ttm_bo_device *bdev = bo->bdev;
 	void *sync_obj;
 	void *sync_obj_arg;
 	int ret = 0;
@@ -1672,9 +1672,9 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 		void *tmp_obj = bo->sync_obj;
 		bo->sync_obj = NULL;
 		clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		driver->sync_obj_unref(&tmp_obj);
-		spin_lock(&bo->lock);
+		spin_lock(&bdev->fence_lock);
 		continue;
 	}
 
@@ -1683,29 +1683,29 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 		sync_obj_arg = bo->sync_obj_arg;
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
 					    lazy, interruptible);
 		if (unlikely(ret != 0)) {
 			driver->sync_obj_unref(&sync_obj);
-			spin_lock(&bo->lock);
+			spin_lock(&bdev->fence_lock);
 			return ret;
 		}
-		spin_lock(&bo->lock);
+		spin_lock(&bdev->fence_lock);
 		if (likely(bo->sync_obj == sync_obj &&
 			   bo->sync_obj_arg == sync_obj_arg)) {
 			void *tmp_obj = bo->sync_obj;
 			bo->sync_obj = NULL;
 			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
 				  &bo->priv_flags);
-			spin_unlock(&bo->lock);
+			spin_unlock(&bdev->fence_lock);
 			driver->sync_obj_unref(&sync_obj);
 			driver->sync_obj_unref(&tmp_obj);
-			spin_lock(&bo->lock);
+			spin_lock(&bdev->fence_lock);
 		} else {
-			spin_unlock(&bo->lock);
+			spin_unlock(&bdev->fence_lock);
 			driver->sync_obj_unref(&sync_obj);
-			spin_lock(&bo->lock);
+			spin_lock(&bdev->fence_lock);
 		}
 	}
 	return 0;
@@ -1714,6 +1714,7 @@ EXPORT_SYMBOL(ttm_bo_wait);
 
 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 {
+	struct ttm_bo_device *bdev = bo->bdev;
 	int ret = 0;
 
 	/*
@@ -1723,9 +1724,9 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
 	if (unlikely(ret != 0))
 		return ret;
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, true, no_wait);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 	if (likely(ret == 0))
 		atomic_inc(&bo->cpu_writers);
 	ttm_bo_unreserve(bo);
@@ -1797,9 +1798,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 	 * Wait for GPU, then move to system cached.
 	 */
 
-	spin_lock(&bo->lock);
+	spin_lock(&bo->bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, false, false);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bo->bdev->fence_lock);
 
 	if (unlikely(ret != 0))
 		goto out;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 3106d5bcce32..4b75133d6606 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -337,7 +337,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	 * TODO: Explicit member copy would probably be better here.
 	 */
 
-	spin_lock_init(&fbo->lock);
 	init_waitqueue_head(&fbo->event_queue);
 	INIT_LIST_HEAD(&fbo->ddestroy);
 	INIT_LIST_HEAD(&fbo->lru);
@@ -520,7 +519,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	struct ttm_buffer_object *ghost_obj;
 	void *tmp_obj = NULL;
 
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	if (bo->sync_obj) {
 		tmp_obj = bo->sync_obj;
 		bo->sync_obj = NULL;
@@ -529,7 +528,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	bo->sync_obj_arg = sync_obj_arg;
 	if (evict) {
 		ret = ttm_bo_wait(bo, false, false, false);
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		if (tmp_obj)
 			driver->sync_obj_unref(&tmp_obj);
 		if (ret)
@@ -552,7 +551,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 		 */
 
 		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		if (tmp_obj)
 			driver->sync_obj_unref(&tmp_obj);
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index fe6cb77899f4..8dd446cb778e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -118,17 +118,17 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * move.
 	 */
 
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
 		ret = ttm_bo_wait(bo, false, true, false);
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		if (unlikely(ret != 0)) {
 			retval = (ret != -ERESTARTSYS) ?
 			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
 			goto out_unlock;
 		}
 	} else
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 
 
 	ret = ttm_mem_io_reserve(bdev, &bo->mem);
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 7dcc6470e2f5..c3a2100bace6 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -203,14 +203,15 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
 
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
-		struct ttm_bo_driver *driver = bo->bdev->driver;
+		struct ttm_bo_device *bdev = bo->bdev;
+		struct ttm_bo_driver *driver = bdev->driver;
 		void *old_sync_obj;
 
-		spin_lock(&bo->lock);
+		spin_lock(&bdev->fence_lock);
 		old_sync_obj = bo->sync_obj;
 		bo->sync_obj = driver->sync_obj_ref(sync_obj);
 		bo->sync_obj_arg = entry->new_sync_obj_arg;
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		ttm_bo_unreserve(bo);
 		entry->reserved = false;
 		if (old_sync_obj)
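
For reference, a minimal sketch (not part of the patch) of the wait pattern the call sites above are converted to: fence waits are serialized by the per-device bdev->fence_lock rather than the removed per-object bo->lock. The wrapper name example_wait_idle is hypothetical; the ttm_bo_wait() signature and header paths are taken from the tree this patch applies to.

#include <linux/spinlock.h>
#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_bo_driver.h"

/* Hypothetical helper: wait for a buffer object to go idle, taking the
 * per-device fence_lock around ttm_bo_wait() the same way the converted
 * call sites above do. */
static int example_wait_idle(struct ttm_buffer_object *bo,
			     bool interruptible, bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);

	return ret;
}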