 -rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c  | 12
 -rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c |  4
 -rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h |  4
 -rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c           | 55
 -rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c      |  7
 -rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c        |  6
 -rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c |  7
 -rw-r--r--  include/drm/ttm/ttm_bo_api.h           |  6
 -rw-r--r--  include/drm/ttm/ttm_bo_driver.h        |  3
 9 files changed, 53 insertions(+), 51 deletions(-)
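In short, the patch retires the per-buffer-object spinlock (bo->lock) in favour of a single per-device lock (bdev->fence_lock) guarding the synchronization members (sync_obj, sync_obj_arg) of every bo on the device. A minimal illustrative sketch of the calling pattern after the change (not part of the patch; the pre-patch equivalent is shown in the comment):

/*
 * Sketch only. Before this patch the same wait took the per-bo lock:
 *
 *	spin_lock(&bo->lock);
 *	(void) ttm_bo_wait(bo, false, false, false);
 *	spin_unlock(&bo->lock);
 */
#include "ttm/ttm_bo_driver.h"

static void wait_idle_sketch(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	/* One spinlock per device now guards every bo's sync_obj members. */
	spin_lock(&bdev->fence_lock);
	(void) ttm_bo_wait(bo, false, false, false); /* (bo, lazy, intr, no_wait) */
	spin_unlock(&bdev->fence_lock);
}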
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 9a1fdcf400c2..1f2301d26c0a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -234,10 +234,10 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
 		if (likely(fence)) {
 			struct nouveau_fence *prev_fence;
 
-			spin_lock(&nvbo->bo.lock);
+			spin_lock(&nvbo->bo.bdev->fence_lock);
 			prev_fence = nvbo->bo.sync_obj;
 			nvbo->bo.sync_obj = nouveau_fence_ref(fence);
-			spin_unlock(&nvbo->bo.lock);
+			spin_unlock(&nvbo->bo.bdev->fence_lock);
 			nouveau_fence_unref((void *)&prev_fence);
 		}
 
@@ -557,9 +557,9 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
 			data |= r->vor;
 		}
 
-		spin_lock(&nvbo->bo.lock);
+		spin_lock(&nvbo->bo.bdev->fence_lock);
 		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
-		spin_unlock(&nvbo->bo.lock);
+		spin_unlock(&nvbo->bo.bdev->fence_lock);
 		if (ret) {
 			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
 			break;
@@ -791,9 +791,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 	}
 
 	if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
-		spin_lock(&nvbo->bo.lock);
+		spin_lock(&nvbo->bo.bdev->fence_lock);
 		ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
-		spin_unlock(&nvbo->bo.lock);
+		spin_unlock(&nvbo->bo.bdev->fence_lock);
 	} else {
 		ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
 		if (ret == 0)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1d067743fee0..e939cb6a91cc 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -369,11 +369,11 @@ void radeon_bo_list_fence(struct list_head *head, void *fence)
 
 	list_for_each_entry(lobj, head, list) {
 		bo = lobj->bo;
-		spin_lock(&bo->tbo.lock);
+		spin_lock(&bo->tbo.bdev->fence_lock);
 		old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
 		bo->tbo.sync_obj = radeon_fence_ref(fence);
 		bo->tbo.sync_obj_arg = NULL;
-		spin_unlock(&bo->tbo.lock);
+		spin_unlock(&bo->tbo.bdev->fence_lock);
 		if (old_fence) {
 			radeon_fence_unref(&old_fence);
 		}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index d143702b244a..fd536751f865 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -126,12 +126,12 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
 	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
 	if (unlikely(r != 0))
 		return r;
-	spin_lock(&bo->tbo.lock);
+	spin_lock(&bo->tbo.bdev->fence_lock);
 	if (mem_type)
 		*mem_type = bo->tbo.mem.mem_type;
 	if (bo->tbo.sync_obj)
 		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
-	spin_unlock(&bo->tbo.lock);
+	spin_unlock(&bo->tbo.bdev->fence_lock);
 	ttm_bo_unreserve(&bo->tbo);
 	return r;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 5d8750830dc3..d93c73b1c471 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -427,11 +427,9 @@ moved:
 	}
 
 	if (bo->mem.mm_node) {
-		spin_lock(&bo->lock);
 		bo->offset = (bo->mem.start << PAGE_SHIFT) +
 		    bdev->man[bo->mem.mem_type].gpu_offset;
 		bo->cur_placement = bo->mem.placement;
-		spin_unlock(&bo->lock);
 	} else
 		bo->offset = 0;
 
@@ -485,14 +483,14 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	int put_count;
 	int ret;
 
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	(void) ttm_bo_wait(bo, false, false, true);
 	if (!bo->sync_obj) {
 
 		spin_lock(&glob->lru_lock);
 
 		/**
-		 * Lock inversion between bo::reserve and bo::lock here,
+		 * Lock inversion between bo:reserve and bdev::fence_lock here,
 		 * but that's OK, since we're only trylocking.
 		 */
 
@@ -501,7 +499,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 		if (unlikely(ret == -EBUSY))
 			goto queue;
 
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		put_count = ttm_bo_del_from_lru(bo);
 
 		spin_unlock(&glob->lru_lock);
@@ -522,7 +520,7 @@ queue:
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 	spin_unlock(&glob->lru_lock);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 
 	if (sync_obj) {
 		driver->sync_obj_flush(sync_obj, sync_obj_arg);
@@ -547,14 +545,15 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 			       bool no_wait_reserve,
 			       bool no_wait_gpu)
 {
+	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
 	int put_count;
 	int ret = 0;
 
 retry:
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 
 	if (unlikely(ret != 0))
 		return ret;
@@ -707,9 +706,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	struct ttm_placement placement;
 	int ret = 0;
 
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 
 	if (unlikely(ret != 0)) {
 		if (ret != -ERESTARTSYS) {
@@ -1044,6 +1043,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 {
 	int ret = 0;
 	struct ttm_mem_reg mem;
+	struct ttm_bo_device *bdev = bo->bdev;
 
 	BUG_ON(!atomic_read(&bo->reserved));
 
@@ -1052,9 +1052,9 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	 * Have the driver move function wait for idle when necessary,
 	 * instead of doing it here.
 	 */
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 	if (ret)
 		return ret;
 	mem.num_pages = bo->num_pages;
@@ -1171,7 +1171,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	}
 	bo->destroy = destroy;
 
-	spin_lock_init(&bo->lock);
 	kref_init(&bo->kref);
 	kref_init(&bo->list_kref);
 	atomic_set(&bo->cpu_writers, 0);
@@ -1535,7 +1534,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	bdev->dev_mapping = NULL;
 	bdev->glob = glob;
 	bdev->need_dma32 = need_dma32;
-
+	spin_lock_init(&bdev->fence_lock);
 	mutex_lock(&glob->device_list_mutex);
 	list_add_tail(&bdev->device_list, &glob->device_list);
 	mutex_unlock(&glob->device_list_mutex);
@@ -1659,6 +1658,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 		bool lazy, bool interruptible, bool no_wait)
 {
 	struct ttm_bo_driver *driver = bo->bdev->driver;
+	struct ttm_bo_device *bdev = bo->bdev;
 	void *sync_obj;
 	void *sync_obj_arg;
 	int ret = 0;
@@ -1672,9 +1672,9 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 		void *tmp_obj = bo->sync_obj;
 		bo->sync_obj = NULL;
 		clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		driver->sync_obj_unref(&tmp_obj);
-		spin_lock(&bo->lock);
+		spin_lock(&bdev->fence_lock);
 		continue;
 	}
 
@@ -1683,29 +1683,29 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 		sync_obj_arg = bo->sync_obj_arg;
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
 					    lazy, interruptible);
 		if (unlikely(ret != 0)) {
 			driver->sync_obj_unref(&sync_obj);
-			spin_lock(&bo->lock);
+			spin_lock(&bdev->fence_lock);
 			return ret;
 		}
-		spin_lock(&bo->lock);
+		spin_lock(&bdev->fence_lock);
 		if (likely(bo->sync_obj == sync_obj &&
 			   bo->sync_obj_arg == sync_obj_arg)) {
 			void *tmp_obj = bo->sync_obj;
 			bo->sync_obj = NULL;
 			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
 				  &bo->priv_flags);
-			spin_unlock(&bo->lock);
+			spin_unlock(&bdev->fence_lock);
 			driver->sync_obj_unref(&sync_obj);
 			driver->sync_obj_unref(&tmp_obj);
-			spin_lock(&bo->lock);
+			spin_lock(&bdev->fence_lock);
 		} else {
-			spin_unlock(&bo->lock);
+			spin_unlock(&bdev->fence_lock);
 			driver->sync_obj_unref(&sync_obj);
-			spin_lock(&bo->lock);
+			spin_lock(&bdev->fence_lock);
 		}
 	}
 	return 0;
@@ -1714,6 +1714,7 @@ EXPORT_SYMBOL(ttm_bo_wait);
 
 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 {
+	struct ttm_bo_device *bdev = bo->bdev;
 	int ret = 0;
 
 	/*
@@ -1723,9 +1724,9 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
 	if (unlikely(ret != 0))
 		return ret;
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, true, no_wait);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 	if (likely(ret == 0))
 		atomic_inc(&bo->cpu_writers);
 	ttm_bo_unreserve(bo);
@@ -1797,9 +1798,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 	 * Wait for GPU, then move to system cached.
 	 */
 
-	spin_lock(&bo->lock);
+	spin_lock(&bo->bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, false, false);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bo->bdev->fence_lock);
 
 	if (unlikely(ret != 0))
 		goto out;
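The largest set of hunks above is ttm_bo_wait(): fence_lock may not be held across the driver's sync_obj_wait() and sync_obj_unref() calls, so after each wait the function retakes the lock and re-checks that the bo still carries the same fence before clearing it. A condensed, illustrative sketch of that revalidation loop follows (error paths and the TTM_BO_PRIV_FLAG_MOVING bookkeeping are trimmed; names as in the patch, locking behaviour simplified so the function is self-contained):

#include "ttm/ttm_bo_driver.h"

static int bo_wait_sketch(struct ttm_buffer_object *bo,
			  bool lazy, bool interruptible)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	void *sync_obj;
	void *sync_obj_arg;
	int ret;

	spin_lock(&bdev->fence_lock);
	while (bo->sync_obj != NULL) {
		/* Take a private reference so the fence outlives the unlock. */
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		sync_obj_arg = bo->sync_obj_arg;
		spin_unlock(&bdev->fence_lock);

		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			return ret;
		}

		spin_lock(&bdev->fence_lock);
		if (likely(bo->sync_obj == sync_obj &&
			   bo->sync_obj_arg == sync_obj_arg)) {
			/* Still the fence we waited on: retire it. */
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&tmp_obj);
		} else {
			/* A new fence was attached meanwhile; loop again. */
			spin_unlock(&bdev->fence_lock);
		}
		driver->sync_obj_unref(&sync_obj);
		spin_lock(&bdev->fence_lock);
	}
	spin_unlock(&bdev->fence_lock);
	return 0;
}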
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 3106d5bcce32..4b75133d6606 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -337,7 +337,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	 * TODO: Explicit member copy would probably be better here.
 	 */
 
-	spin_lock_init(&fbo->lock);
 	init_waitqueue_head(&fbo->event_queue);
 	INIT_LIST_HEAD(&fbo->ddestroy);
 	INIT_LIST_HEAD(&fbo->lru);
@@ -520,7 +519,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	struct ttm_buffer_object *ghost_obj;
 	void *tmp_obj = NULL;
 
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	if (bo->sync_obj) {
 		tmp_obj = bo->sync_obj;
 		bo->sync_obj = NULL;
@@ -529,7 +528,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	bo->sync_obj_arg = sync_obj_arg;
 	if (evict) {
 		ret = ttm_bo_wait(bo, false, false, false);
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		if (tmp_obj)
 			driver->sync_obj_unref(&tmp_obj);
 		if (ret)
@@ -552,7 +551,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 		 */
 
 		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		if (tmp_obj)
 			driver->sync_obj_unref(&tmp_obj);
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index fe6cb77899f4..8dd446cb778e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -118,17 +118,17 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * move.
 	 */
 
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
 		ret = ttm_bo_wait(bo, false, true, false);
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		if (unlikely(ret != 0)) {
 			retval = (ret != -ERESTARTSYS) ?
 			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
 			goto out_unlock;
 		}
 	} else
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 
 
 	ret = ttm_mem_io_reserve(bdev, &bo->mem);
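In the fault path above, fence_lock pairs with the TTM_BO_PRIV_FLAG_MOVING bit that ttm_bo_move_accel_cleanup() sets in the previous file: the fault handler only waits on the fence when a move is actually in flight. An illustrative sketch of that producer/consumer pair (both helper names are hypothetical; reference handling simplified):

#include "ttm/ttm_bo_driver.h"

/* Producer (cf. ttm_bo_move_accel_cleanup): publish the move fence. */
static void publish_move_fence(struct ttm_buffer_object *bo,
			       void *sync_obj, void *sync_obj_arg)
{
	struct ttm_bo_device *bdev = bo->bdev;

	spin_lock(&bdev->fence_lock);
	bo->sync_obj = bdev->driver->sync_obj_ref(sync_obj);
	bo->sync_obj_arg = sync_obj_arg;
	set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
	spin_unlock(&bdev->fence_lock);
}

/* Consumer (cf. ttm_bo_vm_fault): wait only while a move is in flight. */
static int wait_for_move(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;

	spin_lock(&bdev->fence_lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags))
		ret = ttm_bo_wait(bo, false, true, false);
	spin_unlock(&bdev->fence_lock);
	return ret;
}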
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 7dcc6470e2f5..c3a2100bace6 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -203,14 +203,15 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
 
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
-		struct ttm_bo_driver *driver = bo->bdev->driver;
+		struct ttm_bo_device *bdev = bo->bdev;
+		struct ttm_bo_driver *driver = bdev->driver;
 		void *old_sync_obj;
 
-		spin_lock(&bo->lock);
+		spin_lock(&bdev->fence_lock);
 		old_sync_obj = bo->sync_obj;
 		bo->sync_obj = driver->sync_obj_ref(sync_obj);
 		bo->sync_obj_arg = entry->new_sync_obj_arg;
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		ttm_bo_unreserve(bo);
 		entry->reserved = false;
 		if (old_sync_obj)
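ttm_eu_fence_buffer_objects() above is the write side of the rule documented in ttm_bo_api.h below: attaching a new fence happens with the bo reserved *and* fence_lock held, after which the reservation can be dropped. A driver-side sketch of the same pattern (illustrative only; compare with the radeon_bo_list_fence() hunk earlier in this patch):

#include "ttm/ttm_bo_driver.h"

static void fence_reserved_bo(struct ttm_buffer_object *bo, void *sync_obj)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	void *old_sync_obj;

	/* Caller holds bo::reserved, so sync_obj may legally be replaced. */
	spin_lock(&bdev->fence_lock);
	old_sync_obj = bo->sync_obj;
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	spin_unlock(&bdev->fence_lock);

	ttm_bo_unreserve(bo);
	if (old_sync_obj)
		driver->sync_obj_unref(&old_sync_obj);
}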
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index b0fc9c12554b..edacd483c59c 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -154,7 +154,6 @@ struct ttm_tt;
  * keeps one refcount. When this refcount reaches zero,
  * the object is destroyed.
  * @event_queue: Queue for processes waiting on buffer object status change.
- * @lock: spinlock protecting mostly synchronization members.
  * @mem: structure describing current placement.
  * @persistant_swap_storage: Usually the swap storage is deleted for buffers
  * pinned in physical memory. If this behaviour is not desired, this member
@@ -213,7 +212,6 @@ struct ttm_buffer_object {
 	struct kref kref;
 	struct kref list_kref;
 	wait_queue_head_t event_queue;
-	spinlock_t lock;
 
 	/**
 	 * Members protected by the bo::reserved lock.
@@ -248,10 +246,10 @@ struct ttm_buffer_object {
 	atomic_t reserved;
 
 	/**
-	 * Members protected by the bo::lock
+	 * Members protected by struct buffer_object_device::fence_lock
 	 * In addition, setting sync_obj to anything else
 	 * than NULL requires bo::reserved to be held. This allows for
-	 * checking NULL while reserved but not holding bo::lock.
+	 * checking NULL while reserved but not holding the mentioned lock.
 	 */
 
 	void *sync_obj_arg;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 1e25a40c688e..ca8131e98300 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -510,6 +510,8 @@ struct ttm_bo_global {
  *
  * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
  * @man: An array of mem_type_managers.
+ * @fence_lock: Protects the synchronizing members on *all* bos belonging
+ * to this device.
  * @addr_space_mm: Range manager for the device address space.
  * lru_lock: Spinlock that protects the buffer+device lru lists and
  * ddestroy lists.
@@ -531,6 +533,7 @@ struct ttm_bo_device {
 	struct ttm_bo_driver *driver;
 	rwlock_t vm_lock;
 	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
+	spinlock_t fence_lock;
 	/*
 	 * Protected by the vm lock.
 	 */