-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c        3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c       5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c          1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c      2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c               97
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c          26
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c             2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c     19
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c         2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c     1
-rw-r--r--  include/drm/ttm/ttm_bo_api.h               16
-rw-r--r--  include/drm/ttm/ttm_execbuf_util.h          6
12 files changed, 40 insertions(+), 140 deletions(-)
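
This patch backs out the experimental read/write fence split in TTM: the
enum ttm_buffer_usage type, the per-object sync_obj_read/sync_obj_write
pointers, and the extra usage argument to ttm_bo_wait() are all removed, so
a buffer object is back to carrying a single sync_obj. The net API change,
with both signatures as they appear in the include/drm/ttm/ttm_bo_api.h
hunks below:

    /* before: callers named the access type they were waiting on */
    extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
                           bool interruptible, bool no_wait,
                           enum ttm_buffer_usage usage);

    /* after: one sync object covers all outstanding GPU access */
    extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
                           bool interruptible, bool no_wait);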
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 424dff5d0ab..7226f419e17 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1104,8 +1104,7 @@ nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 	if (vma->node) {
 		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
 			spin_lock(&nvbo->bo.bdev->fence_lock);
-			ttm_bo_wait(&nvbo->bo, false, false, false,
-				    TTM_USAGE_READWRITE);
+			ttm_bo_wait(&nvbo->bo, false, false, false);
 			spin_unlock(&nvbo->bo.bdev->fence_lock);
 			nouveau_vm_unmap(vma);
 		}
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 322bf62a064..5f0bc57fdaa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -589,8 +589,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
 		}
 
 		spin_lock(&nvbo->bo.bdev->fence_lock);
-		ret = ttm_bo_wait(&nvbo->bo, false, false, false,
-				  TTM_USAGE_READWRITE);
+		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
 		spin_unlock(&nvbo->bo.bdev->fence_lock);
 		if (ret) {
 			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
@@ -826,7 +825,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 	nvbo = nouveau_gem_object(gem);
 
 	spin_lock(&nvbo->bo.bdev->fence_lock);
-	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait, TTM_USAGE_READWRITE);
+	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
 	spin_unlock(&nvbo->bo.bdev->fence_lock);
 	drm_gem_object_unreference_unlocked(gem);
 	return ret;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 14e85315110..fae00c0d75a 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -80,7 +80,6 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 			p->relocs[i].lobj.wdomain = r->write_domain;
 			p->relocs[i].lobj.rdomain = r->read_domains;
 			p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
-			p->relocs[i].lobj.tv.usage = TTM_USAGE_READWRITE;
 			p->relocs[i].handle = r->handle;
 			p->relocs[i].flags = r->flags;
 			radeon_bo_list_add_object(&p->relocs[i].lobj,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index b8f75f5d344..1c851521f45 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -527,7 +527,7 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
 	if (mem_type)
 		*mem_type = bo->tbo.mem.mem_type;
 	if (bo->tbo.sync_obj)
-		r = ttm_bo_wait(&bo->tbo, true, true, no_wait, false);
+		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
 	spin_unlock(&bo->tbo.bdev->fence_lock);
 	ttm_bo_unreserve(&bo->tbo);
 	return r;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 50fc8e4c9a3..617b64678fc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -499,7 +499,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	int ret;
 
 	spin_lock(&bdev->fence_lock);
-	(void) ttm_bo_wait(bo, false, false, true, TTM_USAGE_READWRITE);
+	(void) ttm_bo_wait(bo, false, false, true);
 	if (!bo->sync_obj) {
 
 		spin_lock(&glob->lru_lock);
@@ -567,8 +567,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 
 retry:
 	spin_lock(&bdev->fence_lock);
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu,
-			  TTM_USAGE_READWRITE);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 	spin_unlock(&bdev->fence_lock);
 
 	if (unlikely(ret != 0))
@@ -727,8 +726,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	int ret = 0;
 
 	spin_lock(&bdev->fence_lock);
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu,
-			  TTM_USAGE_READWRITE);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 	spin_unlock(&bdev->fence_lock);
 
 	if (unlikely(ret != 0)) {
@@ -1075,8 +1073,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	 * instead of doing it here.
 	 */
 	spin_lock(&bdev->fence_lock);
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu,
-			  TTM_USAGE_READWRITE);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 	spin_unlock(&bdev->fence_lock);
 	if (ret)
 		return ret;
@@ -1697,83 +1694,34 @@ out_unlock:
 	return ret;
 }
 
-static void ttm_bo_unref_sync_obj_locked(struct ttm_buffer_object *bo,
-					 void *sync_obj,
-					 void **extra_sync_obj)
-{
-	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_driver *driver = bdev->driver;
-	void *tmp_obj = NULL, *tmp_obj_read = NULL, *tmp_obj_write = NULL;
-
-	/* We must unref the sync obj wherever it's ref'd.
-	 * Note that if we unref bo->sync_obj, we can unref both the read
-	 * and write sync objs too, because they can't be newer than
-	 * bo->sync_obj, so they are no longer relevant. */
-	if (sync_obj == bo->sync_obj ||
-	    sync_obj == bo->sync_obj_read) {
-		tmp_obj_read = bo->sync_obj_read;
-		bo->sync_obj_read = NULL;
-	}
-	if (sync_obj == bo->sync_obj ||
-	    sync_obj == bo->sync_obj_write) {
-		tmp_obj_write = bo->sync_obj_write;
-		bo->sync_obj_write = NULL;
-	}
-	if (sync_obj == bo->sync_obj) {
-		tmp_obj = bo->sync_obj;
-		bo->sync_obj = NULL;
-	}
-
-	clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-	spin_unlock(&bdev->fence_lock);
-	if (tmp_obj)
-		driver->sync_obj_unref(&tmp_obj);
-	if (tmp_obj_read)
-		driver->sync_obj_unref(&tmp_obj_read);
-	if (tmp_obj_write)
-		driver->sync_obj_unref(&tmp_obj_write);
-	if (extra_sync_obj)
-		driver->sync_obj_unref(extra_sync_obj);
-	spin_lock(&bdev->fence_lock);
-}
-
 int ttm_bo_wait(struct ttm_buffer_object *bo,
-		bool lazy, bool interruptible, bool no_wait,
-		enum ttm_buffer_usage usage)
+		bool lazy, bool interruptible, bool no_wait)
 {
 	struct ttm_bo_driver *driver = bo->bdev->driver;
 	struct ttm_bo_device *bdev = bo->bdev;
 	void *sync_obj;
 	void *sync_obj_arg;
 	int ret = 0;
-	void **bo_sync_obj;
 
-	switch (usage) {
-	case TTM_USAGE_READ:
-		bo_sync_obj = &bo->sync_obj_read;
-		break;
-	case TTM_USAGE_WRITE:
-		bo_sync_obj = &bo->sync_obj_write;
-		break;
-	case TTM_USAGE_READWRITE:
-	default:
-		bo_sync_obj = &bo->sync_obj;
-	}
-
-	if (likely(*bo_sync_obj == NULL))
+	if (likely(bo->sync_obj == NULL))
 		return 0;
 
-	while (*bo_sync_obj) {
+	while (bo->sync_obj) {
 
-		if (driver->sync_obj_signaled(*bo_sync_obj, bo->sync_obj_arg)) {
-			ttm_bo_unref_sync_obj_locked(bo, *bo_sync_obj, NULL);
+		if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
+			void *tmp_obj = bo->sync_obj;
+			bo->sync_obj = NULL;
+			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+			spin_unlock(&bdev->fence_lock);
+			driver->sync_obj_unref(&tmp_obj);
+			spin_lock(&bdev->fence_lock);
 			continue;
 		}
 
 		if (no_wait)
 			return -EBUSY;
 
-		sync_obj = driver->sync_obj_ref(*bo_sync_obj);
+		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 		sync_obj_arg = bo->sync_obj_arg;
 		spin_unlock(&bdev->fence_lock);
 		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
@@ -1784,9 +1732,16 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 			return ret;
 		}
 		spin_lock(&bdev->fence_lock);
-		if (likely(*bo_sync_obj == sync_obj &&
+		if (likely(bo->sync_obj == sync_obj &&
 			   bo->sync_obj_arg == sync_obj_arg)) {
-			ttm_bo_unref_sync_obj_locked(bo, *bo_sync_obj, &sync_obj);
+			void *tmp_obj = bo->sync_obj;
+			bo->sync_obj = NULL;
+			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
+				  &bo->priv_flags);
+			spin_unlock(&bdev->fence_lock);
+			driver->sync_obj_unref(&sync_obj);
+			driver->sync_obj_unref(&tmp_obj);
+			spin_lock(&bdev->fence_lock);
 		} else {
 			spin_unlock(&bdev->fence_lock);
 			driver->sync_obj_unref(&sync_obj);
@@ -1810,7 +1765,7 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 	if (unlikely(ret != 0))
 		return ret;
 	spin_lock(&bdev->fence_lock);
-	ret = ttm_bo_wait(bo, false, true, no_wait, TTM_USAGE_READWRITE);
+	ret = ttm_bo_wait(bo, false, true, no_wait);
 	spin_unlock(&bdev->fence_lock);
 	if (likely(ret == 0))
 		atomic_inc(&bo->cpu_writers);
@@ -1884,7 +1839,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 	 */
 
 	spin_lock(&bo->bdev->fence_lock);
-	ret = ttm_bo_wait(bo, false, false, false, TTM_USAGE_READWRITE);
+	ret = ttm_bo_wait(bo, false, false, false);
 	spin_unlock(&bo->bdev->fence_lock);
 
 	if (unlikely(ret != 0))
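
Every caller converted above keeps the same locking discipline: take
bdev->fence_lock around the call, because ttm_bo_wait() drops and retakes
that lock internally while it sleeps on a fence. A minimal sketch of the
new four-argument form; example_wait_idle() is a hypothetical helper for
illustration, not part of the patch:

    /* Sketch only: wait for all outstanding GPU access to a buffer.
     * fence_lock protects bo->sync_obj; ttm_bo_wait() releases it while
     * blocking on the driver fence and reacquires it before returning. */
    static int example_wait_idle(struct ttm_buffer_object *bo, bool no_wait)
    {
            struct ttm_bo_device *bdev = bo->bdev;
            int ret;

            spin_lock(&bdev->fence_lock);
            ret = ttm_bo_wait(bo, false, true, no_wait); /* interruptible */
            spin_unlock(&bdev->fence_lock);
            return ret; /* 0, -EBUSY if no_wait, or -ERESTARTSYS */
    }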
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 6135f58169c..ae3c6f5dd2b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -436,8 +436,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	atomic_set(&fbo->cpu_writers, 0);
 
 	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
-	fbo->sync_obj_read = driver->sync_obj_ref(bo->sync_obj_read);
-	fbo->sync_obj_write = driver->sync_obj_ref(bo->sync_obj_write);
 	kref_init(&fbo->list_kref);
 	kref_init(&fbo->kref);
 	fbo->destroy = &ttm_transfered_destroy;
@@ -620,30 +618,20 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	int ret;
 	struct ttm_buffer_object *ghost_obj;
-	void *tmp_obj = NULL, *tmp_obj_read = NULL, *tmp_obj_write = NULL;
+	void *tmp_obj = NULL;
 
 	spin_lock(&bdev->fence_lock);
-	if (bo->sync_obj)
+	if (bo->sync_obj) {
 		tmp_obj = bo->sync_obj;
-	if (bo->sync_obj_read)
-		tmp_obj_read = bo->sync_obj_read;
-	if (bo->sync_obj_write)
-		tmp_obj_write = bo->sync_obj_write;
-
+		bo->sync_obj = NULL;
+	}
 	bo->sync_obj = driver->sync_obj_ref(sync_obj);
-	bo->sync_obj_read = driver->sync_obj_ref(sync_obj);
-	bo->sync_obj_write = driver->sync_obj_ref(sync_obj);
 	bo->sync_obj_arg = sync_obj_arg;
 	if (evict) {
-		ret = ttm_bo_wait(bo, false, false, false,
-				  TTM_USAGE_READWRITE);
+		ret = ttm_bo_wait(bo, false, false, false);
 		spin_unlock(&bdev->fence_lock);
 		if (tmp_obj)
 			driver->sync_obj_unref(&tmp_obj);
-		if (tmp_obj_read)
-			driver->sync_obj_unref(&tmp_obj_read);
-		if (tmp_obj_write)
-			driver->sync_obj_unref(&tmp_obj_write);
 		if (ret)
 			return ret;
 
@@ -667,10 +655,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 		spin_unlock(&bdev->fence_lock);
 		if (tmp_obj)
 			driver->sync_obj_unref(&tmp_obj);
-		if (tmp_obj_read)
-			driver->sync_obj_unref(&tmp_obj_read);
-		if (tmp_obj_write)
-			driver->sync_obj_unref(&tmp_obj_write);
 
 		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
 		if (ret)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index ff1e26f4b09..221b924aceb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -122,7 +122,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	spin_lock(&bdev->fence_lock);
 	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
-		ret = ttm_bo_wait(bo, false, true, false, TTM_USAGE_READWRITE);
+		ret = ttm_bo_wait(bo, false, true, false);
 		spin_unlock(&bdev->fence_lock);
 		if (unlikely(ret != 0)) {
 			retval = (ret != -ERESTARTSYS) ?
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 36d111a8823..3832fe10b4d 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -221,18 +221,8 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
 
 	list_for_each_entry(entry, list, head) {
 		bo = entry->bo;
-		entry->old_sync_obj_read = NULL;
-		entry->old_sync_obj_write = NULL;
 		entry->old_sync_obj = bo->sync_obj;
 		bo->sync_obj = driver->sync_obj_ref(sync_obj);
-		if (entry->usage & TTM_USAGE_READ) {
-			entry->old_sync_obj_read = bo->sync_obj_read;
-			bo->sync_obj_read = driver->sync_obj_ref(sync_obj);
-		}
-		if (entry->usage & TTM_USAGE_WRITE) {
-			entry->old_sync_obj_write = bo->sync_obj_write;
-			bo->sync_obj_write = driver->sync_obj_ref(sync_obj);
-		}
 		bo->sync_obj_arg = entry->new_sync_obj_arg;
 		ttm_bo_unreserve_locked(bo);
 		entry->reserved = false;
@@ -241,15 +231,8 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
 	spin_unlock(&bdev->fence_lock);
 
 	list_for_each_entry(entry, list, head) {
-		if (entry->old_sync_obj) {
+		if (entry->old_sync_obj)
 			driver->sync_obj_unref(&entry->old_sync_obj);
-		}
-		if (entry->old_sync_obj_read) {
-			driver->sync_obj_unref(&entry->old_sync_obj_read);
-		}
-		if (entry->old_sync_obj_write) {
-			driver->sync_obj_unref(&entry->old_sync_obj_write);
-		}
 	}
 }
 EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
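
With the usage tracking gone, ttm_eu_fence_buffer_objects() attaches the one
new fence to every buffer on the validation list and records a single
old_sync_obj per entry, unreferenced after fence_lock is dropped. A hedged
sketch of the surrounding driver flow, assuming the single-argument
ttm_eu_reserve_buffers() of this kernel generation; the fence object and
error handling are driver-specific and abbreviated here:

    /* Sketch only: validate one buffer, submit work, fence it. */
    struct ttm_validate_buffer val_buf;
    LIST_HEAD(validate_list);

    memset(&val_buf, 0, sizeof(val_buf));
    val_buf.bo = ttm_bo_reference(bo);  /* bo the command stream touches */
    val_buf.new_sync_obj_arg = NULL;    /* driver-private sync argument */
    list_add_tail(&val_buf.head, &validate_list);

    ret = ttm_eu_reserve_buffers(&validate_list);
    if (ret)
            return ret;
    /* ... build and submit the command stream, obtaining 'fence' ... */
    ttm_eu_fence_buffer_objects(&validate_list, fence); /* one ref per bo */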
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index b8eb8cdcfb7..13afddc1f03 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -244,7 +244,7 @@ static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
 
 	ttm_bo_reserve(bo, false, false, false, 0);
 	spin_lock(&bdev->fence_lock);
-	ret = ttm_bo_wait(bo, false, false, false, TTM_USAGE_READWRITE);
+	ret = ttm_bo_wait(bo, false, false, false);
 	spin_unlock(&bdev->fence_lock);
 	if (unlikely(ret != 0))
 		(void) vmw_fallback_wait(dev_priv, false, true, 0, false,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 28e1c35aec6..40932fbdac0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -89,7 +89,6 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 	if (unlikely(val_node == sw_context->cur_val_buf)) {
 		val_buf->new_sync_obj_arg = NULL;
 		val_buf->bo = ttm_bo_reference(bo);
-		val_buf->usage = TTM_USAGE_READWRITE;
 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
 		++sw_context->cur_val_buf;
 	}
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index da957bf3fe4..42e34698518 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -44,11 +44,6 @@ struct ttm_bo_device;
 
 struct drm_mm_node;
 
-enum ttm_buffer_usage {
-	TTM_USAGE_READ = 1,
-	TTM_USAGE_WRITE = 2,
-	TTM_USAGE_READWRITE = TTM_USAGE_READ | TTM_USAGE_WRITE
-};
 
 /**
  * struct ttm_placement
@@ -179,10 +174,7 @@ struct ttm_tt;
  * the bo_device::lru_lock.
  * @reserved: Deadlock-free lock used for synchronization state transitions.
  * @sync_obj_arg: Opaque argument to synchronization object function.
- * @sync_obj: Pointer to a synchronization object of a last read or write,
- * whichever is later.
- * @sync_obj_read: Pointer to a synchronization object of a last read.
- * @sync_obj_write: Pointer to a synchronization object of a last write.
+ * @sync_obj: Pointer to a synchronization object.
  * @priv_flags: Flags describing buffer object internal state.
  * @vm_rb: Rb node for the vm rb tree.
  * @vm_node: Address space manager node.
@@ -266,8 +258,6 @@ struct ttm_buffer_object {
 
 	void *sync_obj_arg;
 	void *sync_obj;
-	void *sync_obj_read;
-	void *sync_obj_write;
 	unsigned long priv_flags;
 
 	/**
@@ -335,7 +325,6 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
  * @bo: The buffer object.
  * @interruptible: Use interruptible wait.
  * @no_wait: Return immediately if buffer is busy.
- * @usage: Whether to wait for the last read and/or the last write.
  *
  * This function must be called with the bo::mutex held, and makes
  * sure any previous rendering to the buffer is completed.
@@ -345,8 +334,7 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
  * Returns -ERESTARTSYS if interrupted by a signal.
  */
 extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
-		       bool interruptible, bool no_wait,
-		       enum ttm_buffer_usage usage);
+		       bool interruptible, bool no_wait);
 /**
  * ttm_bo_validate
  *
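
After the header changes above, a buffer object carries one fence pointer
plus its opaque argument, both protected by the device fence_lock as the
call sites earlier in the diff show. The fields as they now stand in
struct ttm_buffer_object, elided to the ones this patch touches:

    struct ttm_buffer_object {
            /* ... */
            void *sync_obj_arg;        /* opaque argument for sync ops */
            void *sync_obj;            /* fence for last GPU access, or NULL */
            unsigned long priv_flags;  /* e.g. TTM_BO_PRIV_FLAG_MOVING */
            /* ... */
    };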
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index 375f2990229..26cc7f9ffa4 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -41,26 +41,20 @@
  * @bo: refcounted buffer object pointer.
  * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
  * adding a new sync object.
- * @usage Indicates how @bo is used by the device.
  * @reserved: Indicates whether @bo has been reserved for validation.
  * @removed: Indicates whether @bo has been removed from lru lists.
  * @put_count: Number of outstanding references on bo::list_kref.
  * @old_sync_obj: Pointer to a sync object about to be unreferenced
- * @old_sync_obj_read: Pointer to a read sync object about to be unreferenced.
- * @old_sync_obj_write: Pointer to a write sync object about to be unreferenced.
  */
 
 struct ttm_validate_buffer {
 	struct list_head head;
 	struct ttm_buffer_object *bo;
 	void *new_sync_obj_arg;
-	enum ttm_buffer_usage usage;
 	bool reserved;
 	bool removed;
 	int put_count;
 	void *old_sync_obj;
-	void *old_sync_obj_read;
-	void *old_sync_obj_write;
 };
 
 /**