-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_bo.c		| 25
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_display.c	|  6
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_gem.c		| 16
-rw-r--r--	drivers/gpu/drm/qxl/qxl_cmd.c			|  2
-rw-r--r--	drivers/gpu/drm/qxl/qxl_fence.c			|  4
-rw-r--r--	drivers/gpu/drm/qxl/qxl_object.h		|  2
-rw-r--r--	drivers/gpu/drm/qxl/qxl_release.c		|  2
-rw-r--r--	drivers/gpu/drm/radeon/radeon_display.c		|  7
-rw-r--r--	drivers/gpu/drm/radeon/radeon_object.c		|  2
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c			| 75
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo_util.c		|  5
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo_vm.c			|  3
-rw-r--r--	drivers/gpu/drm/ttm/ttm_execbuf_util.c		|  2
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c		|  4
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_resource.c	| 16
-rw-r--r--	include/drm/ttm/ttm_bo_api.h			|  5
-rw-r--r--	include/drm/ttm/ttm_bo_driver.h			|  3
17 files changed, 37 insertions, 142 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index ed966f51e29b..8d8e5f6340d0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1212,9 +1212,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 	}
 
 	/* Fallback to software copy. */
-	spin_lock(&bo->bdev->fence_lock);
 	ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
-	spin_unlock(&bo->bdev->fence_lock);
 	if (ret == 0)
 		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 
@@ -1457,26 +1455,19 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	ttm_pool_unpopulate(ttm);
 }
 
+static void
+nouveau_bo_fence_unref(void **sync_obj)
+{
+	nouveau_fence_unref((struct nouveau_fence **)sync_obj);
+}
+
 void
 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
 {
-	struct nouveau_fence *new_fence = nouveau_fence_ref(fence);
-	struct nouveau_fence *old_fence = NULL;
-
 	lockdep_assert_held(&nvbo->bo.resv->lock.base);
 
-	spin_lock(&nvbo->bo.bdev->fence_lock);
-	old_fence = nvbo->bo.sync_obj;
-	nvbo->bo.sync_obj = new_fence;
-	spin_unlock(&nvbo->bo.bdev->fence_lock);
-
-	nouveau_fence_unref(&old_fence);
-}
-
-static void
-nouveau_bo_fence_unref(void **sync_obj)
-{
-	nouveau_fence_unref((struct nouveau_fence **)sync_obj);
-}
+	nouveau_bo_fence_unref(&nvbo->bo.sync_obj);
+	nvbo->bo.sync_obj = nouveau_fence_ref(fence);
 }
 
 static void *
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 54b1f3d8fc7f..e6867b9ebb46 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -722,11 +722,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		goto fail_unpin;
 
 	/* synchronise rendering channel with the kernel's channel */
-	spin_lock(&new_bo->bo.bdev->fence_lock);
-	fence = nouveau_fence_ref(new_bo->bo.sync_obj);
-	spin_unlock(&new_bo->bo.bdev->fence_lock);
-	ret = nouveau_fence_sync(fence, chan);
-	nouveau_fence_unref(&fence);
+	ret = nouveau_fence_sync(new_bo->bo.sync_obj, chan);
 	if (ret) {
 		ttm_bo_unreserve(&new_bo->bo);
 		goto fail_unpin;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 0054315eb879..1650c0bdb0fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -103,9 +103,7 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 	list_del(&vma->head);
 
 	if (mapped) {
-		spin_lock(&nvbo->bo.bdev->fence_lock);
 		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
-		spin_unlock(&nvbo->bo.bdev->fence_lock);
 	}
 
 	if (fence) {
@@ -430,17 +428,11 @@ retry:
 static int
 validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
 {
-	struct nouveau_fence *fence = NULL;
+	struct nouveau_fence *fence = nvbo->bo.sync_obj;
 	int ret = 0;
 
-	spin_lock(&nvbo->bo.bdev->fence_lock);
-	fence = nouveau_fence_ref(nvbo->bo.sync_obj);
-	spin_unlock(&nvbo->bo.bdev->fence_lock);
-
-	if (fence) {
+	if (fence)
 		ret = nouveau_fence_sync(fence, chan);
-		nouveau_fence_unref(&fence);
-	}
 
 	return ret;
 }
@@ -659,9 +651,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 			data |= r->vor;
 		}
 
-		spin_lock(&nvbo->bo.bdev->fence_lock);
 		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
-		spin_unlock(&nvbo->bo.bdev->fence_lock);
 		if (ret) {
 			NV_PRINTK(error, cli, "reloc wait_idle failed: %d\n", ret);
 			break;
@@ -894,11 +884,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 
 	ret = ttm_bo_reserve(&nvbo->bo, true, false, false, NULL);
 	if (!ret) {
-		spin_lock(&nvbo->bo.bdev->fence_lock);
 		ret = ttm_bo_wait(&nvbo->bo, true, true, true);
 		if (!no_wait && ret)
 			fence = nouveau_fence_ref(nvbo->bo.sync_obj);
-		spin_unlock(&nvbo->bo.bdev->fence_lock);
 
 		ttm_bo_unreserve(&nvbo->bo);
 	}
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index eb89653a7a17..45fad7b45486 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -628,9 +628,7 @@ static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stal
 	if (stall)
 		mutex_unlock(&qdev->surf_evict_mutex);
 
-	spin_lock(&surf->tbo.bdev->fence_lock);
 	ret = ttm_bo_wait(&surf->tbo, true, true, !stall);
-	spin_unlock(&surf->tbo.bdev->fence_lock);
 
 	if (stall)
 		mutex_lock(&qdev->surf_evict_mutex);
diff --git a/drivers/gpu/drm/qxl/qxl_fence.c b/drivers/gpu/drm/qxl/qxl_fence.c
index ae59e91cfb9a..c7248418117d 100644
--- a/drivers/gpu/drm/qxl/qxl_fence.c
+++ b/drivers/gpu/drm/qxl/qxl_fence.c
@@ -60,9 +60,6 @@ int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
 {
 	void *ret;
 	int retval = 0;
-	struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
-
-	spin_lock(&bo->tbo.bdev->fence_lock);
 
 	ret = radix_tree_delete(&qfence->tree, rel_id);
 	if (ret == qfence)
@@ -71,7 +68,6 @@ int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
 		DRM_DEBUG("didn't find fence in radix tree for %d\n", rel_id);
 		retval = -ENOENT;
 	}
-	spin_unlock(&bo->tbo.bdev->fence_lock);
 	return retval;
 }
 
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index 83a423293afd..1edaf5768086 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -76,12 +76,10 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
 		}
 		return r;
 	}
-	spin_lock(&bo->tbo.bdev->fence_lock);
 	if (mem_type)
 		*mem_type = bo->tbo.mem.mem_type;
 	if (bo->tbo.sync_obj)
 		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
-	spin_unlock(&bo->tbo.bdev->fence_lock);
 	ttm_bo_unreserve(&bo->tbo);
 	return r;
 }
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 14e776f1d14e..2e5e38fee9b2 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -337,7 +337,6 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
 	glob = bo->glob;
 
 	spin_lock(&glob->lru_lock);
-	spin_lock(&bdev->fence_lock);
 
 	list_for_each_entry(entry, &release->bos, head) {
 		bo = entry->bo;
@@ -352,7 +351,6 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
 		__ttm_bo_unreserve(bo);
 		entry->reserved = false;
 	}
-	spin_unlock(&bdev->fence_lock);
 	spin_unlock(&glob->lru_lock);
 	ww_acquire_fini(&release->ticket);
 }
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index bd0d687379ee..7d0a7abdab2a 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -476,11 +476,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 	obj = new_radeon_fb->obj;
 	new_rbo = gem_to_radeon_bo(obj);
 
-	spin_lock(&new_rbo->tbo.bdev->fence_lock);
-	if (new_rbo->tbo.sync_obj)
-		work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj);
-	spin_unlock(&new_rbo->tbo.bdev->fence_lock);
-
 	/* pin the new buffer */
 	DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
 			 work->old_rbo, new_rbo);
@@ -499,6 +494,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 		DRM_ERROR("failed to pin new rbo buffer before flip\n");
 		goto cleanup;
 	}
+	work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj);
 	radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
 	radeon_bo_unreserve(new_rbo);
 
@@ -582,7 +578,6 @@ cleanup:
 	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
 	radeon_fence_unref(&work->fence);
 	kfree(work);
-
 	return r;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index c97a42432e2b..cbac963571c0 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -779,12 +779,10 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
 	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
 	if (unlikely(r != 0))
 		return r;
-	spin_lock(&bo->tbo.bdev->fence_lock);
 	if (mem_type)
 		*mem_type = bo->tbo.mem.mem_type;
 	if (bo->tbo.sync_obj)
 		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
-	spin_unlock(&bo->tbo.bdev->fence_lock);
 	ttm_bo_unreserve(&bo->tbo);
 	return r;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4f1bc948bda0..195386f16ca4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -415,24 +415,20 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	spin_lock(&glob->lru_lock);
 	ret = __ttm_bo_reserve(bo, false, true, false, NULL);
 
-	spin_lock(&bdev->fence_lock);
-	(void) ttm_bo_wait(bo, false, false, true);
-	if (!ret && !bo->sync_obj) {
-		spin_unlock(&bdev->fence_lock);
-		put_count = ttm_bo_del_from_lru(bo);
+	if (!ret) {
+		(void) ttm_bo_wait(bo, false, false, true);
 
-		spin_unlock(&glob->lru_lock);
-		ttm_bo_cleanup_memtype_use(bo);
+		if (!bo->sync_obj) {
+			put_count = ttm_bo_del_from_lru(bo);
 
-		ttm_bo_list_ref_sub(bo, put_count, true);
+			spin_unlock(&glob->lru_lock);
+			ttm_bo_cleanup_memtype_use(bo);
 
-		return;
-	}
-	if (bo->sync_obj)
-		sync_obj = driver->sync_obj_ref(bo->sync_obj);
-	spin_unlock(&bdev->fence_lock);
+			ttm_bo_list_ref_sub(bo, put_count, true);
 
-	if (!ret) {
+			return;
+		}
+		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 
 		/*
 		 * Make NO_EVICT bos immediately available to
@@ -481,7 +477,6 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 	int put_count;
 	int ret;
 
-	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, false, true);
 
 	if (ret && !no_wait_gpu) {
@@ -493,7 +488,6 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 		 * no new sync objects can be attached.
 		 */
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
-		spin_unlock(&bdev->fence_lock);
 
 		__ttm_bo_unreserve(bo);
 		spin_unlock(&glob->lru_lock);
@@ -523,11 +517,9 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 		 * remove sync_obj with ttm_bo_wait, the wait should be
 		 * finished, and no new wait object should have been added.
 		 */
-		spin_lock(&bdev->fence_lock);
 		ret = ttm_bo_wait(bo, false, false, true);
 		WARN_ON(ret);
 	}
-	spin_unlock(&bdev->fence_lock);
 
 	if (ret || unlikely(list_empty(&bo->ddestroy))) {
 		__ttm_bo_unreserve(bo);
@@ -665,9 +657,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	struct ttm_placement placement;
 	int ret = 0;
 
-	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-	spin_unlock(&bdev->fence_lock);
 
 	if (unlikely(ret != 0)) {
 		if (ret != -ERESTARTSYS) {
@@ -958,7 +948,6 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 {
 	int ret = 0;
 	struct ttm_mem_reg mem;
-	struct ttm_bo_device *bdev = bo->bdev;
 
 	lockdep_assert_held(&bo->resv->lock.base);
 
@@ -967,9 +956,7 @@
 	 * Have the driver move function wait for idle when necessary,
 	 * instead of doing it here.
 	 */
-	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-	spin_unlock(&bdev->fence_lock);
 	if (ret)
 		return ret;
 	mem.num_pages = bo->num_pages;
@@ -1459,7 +1446,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	bdev->glob = glob;
 	bdev->need_dma32 = need_dma32;
 	bdev->val_seq = 0;
-	spin_lock_init(&bdev->fence_lock);
 	mutex_lock(&glob->device_list_mutex);
 	list_add_tail(&bdev->device_list, &glob->device_list);
 	mutex_unlock(&glob->device_list_mutex);
@@ -1517,7 +1503,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 		bool lazy, bool interruptible, bool no_wait)
 {
 	struct ttm_bo_driver *driver = bo->bdev->driver;
-	struct ttm_bo_device *bdev = bo->bdev;
 	void *sync_obj;
 	int ret = 0;
 
@@ -1526,53 +1511,33 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 	if (likely(bo->sync_obj == NULL))
 		return 0;
 
-	while (bo->sync_obj) {
-
+	if (bo->sync_obj) {
 		if (driver->sync_obj_signaled(bo->sync_obj)) {
-			void *tmp_obj = bo->sync_obj;
-			bo->sync_obj = NULL;
+			driver->sync_obj_unref(&bo->sync_obj);
 			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-			spin_unlock(&bdev->fence_lock);
-			driver->sync_obj_unref(&tmp_obj);
-			spin_lock(&bdev->fence_lock);
-			continue;
+			return 0;
 		}
 
 		if (no_wait)
 			return -EBUSY;
 
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
-		spin_unlock(&bdev->fence_lock);
 		ret = driver->sync_obj_wait(sync_obj,
 					    lazy, interruptible);
-		if (unlikely(ret != 0)) {
-			driver->sync_obj_unref(&sync_obj);
-			spin_lock(&bdev->fence_lock);
-			return ret;
-		}
-		spin_lock(&bdev->fence_lock);
-		if (likely(bo->sync_obj == sync_obj)) {
-			void *tmp_obj = bo->sync_obj;
-			bo->sync_obj = NULL;
+
+		if (likely(ret == 0)) {
 			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
 				  &bo->priv_flags);
-			spin_unlock(&bdev->fence_lock);
-			driver->sync_obj_unref(&sync_obj);
-			driver->sync_obj_unref(&tmp_obj);
-			spin_lock(&bdev->fence_lock);
-		} else {
-			spin_unlock(&bdev->fence_lock);
-			driver->sync_obj_unref(&sync_obj);
-			spin_lock(&bdev->fence_lock);
+			driver->sync_obj_unref(&bo->sync_obj);
 		}
+		driver->sync_obj_unref(&sync_obj);
 	}
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_wait);
 
 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
 	int ret = 0;
 
 	/*
@@ -1582,9 +1547,7 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 	ret = ttm_bo_reserve(bo, true, no_wait, false, NULL);
 	if (unlikely(ret != 0))
 		return ret;
-	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, true, no_wait);
-	spin_unlock(&bdev->fence_lock);
 	if (likely(ret == 0))
 		atomic_inc(&bo->cpu_writers);
 	ttm_bo_unreserve(bo);
@@ -1641,9 +1604,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 	 * Wait for GPU, then move to system cached.
 	 */
 
-	spin_lock(&bo->bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, false, false);
-	spin_unlock(&bo->bdev->fence_lock);
 
 	if (unlikely(ret != 0))
 		goto out;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 30e5d90cb7bc..495aebf0f9c3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -466,12 +466,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	drm_vma_node_reset(&fbo->vma_node);
 	atomic_set(&fbo->cpu_writers, 0);
 
-	spin_lock(&bdev->fence_lock);
 	if (bo->sync_obj)
 		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
 	else
 		fbo->sync_obj = NULL;
-	spin_unlock(&bdev->fence_lock);
 	kref_init(&fbo->list_kref);
 	kref_init(&fbo->kref);
 	fbo->destroy = &ttm_transfered_destroy;
@@ -657,7 +655,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	struct ttm_buffer_object *ghost_obj;
 	void *tmp_obj = NULL;
 
-	spin_lock(&bdev->fence_lock);
 	if (bo->sync_obj) {
 		tmp_obj = bo->sync_obj;
 		bo->sync_obj = NULL;
@@ -665,7 +662,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	bo->sync_obj = driver->sync_obj_ref(sync_obj);
 	if (evict) {
 		ret = ttm_bo_wait(bo, false, false, false);
-		spin_unlock(&bdev->fence_lock);
 		if (tmp_obj)
 			driver->sync_obj_unref(&tmp_obj);
 		if (ret)
@@ -688,7 +684,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	 */
 
 	set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-	spin_unlock(&bdev->fence_lock);
 	if (tmp_obj)
 		driver->sync_obj_unref(&tmp_obj);
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 0ce48e5a9cb4..d05437f219e9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -45,10 +45,8 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 				struct vm_area_struct *vma,
 				struct vm_fault *vmf)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
 	int ret = 0;
 
-	spin_lock(&bdev->fence_lock);
 	if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
 		goto out_unlock;
 
@@ -82,7 +80,6 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 		VM_FAULT_NOPAGE;
 
 out_unlock:
-	spin_unlock(&bdev->fence_lock);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index e8dac8758528..0fbbbbd67afc 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -217,7 +217,6 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 	glob = bo->glob;
 
 	spin_lock(&glob->lru_lock);
-	spin_lock(&bdev->fence_lock);
 
 	list_for_each_entry(entry, list, head) {
 		bo = entry->bo;
@@ -227,7 +226,6 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 		__ttm_bo_unreserve(bo);
 		entry->reserved = false;
 	}
-	spin_unlock(&bdev->fence_lock);
 	spin_unlock(&glob->lru_lock);
 	if (ticket)
 		ww_acquire_fini(ticket);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 37c093c0c7b8..c133b3d10de8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -863,11 +863,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
  */
 static void vmw_swap_notify(struct ttm_buffer_object *bo)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
-
-	spin_lock(&bdev->fence_lock);
 	ttm_bo_wait(bo, false, false, false);
-	spin_unlock(&bdev->fence_lock);
 }
 
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a432c0db257c..1ee86bf82750 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -567,12 +567,13 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
 	int ret;
 
 	if (flags & drm_vmw_synccpu_allow_cs) {
-		struct ttm_bo_device *bdev = bo->bdev;
+		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
 
-		spin_lock(&bdev->fence_lock);
-		ret = ttm_bo_wait(bo, false, true,
-				  !!(flags & drm_vmw_synccpu_dontblock));
-		spin_unlock(&bdev->fence_lock);
+		ret = ttm_bo_reserve(bo, true, nonblock, false, NULL);
+		if (!ret) {
+			ret = ttm_bo_wait(bo, false, true, nonblock);
+			ttm_bo_unreserve(bo);
+		}
 		return ret;
 	}
 
@@ -1429,12 +1430,10 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
 	else
 		driver->sync_obj_ref(fence);
 
-	spin_lock(&bdev->fence_lock);
 
 	old_fence_obj = bo->sync_obj;
 	bo->sync_obj = fence;
 
-	spin_unlock(&bdev->fence_lock);
 
 	if (old_fence_obj)
 		vmw_fence_obj_unreference(&old_fence_obj);
@@ -1475,7 +1474,6 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
 
 	if (mem->mem_type != VMW_PL_MOB) {
 		struct vmw_resource *res, *n;
-		struct ttm_bo_device *bdev = bo->bdev;
 		struct ttm_validate_buffer val_buf;
 
 		val_buf.bo = bo;
@@ -1491,9 +1489,7 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
 			list_del_init(&res->mob_head);
 		}
 
-		spin_lock(&bdev->fence_lock);
 		(void) ttm_bo_wait(bo, false, false, false);
-		spin_unlock(&bdev->fence_lock);
 	}
 }
 
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index e3d39c80a091..5805f4a49478 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -237,10 +237,7 @@ struct ttm_buffer_object {
 	struct list_head io_reserve_lru;
 
 	/**
-	 * Members protected by struct buffer_object_device::fence_lock
-	 * In addition, setting sync_obj to anything else
-	 * than NULL requires bo::reserved to be held. This allows for
-	 * checking NULL while reserved but not holding the mentioned lock.
+	 * Members protected by a bo reservation.
 	 */
 
 	void *sync_obj;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 5c8bb5699a6f..e1ee141e26cc 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -521,8 +521,6 @@ struct ttm_bo_global {
  *
 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
 * @man: An array of mem_type_managers.
- * @fence_lock: Protects the synchronizing members on *all* bos belonging
- * to this device.
 * @vma_manager: Address space manager
 * lru_lock: Spinlock that protects the buffer+device lru lists and
 * ddestroy lists.
@@ -542,7 +540,6 @@
 	struct ttm_bo_global *glob;
 	struct ttm_bo_driver *driver;
 	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
-	spinlock_t fence_lock;
 
 	/*
 	 * Protected by internal locks.
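Reader's sketch, not part of the patch: once bdev->fence_lock is gone, bo->sync_obj is protected by the buffer's reservation, so the wait-for-idle pattern becomes reserve, wait, unreserve — the same shape radeon_bo_wait() and vmw_user_dmabuf_synccpu_grab() take above. A minimal illustration; my_wait_idle is a hypothetical helper, while ttm_bo_reserve(), ttm_bo_wait() and ttm_bo_unreserve() are the calls used in this patch:

	static int my_wait_idle(struct ttm_buffer_object *bo, bool no_wait)
	{
		int ret;

		/* The reservation is what now protects bo->sync_obj. */
		ret = ttm_bo_reserve(bo, true, no_wait, false, NULL);
		if (unlikely(ret != 0))
			return ret;
		/* Safe to inspect and wait on bo->sync_obj while reserved. */
		ret = ttm_bo_wait(bo, false, true, no_wait);
		ttm_bo_unreserve(bo);
		return ret;
	}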