author		Thomas Hellstrom <thellstrom@vmware.com>	2010-11-17 07:28:29 -0500
committer	Dave Airlie <airlied@redhat.com>	2010-11-21 22:25:18 -0500
commit		702adba22433c175e8429a47760f35ca16caf1cd (patch)
tree		a9c6a1ad8ebaf9970a87b7047357b6d7232b70e6 /drivers/gpu/drm/nouveau/nouveau_gem.c
parent		96726fe50feae74812a2ccf5d5da23cb01c0a413 (diff)
drm/ttm/radeon/nouveau: Kill the bo lock in favour of a bo device fence_lock
The bo lock was used only to protect the bo sync object members, and since it is a per-bo lock, fencing a buffer list incurs a large number of lock and unlock cycles. Replace it with a per-device lock that protects the sync object members on *all* bos. Reading and setting these members will always be very quick, so the risk of heavy lock contention is microscopic. Note that waiting for sync objects will always take place outside of this lock.

The bo device fence lock will eventually be replaced with a seqlock / rcu mechanism so we can determine that a bo is idle under a rcu / read seqlock. However, this change will allow us to batch fencing and unreserving of buffers with a minimal amount of locking.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jerome Glisse <j.glisse@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
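The per-device lock is what makes batched fencing possible: updating a bo's sync object is a quick pointer assignment, so one lock round-trip can cover a whole validation list instead of one lock/unlock pair per bo. Below is a minimal sketch of that pattern, using simplified, hypothetical structures (sketch_bo, sketch_bo_device) rather than the real TTM types; note that this particular patch still takes the lock per bo inside validate_fini_list(), and the batching is the follow-up work the message anticipates.

#include <linux/spinlock.h>
#include <linux/list.h>

struct sketch_bo_device {
	spinlock_t fence_lock;	/* guards sync_obj on every bo of this device */
};

struct sketch_bo {
	struct sketch_bo_device *bdev;
	void *sync_obj;		/* fence pointer, protected by bdev->fence_lock */
	struct list_head entry;
};

/*
 * Fence an entire buffer list under a single lock round-trip. Only the
 * quick pointer updates happen under fence_lock; waiting on fences
 * always happens outside it, per the commit message. A real version
 * would also manage fence refcounts, as the diff below does with
 * nouveau_fence_ref()/nouveau_fence_unref().
 */
static void sketch_fence_list(struct sketch_bo_device *bdev,
			      struct list_head *list, void *fence)
{
	struct sketch_bo *bo;

	spin_lock(&bdev->fence_lock);
	list_for_each_entry(bo, list, entry)
		bo->sync_obj = fence;
	spin_unlock(&bdev->fence_lock);
}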
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_gem.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_gem.c	12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 9a1fdcf400c2..1f2301d26c0a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -234,10 +234,10 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
 	if (likely(fence)) {
 		struct nouveau_fence *prev_fence;
 
-		spin_lock(&nvbo->bo.lock);
+		spin_lock(&nvbo->bo.bdev->fence_lock);
 		prev_fence = nvbo->bo.sync_obj;
 		nvbo->bo.sync_obj = nouveau_fence_ref(fence);
-		spin_unlock(&nvbo->bo.lock);
+		spin_unlock(&nvbo->bo.bdev->fence_lock);
 		nouveau_fence_unref((void *)&prev_fence);
 	}
 
@@ -557,9 +557,9 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
 			data |= r->vor;
 		}
 
-		spin_lock(&nvbo->bo.lock);
+		spin_lock(&nvbo->bo.bdev->fence_lock);
 		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
-		spin_unlock(&nvbo->bo.lock);
+		spin_unlock(&nvbo->bo.bdev->fence_lock);
 		if (ret) {
 			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
 			break;
@@ -791,9 +791,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 	}
 
 	if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
-		spin_lock(&nvbo->bo.lock);
+		spin_lock(&nvbo->bo.bdev->fence_lock);
 		ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
-		spin_unlock(&nvbo->bo.lock);
+		spin_unlock(&nvbo->bo.bdev->fence_lock);
 	} else {
 		ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
 		if (ret == 0)
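Both ttm_bo_wait() call sites in the hunks above share one shape: take the device-wide fence_lock, call ttm_bo_wait() to check or wait on the bo's sync object, then drop the lock; per the commit message, any actual blocking happens outside the lock. A hypothetical wrapper (not part of this patch or the tree) makes that pattern explicit, assuming the TTM API of this era:

#include "ttm/ttm_bo_api.h"	/* TTM header layout of this era (assumption) */

static int sketch_bo_wait_idle(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret;

	/* fence_lock guards the sync_obj inspection, as in the hunks above */
	spin_lock(&bo->bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, no_wait);
	spin_unlock(&bo->bdev->fence_lock);
	return ret;
}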