author		Maarten Lankhorst <m.b.lankhorst@gmail.com>	2012-11-28 06:25:39 -0500
committer	Dave Airlie <airlied@redhat.com>	2012-12-10 05:09:58 -0500
commit		4154f051e74e6a5db174c8f4fc8a2f9c8a6b2541
tree		6bdfc3dc7fd144bf5cbe23c908649afff6183448
parent		1a1494def7eacbd25db05185aa2e81ef90892460
drm/ttm: change fence_lock to inner lock
This requires changing the order in ttm_bo_cleanup_refs_or_queue to
take the reservation first, as there is otherwise no race-free way to
take the lru lock before fence_lock.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
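The core of the change is the lock ordering in ttm_bo_cleanup_refs_or_queue: instead of taking bdev->fence_lock first and then trylocking the reservation under it (the lock inversion the deleted comment apologized for), the function now takes glob->lru_lock, trylocks the reservation, and only then takes fence_lock as the innermost lock. Below is a minimal userspace sketch of that ordering, not the kernel code: pthread mutexes stand in for the spinlocks and the reservation, and a bo_idle flag stands in for the !bo->sync_obj check after ttm_bo_wait.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lru_lock   = PTHREAD_MUTEX_INITIALIZER; /* glob->lru_lock */
static pthread_mutex_t fence_lock = PTHREAD_MUTEX_INITIALIZER; /* bdev->fence_lock */
static pthread_mutex_t reserve    = PTHREAD_MUTEX_INITIALIZER; /* models bo reservation */
static bool bo_idle = true;       /* models !bo->sync_obj after ttm_bo_wait */

static void cleanup_refs_or_queue(void)
{
	pthread_mutex_lock(&lru_lock);             /* outer lock, taken first */
	int ret = pthread_mutex_trylock(&reserve); /* reservation before fence_lock */

	pthread_mutex_lock(&fence_lock);           /* fence_lock is now innermost */
	bool destroy_now = (ret == 0) && bo_idle;
	if (destroy_now) {
		/* reserved and idle: the !ret && !bo->sync_obj path destroys directly */
		pthread_mutex_unlock(&fence_lock);
		pthread_mutex_unlock(&reserve);
		pthread_mutex_unlock(&lru_lock);
		printf("destroyed immediately\n");
		return;
	}
	pthread_mutex_unlock(&fence_lock);

	if (ret == 0)
		pthread_mutex_unlock(&reserve); /* the real code also wakes waiters */
	/* still busy or not reservable: queue on the delayed-destroy list */
	pthread_mutex_unlock(&lru_lock);
	printf("queued for delayed destroy\n");
}

int main(void)
{
	cleanup_refs_or_queue();
	return 0;
}

Because the trylock happens before fence_lock is ever held, the -EBUSY/goto queue escape hatch in the old code disappears: the same ret value simply gates the direct-destroy path.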
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c	31
-rw-r--r--	drivers/gpu/drm/ttm/ttm_execbuf_util.c	4
2 files changed, 13 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 2c54c3d414b3..b7781453bfd1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -500,27 +500,17 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
-	struct ttm_bo_driver *driver;
+	struct ttm_bo_driver *driver = bdev->driver;
 	void *sync_obj = NULL;
 	int put_count;
 	int ret;
 
+	spin_lock(&glob->lru_lock);
+	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+
 	spin_lock(&bdev->fence_lock);
 	(void) ttm_bo_wait(bo, false, false, true);
-	if (!bo->sync_obj) {
-
-		spin_lock(&glob->lru_lock);
-
-		/**
-		 * Lock inversion between bo:reserve and bdev::fence_lock here,
-		 * but that's OK, since we're only trylocking.
-		 */
-
-		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-
-		if (unlikely(ret == -EBUSY))
-			goto queue;
-
+	if (!ret && !bo->sync_obj) {
 		spin_unlock(&bdev->fence_lock);
 		put_count = ttm_bo_del_from_lru(bo);
 
@@ -530,18 +520,19 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 		ttm_bo_list_ref_sub(bo, put_count, true);
 
 		return;
-	} else {
-		spin_lock(&glob->lru_lock);
 	}
-queue:
-	driver = bdev->driver;
 	if (bo->sync_obj)
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
+	spin_unlock(&bdev->fence_lock);
+
+	if (!ret) {
+		atomic_set(&bo->reserved, 0);
+		wake_up_all(&bo->event_queue);
+	}
 
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 	spin_unlock(&glob->lru_lock);
-	spin_unlock(&bdev->fence_lock);
 
 	if (sync_obj) {
 		driver->sync_obj_flush(sync_obj);
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 1986d006c264..cd9e4523dc56 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -213,8 +213,8 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
 	driver = bdev->driver;
 	glob = bo->glob;
 
-	spin_lock(&bdev->fence_lock);
 	spin_lock(&glob->lru_lock);
+	spin_lock(&bdev->fence_lock);
 
 	list_for_each_entry(entry, list, head) {
 		bo = entry->bo;
@@ -223,8 +223,8 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
 		ttm_bo_unreserve_locked(bo);
 		entry->reserved = false;
 	}
-	spin_unlock(&glob->lru_lock);
 	spin_unlock(&bdev->fence_lock);
+	spin_unlock(&glob->lru_lock);
 
 	list_for_each_entry(entry, list, head) {
 		if (entry->old_sync_obj)
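
The ttm_execbuf_util.c hunks apply the same inversion fix to ttm_eu_fence_buffer_objects: lru_lock becomes the outer lock and fence_lock the inner one, with the unlocks mirrored in reverse order. A compilable toy version of just that nesting (pthread mutexes standing in for the spinlocks; the per-buffer loop body is elided):

#include <pthread.h>

static pthread_mutex_t lru_lock   = PTHREAD_MUTEX_INITIALIZER; /* glob->lru_lock */
static pthread_mutex_t fence_lock = PTHREAD_MUTEX_INITIALIZER; /* bdev->fence_lock */

static void fence_buffer_objects(void)
{
	pthread_mutex_lock(&lru_lock);     /* outer: was taken second before the patch */
	pthread_mutex_lock(&fence_lock);   /* inner: was taken first before the patch */
	/* ... publish the new sync_obj to each buffer and unreserve it ... */
	pthread_mutex_unlock(&fence_lock); /* inner released first */
	pthread_mutex_unlock(&lru_lock);   /* outer released last */
}

int main(void)
{
	fence_buffer_objects();
	return 0;
}

With both call sites agreeing on lru_lock -> fence_lock, fence_lock can safely be documented as an inner lock and the trylock-only inversion workaround is no longer needed.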