author		Maarten Lankhorst <maarten.lankhorst@canonical.com>	2013-01-15 08:56:37 -0500
committer	Maarten Lankhorst <maarten.lankhorst@canonical.com>	2013-01-15 08:56:37 -0500
commit		63d0a4195560362e2e00a3ad38fc331d34e1da9b
tree		64df3550af24b6f583c17aac878a9f1fb6fa85eb
parent		979ee290ff0a543352243145dc3654af5a856ab8
drm/ttm: remove lru_lock around ttm_bo_reserve
There should no longer be assumptions that reserve will always succeed
with the lru lock held, so we can safely break the whole atomic
reserve/lru thing. As a bonus this fixes most lockdep annotations for
reservations.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c		|  54
-rw-r--r--	drivers/gpu/drm/ttm/ttm_execbuf_util.c	|   2
-rw-r--r--	include/drm/ttm/ttm_bo_driver.h		|  19
3 files changed, 40 insertions(+), 35 deletions(-)
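
At its core, the change is that reserving a buffer no longer happens under
the LRU spinlock: the reserve fast path becomes a single atomic
test-and-set, and the LRU delisting moves to after a successful reserve,
under its own lock. A minimal sketch of the resulting pattern, condensed
from the ttm_bo_reserve() hunk below (the wait/sequence logic is omitted;
reserve_then_delist is an illustrative name, not a TTM function):

/* Sketch only: condensed from ttm_bo_reserve() below, not verbatim. */
static int reserve_then_delist(struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count;

	/*
	 * atomic_xchg() tests and sets bo->reserved in a single step,
	 * so no external spinlock is needed to make the reserve atomic.
	 */
	if (atomic_xchg(&bo->reserved, 1) != 0)
		return -EBUSY;	/* already reserved by someone else */

	/* Only after the reserve succeeded, take the LRU lock to delist. */
	spin_lock(&glob->lru_lock);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);
	ttm_bo_list_ref_sub(bo, put_count, true);

	return 0;
}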
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 33d20be87db5..e8e4814b1295 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -213,14 +213,13 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 	return put_count;
 }
 
-int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 			  bool interruptible,
 			  bool no_wait, bool use_sequence, uint32_t sequence)
 {
-	struct ttm_bo_global *glob = bo->glob;
 	int ret;
 
-	while (unlikely(atomic_read(&bo->reserved) != 0)) {
+	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
 		/**
 		 * Deadlock avoidance for multi-bo reserving.
 		 */
@@ -241,26 +240,36 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
 		if (no_wait)
 			return -EBUSY;
 
-		spin_unlock(&glob->lru_lock);
 		ret = ttm_bo_wait_unreserved(bo, interruptible);
-		spin_lock(&glob->lru_lock);
 
 		if (unlikely(ret))
 			return ret;
 	}
 
-	atomic_set(&bo->reserved, 1);
 	if (use_sequence) {
+		bool wake_up = false;
 		/**
 		 * Wake up waiters that may need to recheck for deadlock,
 		 * if we decreased the sequence number.
 		 */
 		if (unlikely((bo->val_seq - sequence < (1 << 31))
 			     || !bo->seq_valid))
-			wake_up_all(&bo->event_queue);
+			wake_up = true;
 
+		/*
+		 * In the worst case with memory ordering these values can be
+		 * seen in the wrong order. However since we call wake_up_all
+		 * in that case, this will hopefully not pose a problem,
+		 * and the worst case would only cause someone to accidentally
+		 * hit -EAGAIN in ttm_bo_reserve when they see old value of
+		 * val_seq. However this would only happen if seq_valid was
+		 * written before val_seq was, and just means some slightly
+		 * increased cpu usage
+		 */
 		bo->val_seq = sequence;
 		bo->seq_valid = true;
+		if (wake_up)
+			wake_up_all(&bo->event_queue);
 	} else {
 		bo->seq_valid = false;
 	}
@@ -289,14 +298,14 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 	int put_count = 0;
 	int ret;
 
-	spin_lock(&glob->lru_lock);
-	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
-				    sequence);
-	if (likely(ret == 0))
-		put_count = ttm_bo_del_from_lru(bo);
-	spin_unlock(&glob->lru_lock);
-
-	ttm_bo_list_ref_sub(bo, put_count, true);
+	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
+				   sequence);
+	if (likely(ret == 0)) {
+		spin_lock(&glob->lru_lock);
+		put_count = ttm_bo_del_from_lru(bo);
+		spin_unlock(&glob->lru_lock);
+		ttm_bo_list_ref_sub(bo, put_count, true);
+	}
 
 	return ret;
 }
@@ -510,7 +519,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	int ret;
 
 	spin_lock(&glob->lru_lock);
-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 
 	spin_lock(&bdev->fence_lock);
 	(void) ttm_bo_wait(bo, false, false, true);
@@ -603,7 +612,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 		return ret;
 
 	spin_lock(&glob->lru_lock);
-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 
 	/*
 	 * We raced, and lost, someone else holds the reservation now,
@@ -667,7 +676,14 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 			kref_get(&nentry->list_kref);
 		}
 
-		ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
+		ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
+		if (remove_all && ret) {
+			spin_unlock(&glob->lru_lock);
+			ret = ttm_bo_reserve_nolru(entry, false, false,
+						   false, 0);
+			spin_lock(&glob->lru_lock);
+		}
+
 		if (!ret)
 			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
 							     !remove_all);
@@ -815,7 +831,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 
 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(bo, &man->lru, lru) {
-		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 		if (!ret)
 			break;
 	}
@@ -1796,7 +1812,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 
 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(bo, &glob->swap_lru, swap) {
-		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 		if (!ret)
 			break;
 	}
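
The memory-ordering comment added to ttm_bo_reserve_nolru() above
tolerates a benign race: with no lru_lock serializing reservers, a
competing thread may observe seq_valid == true before the matching
val_seq store. A condensed sketch of that reader, the deadlock-avoidance
check in the reserve loop (a hedged paraphrase of the existing check,
not the verbatim code):

/*
 * Roughly what a competing reserver checks while bo is contended.
 * If it reads a stale val_seq it can only return a spurious -EAGAIN;
 * the caller backs off and retries, costing some CPU time but not
 * correctness -- exactly the worst case the comment above describes.
 */
if (use_sequence && bo->seq_valid) {
	if (sequence == bo->val_seq)
		return -EDEADLK;	/* we already reserved this bo */
	if (sequence - bo->val_seq < (1 << 31))
		return -EAGAIN;		/* holder will not back off for us */
}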
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index cd9e4523dc56..bd37b5cb8553 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -153,7 +153,7 @@ retry:
 		struct ttm_buffer_object *bo = entry->bo;
 
 retry_this_bo:
-		ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
 		switch (ret) {
 		case 0:
 			break;
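
For context, this call site sits in a retry loop over every buffer of a
submission. A condensed sketch of how the return codes are dispatched
(labels and ordering are illustrative; the real function also manages
the LRU lock and removal lists):

/* Condensed sketch of the surrounding loop; labels are illustrative. */
ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
switch (ret) {
case 0:			/* reserved; move on to the next buffer */
	break;
case -EBUSY:		/* contended: wait for this bo, then retry it */
	/* wait for unreserve, goto retry_this_bo */
	break;
case -EAGAIN:		/* deadlock risk against an older sequence:
			 * drop every reservation held so far, wait for
			 * this bo, then restart the whole list */
	/* back off all, goto retry */
	break;
default:		/* e.g. -ERESTARTSYS: undo and bail out */
	break;
}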
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e3a43a47d78c..6fff43222e20 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -790,16 +790,7 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
  * to make room for a buffer already reserved. (Buffers are reserved before
  * they are evicted). The following algorithm prevents such deadlocks from
  * occurring:
- * 1) Buffers are reserved with the lru spinlock held. Upon successful
- * reservation they are removed from the lru list. This stops a reserved buffer
- * from being evicted. However the lru spinlock is released between the time
- * a buffer is selected for eviction and the time it is reserved.
- * Therefore a check is made when a buffer is reserved for eviction, that it
- * is still the first buffer in the lru list, before it is removed from the
- * list. @check_lru == 1 forces this check. If it fails, the function returns
- * -EINVAL, and the caller should then choose a new buffer to evict and repeat
- * the procedure.
- * 2) Processes attempting to reserve multiple buffers other than for eviction,
+ * Processes attempting to reserve multiple buffers other than for eviction,
  * (typically execbuf), should first obtain a unique 32-bit
  * validation sequence number,
  * and call this function with @use_sequence == 1 and @sequence == the unique
@@ -832,7 +823,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
 
 
 /**
- * ttm_bo_reserve_locked:
+ * ttm_bo_reserve_nolru:
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @interruptible: Sleep interruptible if waiting.
@@ -840,9 +831,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
  * @use_sequence: If @bo is already reserved, Only sleep waiting for
  * it to become unreserved if @sequence < (@bo)->sequence.
  *
- * Must be called with struct ttm_bo_global::lru_lock held,
- * and will not remove reserved buffers from the lru lists.
- * The function may release the LRU spinlock if it needs to sleep.
+ * Will not remove reserved buffers from the lru lists.
  * Otherwise identical to ttm_bo_reserve.
  *
  * Returns:
@@ -855,7 +844,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
  * -EDEADLK: Bo already reserved using @sequence. This error code will only
  * be returned if @use_sequence is set to true.
  */
-extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 				 bool interruptible,
 				 bool no_wait, bool use_sequence,
 				 uint32_t sequence);
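
Putting the documented return codes together, a caller reserving a whole
set of buffers with one validation sequence would look roughly like
this. An illustrative sketch only: reserve_all() and backoff_all() are
hypothetical names, not TTM API; backoff_all() stands for unreserving
buffers[0..i-1]:

/* Hypothetical caller; reserve_all()/backoff_all() are not TTM API. */
static int reserve_all(struct ttm_buffer_object **buffers, int n,
		       uint32_t sequence)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = ttm_bo_reserve_nolru(buffers[i], true, false,
					   true, sequence);
		if (ret == 0)
			continue;

		backoff_all(buffers, i);	/* drop what we hold */
		if (ret != -EAGAIN)
			return ret;		/* -ERESTARTSYS etc. */

		/* Deadlock risk: wait for the contended buffer to become
		 * unreserved, then restart the pass from the beginning. */
		ret = ttm_bo_wait_unreserved(buffers[i], true);
		if (ret)
			return ret;
		i = -1;		/* loop increment restarts at buffers[0] */
	}
	return 0;
}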