author    Dave Airlie <airlied@redhat.com>    2013-02-07 23:02:32 -0500
committer Dave Airlie <airlied@redhat.com>    2013-02-07 23:02:32 -0500
commit    b9e5071386007729110e86fd2c55c687085624e3 (patch)
tree      4720ecd2ee9be3f8c46e71d75119219db1692507
parent    85a7ce67f3ebfd5975ffd1febcabfe4999ca911d (diff)
parent    cc4c0c4de3c775be22072ec3251f2e581b63d9a0 (diff)
Merge branch 'for-airlied' of git://people.freedesktop.org/~mlankhorst/linux into drm-next
TTM reservations changes, preparing for new reservation mutex system.

* 'for-airlied' of git://people.freedesktop.org/~mlankhorst/linux:
  drm/ttm: unexport ttm_bo_wait_unreserved
  drm/nouveau: use ttm_bo_reserve_slowpath in validate_init, v2
  drm/ttm: use ttm_bo_reserve_slowpath_nolru in ttm_eu_reserve_buffers, v2
  drm/ttm: add ttm_bo_reserve_slowpath
  drm/ttm: cleanup ttm_eu_reserve_buffers handling
  drm/ttm: remove lru_lock around ttm_bo_reserve
  drm/nouveau: increase reservation sequence every retry
  drm/vmwgfx: always use ttm_bo_is_reserved
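A rough usage sketch of the new API (not part of this merge; the helper name below is hypothetical and the error handling is trimmed): a driver's multi-buffer validate loop reserves normally with a per-validate sequence number, and when ttm_bo_reserve() returns -EAGAIN it backs off every reservation it already holds, bumps the sequence, and takes the contended buffer through the slowpath reserve, which cannot deadlock because nothing else is held. This mirrors the nouveau validate_init() change further down.

	/* Hypothetical helper, modelled on the nouveau hunk below. */
	static int reserve_with_slowpath(struct nouveau_drm *drm,
					 struct nouveau_bo *nvbo,
					 uint32_t *sequence)
	{
		int ret;

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, *sequence);
		if (unlikely(ret == -EAGAIN)) {
			/*
			 * The caller must already have dropped all of its
			 * other reservations (validate_fini() in nouveau)
			 * before sleeping here, otherwise the deadlock
			 * remains.
			 */
			*sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
			ret = ttm_bo_reserve_slowpath(&nvbo->bo, true, *sequence);
			/*
			 * On success the buffer stays reserved; the caller
			 * restarts its loop and skips this buffer when it
			 * meets it again.
			 */
		}
		return ret;
	}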
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c      20
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c              103
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c     78
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c    4
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h            61
5 files changed, 178 insertions(+), 88 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 24e0aabda03c..d98bee012cab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -318,6 +318,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 	uint32_t sequence;
 	int trycnt = 0;
 	int ret, i;
+	struct nouveau_bo *res_bo = NULL;
 
 	sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
 retry:
@@ -338,6 +339,11 @@ retry:
 			return -ENOENT;
 		}
 		nvbo = gem->driver_private;
+		if (nvbo == res_bo) {
+			res_bo = NULL;
+			drm_gem_object_unreference_unlocked(gem);
+			continue;
+		}
 
 		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
 			NV_ERROR(drm, "multiple instances of buffer %d on "
@@ -350,15 +356,19 @@ retry:
 		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
 		if (ret) {
 			validate_fini(op, NULL);
-			if (unlikely(ret == -EAGAIN))
-				ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
-			drm_gem_object_unreference_unlocked(gem);
+			if (unlikely(ret == -EAGAIN)) {
+				sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
+				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
+							      sequence);
+				if (!ret)
+					res_bo = nvbo;
+			}
 			if (unlikely(ret)) {
+				drm_gem_object_unreference_unlocked(gem);
 				if (ret != -ERESTARTSYS)
 					NV_ERROR(drm, "fail reserve\n");
 				return ret;
 			}
-			goto retry;
 		}
 
 		b->user_priv = (uint64_t)(unsigned long)nvbo;
@@ -380,6 +390,8 @@ retry:
 			validate_fini(op, NULL);
 			return -EINVAL;
 		}
+		if (nvbo == res_bo)
+			goto retry;
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 52b20b12c83a..9b07b7d44a58 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -158,7 +158,8 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
 }
 
-int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
+static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
+				  bool interruptible)
 {
 	if (interruptible) {
 		return wait_event_interruptible(bo->event_queue,
@@ -168,7 +169,6 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
 		return 0;
 	}
 }
-EXPORT_SYMBOL(ttm_bo_wait_unreserved);
 
 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
@@ -213,14 +213,13 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 	return put_count;
 }
 
-int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 			  bool interruptible,
 			  bool no_wait, bool use_sequence, uint32_t sequence)
 {
-	struct ttm_bo_global *glob = bo->glob;
 	int ret;
 
-	while (unlikely(atomic_read(&bo->reserved) != 0)) {
+	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
 		/**
 		 * Deadlock avoidance for multi-bo reserving.
 		 */
@@ -241,26 +240,36 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
 		if (no_wait)
 			return -EBUSY;
 
-		spin_unlock(&glob->lru_lock);
 		ret = ttm_bo_wait_unreserved(bo, interruptible);
-		spin_lock(&glob->lru_lock);
 
 		if (unlikely(ret))
 			return ret;
 	}
 
-	atomic_set(&bo->reserved, 1);
 	if (use_sequence) {
+		bool wake_up = false;
 		/**
 		 * Wake up waiters that may need to recheck for deadlock,
 		 * if we decreased the sequence number.
 		 */
 		if (unlikely((bo->val_seq - sequence < (1 << 31))
 			     || !bo->seq_valid))
-			wake_up_all(&bo->event_queue);
+			wake_up = true;
 
+		/*
+		 * In the worst case with memory ordering these values can be
+		 * seen in the wrong order. However since we call wake_up_all
+		 * in that case, this will hopefully not pose a problem,
+		 * and the worst case would only cause someone to accidentally
+		 * hit -EAGAIN in ttm_bo_reserve when they see old value of
+		 * val_seq. However this would only happen if seq_valid was
+		 * written before val_seq was, and just means some slightly
+		 * increased cpu usage
+		 */
 		bo->val_seq = sequence;
 		bo->seq_valid = true;
+		if (wake_up)
+			wake_up_all(&bo->event_queue);
 	} else {
 		bo->seq_valid = false;
 	}
@@ -289,17 +298,64 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 	int put_count = 0;
 	int ret;
 
-	spin_lock(&glob->lru_lock);
-	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
-				    sequence);
-	if (likely(ret == 0))
-		put_count = ttm_bo_del_from_lru(bo);
-	spin_unlock(&glob->lru_lock);
+	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
+				   sequence);
+	if (likely(ret == 0)) {
+		spin_lock(&glob->lru_lock);
+		put_count = ttm_bo_del_from_lru(bo);
+		spin_unlock(&glob->lru_lock);
+		ttm_bo_list_ref_sub(bo, put_count, true);
+	}
 
-	ttm_bo_list_ref_sub(bo, put_count, true);
+	return ret;
+}
+
+int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
+				  bool interruptible, uint32_t sequence)
+{
+	bool wake_up = false;
+	int ret;
+
+	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
+		WARN_ON(bo->seq_valid && sequence == bo->val_seq);
+
+		ret = ttm_bo_wait_unreserved(bo, interruptible);
 
+		if (unlikely(ret))
+			return ret;
+	}
+
+	if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
+		wake_up = true;
+
+	/**
+	 * Wake up waiters that may need to recheck for deadlock,
+	 * if we decreased the sequence number.
+	 */
+	bo->val_seq = sequence;
+	bo->seq_valid = true;
+	if (wake_up)
+		wake_up_all(&bo->event_queue);
+
+	return 0;
+}
+
+int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+			    bool interruptible, uint32_t sequence)
+{
+	struct ttm_bo_global *glob = bo->glob;
+	int put_count, ret;
+
+	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
+	if (likely(!ret)) {
+		spin_lock(&glob->lru_lock);
+		put_count = ttm_bo_del_from_lru(bo);
+		spin_unlock(&glob->lru_lock);
+		ttm_bo_list_ref_sub(bo, put_count, true);
+	}
 	return ret;
 }
+EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
 
 void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
 {
@@ -511,7 +567,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	int ret;
 
 	spin_lock(&glob->lru_lock);
-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 
 	spin_lock(&bdev->fence_lock);
 	(void) ttm_bo_wait(bo, false, false, true);
@@ -604,7 +660,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 		return ret;
 
 	spin_lock(&glob->lru_lock);
-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 
 	/*
 	 * We raced, and lost, someone else holds the reservation now,
@@ -668,7 +724,14 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 			kref_get(&nentry->list_kref);
 		}
 
-		ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
+		ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
+		if (remove_all && ret) {
+			spin_unlock(&glob->lru_lock);
+			ret = ttm_bo_reserve_nolru(entry, false, false,
+						   false, 0);
+			spin_lock(&glob->lru_lock);
+		}
+
 		if (!ret)
 			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
 							     !remove_all);
@@ -816,7 +879,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 
 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(bo, &man->lru, lru) {
-		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 		if (!ret)
 			break;
 	}
@@ -1797,7 +1860,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 
 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(bo, &glob->swap_lru, swap) {
-		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 		if (!ret)
 			break;
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index cd9e4523dc56..7b90def15674 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -82,22 +82,6 @@ static void ttm_eu_list_ref_sub(struct list_head *list)
 	}
 }
 
-static int ttm_eu_wait_unreserved_locked(struct list_head *list,
-					 struct ttm_buffer_object *bo)
-{
-	struct ttm_bo_global *glob = bo->glob;
-	int ret;
-
-	ttm_eu_del_from_lru_locked(list);
-	spin_unlock(&glob->lru_lock);
-	ret = ttm_bo_wait_unreserved(bo, true);
-	spin_lock(&glob->lru_lock);
-	if (unlikely(ret != 0))
-		ttm_eu_backoff_reservation_locked(list);
-	return ret;
-}
-
-
 void ttm_eu_backoff_reservation(struct list_head *list)
 {
 	struct ttm_validate_buffer *entry;
@@ -145,47 +129,65 @@ int ttm_eu_reserve_buffers(struct list_head *list)
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 
-retry:
 	spin_lock(&glob->lru_lock);
 	val_seq = entry->bo->bdev->val_seq++;
 
+retry:
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-retry_this_bo:
-		ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+		/* already slowpath reserved? */
+		if (entry->reserved)
+			continue;
+
+		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
 		switch (ret) {
 		case 0:
 			break;
 		case -EBUSY:
-			ret = ttm_eu_wait_unreserved_locked(list, bo);
-			if (unlikely(ret != 0)) {
-				spin_unlock(&glob->lru_lock);
-				ttm_eu_list_ref_sub(list);
-				return ret;
-			}
-			goto retry_this_bo;
+			ttm_eu_del_from_lru_locked(list);
+			spin_unlock(&glob->lru_lock);
+			ret = ttm_bo_reserve_nolru(bo, true, false,
+						   true, val_seq);
+			spin_lock(&glob->lru_lock);
+			if (!ret)
+				break;
+
+			if (unlikely(ret != -EAGAIN))
+				goto err;
+
+			/* fallthrough */
 		case -EAGAIN:
 			ttm_eu_backoff_reservation_locked(list);
+
+			/*
+			 * temporarily increase sequence number every retry,
+			 * to prevent us from seeing our old reservation
+			 * sequence when someone else reserved the buffer,
+			 * but hasn't updated the seq_valid/seqno members yet.
+			 */
+			val_seq = entry->bo->bdev->val_seq++;
+
 			spin_unlock(&glob->lru_lock);
 			ttm_eu_list_ref_sub(list);
-			ret = ttm_bo_wait_unreserved(bo, true);
+			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
 			if (unlikely(ret != 0))
 				return ret;
+			spin_lock(&glob->lru_lock);
+			entry->reserved = true;
+			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+				ret = -EBUSY;
+				goto err;
+			}
 			goto retry;
 		default:
-			ttm_eu_backoff_reservation_locked(list);
-			spin_unlock(&glob->lru_lock);
-			ttm_eu_list_ref_sub(list);
-			return ret;
+			goto err;
 		}
 
 		entry->reserved = true;
 		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-			ttm_eu_backoff_reservation_locked(list);
-			spin_unlock(&glob->lru_lock);
-			ttm_eu_list_ref_sub(list);
-			return -EBUSY;
+			ret = -EBUSY;
+			goto err;
 		}
 	}
@@ -194,6 +196,12 @@ retry_this_bo:
 	ttm_eu_list_ref_sub(list);
 
 	return 0;
+
+err:
+	ttm_eu_backoff_reservation_locked(list);
+	spin_unlock(&glob->lru_lock);
+	ttm_eu_list_ref_sub(list);
+	return ret;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index e01a17b407b2..16556170fb32 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -959,13 +959,13 @@ void vmw_resource_unreserve(struct vmw_resource *res,
 	if (new_backup && new_backup != res->backup) {
 
 		if (res->backup) {
-			BUG_ON(atomic_read(&res->backup->base.reserved) == 0);
+			BUG_ON(!ttm_bo_is_reserved(&res->backup->base));
 			list_del_init(&res->mob_head);
 			vmw_dmabuf_unreference(&res->backup);
 		}
 
 		res->backup = vmw_dmabuf_reference(new_backup);
-		BUG_ON(atomic_read(&new_backup->base.reserved) == 0);
+		BUG_ON(!ttm_bo_is_reserved(&new_backup->base));
 		list_add_tail(&res->mob_head, &new_backup->res_list);
 	}
 	if (new_backup)
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e3a43a47d78c..0fbd046e7c93 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -790,16 +790,7 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
  * to make room for a buffer already reserved. (Buffers are reserved before
  * they are evicted). The following algorithm prevents such deadlocks from
  * occurring:
- * 1) Buffers are reserved with the lru spinlock held. Upon successful
- * reservation they are removed from the lru list. This stops a reserved buffer
- * from being evicted. However the lru spinlock is released between the time
- * a buffer is selected for eviction and the time it is reserved.
- * Therefore a check is made when a buffer is reserved for eviction, that it
- * is still the first buffer in the lru list, before it is removed from the
- * list. @check_lru == 1 forces this check. If it fails, the function returns
- * -EINVAL, and the caller should then choose a new buffer to evict and repeat
- * the procedure.
- * 2) Processes attempting to reserve multiple buffers other than for eviction,
+ * Processes attempting to reserve multiple buffers other than for eviction,
  * (typically execbuf), should first obtain a unique 32-bit
  * validation sequence number,
  * and call this function with @use_sequence == 1 and @sequence == the unique
@@ -830,9 +821,39 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
 			  bool interruptible,
 			  bool no_wait, bool use_sequence, uint32_t sequence);
 
+/**
+ * ttm_bo_reserve_slowpath_nolru:
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @sequence: Set (@bo)->sequence to this value after lock
+ *
+ * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
+ * from all our other reservations. Because there are no other reservations
+ * held by us, this function cannot deadlock any more.
+ *
+ * Will not remove reserved buffers from the lru lists.
+ * Otherwise identical to ttm_bo_reserve_slowpath.
+ */
+extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
+					 bool interruptible,
+					 uint32_t sequence);
+
 
 /**
- * ttm_bo_reserve_locked:
+ * ttm_bo_reserve_slowpath:
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @sequence: Set (@bo)->sequence to this value after lock
+ *
+ * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
+ * from all our other reservations. Because there are no other reservations
+ * held by us, this function cannot deadlock any more.
+ */
+extern int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+				   bool interruptible, uint32_t sequence);
+
+/**
+ * ttm_bo_reserve_nolru:
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @interruptible: Sleep interruptible if waiting.
@@ -840,9 +861,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
  * @use_sequence: If @bo is already reserved, Only sleep waiting for
  * it to become unreserved if @sequence < (@bo)->sequence.
  *
- * Must be called with struct ttm_bo_global::lru_lock held,
- * and will not remove reserved buffers from the lru lists.
- * The function may release the LRU spinlock if it needs to sleep.
+ * Will not remove reserved buffers from the lru lists.
  * Otherwise identical to ttm_bo_reserve.
  *
  * Returns:
@@ -855,7 +874,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
  * -EDEADLK: Bo already reserved using @sequence. This error code will only
  * be returned if @use_sequence is set to true.
  */
-extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 				 bool interruptible,
 				 bool no_wait, bool use_sequence,
 				 uint32_t sequence);
@@ -879,18 +898,6 @@ extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
  */
 extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
 
-/**
- * ttm_bo_wait_unreserved
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- *
- * Wait for a struct ttm_buffer_object to become unreserved.
- * This is typically used in the execbuf code to relax cpu-usage when
- * a potential deadlock condition backoff.
- */
-extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
-				  bool interruptible);
-
 /*
  * ttm_bo_util.c
  */
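
One detail worth noting about the reservation sequence handling above: the wake-up test "bo->val_seq - sequence < (1 << 31)" used by ttm_bo_reserve_nolru() and ttm_bo_reserve_slowpath_nolru() is a wrap-around-safe unsigned comparison. It is true when the newly written sequence is not newer than the previous val_seq, i.e. the number effectively decreased, so sleeping reservers must be woken to redo their -EAGAIN deadlock check. A minimal stand-alone sketch (the helper name is hypothetical, not part of the patch):

	#include <stdbool.h>
	#include <stdint.h>

	/* True when new_seq is older than or equal to old_val_seq, modulo 2^32. */
	static inline bool seq_not_newer(uint32_t old_val_seq, uint32_t new_seq)
	{
		return (old_val_seq - new_seq) < (1u << 31);
	}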