author    Maarten Lankhorst <m.b.lankhorst@gmail.com>  2013-06-27 07:48:19 -0400
committer Dave Airlie <airlied@redhat.com>             2013-06-27 22:04:01 -0400
commit    5e338405119a80aa59e811626739122d1c15045d (patch)
tree      3474b9ab52408c78480a92a9d0c33626c61d7473 /drivers/gpu/drm
parent    b580c9e2b7ba5030a795aa2fb73b796523d65a78 (diff)
drm/ttm: convert to the reservation api
Now that the code is compatible in semantics, flip the switch. Use ww_mutex instead of the homegrown implementation. ww_mutex uses -EDEADLK to signal that the caller has to back off, and -EALREADY to indicate this buffer is already held by the caller. ttm used -EAGAIN and -EDEADLK for those, respectively. So some changes were needed to handle this correctly.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
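For readers unfamiliar with the API being adopted here, multi-buffer reservation now boils down to the usual ww_mutex acquire/backoff dance. The sketch below is illustrative only and is not code from this patch: struct item, struct item_entry, lock_items() and demo_ww_class are made-up names, while ww_acquire_init(), ww_mutex_lock(), ww_mutex_lock_slow(), ww_mutex_unlock(), ww_acquire_done() and ww_acquire_fini() are the interfaces TTM is converted to use (TTM itself passes the shared reservation_ww_class, as the ttm_execbuf_util.c hunk shows).

/*
 * Hedged sketch, not code from this patch: the acquire/backoff pattern
 * that the TTM reservation paths now delegate to ww_mutex.
 */
#include <linux/list.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);	/* TTM uses the shared reservation_ww_class */

struct item {
	struct ww_mutex lock;		/* stands in for bo->resv->lock */
};

struct item_entry {
	struct item *obj;
	struct list_head head;
};

static int lock_items(struct list_head *list, struct ww_acquire_ctx *ctx)
{
	struct item *slow_locked = NULL;	/* already taken via the slowpath */
	struct item_entry *contended = NULL;
	struct item_entry *entry;
	int ret = 0;

	ww_acquire_init(ctx, &demo_ww_class);
retry:
	list_for_each_entry(entry, list, head) {
		if (entry->obj == slow_locked) {
			/* held from ww_mutex_lock_slow() below, skip it */
			slow_locked = NULL;
			continue;
		}
		ret = ww_mutex_lock(&entry->obj->lock, ctx);
		if (ret) {
			contended = entry;
			goto err;
		}
	}
	ww_acquire_done(ctx);		/* ticket complete, no more locks */
	return 0;

err:
	/* drop everything taken so far ... */
	list_for_each_entry_continue_reverse(entry, list, head)
		ww_mutex_unlock(&entry->obj->lock);
	if (slow_locked)
		ww_mutex_unlock(&slow_locked->lock);

	if (ret == -EDEADLK) {
		/* ... sleep on the lock we lost against, then start over */
		ww_mutex_lock_slow(&contended->obj->lock, ctx);
		slow_locked = contended->obj;
		goto retry;
	}
	ww_acquire_fini(ctx);
	return ret;			/* e.g. -EALREADY on a double reserve */
}

ttm_eu_reserve_buffers() in the ttm_execbuf_util.c hunk below is the real TTM counterpart of this loop, with the LRU bookkeeping added on top.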
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c   |   2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.h        |   5
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c            | 190
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c       |   6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c  |  43
5 files changed, 73 insertions, 173 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index e35d4688711c..2b2077d1e2f7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -368,7 +368,7 @@ retry:
 		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
 		if (ret) {
 			validate_fini_no_ticket(op, NULL);
-			if (unlikely(ret == -EAGAIN)) {
+			if (unlikely(ret == -EDEADLK)) {
 				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
 							      &op->ticket);
 				if (!ret)
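The driver-side consequence of the new error convention is visible in the hunk above: -EAGAIN no longer means "back off", -EDEADLK does. A hedged single-buffer sketch of that retry, not code from this patch (reserve_one() is a hypothetical helper; the ttm_bo_reserve() and ttm_bo_reserve_slowpath() calls and their argument order are taken from the hunks in this commit):

static int reserve_one(struct ttm_buffer_object *bo,
		       struct ww_acquire_ctx *ticket)
{
	int ret;

	/* interruptible wait, no trylock, with a ww_acquire ticket */
	ret = ttm_bo_reserve(bo, true, false, true, ticket);
	if (ret == -EDEADLK) {
		/* Lost against an older ticket.  The caller first drops all
		 * other reservations it holds (validate_fini_no_ticket() in
		 * nouveau), then sleeps on the contended buffer. */
		ret = ttm_bo_reserve_slowpath(bo, true, ticket);
	}
	return ret;	/* 0: reserved; the caller retries the rest of its list */
}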
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index b4fd89fbd8b7..ee7ad79ce781 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -57,11 +57,6 @@ static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
 	return bo->tbo.num_pages << PAGE_SHIFT;
 }
 
-static inline bool qxl_bo_is_reserved(struct qxl_bo *bo)
-{
-	return !!atomic_read(&bo->tbo.reserved);
-}
-
 static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
 {
 	return bo->tbo.addr_space_offset;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index b912375b9c18..5f9fe8044afc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -150,6 +150,9 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	if (bo->ttm)
 		ttm_tt_destroy(bo->ttm);
 	atomic_dec(&bo->glob->bo_count);
+	if (bo->resv == &bo->ttm_resv)
+		reservation_object_fini(&bo->ttm_resv);
+
 	if (bo->destroy)
 		bo->destroy(bo);
 	else {
@@ -158,18 +161,6 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
 }
 
-static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
-				  bool interruptible)
-{
-	if (interruptible) {
-		return wait_event_interruptible(bo->event_queue,
-						!ttm_bo_is_reserved(bo));
-	} else {
-		wait_event(bo->event_queue, !ttm_bo_is_reserved(bo));
-		return 0;
-	}
-}
-
 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -218,65 +209,27 @@ int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 			 bool no_wait, bool use_ticket,
 			 struct ww_acquire_ctx *ticket)
 {
-	int ret;
+	int ret = 0;
 
-	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
-		/**
-		 * Deadlock avoidance for multi-bo reserving.
-		 */
-		if (use_ticket && bo->seq_valid) {
-			/**
-			 * We've already reserved this one.
-			 */
-			if (unlikely(ticket->stamp == bo->val_seq))
-				return -EDEADLK;
-			/**
-			 * Already reserved by a thread that will not back
-			 * off for us. We need to back off.
-			 */
-			if (unlikely(ticket->stamp - bo->val_seq <= LONG_MAX))
-				return -EAGAIN;
-		}
+	if (no_wait) {
+		bool success;
 
-		if (no_wait)
+		/* not valid any more, fix your locking! */
+		if (WARN_ON(ticket))
 			return -EBUSY;
 
-		ret = ttm_bo_wait_unreserved(bo, interruptible);
-
-		if (unlikely(ret))
-			return ret;
-	}
-
-	if (use_ticket) {
-		bool wake_up = false;
-
-		/**
-		 * Wake up waiters that may need to recheck for deadlock,
-		 * if we decreased the sequence number.
-		 */
-		if (unlikely((bo->val_seq - ticket->stamp <= LONG_MAX)
-			     || !bo->seq_valid))
-			wake_up = true;
-
-		/*
-		 * In the worst case with memory ordering these values can be
-		 * seen in the wrong order. However since we call wake_up_all
-		 * in that case, this will hopefully not pose a problem,
-		 * and the worst case would only cause someone to accidentally
-		 * hit -EAGAIN in ttm_bo_reserve when they see old value of
-		 * val_seq. However this would only happen if seq_valid was
-		 * written before val_seq was, and just means some slightly
-		 * increased cpu usage
-		 */
-		bo->val_seq = ticket->stamp;
-		bo->seq_valid = true;
-		if (wake_up)
-			wake_up_all(&bo->event_queue);
-	} else {
-		bo->seq_valid = false;
+		success = ww_mutex_trylock(&bo->resv->lock);
+		return success ? 0 : -EBUSY;
 	}
 
-	return 0;
+	if (interruptible)
+		ret = ww_mutex_lock_interruptible(&bo->resv->lock,
+						  ticket);
+	else
+		ret = ww_mutex_lock(&bo->resv->lock, ticket);
+	if (ret == -EINTR)
+		return -ERESTARTSYS;
+	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_reserve);
 
@@ -313,50 +266,27 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 	return ret;
 }
 
-int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
-				  bool interruptible,
-				  struct ww_acquire_ctx *ticket)
-{
-	bool wake_up = false;
-	int ret;
-
-	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
-		WARN_ON(bo->seq_valid && ticket->stamp == bo->val_seq);
-
-		ret = ttm_bo_wait_unreserved(bo, interruptible);
-
-		if (unlikely(ret))
-			return ret;
-	}
-
-	if (bo->val_seq - ticket->stamp < LONG_MAX || !bo->seq_valid)
-		wake_up = true;
-
-	/**
-	 * Wake up waiters that may need to recheck for deadlock,
-	 * if we decreased the sequence number.
-	 */
-	bo->val_seq = ticket->stamp;
-	bo->seq_valid = true;
-	if (wake_up)
-		wake_up_all(&bo->event_queue);
-
-	return 0;
-}
-
 int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
 			    bool interruptible, struct ww_acquire_ctx *ticket)
 {
 	struct ttm_bo_global *glob = bo->glob;
-	int put_count, ret;
+	int put_count = 0;
+	int ret = 0;
+
+	if (interruptible)
+		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
+						       ticket);
+	else
+		ww_mutex_lock_slow(&bo->resv->lock, ticket);
 
-	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, ticket);
-	if (likely(!ret)) {
+	if (likely(ret == 0)) {
 		spin_lock(&glob->lru_lock);
 		put_count = ttm_bo_del_from_lru(bo);
 		spin_unlock(&glob->lru_lock);
 		ttm_bo_list_ref_sub(bo, put_count, true);
-	}
+	} else if (ret == -EINTR)
+		ret = -ERESTARTSYS;
+
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
@@ -364,8 +294,7 @@ EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
 void ttm_bo_unreserve_ticket_locked(struct ttm_buffer_object *bo, struct ww_acquire_ctx *ticket)
 {
 	ttm_bo_add_to_lru(bo);
-	atomic_set(&bo->reserved, 0);
-	wake_up_all(&bo->event_queue);
+	ww_mutex_unlock(&bo->resv->lock);
 }
 
 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
@@ -558,17 +487,7 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 	}
 	ttm_bo_mem_put(bo, &bo->mem);
 
-	atomic_set(&bo->reserved, 0);
-	wake_up_all(&bo->event_queue);
-
-	/*
-	 * Since the final reference to this bo may not be dropped by
-	 * the current task we have to put a memory barrier here to make
-	 * sure the changes done in this function are always visible.
-	 *
-	 * This function only needs protection against the final kref_put.
-	 */
-	smp_mb__before_atomic_dec();
+	ww_mutex_unlock (&bo->resv->lock);
 }
 
 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
@@ -600,10 +519,8 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 	spin_unlock(&bdev->fence_lock);
 
-	if (!ret) {
-		atomic_set(&bo->reserved, 0);
-		wake_up_all(&bo->event_queue);
-	}
+	if (!ret)
+		ww_mutex_unlock(&bo->resv->lock);
 
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
@@ -653,8 +570,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 		spin_unlock(&bdev->fence_lock);
 
-		atomic_set(&bo->reserved, 0);
-		wake_up_all(&bo->event_queue);
+		ww_mutex_unlock(&bo->resv->lock);
 		spin_unlock(&glob->lru_lock);
 
 		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
@@ -692,8 +608,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 	spin_unlock(&bdev->fence_lock);
 
 	if (ret || unlikely(list_empty(&bo->ddestroy))) {
-		atomic_set(&bo->reserved, 0);
-		wake_up_all(&bo->event_queue);
+		ww_mutex_unlock(&bo->resv->lock);
 		spin_unlock(&glob->lru_lock);
 		return ret;
 	}
@@ -1253,6 +1168,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	int ret = 0;
 	unsigned long num_pages;
 	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+	bool locked;
 
 	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
 	if (ret) {
@@ -1279,8 +1195,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	kref_init(&bo->kref);
 	kref_init(&bo->list_kref);
 	atomic_set(&bo->cpu_writers, 0);
-	atomic_set(&bo->reserved, 1);
-	init_waitqueue_head(&bo->event_queue);
 	INIT_LIST_HEAD(&bo->lru);
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
@@ -1298,37 +1212,34 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	bo->mem.bus.io_reserved_count = 0;
 	bo->priv_flags = 0;
 	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
-	bo->seq_valid = false;
 	bo->persistent_swap_storage = persistent_swap_storage;
 	bo->acc_size = acc_size;
 	bo->sg = sg;
+	bo->resv = &bo->ttm_resv;
+	reservation_object_init(bo->resv);
 	atomic_inc(&bo->glob->bo_count);
 
 	ret = ttm_bo_check_placement(bo, placement);
-	if (unlikely(ret != 0))
-		goto out_err;
 
 	/*
 	 * For ttm_bo_type_device buffers, allocate
 	 * address space from the device.
 	 */
-	if (bo->type == ttm_bo_type_device ||
-	    bo->type == ttm_bo_type_sg) {
+	if (likely(!ret) &&
+	    (bo->type == ttm_bo_type_device ||
+	     bo->type == ttm_bo_type_sg))
 		ret = ttm_bo_setup_vm(bo);
-		if (ret)
-			goto out_err;
-	}
 
-	ret = ttm_bo_validate(bo, placement, interruptible, false);
-	if (ret)
-		goto out_err;
+	locked = ww_mutex_trylock(&bo->resv->lock);
+	WARN_ON(!locked);
 
-	ttm_bo_unreserve(bo);
-	return 0;
+	if (likely(!ret))
+		ret = ttm_bo_validate(bo, placement, interruptible, false);
 
-out_err:
 	ttm_bo_unreserve(bo);
-	ttm_bo_unref(&bo);
+
+	if (unlikely(ret))
+		ttm_bo_unref(&bo);
 
 	return ret;
 }
@@ -1941,8 +1852,7 @@ out:
 	 * already swapped buffer.
 	 */
 
-	atomic_set(&bo->reserved, 0);
-	wake_up_all(&bo->event_queue);
+	ww_mutex_unlock(&bo->resv->lock);
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 	return ret;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index af894584dd90..319cf4127c5b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -433,6 +433,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	struct ttm_buffer_object *fbo;
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_driver *driver = bdev->driver;
+	int ret;
 
 	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
 	if (!fbo)
@@ -445,7 +446,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	 * TODO: Explicit member copy would probably be better here.
 	 */
 
-	init_waitqueue_head(&fbo->event_queue);
 	INIT_LIST_HEAD(&fbo->ddestroy);
 	INIT_LIST_HEAD(&fbo->lru);
 	INIT_LIST_HEAD(&fbo->swap);
@@ -463,6 +463,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	kref_init(&fbo->kref);
 	fbo->destroy = &ttm_transfered_destroy;
 	fbo->acc_size = 0;
+	fbo->resv = &fbo->ttm_resv;
+	reservation_object_init(fbo->resv);
+	ret = ww_mutex_trylock(&fbo->resv->lock);
+	WARN_ON(!ret);
 
 	*new_obj = fbo;
 	return 0;
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index efcb734e5543..7392da557be2 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -48,8 +48,7 @@ static void ttm_eu_backoff_reservation_locked(struct list_head *list,
 			entry->removed = false;
 
 		} else {
-			atomic_set(&bo->reserved, 0);
-			wake_up_all(&bo->event_queue);
+			ww_mutex_unlock(&bo->resv->lock);
 		}
 	}
 }
@@ -134,8 +133,6 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 	glob = entry->bo->glob;
 
 	ww_acquire_init(ticket, &reservation_ww_class);
-	spin_lock(&glob->lru_lock);
-
 retry:
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
@@ -144,42 +141,34 @@ retry:
 		if (entry->reserved)
 			continue;
 
-		ret = ttm_bo_reserve_nolru(bo, true, true, true, ticket);
-		switch (ret) {
-		case 0:
-			break;
-		case -EBUSY:
-			ttm_eu_del_from_lru_locked(list);
-			spin_unlock(&glob->lru_lock);
-			ret = ttm_bo_reserve_nolru(bo, true, false,
-						   true, ticket);
-			spin_lock(&glob->lru_lock);
-
-			if (!ret)
-				break;
 
-			if (unlikely(ret != -EAGAIN))
-				goto err;
+		ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket);
 
-			/* fallthrough */
-		case -EAGAIN:
+		if (ret == -EDEADLK) {
+			/* uh oh, we lost out, drop every reservation and try
+			 * to only reserve this buffer, then start over if
+			 * this succeeds.
+			 */
+			spin_lock(&glob->lru_lock);
 			ttm_eu_backoff_reservation_locked(list, ticket);
 			spin_unlock(&glob->lru_lock);
 			ttm_eu_list_ref_sub(list);
-			ret = ttm_bo_reserve_slowpath_nolru(bo, true, ticket);
-			if (unlikely(ret != 0))
+			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
+							       ticket);
+			if (unlikely(ret != 0)) {
+				if (ret == -EINTR)
+					ret = -ERESTARTSYS;
 				goto err_fini;
+			}
 
-			spin_lock(&glob->lru_lock);
 			entry->reserved = true;
 			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
 				ret = -EBUSY;
 				goto err;
 			}
 			goto retry;
-		default:
+		} else if (ret)
 			goto err;
-		}
 
 		entry->reserved = true;
 		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
@@ -189,12 +178,14 @@ retry:
 	}
 
 	ww_acquire_done(ticket);
+	spin_lock(&glob->lru_lock);
 	ttm_eu_del_from_lru_locked(list);
 	spin_unlock(&glob->lru_lock);
 	ttm_eu_list_ref_sub(list);
 	return 0;
 
 err:
+	spin_lock(&glob->lru_lock);
 	ttm_eu_backoff_reservation_locked(list, ticket);
 	spin_unlock(&glob->lru_lock);
 	ttm_eu_list_ref_sub(list);