author	Maarten Lankhorst <maarten.lankhorst@canonical.com>	2013-01-15 08:57:05 -0500
committer	Maarten Lankhorst <maarten.lankhorst@canonical.com>	2013-01-15 08:57:05 -0500
commit	5e45d7dfd74100d622f9cdc70bfd1f9fae1671de (patch)
tree	b12de2542f55d332a73fcd7d863bd2e45fd7d4ef /drivers/gpu/drm/ttm
parent	7a1863084c9d90ce4b67d645bf9b0f1612e68f62 (diff)
drm/ttm: add ttm_bo_reserve_slowpath
Instead of dropping everything, waiting for the bo to be unreserved and trying again, a better strategy is to do a blocking wait. This maps much more naturally to a mutex_lock-like call.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
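The intended calling pattern is roughly the sketch below. This is illustrative only, not code from this patch: struct my_entry, my_reserve_list() and my_unreserve_all() are hypothetical stand-ins for a caller's own bookkeeping, and my_unreserve_all() is assumed to drop every reservation the caller currently holds, including one taken via the slowpath. ttm_eu_reserve_buffers() in ttm_execbuf_util.c later adopts this pattern in-tree.

	#include <linux/list.h>
	#include <drm/ttm/ttm_bo_api.h>
	#include <drm/ttm/ttm_bo_driver.h>

	struct my_entry {			/* hypothetical caller bookkeeping */
		struct list_head head;
		struct ttm_buffer_object *bo;
	};

	static void my_unreserve_all(struct list_head *list);	/* drops all held reservations */

	static int my_reserve_list(struct list_head *list, uint32_t val_seq)
	{
		struct ttm_buffer_object *slow_bo = NULL;
		struct my_entry *entry;
		int ret;

	retry:
		list_for_each_entry(entry, list, head) {
			struct ttm_buffer_object *bo = entry->bo;

			if (bo == slow_bo)	/* already held via the slowpath */
				continue;

			/* Fast path: fails with -EAGAIN when a deadlock
			 * against another reserver is detected. */
			ret = ttm_bo_reserve(bo, true, false, true, val_seq);
			if (ret == -EAGAIN) {
				/* Back off completely, then block until the
				 * contended bo is unreserved instead of
				 * retrying the whole list in a loop. */
				my_unreserve_all(list);
				ret = ttm_bo_reserve_slowpath(bo, true, val_seq);
				if (unlikely(ret))
					return ret;
				slow_bo = bo;
				goto retry;
			}
			if (unlikely(ret)) {
				my_unreserve_all(list);
				return ret;
			}
		}
		return 0;
	}

On success, ttm_bo_reserve_slowpath() takes the bo off the LRU lists just as the fast path does, so the caller's unreserve path does not need to distinguish the two.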
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c	47
1 file changed, 47 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index e8e4814b1295..4dd6f9e77a7d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -310,6 +310,53 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 	return ret;
 }
 
+int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
+				  bool interruptible, uint32_t sequence)
+{
+	bool wake_up = false;
+	int ret;
+
+	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
+		WARN_ON(bo->seq_valid && sequence == bo->val_seq);
+
+		ret = ttm_bo_wait_unreserved(bo, interruptible);
+
+		if (unlikely(ret))
+			return ret;
+	}
+
+	if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
+		wake_up = true;
+
+	/**
+	 * Wake up waiters that may need to recheck for deadlock,
+	 * if we decreased the sequence number.
+	 */
+	bo->val_seq = sequence;
+	bo->seq_valid = true;
+	if (wake_up)
+		wake_up_all(&bo->event_queue);
+
+	return 0;
+}
+
+int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+			    bool interruptible, uint32_t sequence)
+{
+	struct ttm_bo_global *glob = bo->glob;
+	int put_count, ret;
+
+	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
+	if (likely(!ret)) {
+		spin_lock(&glob->lru_lock);
+		put_count = ttm_bo_del_from_lru(bo);
+		spin_unlock(&glob->lru_lock);
+		ttm_bo_list_ref_sub(bo, put_count, true);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
+
 void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
 {
 	ttm_bo_add_to_lru(bo);