about summary refs log tree commit diff stats
path: root/drivers/gpu
diff options
context:
space:
mode:
authorThomas Hellstrom <thellstrom@vmware.com>2012-11-06 06:31:51 -0500
committerDave Airlie <airlied@redhat.com>2012-11-20 01:15:06 -0500
commit6c1e963cc5771c93d4ed7aa8bdd4322a7c918e9b (patch)
tree435fec38d156d82a0f7497431712f39f59141919 /drivers/gpu
parentcdad05216c2b2edfe92a9f87d6ae51aab277f3b2 (diff)
drm/ttm: Optimize reservation slightly
Reservation locking currently always takes place under the LRU spinlock. Hence, strictly there is no need for an atomic_cmpxchg call; we can use atomic_read followed by atomic_write since nobody else will ever reserve without the lru spinlock held. At least on Intel this should remove a locked bus cycle on successful reserve. Note that thit commit may be obsoleted by the cross-device reservation work. Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com> Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index d1e5326d442c..5f61f133b419 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -220,7 +220,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
220 struct ttm_bo_global *glob = bo->glob; 220 struct ttm_bo_global *glob = bo->glob;
221 int ret; 221 int ret;
222 222
223 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) { 223 while (unlikely(atomic_read(&bo->reserved) != 0)) {
224 /** 224 /**
225 * Deadlock avoidance for multi-bo reserving. 225 * Deadlock avoidance for multi-bo reserving.
226 */ 226 */
@@ -249,6 +249,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
249 return ret; 249 return ret;
250 } 250 }
251 251
252 atomic_set(&bo->reserved, 1);
252 if (use_sequence) { 253 if (use_sequence) {
253 /** 254 /**
254 * Wake up waiters that may need to recheck for deadlock, 255 * Wake up waiters that may need to recheck for deadlock,