author     Maarten Lankhorst <m.b.lankhorst@gmail.com>  2013-06-27 07:48:17 -0400
committer  Dave Airlie <airlied@redhat.com>  2013-06-27 22:02:20 -0400
commit     ecff665f5e3f1c6909353e00b9420e45ae23d995
tree       8fed7d4570ec707427e954c6d2695d1549e08364 /drivers/gpu/drm/ttm/ttm_bo.c
parent     786d7257e537da0674c02e16e3b30a44665d1cee
drm/ttm: make ttm reservation calls behave like reservation calls
This commit converts the source of the val_seq counter to the ww_mutex API. The reservation objects are converted later, because there is still a lockdep splat in nouveau that has to be resolved first.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c  |  50
1 file changed, 32 insertions(+), 18 deletions(-)
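To make the new calling convention concrete, here is a rough, hypothetical caller-side sketch (not code from this patch; the buffer object and error handling are assumed, and the ww_acquire_ctx is initialized against the reservation_ww_class introduced by the parent commit):

#include <linux/ww_mutex.h>
#include <linux/reservation.h>		/* reservation_ww_class */
#include <drm/ttm/ttm_bo_driver.h>

/* Hypothetical helper, not part of this patch: reserve a single buffer
 * object under a ww_acquire_ctx ticket, falling back to the slowpath when
 * a thread holding an older stamp refuses to back off for us. */
static int example_reserve_single(struct ttm_buffer_object *bo,
				  struct ww_acquire_ctx *ticket)
{
	int ret;

	ww_acquire_init(ticket, &reservation_ww_class);

	ret = ttm_bo_reserve(bo, true, false, true, ticket);
	if (ret == -EAGAIN)
		/* Contended by an older ticket: back off and block until
		 * the buffer can be reserved with our ticket. */
		ret = ttm_bo_reserve_slowpath(bo, true, ticket);

	if (ret)
		ww_acquire_fini(ticket);
	else
		ww_acquire_done(ticket);
	return ret;
}

Once the submission is done, the reservation would be dropped with the new ttm_bo_unreserve_ticket(bo, ticket) added below, followed by ww_acquire_fini(ticket).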
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 9b07b7d44a58..b912375b9c18 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -215,7 +215,8 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 
 int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 			 bool interruptible,
-			 bool no_wait, bool use_sequence, uint32_t sequence)
+			 bool no_wait, bool use_ticket,
+			 struct ww_acquire_ctx *ticket)
 {
 	int ret;
 
@@ -223,17 +224,17 @@ int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 	/**
 	 * Deadlock avoidance for multi-bo reserving.
 	 */
-	if (use_sequence && bo->seq_valid) {
+	if (use_ticket && bo->seq_valid) {
 		/**
 		 * We've already reserved this one.
 		 */
-		if (unlikely(sequence == bo->val_seq))
+		if (unlikely(ticket->stamp == bo->val_seq))
 			return -EDEADLK;
 		/**
 		 * Already reserved by a thread that will not back
 		 * off for us. We need to back off.
 		 */
-		if (unlikely(sequence - bo->val_seq < (1 << 31)))
+		if (unlikely(ticket->stamp - bo->val_seq <= LONG_MAX))
 			return -EAGAIN;
 	}
 
@@ -246,13 +247,14 @@ int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 		return ret;
 	}
 
-	if (use_sequence) {
+	if (use_ticket) {
 		bool wake_up = false;
+
 		/**
 		 * Wake up waiters that may need to recheck for deadlock,
 		 * if we decreased the sequence number.
 		 */
-		if (unlikely((bo->val_seq - sequence < (1 << 31))
+		if (unlikely((bo->val_seq - ticket->stamp <= LONG_MAX)
 			     || !bo->seq_valid))
 			wake_up = true;
 
@@ -266,7 +268,7 @@ int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 		 * written before val_seq was, and just means some slightly
 		 * increased cpu usage
 		 */
-		bo->val_seq = sequence;
+		bo->val_seq = ticket->stamp;
 		bo->seq_valid = true;
 		if (wake_up)
 			wake_up_all(&bo->event_queue);
@@ -292,14 +294,15 @@ void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
 
 int ttm_bo_reserve(struct ttm_buffer_object *bo,
 		   bool interruptible,
-		   bool no_wait, bool use_sequence, uint32_t sequence)
+		   bool no_wait, bool use_ticket,
+		   struct ww_acquire_ctx *ticket)
 {
 	struct ttm_bo_global *glob = bo->glob;
 	int put_count = 0;
 	int ret;
 
-	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
-				   sequence);
+	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket,
+				   ticket);
 	if (likely(ret == 0)) {
 		spin_lock(&glob->lru_lock);
 		put_count = ttm_bo_del_from_lru(bo);
@@ -311,13 +314,14 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 }
 
 int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
-				  bool interruptible, uint32_t sequence)
+				  bool interruptible,
+				  struct ww_acquire_ctx *ticket)
 {
 	bool wake_up = false;
 	int ret;
 
 	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
-		WARN_ON(bo->seq_valid && sequence == bo->val_seq);
+		WARN_ON(bo->seq_valid && ticket->stamp == bo->val_seq);
 
 		ret = ttm_bo_wait_unreserved(bo, interruptible);
 
@@ -325,14 +329,14 @@ int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
 			return ret;
 	}
 
-	if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
+	if (bo->val_seq - ticket->stamp < LONG_MAX || !bo->seq_valid)
 		wake_up = true;
 
 	/**
 	 * Wake up waiters that may need to recheck for deadlock,
 	 * if we decreased the sequence number.
 	 */
-	bo->val_seq = sequence;
+	bo->val_seq = ticket->stamp;
 	bo->seq_valid = true;
 	if (wake_up)
 		wake_up_all(&bo->event_queue);
@@ -341,12 +345,12 @@ int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
 }
 
 int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
-			    bool interruptible, uint32_t sequence)
+			    bool interruptible, struct ww_acquire_ctx *ticket)
 {
 	struct ttm_bo_global *glob = bo->glob;
 	int put_count, ret;
 
-	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
+	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, ticket);
 	if (likely(!ret)) {
 		spin_lock(&glob->lru_lock);
 		put_count = ttm_bo_del_from_lru(bo);
@@ -357,7 +361,7 @@ int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
 
-void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
+void ttm_bo_unreserve_ticket_locked(struct ttm_buffer_object *bo, struct ww_acquire_ctx *ticket)
 {
 	ttm_bo_add_to_lru(bo);
 	atomic_set(&bo->reserved, 0);
@@ -369,11 +373,21 @@ void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 	struct ttm_bo_global *glob = bo->glob;
 
 	spin_lock(&glob->lru_lock);
-	ttm_bo_unreserve_locked(bo);
+	ttm_bo_unreserve_ticket_locked(bo, NULL);
 	spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_bo_unreserve);
 
+void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo, struct ww_acquire_ctx *ticket)
+{
+	struct ttm_bo_global *glob = bo->glob;
+
+	spin_lock(&glob->lru_lock);
+	ttm_bo_unreserve_ticket_locked(bo, ticket);
+	spin_unlock(&glob->lru_lock);
+}
+EXPORT_SYMBOL(ttm_bo_unreserve_ticket);
+
 /*
  * Call bo->mutex locked.
  */
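One detail worth noting (an aside, not part of the patch text): val_seq now holds the ww_acquire_ctx stamp, which is an unsigned long, so the old 32-bit test "sequence - bo->val_seq < (1 << 31)" becomes "ticket->stamp - bo->val_seq <= LONG_MAX". Both express the same wraparound-safe ordering check, sketched here as a hypothetical helper:

#include <linux/kernel.h>	/* LONG_MAX */

/* Illustration only: true when the current holder's stamp was issued no
 * later than ours, even across counter wraparound, provided the two stamps
 * are less than LONG_MAX apart.  In that case the holder will not back off,
 * so the contending reserver returns -EAGAIN. */
static inline bool holder_has_priority(unsigned long holder_stamp,
				       unsigned long my_stamp)
{
	return my_stamp - holder_stamp <= LONG_MAX;
}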