author     Dave Airlie <airlied@redhat.com>  2010-10-18 19:48:34 -0400
committer  Dave Airlie <airlied@redhat.com>  2010-10-18 19:48:34 -0400
commit     b7ae5056c94a8191c1fd0b5697707377516c0c5d (patch)
tree       394f68003ea0bc2a0c24e510a7c5b13f479ce743 /drivers/gpu/drm/ttm
parent     2126d0a4a205e2d6b763434f892524cd60f74228 (diff)
parent     6a2a11dbea5db417d200d38dda53c30a2e5603e0 (diff)
Merge branch 'drm-fixes' of /home/airlied/kernel/linux-2.6 into drm-core-next
Conflicts:
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/radeon/r600_blit_kms.c
drivers/gpu/drm/ttm/ttm_bo.c
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c  79
1 file changed, 70 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index af7b57a47fbc..1e9bb2156dcf 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -439,6 +439,42 @@ out_err:
 }
 
 /**
+ * Called with bo::reserved set and with the lru lock held.
+ * Will release GPU memory type usage on destruction.
+ * This is the place to put in driver-specific hooks.
+ * Will release the bo::reserved lock and the
+ * lru lock on exit.
+ */
+
+static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_global *glob = bo->glob;
+
+	if (bo->ttm) {
+
+		/**
+		 * Release the lru_lock, since we don't want to have
+		 * an atomic requirement on ttm_tt[unbind|destroy].
+		 */
+
+		spin_unlock(&glob->lru_lock);
+		ttm_tt_unbind(bo->ttm);
+		ttm_tt_destroy(bo->ttm);
+		bo->ttm = NULL;
+		spin_lock(&glob->lru_lock);
+	}
+
+	if (bo->mem.mm_node) {
+		ttm_bo_mem_put(bo, &bo->mem);
+	}
+
+	atomic_set(&bo->reserved, 0);
+	wake_up_all(&bo->event_queue);
+	spin_unlock(&glob->lru_lock);
+}
+
+
+/**
  * If bo idle, remove from delayed- and lru lists, and unref.
  * If not idle, and already on delayed list, do nothing.
  * If not idle, and not on delayed list, put on delayed list,
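
[Editor's note] The helper added in the hunk above follows a common kernel locking pattern: ttm_tt_unbind() and ttm_tt_destroy() may sleep, so the lru_lock spinlock is dropped around them and retaken afterwards, while the bo::reserved flag keeps other threads from racing with the teardown. A minimal userspace sketch of that unlock-call-relock shape (hypothetical names, a pthread mutex standing in for the spinlock; not the kernel code itself):

/* Sketch: drop a lock around a call that may sleep, then retake it.
 * Loose analogue of ttm_bo_cleanup_memtype_use(); lru_lock stands in
 * for glob->lru_lock, obj->backing for bo->ttm. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

struct object {
	void *backing;	/* stands in for bo->ttm */
};

/* Stands in for ttm_tt_unbind()/ttm_tt_destroy(): may block,
 * so it must never run with lru_lock held. */
static void slow_teardown(void *backing)
{
	free(backing);
}

/* Called with lru_lock held; returns with it held. In the kernel
 * code, bo::reserved keeps other threads away while the lock is
 * dropped; here the caller is assumed to be the only user. */
static void cleanup_backing(struct object *obj)
{
	if (obj->backing) {
		pthread_mutex_unlock(&lru_lock);	/* drop for the blocking call */
		slow_teardown(obj->backing);
		obj->backing = NULL;
		pthread_mutex_lock(&lru_lock);		/* retake before touching shared lists */
	}
}

int main(void)
{
	struct object obj = { .backing = malloc(16) };

	pthread_mutex_lock(&lru_lock);
	cleanup_backing(&obj);
	pthread_mutex_unlock(&lru_lock);
	printf("backing released: %s\n", obj.backing ? "no" : "yes");
	return 0;
}

Unlike the kernel helper, the sketch returns with the lock still held; it illustrates only the drop-around-sleep part, not the final unlock on exit.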
@@ -453,6 +489,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 	int ret;
 
 	spin_lock(&bo->lock);
+retry:
 	(void) ttm_bo_wait(bo, false, false, !remove_all);
 
 	if (!bo->sync_obj) {
@@ -461,28 +498,52 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 		spin_unlock(&bo->lock);
 
 		spin_lock(&glob->lru_lock);
-		put_count = ttm_bo_del_from_lru(bo);
+		ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);
+
+		/**
+		 * Someone else has the object reserved. Bail and retry.
+		 */
+
+		if (unlikely(ret == -EBUSY)) {
+			spin_unlock(&glob->lru_lock);
+			spin_lock(&bo->lock);
+			goto requeue;
+		}
+
+		/**
+		 * We can re-check for sync object without taking
+		 * the bo::lock since setting the sync object requires
+		 * also bo::reserved. A busy object at this point may
+		 * be caused by another thread starting an accelerated
+		 * eviction.
+		 */
+
+		if (unlikely(bo->sync_obj)) {
+			atomic_set(&bo->reserved, 0);
+			wake_up_all(&bo->event_queue);
+			spin_unlock(&glob->lru_lock);
+			spin_lock(&bo->lock);
+			if (remove_all)
+				goto retry;
+			else
+				goto requeue;
+		}
 
-		ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
-		BUG_ON(ret);
-		if (bo->ttm)
-			ttm_tt_unbind(bo->ttm);
+		put_count = ttm_bo_del_from_lru(bo);
 
 		if (!list_empty(&bo->ddestroy)) {
 			list_del_init(&bo->ddestroy);
 			++put_count;
 		}
-		spin_unlock(&glob->lru_lock);
-		ttm_bo_mem_put(bo, &bo->mem);
 
-		atomic_set(&bo->reserved, 0);
+		ttm_bo_cleanup_memtype_use(bo);
 
 		while (put_count--)
 			kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
 		return 0;
 	}
-
+requeue:
 	spin_lock(&glob->lru_lock);
 	if (list_empty(&bo->ddestroy)) {
 		void *sync_obj = bo->sync_obj;
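
[Editor's note] The rework of ttm_bo_cleanup_refs() above replaces the unconditional reserve plus BUG_ON() with a try-reserve: on -EBUSY the object is requeued for later, and an object that turns out to be busy under the reservation is released, then either retried (remove_all) or requeued. A loose userspace analogue of that trylock/requeue shape (hypothetical names; pthread_mutex_trylock standing in for ttm_bo_reserve_locked(); not the kernel code itself):

/* Sketch: try-reserve, bail with -EBUSY for requeueing, and re-check
 * busyness under the reservation. obj->busy stands in for
 * bo->sync_obj != NULL. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct object {
	pthread_mutex_t reserve;	/* stands in for bo::reserved */
	int busy;			/* stands in for an attached sync object */
};

/* Returns 0 once the object is torn down, -EBUSY when the caller
 * should requeue it on a delayed-destroy list and come back later. */
static int cleanup_refs(struct object *obj, int remove_all)
{
	for (;;) {
		/* Someone else has the object reserved: bail and requeue,
		 * mirroring the -EBUSY branch in the patch. */
		if (pthread_mutex_trylock(&obj->reserve) != 0)
			return -EBUSY;

		/* Re-check busyness now that we hold the reservation;
		 * another thread may have started an eviction meanwhile. */
		if (obj->busy) {
			pthread_mutex_unlock(&obj->reserve);
			if (!remove_all)
				return -EBUSY;	/* lazy pass: requeue */
			obj->busy = 0;		/* forced pass: wait it out */
			continue;		/* then retry, like "goto retry" */
		}

		/* Reserved and idle: safe to destroy. */
		pthread_mutex_unlock(&obj->reserve);
		return 0;
	}
}

int main(void)
{
	struct object obj = { PTHREAD_MUTEX_INITIALIZER, 1 };

	printf("lazy pass:   %d\n", cleanup_refs(&obj, 0));	/* -EBUSY */
	printf("forced pass: %d\n", cleanup_refs(&obj, 1));	/* 0 */
	return 0;
}

The point of the re-check, as the patch comment notes, is that attaching a sync object requires holding bo::reserved, so once the reservation is held a plain read of the field is safe without bo::lock.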