author		Linus Torvalds <torvalds@linux-foundation.org>	2010-10-07 16:47:20 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-10-07 16:47:20 -0400
commit		dda9cd9fb30dfe6f2a51a394c18aef7c401b497d (patch)
tree		d969967eae1d2d7c00c7f8b3de802eee9a5d19fa /drivers
parent		afe147466c8f22a69eb470ab504608a9f4ae3345 (diff)
parent		dab8dcfa3c8e3b021a138bee7c17791b4991ba55 (diff)
Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
drm: don't drop handle reference on unload
drm/ttm: Fix two race conditions + fix busy codepaths
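
The first fix ("drm: don't drop handle reference on unload") removes
drm_gem_object_handle_unreference() calls from teardown paths that never
created a GEM handle: the handle reference belongs to userspace handles, so
dropping it here could take the handle count to zero and tear the object
down while real handles were still outstanding. As a self-contained
illustration of that bug class (a userspace sketch; struct object,
object_get() and object_put() are made-up stand-ins, not the DRM API):

#include <assert.h>
#include <stdlib.h>

struct object {
	int refcount;
};

static struct object *object_get(struct object *obj)
{
	obj->refcount++;
	return obj;
}

static void object_put(struct object *obj)
{
	assert(obj->refcount > 0);	/* an underflow here is the bug */
	if (--obj->refcount == 0)
		free(obj);
}

int main(void)
{
	struct object *obj = calloc(1, sizeof(*obj));

	obj->refcount = 1;	/* creator's reference */
	object_get(obj);	/* reference owned by a userspace handle */

	/*
	 * Buggy unload path: drops the handle's reference even though this
	 * path never took it, then drops its own.  The count hits zero and
	 * the object is freed while the handle is still outstanding; the
	 * handle's eventual put would then underflow.
	 */
	object_put(obj);	/* mimics the removed handle_unreference */
	object_put(obj);	/* the unreference this path does own */

	return 0;
}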
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/i915/intel_fb.c			|  2 +-
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_fbcon.c		|  1 -
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_notifier.c	|  1 -
-rw-r--r--	drivers/gpu/drm/radeon/radeon_fb.c		|  1 -
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c			| 83 ++++++++++++++----
5 files changed, 72 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 56ad9df2ccb5..b61966c126d3 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -238,8 +238,8 @@ int intel_fbdev_destroy(struct drm_device *dev,
 
 	drm_framebuffer_cleanup(&ifb->base);
 	if (ifb->obj) {
-		drm_gem_object_handle_unreference(ifb->obj);
 		drm_gem_object_unreference(ifb->obj);
+		ifb->obj = NULL;
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index d2047713dc59..dbd30b2e43fd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -352,7 +352,6 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
 
 	if (nouveau_fb->nvbo) {
 		nouveau_bo_unmap(nouveau_fb->nvbo);
-		drm_gem_object_handle_unreference_unlocked(nouveau_fb->nvbo->gem);
 		drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
 		nouveau_fb->nvbo = NULL;
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 3c9964a8fbad..3ec181ff50ce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -79,7 +79,6 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
 	mutex_lock(&dev->struct_mutex);
 	nouveau_bo_unpin(chan->notifier_bo);
 	mutex_unlock(&dev->struct_mutex);
-	drm_gem_object_handle_unreference_unlocked(chan->notifier_bo->gem);
 	drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
 	drm_mm_takedown(&chan->notifier_heap);
 }
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 9cdf6a35bc2c..40b0c087b592 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -97,7 +97,6 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
 		radeon_bo_unpin(rbo);
 		radeon_bo_unreserve(rbo);
 	}
-	drm_gem_object_handle_unreference(gobj);
 	drm_gem_object_unreference_unlocked(gobj);
 }
 
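All four hunks above remove the same bogus handle unreference; the i915 hunk
additionally clears ifb->obj after the final unreference so that a repeated
destroy cannot drop the reference twice. Continuing the sketch above
(struct fbdev_state and fbdev_release_obj() are hypothetical names):

struct fbdev_state {
	struct object *obj;
};

static void fbdev_release_obj(struct fbdev_state *st)
{
	if (st->obj) {
		object_put(st->obj);	/* drop only the reference this path took */
		st->obj = NULL;		/* a second teardown pass is now a no-op */
	}
}
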
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cb4cf7ef4d1e..db809e034cc4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -442,6 +442,43 @@ out_err:
 }
 
 /**
+ * Call bo::reserved and with the lru lock held.
+ * Will release GPU memory type usage on destruction.
+ * This is the place to put in driver specific hooks.
+ * Will release the bo::reserved lock and the
+ * lru lock on exit.
+ */
+
+static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_global *glob = bo->glob;
+
+	if (bo->ttm) {
+
+		/**
+		 * Release the lru_lock, since we don't want to have
+		 * an atomic requirement on ttm_tt[unbind|destroy].
+		 */
+
+		spin_unlock(&glob->lru_lock);
+		ttm_tt_unbind(bo->ttm);
+		ttm_tt_destroy(bo->ttm);
+		bo->ttm = NULL;
+		spin_lock(&glob->lru_lock);
+	}
+
+	if (bo->mem.mm_node) {
+		drm_mm_put_block(bo->mem.mm_node);
+		bo->mem.mm_node = NULL;
+	}
+
+	atomic_set(&bo->reserved, 0);
+	wake_up_all(&bo->event_queue);
+	spin_unlock(&glob->lru_lock);
+}
+
+
+/**
  * If bo idle, remove from delayed- and lru lists, and unref.
  * If not idle, and already on delayed list, do nothing.
  * If not idle, and not on delayed list, put on delayed list,
@@ -456,6 +493,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 	int ret;
 
 	spin_lock(&bo->lock);
+retry:
 	(void) ttm_bo_wait(bo, false, false, !remove_all);
 
 	if (!bo->sync_obj) {
@@ -464,31 +502,52 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 		spin_unlock(&bo->lock);
 
 		spin_lock(&glob->lru_lock);
-		put_count = ttm_bo_del_from_lru(bo);
+		ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);
+
+		/**
+		 * Someone else has the object reserved. Bail and retry.
+		 */
 
-		ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
-		BUG_ON(ret);
-		if (bo->ttm)
-			ttm_tt_unbind(bo->ttm);
+		if (unlikely(ret == -EBUSY)) {
+			spin_unlock(&glob->lru_lock);
+			spin_lock(&bo->lock);
+			goto requeue;
+		}
+
+		/**
+		 * We can re-check for sync object without taking
+		 * the bo::lock since setting the sync object requires
+		 * also bo::reserved. A busy object at this point may
+		 * be caused by another thread starting an accelerated
+		 * eviction.
+		 */
+
+		if (unlikely(bo->sync_obj)) {
+			atomic_set(&bo->reserved, 0);
+			wake_up_all(&bo->event_queue);
+			spin_unlock(&glob->lru_lock);
+			spin_lock(&bo->lock);
+			if (remove_all)
+				goto retry;
+			else
+				goto requeue;
+		}
+
+		put_count = ttm_bo_del_from_lru(bo);
 
 		if (!list_empty(&bo->ddestroy)) {
 			list_del_init(&bo->ddestroy);
 			++put_count;
 		}
-		if (bo->mem.mm_node) {
-			drm_mm_put_block(bo->mem.mm_node);
-			bo->mem.mm_node = NULL;
-		}
-		spin_unlock(&glob->lru_lock);
 
-		atomic_set(&bo->reserved, 0);
+		ttm_bo_cleanup_memtype_use(bo);
 
 		while (put_count--)
 			kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
 		return 0;
 	}
-
+requeue:
 	spin_lock(&glob->lru_lock);
 	if (list_empty(&bo->ddestroy)) {
 		void *sync_obj = bo->sync_obj;
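
The ttm_bo.c changes replace the old assumption that a buffer on the
delayed-destroy list can always be reserved (the removed BUG_ON(ret)) with a
busy-tolerant path: on -EBUSY the function backs off and requeues the buffer,
and after a successful reservation it re-checks bo->sync_obj, since another
thread may have started an accelerated eviction in between. A userspace
sketch of that try-reserve / re-check / requeue shape (pthreads stand-ins,
not the TTM API):

#include <pthread.h>
#include <stdbool.h>

struct buffer {
	pthread_mutex_t reserve;	/* stands in for bo::reserved */
	bool busy;			/* stands in for bo->sync_obj */
};

/* Returns true if cleaned up, false if the buffer must be requeued. */
static bool cleanup_one(struct buffer *bo)
{
	if (pthread_mutex_trylock(&bo->reserve) != 0)
		return false;		/* reserved by someone else: requeue */

	/*
	 * Re-check busyness under the reservation, mirroring the re-check
	 * of bo->sync_obj: it can only be set while reserved, so no extra
	 * lock is needed for the test.
	 */
	if (bo->busy) {
		pthread_mutex_unlock(&bo->reserve);
		return false;
	}

	/* ...release backing storage here, cf. ttm_bo_cleanup_memtype_use()... */
	pthread_mutex_unlock(&bo->reserve);
	return true;
}

int main(void)
{
	static struct buffer bo = { .reserve = PTHREAD_MUTEX_INITIALIZER };

	while (!cleanup_one(&bo))
		;	/* TTM requeues on its delayed-destroy list instead of spinning */
	return 0;
}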