author     Luca Barbieri <luca@luca-barbieri.com>    2010-01-20 14:01:30 -0500
committer  Dave Airlie <airlied@redhat.com>          2010-01-24 20:43:54 -0500
commit     1a961ce09fe39df9a1b796df98794fd32c76c413 (patch)
tree       06ab04dd5f4fb780f23a8468cf2b65d152c3db1f /drivers/gpu/drm/ttm
parent     8471a26b9c36c965d278020cc0699e2e95d120e5 (diff)
drm/ttm: Fix race condition in ttm_bo_delayed_delete (v3, final)
Resending this with Thomas Hellstrom's signoff for merging into 2.6.33
ttm_bo_delayed_delete has a race condition, because after we do:
kref_put(&nentry->list_kref, ttm_bo_release_list);
we are no longer holding the list lock or any reference to the list
objects, so every bo on the list can be removed and freed at this
point.
However, we then use the next pointer we stored, which is no longer
guaranteed to be valid.
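Condensed from the code being removed (see the diff below), the racy
tail of the loop body looks like this; the annotations are mine:

  spin_lock(&glob->lru_lock);
  list_for_each_safe(list, next, &bdev->ddestroy) {
          ...
          spin_lock(&glob->lru_lock);
          if (nentry) {
                  bool next_onlist = !list_empty(next);
                  spin_unlock(&glob->lru_lock);
                  kref_put(&nentry->list_kref, ttm_bo_release_list);
                  /*
                   * Neither the lock nor a reference is held here.
                   * Another thread may have removed nentry from the
                   * list after next_onlist was read, so this put can
                   * drop the last reference and free it.
                   */
                  spin_lock(&glob->lru_lock);
                  if (!next_onlist)
                          break;
          }
          ...
  }       /*
           * The list_for_each_safe() advance (list = next;
           * next = list->next) then dereferences the possibly-freed
           * 'next'.
           */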
This was apparently the cause of some Nouveau oopses I experienced.
This patch rewrites the function so that it keeps the reference to nentry
until nentry itself is released, by which point a reference to nentry->next
has already been taken.
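Condensed from the new code in the diff below, the loop now pins its
list position hand-over-hand, referencing the successor before the
current entry's reference is dropped:

  kref_get(&entry->list_kref);                  /* pin the first entry */
  for (;;) {
          struct ttm_buffer_object *nentry = NULL;

          if (entry->ddestroy.next != &bdev->ddestroy) {
                  nentry = list_first_entry(&entry->ddestroy,
                          struct ttm_buffer_object, ddestroy);
                  kref_get(&nentry->list_kref); /* pin the successor first */
          }

          spin_unlock(&glob->lru_lock);
          ret = ttm_bo_cleanup_refs(entry, remove_all);
          /* Safe: even if this put frees 'entry', 'nentry' stays pinned. */
          kref_put(&entry->list_kref, ttm_bo_release_list);
          entry = nentry;
          ...
  }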
v2 updated by me according to Thomas Hellstrom's feedback.
v3 proposed by Thomas Hellstrom. Commit comment updated by me.
Both updates fixed only minor efficiency/style issues; all three versions
should be correct.
Signed-off-by: Luca Barbieri <luca@luca-barbieri.com>
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--   drivers/gpu/drm/ttm/ttm_bo.c   54
1 files changed, 23 insertions, 31 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index da3702135ada..8036b6e189ee 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -524,52 +524,44 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 {
         struct ttm_bo_global *glob = bdev->glob;
-        struct ttm_buffer_object *entry, *nentry;
-        struct list_head *list, *next;
-        int ret;
+        struct ttm_buffer_object *entry = NULL;
+        int ret = 0;
 
         spin_lock(&glob->lru_lock);
-        list_for_each_safe(list, next, &bdev->ddestroy) {
-                entry = list_entry(list, struct ttm_buffer_object, ddestroy);
-                nentry = NULL;
+        if (list_empty(&bdev->ddestroy))
+                goto out_unlock;
 
-                /*
-                 * Protect the next list entry from destruction while we
-                 * unlock the lru_lock.
-                 */
+        entry = list_first_entry(&bdev->ddestroy,
+                struct ttm_buffer_object, ddestroy);
+        kref_get(&entry->list_kref);
+
+        for (;;) {
+                struct ttm_buffer_object *nentry = NULL;
 
-                if (next != &bdev->ddestroy) {
-                        nentry = list_entry(next, struct ttm_buffer_object,
-                                            ddestroy);
+                if (entry->ddestroy.next != &bdev->ddestroy) {
+                        nentry = list_first_entry(&entry->ddestroy,
+                                struct ttm_buffer_object, ddestroy);
                         kref_get(&nentry->list_kref);
                 }
-                kref_get(&entry->list_kref);
 
                 spin_unlock(&glob->lru_lock);
                 ret = ttm_bo_cleanup_refs(entry, remove_all);
                 kref_put(&entry->list_kref, ttm_bo_release_list);
+                entry = nentry;
+
+                if (ret || !entry)
+                        goto out;
 
                 spin_lock(&glob->lru_lock);
-                if (nentry) {
-                        bool next_onlist = !list_empty(next);
-                        spin_unlock(&glob->lru_lock);
-                        kref_put(&nentry->list_kref, ttm_bo_release_list);
-                        spin_lock(&glob->lru_lock);
-                        /*
-                         * Someone might have raced us and removed the
-                         * next entry from the list. We don't bother restarting
-                         * list traversal.
-                         */
-
-                        if (!next_onlist)
-                                break;
-                }
-                if (ret)
+                if (list_empty(&entry->ddestroy))
                         break;
         }
-        ret = !list_empty(&bdev->ddestroy);
-        spin_unlock(&glob->lru_lock);
 
+out_unlock:
+        spin_unlock(&glob->lru_lock);
+out:
+        if (entry)
+                kref_put(&entry->list_kref, ttm_bo_release_list);
         return ret;
 }
 