author    Chris Mason <chris.mason@oracle.com>  2009-09-15 20:02:33 -0400
committer Chris Mason <chris.mason@oracle.com>  2009-09-15 20:20:17 -0400
commit    6e74057c4686dc12ea767b4bdc50a63876056e1c (patch)
tree      e7c70b8e08ab9e5363be28bcbcc72348122ae6e4 /fs/btrfs
parent    627e421a3f35ad6b52dc58982fb6f8a97c30dcd7 (diff)
Btrfs: Fix async thread shutdown race
It was possible for an async worker thread to be selected to receive a new
work item, but exit before the work item was actually placed into that
thread's work list.

This commit fixes the race by incrementing the num_pending counter earlier,
and making sure to check the number of pending work items before a thread
exits.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
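The shape of the fix is easier to see outside the kernel. What follows is a
minimal userspace model of the pattern, assuming pthreads and C11 atomics;
struct worker, pick_worker() and may_shutdown() are illustrative stand-ins,
not btrfs code. The dispatcher reserves the worker by bumping num_pending
while it still holds the lock, and the shutdown path only agrees to exit
once both the queue and the counter are empty, covering the window between
"worker selected" and "work item actually queued".

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct worker {
	pthread_mutex_t lock;      /* stands in for workers->lock */
	atomic_int num_pending;    /* work promised or queued to this worker */
	bool has_queued_work;      /* stands in for the pending lists */
};

/* Dispatcher side: promise work to the worker *before* dropping the
 * lock, so the hand-off window below is already covered. */
static void pick_worker(struct worker *w)
{
	pthread_mutex_lock(&w->lock);
	atomic_fetch_add(&w->num_pending, 1);  /* the earlier increment */
	pthread_mutex_unlock(&w->lock);

	/* This is the window the race lived in: the worker has been
	 * selected but nothing is on its list yet. */

	pthread_mutex_lock(&w->lock);
	w->has_queued_work = true;             /* the item is queued now */
	pthread_mutex_unlock(&w->lock);
}

/* Shutdown side: refuse to exit while work is queued *or promised*. */
static bool may_shutdown(struct worker *w)
{
	bool ok;

	pthread_mutex_lock(&w->lock);
	ok = !w->has_queued_work &&
	     atomic_load(&w->num_pending) == 0;  /* the added check */
	pthread_mutex_unlock(&w->lock);
	return ok;
}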
Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/async-thread.c  16
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 80e33bc96c84..282ca085c2fb 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -191,7 +191,8 @@ static int try_worker_shutdown(struct btrfs_worker_thread *worker)
 	    !worker->working &&
 	    !list_empty(&worker->worker_list) &&
 	    list_empty(&worker->prio_pending) &&
-	    list_empty(&worker->pending)) {
+	    list_empty(&worker->pending) &&
+	    atomic_read(&worker->num_pending) == 0) {
 		freeit = 1;
 		list_del_init(&worker->worker_list);
 		worker->workers->num_workers--;
@@ -485,7 +486,6 @@ static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
 	 */
 	next = workers->worker_list.next;
 	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
-	atomic_inc(&worker->num_pending);
 	worker->sequence++;
 
 	if (worker->sequence % workers->idle_thresh == 0)
@@ -521,8 +521,7 @@ again:
 			goto again;
 		}
 	}
-	spin_unlock_irqrestore(&workers->lock, flags);
-	return worker;
+	goto found;
 
 fallback:
 	fallback = NULL;
@@ -537,6 +536,12 @@ fallback:
 	BUG_ON(!fallback);
 	worker = list_entry(fallback,
 		  struct btrfs_worker_thread, worker_list);
+found:
+	/*
+	 * this makes sure the worker doesn't exit before it is placed
+	 * onto a busy/idle list
+	 */
+	atomic_inc(&worker->num_pending);
 	spin_unlock_irqrestore(&workers->lock, flags);
 	return worker;
 }
@@ -569,7 +574,7 @@ int btrfs_requeue_work(struct btrfs_work *work)
 		spin_lock(&worker->workers->lock);
 		worker->idle = 0;
 		list_move_tail(&worker->worker_list,
-			      &worker->workers->worker_list);
+			       &worker->workers->worker_list);
 		spin_unlock(&worker->workers->lock);
 	}
 	if (!worker->working) {
@@ -627,7 +632,6 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 		list_add_tail(&work->list, &worker->prio_pending);
 	else
 		list_add_tail(&work->list, &worker->pending);
-	atomic_inc(&worker->num_pending);
 	check_busy_worker(worker);
 
 	/*
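As a usage sketch, a hypothetical driver appended to the model above (same
assumptions, same illustrative names) shows the new check refusing shutdown
once work has been promised. Note that the last hunk removes the
atomic_inc() from btrfs_queue_worker() because next_worker() now counts the
item; keeping both increments would count each item twice and the new
num_pending == 0 check could never be satisfied.

#include <stdio.h>

int main(void)
{
	struct worker w = {
		.lock = PTHREAD_MUTEX_INITIALIZER,  /* counter and flag start at zero */
	};

	/* No work promised yet: shutdown is allowed. */
	printf("idle: may_shutdown = %d\n", may_shutdown(&w));  /* prints 1 */

	/* Once pick_worker() has run, the counter pins the worker alive. */
	pick_worker(&w);
	printf("busy: may_shutdown = %d\n", may_shutdown(&w));  /* prints 0 */
	return 0;
}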