author		Josef Bacik <josef@redhat.com>	2011-11-18 14:37:27 -0500
committer	Josef Bacik <josef@redhat.com>	2011-12-15 11:04:21 -0500
commit		0dc3b84a73267f47a75468f924f5d58a840e3152 (patch)
tree		0c2f64c2884a0de54e3fe15971e960f71a722afb
parent		5dbc8fca8ef5d719014f22345d990e957dcfc692 (diff)
Btrfs: fix num_workers_starting bug and other bugs in async thread
Al pointed out some problems with the way we account for num_workers_starting
in the async thread code. First of all, we need to make sure to decrement
num_workers_starting if we fail to start the worker, so make
__btrfs_start_workers do this. Also fix __btrfs_start_workers so that it
doesn't call btrfs_stop_workers(); there is no point in stopping everybody if
we failed to create a single worker. Finally, check_pending_worker_creates
needs to call __btrfs_start_workers from its work function, since it has
already incremented num_workers_starting.
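
For reference, the accounting rule being restored can be sketched in plain
userspace C. Everything below (the struct layout, the helper names, pthreads
standing in for kthreads) is an illustrative stand-in, not the btrfs code:

#include <pthread.h>

struct workers {
	pthread_mutex_t lock;
	int num_workers;		/* threads fully started */
	int num_workers_starting;	/* spawns still in flight */
};

static void *worker_loop(void *arg)
{
	return arg;			/* a real loop would service a queue */
}

/* analogue of __btrfs_start_workers(): start exactly one worker */
static int start_one_worker(struct workers *w)
{
	pthread_t tid;
	int ret = pthread_create(&tid, NULL, worker_loop, w);

	pthread_mutex_lock(&w->lock);
	if (ret == 0) {
		pthread_detach(tid);
		w->num_workers++;	/* success: "starting" becomes "started" */
	}
	w->num_workers_starting--;	/* dropped on success AND failure */
	pthread_mutex_unlock(&w->lock);
	return ret;
}

/* analogue of btrfs_start_workers(): account first, then spawn */
static int start_workers(struct workers *w)
{
	pthread_mutex_lock(&w->lock);
	w->num_workers_starting++;
	pthread_mutex_unlock(&w->lock);
	return start_one_worker(w);
}

int main(void)
{
	struct workers w = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };
	return start_workers(&w) ? 1 : 0;
}

The point is simply that every path out of the start routine drops
num_workers_starting exactly once, which is what the old fail: path in
__btrfs_start_workers was missing.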
Callers only ever start one worker at a time, so drop the num_workers argument
everywhere, and make btrfs_queue_worker return void since it always succeeds.
Thanks,
Signed-off-by: Josef Bacik <josef@redhat.com>
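
To make the queueing-side rework concrete, here is a userspace sketch of the
new check_pending_worker_creates() flow: the work item is allocated up front,
the work function performs the actual start (the counter was bumped when the
work was queued), and queueing itself cannot fail, which is why
btrfs_queue_worker can become void. The helper bodies are trivial stand-ins,
not the btrfs implementations:

#include <stdlib.h>

struct workers { int num_workers_starting; };	/* illustrative only */

struct work {
	void (*func)(struct work *);
};

struct worker_start {
	struct work work;	/* first member, so the cast below is valid */
	struct workers *queue;
};

/* stand-in for __btrfs_start_workers(): drops the in-flight counter */
static int start_one_worker(struct workers *w)
{
	w->num_workers_starting--;
	return 0;
}

/* stand-in for btrfs_queue_worker(): hands off and cannot fail */
static void queue_work(struct work *work)
{
	work->func(work);	/* a real pool would defer this to a thread */
}

static void start_new_worker_func(struct work *work)
{
	struct worker_start *start = (struct worker_start *)work;

	/* the counter was already bumped when this work was queued */
	start_one_worker(start->queue);
	free(start);
}

static void check_pending_creates(struct workers *w)
{
	struct worker_start *start = calloc(1, sizeof(*start));

	if (!start)
		return;		/* nothing queued, nothing leaked */
	start->work.func = start_new_worker_func;
	start->queue = w;
	w->num_workers_starting++;	/* bump before queueing, as the patch does */
	queue_work(&start->work);
}

int main(void)
{
	struct workers w = { 0 };

	check_pending_creates(&w);
	return 0;
}

Note that in the patch the kzalloc happens before the spinlock is taken, so an
allocation failure can bail out before any state has changed.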
-rw-r--r--	fs/btrfs/async-thread.c	115
-rw-r--r--	fs/btrfs/async-thread.h	4
-rw-r--r--	fs/btrfs/disk-io.c	34
-rw-r--r--	fs/btrfs/scrub.c	8
4 files changed, 83 insertions(+), 78 deletions(-)
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 7ec14097fef1..af8e117c8978 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -64,6 +64,8 @@ struct btrfs_worker_thread {
 	int idle;
 };
 
+static int __btrfs_start_workers(struct btrfs_workers *workers);
+
 /*
  * btrfs_start_workers uses kthread_run, which can block waiting for memory
  * for a very long time. It will actually throttle on page writeback,
@@ -88,27 +90,10 @@ static void start_new_worker_func(struct btrfs_work *work)
 {
 	struct worker_start *start;
 	start = container_of(work, struct worker_start, work);
-	btrfs_start_workers(start->queue, 1);
+	__btrfs_start_workers(start->queue);
 	kfree(start);
 }
 
-static int start_new_worker(struct btrfs_workers *queue)
-{
-	struct worker_start *start;
-	int ret;
-
-	start = kzalloc(sizeof(*start), GFP_NOFS);
-	if (!start)
-		return -ENOMEM;
-
-	start->work.func = start_new_worker_func;
-	start->queue = queue;
-	ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work);
-	if (ret)
-		kfree(start);
-	return ret;
-}
-
 /*
  * helper function to move a thread onto the idle list after it
  * has finished some requests.
@@ -153,12 +138,20 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
 static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
 {
 	struct btrfs_workers *workers = worker->workers;
+	struct worker_start *start;
 	unsigned long flags;
 
 	rmb();
 	if (!workers->atomic_start_pending)
 		return;
 
+	start = kzalloc(sizeof(*start), GFP_NOFS);
+	if (!start)
+		return;
+
+	start->work.func = start_new_worker_func;
+	start->queue = workers;
+
 	spin_lock_irqsave(&workers->lock, flags);
 	if (!workers->atomic_start_pending)
 		goto out;
@@ -170,10 +163,11 @@ static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
 
 	workers->num_workers_starting += 1;
 	spin_unlock_irqrestore(&workers->lock, flags);
-	start_new_worker(workers);
+	btrfs_queue_worker(workers->atomic_worker_start, &start->work);
 	return;
 
 out:
+	kfree(start);
 	spin_unlock_irqrestore(&workers->lock, flags);
 }
 
@@ -462,56 +456,55 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
  * starts new worker threads. This does not enforce the max worker
  * count in case you need to temporarily go past it.
  */
-static int __btrfs_start_workers(struct btrfs_workers *workers,
-				 int num_workers)
+static int __btrfs_start_workers(struct btrfs_workers *workers)
 {
 	struct btrfs_worker_thread *worker;
 	int ret = 0;
-	int i;
 
-	for (i = 0; i < num_workers; i++) {
-		worker = kzalloc(sizeof(*worker), GFP_NOFS);
-		if (!worker) {
-			ret = -ENOMEM;
-			goto fail;
-		}
+	worker = kzalloc(sizeof(*worker), GFP_NOFS);
+	if (!worker) {
+		ret = -ENOMEM;
+		goto fail;
+	}
 
-		INIT_LIST_HEAD(&worker->pending);
-		INIT_LIST_HEAD(&worker->prio_pending);
-		INIT_LIST_HEAD(&worker->worker_list);
-		spin_lock_init(&worker->lock);
+	INIT_LIST_HEAD(&worker->pending);
+	INIT_LIST_HEAD(&worker->prio_pending);
+	INIT_LIST_HEAD(&worker->worker_list);
+	spin_lock_init(&worker->lock);
 
-		atomic_set(&worker->num_pending, 0);
-		atomic_set(&worker->refs, 1);
-		worker->workers = workers;
-		worker->task = kthread_run(worker_loop, worker,
-					   "btrfs-%s-%d", workers->name,
-					   workers->num_workers + i);
-		if (IS_ERR(worker->task)) {
-			ret = PTR_ERR(worker->task);
-			kfree(worker);
-			goto fail;
-		}
-		spin_lock_irq(&workers->lock);
-		list_add_tail(&worker->worker_list, &workers->idle_list);
-		worker->idle = 1;
-		workers->num_workers++;
-		workers->num_workers_starting--;
-		WARN_ON(workers->num_workers_starting < 0);
-		spin_unlock_irq(&workers->lock);
+	atomic_set(&worker->num_pending, 0);
+	atomic_set(&worker->refs, 1);
+	worker->workers = workers;
+	worker->task = kthread_run(worker_loop, worker,
+				   "btrfs-%s-%d", workers->name,
+				   workers->num_workers + 1);
+	if (IS_ERR(worker->task)) {
+		ret = PTR_ERR(worker->task);
+		kfree(worker);
+		goto fail;
 	}
+	spin_lock_irq(&workers->lock);
+	list_add_tail(&worker->worker_list, &workers->idle_list);
+	worker->idle = 1;
+	workers->num_workers++;
+	workers->num_workers_starting--;
+	WARN_ON(workers->num_workers_starting < 0);
+	spin_unlock_irq(&workers->lock);
+
 	return 0;
 fail:
-	btrfs_stop_workers(workers);
+	spin_lock_irq(&workers->lock);
+	workers->num_workers_starting--;
+	spin_unlock_irq(&workers->lock);
 	return ret;
 }
 
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
+int btrfs_start_workers(struct btrfs_workers *workers)
 {
 	spin_lock_irq(&workers->lock);
-	workers->num_workers_starting += num_workers;
+	workers->num_workers_starting++;
 	spin_unlock_irq(&workers->lock);
-	return __btrfs_start_workers(workers, num_workers);
+	return __btrfs_start_workers(workers);
 }
 
 /*
@@ -568,6 +561,7 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
 	struct btrfs_worker_thread *worker;
 	unsigned long flags;
 	struct list_head *fallback;
+	int ret;
 
 again:
 	spin_lock_irqsave(&workers->lock, flags);
@@ -584,7 +578,9 @@ again:
 			workers->num_workers_starting++;
 			spin_unlock_irqrestore(&workers->lock, flags);
 			/* we're below the limit, start another worker */
-			__btrfs_start_workers(workers, 1);
+			ret = __btrfs_start_workers(workers);
+			if (ret)
+				goto fallback;
 			goto again;
 		}
 	}
@@ -665,7 +661,7 @@ void btrfs_set_work_high_prio(struct btrfs_work *work)
 /*
  * places a struct btrfs_work into the pending queue of one of the kthreads
  */
-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 {
 	struct btrfs_worker_thread *worker;
 	unsigned long flags;
@@ -673,7 +669,7 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 
 	/* don't requeue something already on a list */
 	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
-		goto out;
+		return;
 
 	worker = find_worker(workers);
 	if (workers->ordered) {
@@ -712,7 +708,4 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 	if (wake)
 		wake_up_process(worker->task);
 	spin_unlock_irqrestore(&worker->lock, flags);
-
-out:
-	return 0;
 }
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index 5077746cf85e..f34cc31fa3c9 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -109,8 +109,8 @@ struct btrfs_workers {
 	char *name;
 };
 
-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers);
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
+int btrfs_start_workers(struct btrfs_workers *workers);
 int btrfs_stop_workers(struct btrfs_workers *workers);
 void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
 			struct btrfs_workers *async_starter);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 94abc25392f6..3f9d5551e582 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2194,19 +2194,27 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	fs_info->endio_meta_write_workers.idle_thresh = 2;
 	fs_info->readahead_workers.idle_thresh = 2;
 
-	btrfs_start_workers(&fs_info->workers, 1);
-	btrfs_start_workers(&fs_info->generic_worker, 1);
-	btrfs_start_workers(&fs_info->submit_workers, 1);
-	btrfs_start_workers(&fs_info->delalloc_workers, 1);
-	btrfs_start_workers(&fs_info->fixup_workers, 1);
-	btrfs_start_workers(&fs_info->endio_workers, 1);
-	btrfs_start_workers(&fs_info->endio_meta_workers, 1);
-	btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
-	btrfs_start_workers(&fs_info->endio_write_workers, 1);
-	btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
-	btrfs_start_workers(&fs_info->delayed_workers, 1);
-	btrfs_start_workers(&fs_info->caching_workers, 1);
-	btrfs_start_workers(&fs_info->readahead_workers, 1);
+	/*
+	 * btrfs_start_workers can really only fail because of ENOMEM so just
+	 * return -ENOMEM if any of these fail.
+	 */
+	ret = btrfs_start_workers(&fs_info->workers);
+	ret |= btrfs_start_workers(&fs_info->generic_worker);
+	ret |= btrfs_start_workers(&fs_info->submit_workers);
+	ret |= btrfs_start_workers(&fs_info->delalloc_workers);
+	ret |= btrfs_start_workers(&fs_info->fixup_workers);
+	ret |= btrfs_start_workers(&fs_info->endio_workers);
+	ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
+	ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
+	ret |= btrfs_start_workers(&fs_info->endio_write_workers);
+	ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
+	ret |= btrfs_start_workers(&fs_info->delayed_workers);
+	ret |= btrfs_start_workers(&fs_info->caching_workers);
+	ret |= btrfs_start_workers(&fs_info->readahead_workers);
+	if (ret) {
+		ret = -ENOMEM;
+		goto fail_sb_buffer;
+	}
 
 	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
 	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index c27bcb67f330..ddf2c90d3fc0 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1535,18 +1535,22 @@ static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
 static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
 {
 	struct btrfs_fs_info *fs_info = root->fs_info;
+	int ret = 0;
 
 	mutex_lock(&fs_info->scrub_lock);
 	if (fs_info->scrub_workers_refcnt == 0) {
 		btrfs_init_workers(&fs_info->scrub_workers, "scrub",
 			fs_info->thread_pool_size, &fs_info->generic_worker);
 		fs_info->scrub_workers.idle_thresh = 4;
-		btrfs_start_workers(&fs_info->scrub_workers, 1);
+		ret = btrfs_start_workers(&fs_info->scrub_workers);
+		if (ret)
+			goto out;
 	}
 	++fs_info->scrub_workers_refcnt;
+out:
 	mutex_unlock(&fs_info->scrub_lock);
 
-	return 0;
+	return ret;
 }
 
 static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)