Diffstat (limited to 'fs/btrfs/scrub.c')
-rw-r--r--  fs/btrfs/scrub.c | 97
1 file changed, 62 insertions(+), 35 deletions(-)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index efba5d1282ee..93e6d7172844 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -315,6 +315,16 @@ static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
 	atomic_inc(&fs_info->scrubs_running);
 	atomic_inc(&fs_info->scrubs_paused);
 	mutex_unlock(&fs_info->scrub_lock);
+
+	/*
+	 * Checking the @scrubs_running == @scrubs_paused condition
+	 * inside wait_event() is not an atomic operation, which means
+	 * @scrubs_running/@scrubs_paused may change at any time.
+	 * Wake up @scrub_pause_wait as often as we can so that the
+	 * transaction commit is blocked for as little time as possible.
+	 */
+	wake_up(&fs_info->scrub_pause_wait);
+
 	atomic_inc(&sctx->workers_pending);
 }
 
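For context, the wake_up() added above pairs with a waiter of roughly this shape (a simplified sketch based on the scrubs_running/scrubs_paused counters used in this file, not a verbatim copy of the pause path):

/* Sketch: how a pause request (e.g. transaction commit) waits for scrub.
 * It blocks until every running scrub has also marked itself paused, so
 * waking scrub_pause_wait more often lets the waiter re-check sooner.
 */
static void scrub_pause_waiter_sketch(struct btrfs_fs_info *fs_info)
{
	wait_event(fs_info->scrub_pause_wait,
		   atomic_read(&fs_info->scrubs_running) ==
		   atomic_read(&fs_info->scrubs_paused));
}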
@@ -418,7 +428,8 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
 		sbio->index = i;
 		sbio->sctx = sctx;
 		sbio->page_count = 0;
-		sbio->work.func = scrub_bio_end_io_worker;
+		btrfs_init_work(&sbio->work, scrub_bio_end_io_worker,
+				NULL, NULL);
 
 		if (i != SCRUB_BIOS_PER_SCTX - 1)
 			sctx->bios[i]->next_free = i + 1;
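This is the conversion pattern repeated throughout the patch: instead of poking the callback into work.func, the work item is initialized through btrfs_init_work(). A minimal before/after sketch, assuming the four-argument form used by this series (main callback plus optional ordered-execution and ordered-free hooks, unused by scrub):

/* Old btrfs_workers style: assign the function pointer directly. */
sbio->work.func = scrub_bio_end_io_worker;

/* New btrfs_workqueue style: the two trailing NULLs are the ordered
 * callback and the ordered free hook, which scrub does not need.
 */
btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, NULL, NULL);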
@@ -987,9 +998,10 @@ nodatasum_case:
 		fixup_nodatasum->root = fs_info->extent_root;
 		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
 		scrub_pending_trans_workers_inc(sctx);
-		fixup_nodatasum->work.func = scrub_fixup_nodatasum;
-		btrfs_queue_worker(&fs_info->scrub_workers,
-				   &fixup_nodatasum->work);
+		btrfs_init_work(&fixup_nodatasum->work, scrub_fixup_nodatasum,
+				NULL, NULL);
+		btrfs_queue_work(fs_info->scrub_workers,
+				 &fixup_nodatasum->work);
 		goto out;
 	}
 
@@ -1603,8 +1615,8 @@ static void scrub_wr_bio_end_io(struct bio *bio, int err)
 	sbio->err = err;
 	sbio->bio = bio;
 
-	sbio->work.func = scrub_wr_bio_end_io_worker;
-	btrfs_queue_worker(&fs_info->scrub_wr_completion_workers, &sbio->work);
+	btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL);
+	btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
 }
 
 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
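On the execution side the queued callback receives only the struct btrfs_work pointer and recovers its enclosing object with container_of(); a sketch of the common shape (the real scrub_wr_bio_end_io_worker() does considerably more):

static void scrub_wr_bio_end_io_worker_sketch(struct btrfs_work *work)
{
	/* The work item is embedded in struct scrub_bio; walk back to it. */
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);

	/* ... process sbio->bio and sbio->err here ... */
}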
@@ -2072,7 +2084,7 @@ static void scrub_bio_end_io(struct bio *bio, int err)
 	sbio->err = err;
 	sbio->bio = bio;
 
-	btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
+	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
 }
 
 static void scrub_bio_end_io_worker(struct btrfs_work *work)
@@ -2686,10 +2698,23 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 
 		wait_event(sctx->list_wait,
 			   atomic_read(&sctx->bios_in_flight) == 0);
-		atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
+		atomic_inc(&fs_info->scrubs_paused);
+		wake_up(&fs_info->scrub_pause_wait);
+
+		/*
+		 * This must happen before we decrement @scrubs_paused:
+		 * make sure we do not block transaction commit while
+		 * waiting for the pending workers to finish.
+		 */
 		wait_event(sctx->list_wait,
 			   atomic_read(&sctx->workers_pending) == 0);
-		scrub_blocked_if_needed(fs_info);
+		atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
+
+		mutex_lock(&fs_info->scrub_lock);
+		__scrub_blocked_if_needed(fs_info);
+		atomic_dec(&fs_info->scrubs_paused);
+		mutex_unlock(&fs_info->scrub_lock);
+		wake_up(&fs_info->scrub_pause_wait);
 
 		btrfs_put_block_group(cache);
 		if (ret)
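The switch from scrub_blocked_if_needed() to __scrub_blocked_if_needed() matters because scrub_lock is now taken explicitly by the caller. A sketch of the assumed locking convention behind the double-underscore helper (not the verbatim upstream body):

/* Assumed shape: expects scrub_lock to be held and temporarily drops it
 * while a pause has been requested, re-checking after each wakeup.
 */
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}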
@@ -2757,33 +2782,35 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
 						int is_dev_replace)
 {
 	int ret = 0;
+	int flags = WQ_FREEZABLE | WQ_UNBOUND;
+	int max_active = fs_info->thread_pool_size;
 
 	if (fs_info->scrub_workers_refcnt == 0) {
 		if (is_dev_replace)
-			btrfs_init_workers(&fs_info->scrub_workers, "scrub", 1,
-					   &fs_info->generic_worker);
+			fs_info->scrub_workers =
+				btrfs_alloc_workqueue("btrfs-scrub", flags,
+						      1, 4);
 		else
-			btrfs_init_workers(&fs_info->scrub_workers, "scrub",
-					   fs_info->thread_pool_size,
-					   &fs_info->generic_worker);
-		fs_info->scrub_workers.idle_thresh = 4;
-		ret = btrfs_start_workers(&fs_info->scrub_workers);
-		if (ret)
+			fs_info->scrub_workers =
+				btrfs_alloc_workqueue("btrfs-scrub", flags,
+						      max_active, 4);
+		if (!fs_info->scrub_workers) {
+			ret = -ENOMEM;
 			goto out;
-		btrfs_init_workers(&fs_info->scrub_wr_completion_workers,
-				   "scrubwrc",
-				   fs_info->thread_pool_size,
-				   &fs_info->generic_worker);
-		fs_info->scrub_wr_completion_workers.idle_thresh = 2;
-		ret = btrfs_start_workers(
-				&fs_info->scrub_wr_completion_workers);
-		if (ret)
+		}
+		fs_info->scrub_wr_completion_workers =
+			btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
+					      max_active, 2);
+		if (!fs_info->scrub_wr_completion_workers) {
+			ret = -ENOMEM;
 			goto out;
-		btrfs_init_workers(&fs_info->scrub_nocow_workers, "scrubnc", 1,
-				   &fs_info->generic_worker);
-		ret = btrfs_start_workers(&fs_info->scrub_nocow_workers);
-		if (ret)
+		}
+		fs_info->scrub_nocow_workers =
+			btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
+		if (!fs_info->scrub_nocow_workers) {
+			ret = -ENOMEM;
 			goto out;
+		}
 	}
 	++fs_info->scrub_workers_refcnt;
 out:
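For reference, the allocation calls above take the work-queue name, the WQ_* flags, the concurrency limit, and a batching threshold that replaces the old idle_thresh tuning. The prototype below is an assumption based on this series' fs/btrfs/async-thread.h and is shown only to document the parameter meanings:

/* Assumed prototype from the btrfs_workqueue conversion series. */
struct btrfs_workqueue *btrfs_alloc_workqueue(char *name,
					      int flags,      /* e.g. WQ_FREEZABLE | WQ_UNBOUND */
					      int max_active, /* concurrency limit */
					      int thresh);    /* batching threshold (was idle_thresh) */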
@@ -2793,9 +2820,9 @@ out:
 static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
 {
 	if (--fs_info->scrub_workers_refcnt == 0) {
-		btrfs_stop_workers(&fs_info->scrub_workers);
-		btrfs_stop_workers(&fs_info->scrub_wr_completion_workers);
-		btrfs_stop_workers(&fs_info->scrub_nocow_workers);
+		btrfs_destroy_workqueue(fs_info->scrub_workers);
+		btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
+		btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
 	}
 	WARN_ON(fs_info->scrub_workers_refcnt < 0);
 }
@@ -3106,10 +3133,10 @@ static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 	nocow_ctx->len = len;
 	nocow_ctx->mirror_num = mirror_num;
 	nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
-	nocow_ctx->work.func = copy_nocow_pages_worker;
+	btrfs_init_work(&nocow_ctx->work, copy_nocow_pages_worker, NULL, NULL);
 	INIT_LIST_HEAD(&nocow_ctx->inodes);
-	btrfs_queue_worker(&fs_info->scrub_nocow_workers,
-			   &nocow_ctx->work);
+	btrfs_queue_work(fs_info->scrub_nocow_workers,
+			 &nocow_ctx->work);
 
 	return 0;
 }
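Taken together, every converted path follows the same lifecycle with the new API; a condensed sketch reusing the names from this file (allocation parameters as in scrub_workers_get() above):

/* 1. Allocate the queue once (scrub_workers_get). */
fs_info->scrub_workers = btrfs_alloc_workqueue("btrfs-scrub",
					       WQ_FREEZABLE | WQ_UNBOUND,
					       fs_info->thread_pool_size, 4);

/* 2. Initialize and queue a work item (bio end_io and nocow paths). */
btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, NULL, NULL);
btrfs_queue_work(fs_info->scrub_workers, &sbio->work);

/* 3. Tear the queue down when the last user drops it (scrub_workers_put). */
btrfs_destroy_workqueue(fs_info->scrub_workers);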
