author		Stefan Behrens <sbehrens@giantdisaster.de>	2012-11-02 11:44:58 -0400
committer	Josef Bacik <jbacik@fusionio.com>	2012-12-12 17:15:32 -0500
commit		b6bfebc13218f1fc1502041a810919d3a81b8b4e (patch)
tree		65b6daaa1395fbae0d2e7aee5cb94d54974af5c7 /fs/btrfs/scrub.c
parent		34f5c8e90b3f002672cd6b4e6e7c5b959fd981ae (diff)
Btrfs: cleanup scrub bio and worker wait code
Just move some code into functions to make everything more readable.

Signed-off-by: Stefan Behrens <sbehrens@giantdisaster.de>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
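The patch folds two open-coded counter patterns into named helpers: a count of bios in flight whose decrement must always wake sctx->list_wait, and a count of pending transaction workers that must additionally pose as paused scrubs. As a rough userspace illustration of the first pattern (hypothetical names, not from the kernel tree; a pthread condition variable stands in for the wait queue and wait_event()):

#include <pthread.h>

/* Loose analogue of the scrub_ctx counter this patch renames. */
struct demo_ctx {
	int bios_in_flight;		/* protected by lock */
	pthread_mutex_t lock;
	pthread_cond_t list_wait;	/* stands in for sctx->list_wait */
};

static struct demo_ctx demo = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.list_wait = PTHREAD_COND_INITIALIZER,
};

/* Like scrub_pending_bio_inc(): bump the counter before submitting. */
static void demo_pending_inc(struct demo_ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	ctx->bios_in_flight++;
	pthread_mutex_unlock(&ctx->lock);
}

/*
 * Like scrub_pending_bio_dec(): the decrement and the wakeup travel
 * together, so no completion path can forget the wake_up() half.
 */
static void demo_pending_dec(struct demo_ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	ctx->bios_in_flight--;
	pthread_cond_broadcast(&ctx->list_wait);
	pthread_mutex_unlock(&ctx->lock);
}

/* Analogue of wait_event(sctx->list_wait, bios_in_flight == 0). */
static void demo_wait_idle(struct demo_ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	while (ctx->bios_in_flight != 0)
		pthread_cond_wait(&ctx->list_wait, &ctx->lock);
	pthread_mutex_unlock(&ctx->lock);
}

Pairing the state change with its notification in one helper removes the bug class the old code invited: an atomic_dec() without the matching wake_up() leaves waiters hanging until the next event.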
Diffstat (limited to 'fs/btrfs/scrub.c')
-rw-r--r--	fs/btrfs/scrub.c	106
1 file changed, 71 insertions(+), 35 deletions(-)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index fcd5bccaa4ed..a67b1a17a009 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011 STRATO. All rights reserved.
+ * Copyright (C) 2011, 2012 STRATO. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
@@ -104,8 +104,8 @@ struct scrub_ctx {
 	struct btrfs_root	*dev_root;
 	int			first_free;
 	int			curr;
-	atomic_t		in_flight;
-	atomic_t		fixup_cnt;
+	atomic_t		bios_in_flight;
+	atomic_t		workers_pending;
 	spinlock_t		list_lock;
 	wait_queue_head_t	list_wait;
 	u16			csum_size;
@@ -146,6 +146,10 @@ struct scrub_warning {
 };
 
 
+static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
+static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
+static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
+static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
 static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
 				     struct btrfs_mapping_tree *map_tree,
@@ -184,6 +188,59 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work);
 static void scrub_block_complete(struct scrub_block *sblock);
 
 
+static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
+{
+	atomic_inc(&sctx->bios_in_flight);
+}
+
+static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
+{
+	atomic_dec(&sctx->bios_in_flight);
+	wake_up(&sctx->list_wait);
+}
+
+/*
+ * used for workers that require transaction commits (i.e., for the
+ * NOCOW case)
+ */
+static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
+{
+	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+
+	/*
+	 * increment scrubs_running to prevent cancel requests from
+	 * completing as long as a worker is running. we must also
+	 * increment scrubs_paused to prevent deadlocking on pause
+	 * requests used for transactions commits (as the worker uses a
+	 * transaction context). it is safe to regard the worker
+	 * as paused for all matters practical. effectively, we only
+	 * avoid cancellation requests from completing.
+	 */
+	mutex_lock(&fs_info->scrub_lock);
+	atomic_inc(&fs_info->scrubs_running);
+	atomic_inc(&fs_info->scrubs_paused);
+	mutex_unlock(&fs_info->scrub_lock);
+	atomic_inc(&sctx->workers_pending);
+}
+
+/* used for workers that require transaction commits */
+static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
+{
+	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+
+	/*
+	 * see scrub_pending_trans_workers_inc() why we're pretending
+	 * to be paused in the scrub counters
+	 */
+	mutex_lock(&fs_info->scrub_lock);
+	atomic_dec(&fs_info->scrubs_running);
+	atomic_dec(&fs_info->scrubs_paused);
+	mutex_unlock(&fs_info->scrub_lock);
+	atomic_dec(&sctx->workers_pending);
+	wake_up(&fs_info->scrub_pause_wait);
+	wake_up(&sctx->list_wait);
+}
+
 static void scrub_free_csums(struct scrub_ctx *sctx)
 {
 	while (!list_empty(&sctx->csum_list)) {
@@ -264,8 +321,8 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev)
 	sctx->nodesize = dev->dev_root->nodesize;
 	sctx->leafsize = dev->dev_root->leafsize;
 	sctx->sectorsize = dev->dev_root->sectorsize;
-	atomic_set(&sctx->in_flight, 0);
-	atomic_set(&sctx->fixup_cnt, 0);
+	atomic_set(&sctx->bios_in_flight, 0);
+	atomic_set(&sctx->workers_pending, 0);
 	atomic_set(&sctx->cancel_req, 0);
 	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
 	INIT_LIST_HEAD(&sctx->csum_list);
@@ -609,14 +666,7 @@ out:
 	btrfs_free_path(path);
 	kfree(fixup);
 
-	/* see caller why we're pretending to be paused in the scrub counters */
-	mutex_lock(&fs_info->scrub_lock);
-	atomic_dec(&fs_info->scrubs_running);
-	atomic_dec(&fs_info->scrubs_paused);
-	mutex_unlock(&fs_info->scrub_lock);
-	atomic_dec(&sctx->fixup_cnt);
-	wake_up(&fs_info->scrub_pause_wait);
-	wake_up(&sctx->list_wait);
+	scrub_pending_trans_workers_dec(sctx);
 }
 
 /*
@@ -789,20 +839,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 		fixup_nodatasum->logical = logical;
 		fixup_nodatasum->root = fs_info->extent_root;
 		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
-		/*
-		 * increment scrubs_running to prevent cancel requests from
-		 * completing as long as a fixup worker is running. we must also
-		 * increment scrubs_paused to prevent deadlocking on pause
-		 * requests used for transactions commits (as the worker uses a
-		 * transaction context). it is safe to regard the fixup worker
-		 * as paused for all matters practical. effectively, we only
-		 * avoid cancellation requests from completing.
-		 */
-		mutex_lock(&fs_info->scrub_lock);
-		atomic_inc(&fs_info->scrubs_running);
-		atomic_inc(&fs_info->scrubs_paused);
-		mutex_unlock(&fs_info->scrub_lock);
-		atomic_inc(&sctx->fixup_cnt);
+		scrub_pending_trans_workers_inc(sctx);
 		fixup_nodatasum->work.func = scrub_fixup_nodatasum;
 		btrfs_queue_worker(&fs_info->scrub_workers,
 				   &fixup_nodatasum->work);
@@ -1491,7 +1528,7 @@ static void scrub_submit(struct scrub_ctx *sctx)
 
 	sbio = sctx->bios[sctx->curr];
 	sctx->curr = -1;
-	atomic_inc(&sctx->in_flight);
+	scrub_pending_bio_inc(sctx);
 
 	btrfsic_submit_bio(READ, sbio->bio);
 }
@@ -1692,8 +1729,7 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work)
 	sbio->next_free = sctx->first_free;
 	sctx->first_free = sbio->index;
 	spin_unlock(&sctx->list_lock);
-	atomic_dec(&sctx->in_flight);
-	wake_up(&sctx->list_wait);
+	scrub_pending_bio_dec(sctx);
 }
 
 static void scrub_block_complete(struct scrub_block *sblock)
@@ -1863,7 +1899,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 	logical = base + offset;
 
 	wait_event(sctx->list_wait,
-		   atomic_read(&sctx->in_flight) == 0);
+		   atomic_read(&sctx->bios_in_flight) == 0);
 	atomic_inc(&fs_info->scrubs_paused);
 	wake_up(&fs_info->scrub_pause_wait);
 
@@ -1928,7 +1964,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 			/* push queued extents */
 			scrub_submit(sctx);
 			wait_event(sctx->list_wait,
-				   atomic_read(&sctx->in_flight) == 0);
+				   atomic_read(&sctx->bios_in_flight) == 0);
 			atomic_inc(&fs_info->scrubs_paused);
 			wake_up(&fs_info->scrub_pause_wait);
 			mutex_lock(&fs_info->scrub_lock);
@@ -2218,7 +2254,7 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
 		if (ret)
 			return ret;
 	}
-	wait_event(sctx->list_wait, atomic_read(&sctx->in_flight) == 0);
+	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
 
 	return 0;
 }
@@ -2363,11 +2399,11 @@ int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
 	if (!ret)
 		ret = scrub_enumerate_chunks(sctx, dev, start, end);
 
-	wait_event(sctx->list_wait, atomic_read(&sctx->in_flight) == 0);
+	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
 	atomic_dec(&fs_info->scrubs_running);
 	wake_up(&fs_info->scrub_pause_wait);
 
-	wait_event(sctx->list_wait, atomic_read(&sctx->fixup_cnt) == 0);
+	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
 
 	if (progress)
 		memcpy(progress, &sctx->stat, sizeof(*progress));
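A closing note on the "pretending to be paused" trick the new helpers centralize: in this era of scrub.c, the pause path waits until every running scrub reports itself paused (scrubs_paused == scrubs_running). A worker that commits a transaction can itself trigger such a pause request, so it must count as already paused, or the pause would wait on the worker while the worker waits on the pause. A hypothetical userspace sketch of the two sides (invented names; a condition variable in place of scrub_pause_wait):

#include <pthread.h>

/* Loose analogue of the fs_info-wide scrub counters. */
struct demo_fs_info {
	int scrubs_running;
	int scrubs_paused;
	pthread_mutex_t scrub_lock;
	pthread_cond_t scrub_pause_wait;
};

/*
 * Like scrub_pending_trans_workers_inc(): the worker counts as running
 * (so cancel requests cannot complete under it) and as paused (so it
 * can never stall a pause request while it commits a transaction).
 */
static void demo_trans_worker_inc(struct demo_fs_info *fs)
{
	pthread_mutex_lock(&fs->scrub_lock);
	fs->scrubs_running++;
	fs->scrubs_paused++;
	pthread_mutex_unlock(&fs->scrub_lock);
}

/*
 * Hypothetical pause path: wait until every running scrub is paused.
 * Transaction workers satisfy the condition without stopping, which
 * is exactly the deadlock avoidance the patch's comment describes.
 */
static void demo_wait_all_paused(struct demo_fs_info *fs)
{
	pthread_mutex_lock(&fs->scrub_lock);
	while (fs->scrubs_paused != fs->scrubs_running)
		pthread_cond_wait(&fs->scrub_pause_wait, &fs->scrub_lock);
	pthread_mutex_unlock(&fs->scrub_lock);
}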