author	Wang Shilong <wangsl.fnst@cn.fujitsu.com>	2013-12-04 08:16:53 -0500
committer	Chris Mason <clm@fb.com>	2014-01-28 16:19:53 -0500
commit	cb7ab02156e4ba999df90e9fa8e96107683586fd (patch)
tree	3557996fdee5e02bfd5f921aa1522fa87f54687c /fs/btrfs/scrub.c
parent	3cb0929ad24c95c5fd8f08eb41a702a65954b4c6 (diff)
Btrfs: wrap repeated code into scrub_blocked_if_needed()
Just wrap the same code into one function, scrub_blocked_if_needed().

This makes one behavioral change: we now wait for @workers_pending to
reach 0 before we wake up the committing transaction
(atomic_inc(@scrub_paused)), so we must be careful not to deadlock here:

Thread 1				Thread 2
					|->btrfs_commit_transaction()
					    |->set trans type(COMMIT_DOING)
					    |->btrfs_scrub_paused()(blocked)
|->join_transaction(blocked)

Move btrfs_scrub_paused() before setting the trans type, which means we
can still join a transaction while the committing transaction is
blocked.

Signed-off-by: Wang Shilong <wangsl.fnst@cn.fujitsu.com>
Suggested-by: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Chris Mason <clm@fb.com>
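For reference, the pause handshake this patch consolidates into
scrub_blocked_if_needed() looks roughly like the sketch below. It is
assembled from the hunks that follow; the tail of the wait loop is not
visible in this diff's context lines and is reconstructed to match the
existing __scrub_blocked_if_needed() behavior, so treat it as
illustrative rather than verbatim:

	/*
	 * Announce that this scrub is paused and wake any committer
	 * blocked waiting for scrubs to pause.
	 */
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);

	/*
	 * Wait for the pause request to clear, dropping scrub_lock on
	 * each iteration so the committing transaction can make progress.
	 */
	mutex_lock(&fs_info->scrub_lock);
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	/* Tell any waiter that this scrub is running again. */
	wake_up(&fs_info->scrub_pause_wait);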
Diffstat (limited to 'fs/btrfs/scrub.c')
-rw-r--r--	fs/btrfs/scrub.c	43
1 file changed, 17 insertions(+), 26 deletions(-)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 6acb573e7d6b..adebe12e497e 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -256,6 +256,7 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 			    int mirror_num, u64 physical_for_dev_replace);
 static void copy_nocow_pages_worker(struct btrfs_work *work);
+static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
 
 
@@ -270,7 +271,7 @@ static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
 	wake_up(&sctx->list_wait);
 }
 
-static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
+static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
 {
 	while (atomic_read(&fs_info->scrub_pause_req)) {
 		mutex_unlock(&fs_info->scrub_lock);
@@ -280,6 +281,19 @@ static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
 	}
 }
 
+static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
+{
+	atomic_inc(&fs_info->scrubs_paused);
+	wake_up(&fs_info->scrub_pause_wait);
+
+	mutex_lock(&fs_info->scrub_lock);
+	__scrub_blocked_if_needed(fs_info);
+	atomic_dec(&fs_info->scrubs_paused);
+	mutex_unlock(&fs_info->scrub_lock);
+
+	wake_up(&fs_info->scrub_pause_wait);
+}
+
 /*
  * used for workers that require transaction commits (i.e., for the
  * NOCOW case)
@@ -2295,8 +2309,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 
 	wait_event(sctx->list_wait,
 		   atomic_read(&sctx->bios_in_flight) == 0);
-	atomic_inc(&fs_info->scrubs_paused);
-	wake_up(&fs_info->scrub_pause_wait);
+	scrub_blocked_if_needed(fs_info);
 
 	/* FIXME it might be better to start readahead at commit root */
 	key_start.objectid = logical;
@@ -2320,12 +2333,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 	if (!IS_ERR(reada2))
 		btrfs_reada_wait(reada2);
 
-	mutex_lock(&fs_info->scrub_lock);
-	scrub_blocked_if_needed(fs_info);
-	atomic_dec(&fs_info->scrubs_paused);
-	mutex_unlock(&fs_info->scrub_lock);
-
-	wake_up(&fs_info->scrub_pause_wait);
 
 	/*
 	 * collect all data csums for the stripe to avoid seeking during
@@ -2362,15 +2369,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 			wait_event(sctx->list_wait,
 				   atomic_read(&sctx->bios_in_flight) == 0);
 			atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
-			atomic_inc(&fs_info->scrubs_paused);
-			wake_up(&fs_info->scrub_pause_wait);
-
-			mutex_lock(&fs_info->scrub_lock);
 			scrub_blocked_if_needed(fs_info);
-			atomic_dec(&fs_info->scrubs_paused);
-			mutex_unlock(&fs_info->scrub_lock);
-
-			wake_up(&fs_info->scrub_pause_wait);
 		}
 
 		key.objectid = logical;
@@ -2685,17 +2684,9 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		wait_event(sctx->list_wait,
 			   atomic_read(&sctx->bios_in_flight) == 0);
 		atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
-		atomic_inc(&fs_info->scrubs_paused);
-		wake_up(&fs_info->scrub_pause_wait);
 		wait_event(sctx->list_wait,
 			   atomic_read(&sctx->workers_pending) == 0);
-
-		mutex_lock(&fs_info->scrub_lock);
 		scrub_blocked_if_needed(fs_info);
-		atomic_dec(&fs_info->scrubs_paused);
-		mutex_unlock(&fs_info->scrub_lock);
-
-		wake_up(&fs_info->scrub_pause_wait);
 
 		btrfs_put_block_group(cache);
 		if (ret)
@@ -2912,7 +2903,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 	 * checking @scrub_pause_req here, we can avoid
 	 * race between committing transaction and scrubbing.
 	 */
-	scrub_blocked_if_needed(fs_info);
+	__scrub_blocked_if_needed(fs_info);
 	atomic_inc(&fs_info->scrubs_running);
 	mutex_unlock(&fs_info->scrub_lock);
 