 fs/btrfs/scrub.c | 39 +++++++++++++++++++++----------------
 1 file changed, 23 insertions(+), 16 deletions(-)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 6d6e155c8c8b..db21f17df996 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -193,6 +193,15 @@ struct scrub_ctx {
 	 */
 	struct btrfs_scrub_progress stat;
 	spinlock_t		stat_lock;
+
+	/*
+	 * Use a ref counter to avoid use-after-free issues. Scrub workers
+	 * decrement bios_in_flight and workers_pending and then do a wakeup
+	 * on the list_wait wait queue. We must ensure the main scrub task
+	 * doesn't free the scrub context before or while the workers are
+	 * doing the wakeup() call.
+	 */
+	atomic_t refs;
 };
 
 struct scrub_fixup_nodatasum {
@@ -297,26 +306,20 @@ static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 static void copy_nocow_pages_worker(struct btrfs_work *work);
 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
+static void scrub_put_ctx(struct scrub_ctx *sctx);
 
 
 static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
 {
+	atomic_inc(&sctx->refs);
 	atomic_inc(&sctx->bios_in_flight);
 }
 
 static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
 {
-	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
-
-	/*
-	 * Hold the scrub_lock while doing the wakeup to ensure the
-	 * sctx (and its wait queue list_wait) isn't destroyed/freed
-	 * during the wakeup.
-	 */
-	mutex_lock(&fs_info->scrub_lock);
 	atomic_dec(&sctx->bios_in_flight);
 	wake_up(&sctx->list_wait);
-	mutex_unlock(&fs_info->scrub_lock);
+	scrub_put_ctx(sctx);
 }
 
 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
@@ -350,6 +353,7 @@ static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
 {
 	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
 
+	atomic_inc(&sctx->refs);
 	/*
 	 * increment scrubs_running to prevent cancel requests from
 	 * completing as long as a worker is running. we must also
@@ -388,15 +392,11 @@ static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
 	mutex_lock(&fs_info->scrub_lock);
 	atomic_dec(&fs_info->scrubs_running);
 	atomic_dec(&fs_info->scrubs_paused);
+	mutex_unlock(&fs_info->scrub_lock);
 	atomic_dec(&sctx->workers_pending);
 	wake_up(&fs_info->scrub_pause_wait);
-	/*
-	 * Hold the scrub_lock while doing the wakeup to ensure the
-	 * sctx (and its wait queue list_wait) isn't destroyed/freed
-	 * during the wakeup.
-	 */
 	wake_up(&sctx->list_wait);
-	mutex_unlock(&fs_info->scrub_lock);
+	scrub_put_ctx(sctx);
 }
 
 static void scrub_free_csums(struct scrub_ctx *sctx)
@@ -442,6 +442,12 @@ static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
 	kfree(sctx);
 }
 
+static void scrub_put_ctx(struct scrub_ctx *sctx)
+{
+	if (atomic_dec_and_test(&sctx->refs))
+		scrub_free_ctx(sctx);
+}
+
 static noinline_for_stack
 struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
 {
@@ -466,6 +472,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
 	sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
 	if (!sctx)
 		goto nomem;
+	atomic_set(&sctx->refs, 1);
 	sctx->is_dev_replace = is_dev_replace;
 	sctx->pages_per_rd_bio = pages_per_rd_bio;
 	sctx->curr = -1;
@@ -3739,7 +3746,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 	scrub_workers_put(fs_info);
 	mutex_unlock(&fs_info->scrub_lock);
 
-	scrub_free_ctx(sctx);
+	scrub_put_ctx(sctx);
 
 	return ret;
 }
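
The patch above replaces the old "hold scrub_lock across the wakeup" scheme with plain reference counting: the scrub context starts with one reference owned by the main scrub task (scrub_setup_ctx), every in-flight bio or pending worker takes an extra reference before it is handed off (scrub_pending_bio_inc, scrub_pending_trans_workers_inc), and whoever drops the count to zero frees the context (scrub_put_ctx). This keeps the wait queue embedded in sctx alive while a worker runs wake_up(&sctx->list_wait), without taking a mutex on the completion path.

Below is a minimal userspace sketch of the same lifetime rule, using stdatomic.h and pthreads in place of the kernel's atomic_t, wait queues and work items. The ctx, ctx_get and ctx_put names are illustrative only and are not part of the btrfs code.

/* build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct ctx {
	atomic_int refs;	/* 1 for the owner + 1 per outstanding worker */
	int work_done;
};

static void ctx_get(struct ctx *c)
{
	atomic_fetch_add(&c->refs, 1);
}

/* Mirrors scrub_put_ctx(): whoever drops the last reference frees c. */
static void ctx_put(struct ctx *c)
{
	if (atomic_fetch_sub(&c->refs, 1) == 1)
		free(c);
}

static void *worker(void *arg)
{
	struct ctx *c = arg;

	c->work_done = 1;	/* stand-in for the real work + wake_up() */
	ctx_put(c);		/* safe: our reference kept c alive until here */
	return NULL;
}

int main(void)
{
	struct ctx *c = calloc(1, sizeof(*c));
	pthread_t t;

	atomic_init(&c->refs, 1);	/* owner's reference, as in scrub_setup_ctx() */
	ctx_get(c);			/* worker's reference, taken before the handoff */
	pthread_create(&t, NULL, worker, c);

	pthread_join(&t, NULL);
	ctx_put(c);			/* drop the owner's reference */
	return 0;
}

The key ordering, visible in scrub_pending_bio_inc() in the diff, is that the extra reference is taken before the work is submitted; taking it inside the worker instead would reintroduce the window in which the owner can drop the last reference and free the context first.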