author     Filipe Manana <fdmanana@suse.com>    2018-12-07 08:23:32 -0500
committer  David Sterba <dsterba@suse.com>      2018-12-17 08:51:48 -0500
commit     7c3c7cb99c771c4ce6a01a95e969dbc576fd7533
tree       67c1a25a3b1357a9dadbbfc6fff627e84a80ac12 /fs/btrfs/scrub.c
parent     0e94c4f45d14cf89d1f40c91b0a8517e791672a7
Btrfs: scrub, move setup of nofs contexts higher in the stack
Since scrub workers only do memory allocation with GFP_KERNEL when they
need to perform repair, we can move the recent setup of the nofs context
up to scrub_handle_errored_block() instead of setting it up down the call
chain at insert_full_stripe_lock() and scrub_add_page_to_wr_bio(),
removing some duplicated code and comments. The only paths through which a
scrub worker can do memory allocations using GFP_KERNEL are the following
(a sketch of the resulting pattern follows the call chains):
scrub_bio_end_io_worker()
  scrub_block_complete()
    scrub_handle_errored_block()
      lock_full_stripe()
        insert_full_stripe_lock()
          -> kmalloc with GFP_KERNEL

scrub_bio_end_io_worker()
  scrub_block_complete()
    scrub_handle_errored_block()
      scrub_write_page_to_dev_replace()
        scrub_add_page_to_wr_bio()
          -> kzalloc with GFP_KERNEL
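
For illustration only, the sketch below shows the pattern this change applies,
assuming a simplified entry point: memalloc_nofs_save() is called once at the
top of the repair path, so every GFP_KERNEL allocation made further down the
call chain implicitly behaves as GFP_NOFS until memalloc_nofs_restore() runs.
The function name example_scrub_repair_entry() and the 64-byte allocation are
placeholders, not code from this patch.

#include <linux/sched/mm.h>	/* memalloc_nofs_save(), memalloc_nofs_restore() */
#include <linux/slab.h>		/* kmalloc(), kfree() */
#include <linux/errno.h>	/* -ENOMEM */

/* Hypothetical entry point standing in for scrub_handle_errored_block(). */
static int example_scrub_repair_entry(void)
{
	unsigned int nofs_flag;
	void *buf;
	int ret = 0;

	/*
	 * Set up the NOFS context once, at the top of the call chain.
	 * While it is active, the task flag strips __GFP_FS from any
	 * GFP_KERNEL allocation made by helpers called from here, so
	 * they cannot recurse into filesystem reclaim and deadlock
	 * against a transaction commit waiting for scrub to pause.
	 */
	nofs_flag = memalloc_nofs_save();

	/* Stand-in for the kmalloc()/kzalloc() calls in the real helpers. */
	buf = kmalloc(64, GFP_KERNEL);	/* behaves like GFP_NOFS here */
	if (!buf)
		ret = -ENOMEM;
	kfree(buf);			/* kfree(NULL) is a no-op */

	memalloc_nofs_restore(nofs_flag);
	return ret;
}

Centralizing the save/restore pair at the top level is what lets
insert_full_stripe_lock() and scrub_add_page_to_wr_bio() drop their own
copies of the same setup and justification comment.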
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Diffstat (limited to 'fs/btrfs/scrub.c')
 fs/btrfs/scrub.c | 34 ++++++++++++++--------------------
 1 file changed, 14 insertions(+), 20 deletions(-)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 367ab0911c01..099eb3c8f86b 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -322,7 +322,6 @@ static struct full_stripe_lock *insert_full_stripe_lock(
 	struct rb_node *parent = NULL;
 	struct full_stripe_lock *entry;
 	struct full_stripe_lock *ret;
-	unsigned int nofs_flag;
 
 	lockdep_assert_held(&locks_root->lock);
 
@@ -342,15 +341,8 @@ static struct full_stripe_lock *insert_full_stripe_lock(
 
 	/*
 	 * Insert new lock.
-	 *
-	 * We must use GFP_NOFS because the scrub task might be waiting for a
-	 * worker task executing this function and in turn a transaction commit
-	 * might be waiting the scrub task to pause (which needs to wait for all
-	 * the worker tasks to complete before pausing).
 	 */
-	nofs_flag = memalloc_nofs_save();
 	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
-	memalloc_nofs_restore(nofs_flag);
 	if (!ret)
 		return ERR_PTR(-ENOMEM);
 	ret->logical = fstripe_logical;
@@ -841,6 +833,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 	int page_num;
 	int success;
 	bool full_stripe_locked;
+	unsigned int nofs_flag;
 	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
 				      DEFAULT_RATELIMIT_BURST);
 
@@ -866,6 +859,16 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 	dev = sblock_to_check->pagev[0]->dev;
 
 	/*
+	 * We must use GFP_NOFS because the scrub task might be waiting for a
+	 * worker task executing this function and in turn a transaction commit
+	 * might be waiting the scrub task to pause (which needs to wait for all
+	 * the worker tasks to complete before pausing).
+	 * We do allocations in the workers through insert_full_stripe_lock()
+	 * and scrub_add_page_to_wr_bio(), which happens down the call chain of
+	 * this function.
+	 */
+	nofs_flag = memalloc_nofs_save();
+	/*
 	 * For RAID5/6, race can happen for a different device scrub thread.
 	 * For data corruption, Parity and Data threads will both try
 	 * to recovery the data.
@@ -874,6 +877,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 	 */
 	ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
 	if (ret < 0) {
+		memalloc_nofs_restore(nofs_flag);
 		spin_lock(&sctx->stat_lock);
 		if (ret == -ENOMEM)
 			sctx->stat.malloc_errors++;
@@ -913,7 +917,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 	 */
 
 	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
-				      sizeof(*sblocks_for_recheck), GFP_NOFS);
+				      sizeof(*sblocks_for_recheck), GFP_KERNEL);
 	if (!sblocks_for_recheck) {
 		spin_lock(&sctx->stat_lock);
 		sctx->stat.malloc_errors++;
@@ -1211,6 +1215,7 @@ out:
 	}
 
 	ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
+	memalloc_nofs_restore(nofs_flag);
 	if (ret < 0)
 		return ret;
 	return 0;
@@ -1629,19 +1634,8 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 	mutex_lock(&sctx->wr_lock);
 again:
 	if (!sctx->wr_curr_bio) {
-		unsigned int nofs_flag;
-
-		/*
-		 * We must use GFP_NOFS because the scrub task might be waiting
-		 * for a worker task executing this function and in turn a
-		 * transaction commit might be waiting the scrub task to pause
-		 * (which needs to wait for all the worker tasks to complete
-		 * before pausing).
-		 */
-		nofs_flag = memalloc_nofs_save();
 		sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
 					    GFP_KERNEL);
-		memalloc_nofs_restore(nofs_flag);
 		if (!sctx->wr_curr_bio) {
 			mutex_unlock(&sctx->wr_lock);
 			return -ENOMEM;