diff options
Diffstat (limited to 'fs/btrfs/scrub.c')
-rw-r--r-- | fs/btrfs/scrub.c | 32 |
1 files changed, 17 insertions, 15 deletions
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 92bf5ee732fb..39dbdcbf4d13 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -461,7 +461,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace) | |||
461 | struct btrfs_fs_info *fs_info = dev->dev_root->fs_info; | 461 | struct btrfs_fs_info *fs_info = dev->dev_root->fs_info; |
462 | int ret; | 462 | int ret; |
463 | 463 | ||
464 | sctx = kzalloc(sizeof(*sctx), GFP_NOFS); | 464 | sctx = kzalloc(sizeof(*sctx), GFP_KERNEL); |
465 | if (!sctx) | 465 | if (!sctx) |
466 | goto nomem; | 466 | goto nomem; |
467 | atomic_set(&sctx->refs, 1); | 467 | atomic_set(&sctx->refs, 1); |
@@ -472,7 +472,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace) | |||
472 | for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) { | 472 | for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) { |
473 | struct scrub_bio *sbio; | 473 | struct scrub_bio *sbio; |
474 | 474 | ||
475 | sbio = kzalloc(sizeof(*sbio), GFP_NOFS); | 475 | sbio = kzalloc(sizeof(*sbio), GFP_KERNEL); |
476 | if (!sbio) | 476 | if (!sbio) |
477 | goto nomem; | 477 | goto nomem; |
478 | sctx->bios[i] = sbio; | 478 | sctx->bios[i] = sbio; |
@@ -611,7 +611,7 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock) | |||
611 | u64 flags = 0; | 611 | u64 flags = 0; |
612 | u64 ref_root; | 612 | u64 ref_root; |
613 | u32 item_size; | 613 | u32 item_size; |
614 | u8 ref_level; | 614 | u8 ref_level = 0; |
615 | int ret; | 615 | int ret; |
616 | 616 | ||
617 | WARN_ON(sblock->page_count < 1); | 617 | WARN_ON(sblock->page_count < 1); |
@@ -1654,7 +1654,7 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx, | |||
1654 | again: | 1654 | again: |
1655 | if (!wr_ctx->wr_curr_bio) { | 1655 | if (!wr_ctx->wr_curr_bio) { |
1656 | wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio), | 1656 | wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio), |
1657 | GFP_NOFS); | 1657 | GFP_KERNEL); |
1658 | if (!wr_ctx->wr_curr_bio) { | 1658 | if (!wr_ctx->wr_curr_bio) { |
1659 | mutex_unlock(&wr_ctx->wr_lock); | 1659 | mutex_unlock(&wr_ctx->wr_lock); |
1660 | return -ENOMEM; | 1660 | return -ENOMEM; |
@@ -1671,7 +1671,8 @@ again: | |||
1671 | sbio->dev = wr_ctx->tgtdev; | 1671 | sbio->dev = wr_ctx->tgtdev; |
1672 | bio = sbio->bio; | 1672 | bio = sbio->bio; |
1673 | if (!bio) { | 1673 | if (!bio) { |
1674 | bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio); | 1674 | bio = btrfs_io_bio_alloc(GFP_KERNEL, |
1675 | wr_ctx->pages_per_wr_bio); | ||
1675 | if (!bio) { | 1676 | if (!bio) { |
1676 | mutex_unlock(&wr_ctx->wr_lock); | 1677 | mutex_unlock(&wr_ctx->wr_lock); |
1677 | return -ENOMEM; | 1678 | return -ENOMEM; |
@@ -2076,7 +2077,8 @@ again: | |||
2076 | sbio->dev = spage->dev; | 2077 | sbio->dev = spage->dev; |
2077 | bio = sbio->bio; | 2078 | bio = sbio->bio; |
2078 | if (!bio) { | 2079 | if (!bio) { |
2079 | bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio); | 2080 | bio = btrfs_io_bio_alloc(GFP_KERNEL, |
2081 | sctx->pages_per_rd_bio); | ||
2080 | if (!bio) | 2082 | if (!bio) |
2081 | return -ENOMEM; | 2083 | return -ENOMEM; |
2082 | sbio->bio = bio; | 2084 | sbio->bio = bio; |
@@ -2241,7 +2243,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, | |||
2241 | struct scrub_block *sblock; | 2243 | struct scrub_block *sblock; |
2242 | int index; | 2244 | int index; |
2243 | 2245 | ||
2244 | sblock = kzalloc(sizeof(*sblock), GFP_NOFS); | 2246 | sblock = kzalloc(sizeof(*sblock), GFP_KERNEL); |
2245 | if (!sblock) { | 2247 | if (!sblock) { |
2246 | spin_lock(&sctx->stat_lock); | 2248 | spin_lock(&sctx->stat_lock); |
2247 | sctx->stat.malloc_errors++; | 2249 | sctx->stat.malloc_errors++; |
@@ -2259,7 +2261,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, | |||
2259 | struct scrub_page *spage; | 2261 | struct scrub_page *spage; |
2260 | u64 l = min_t(u64, len, PAGE_SIZE); | 2262 | u64 l = min_t(u64, len, PAGE_SIZE); |
2261 | 2263 | ||
2262 | spage = kzalloc(sizeof(*spage), GFP_NOFS); | 2264 | spage = kzalloc(sizeof(*spage), GFP_KERNEL); |
2263 | if (!spage) { | 2265 | if (!spage) { |
2264 | leave_nomem: | 2266 | leave_nomem: |
2265 | spin_lock(&sctx->stat_lock); | 2267 | spin_lock(&sctx->stat_lock); |
@@ -2286,7 +2288,7 @@ leave_nomem: | |||
2286 | spage->have_csum = 0; | 2288 | spage->have_csum = 0; |
2287 | } | 2289 | } |
2288 | sblock->page_count++; | 2290 | sblock->page_count++; |
2289 | spage->page = alloc_page(GFP_NOFS); | 2291 | spage->page = alloc_page(GFP_KERNEL); |
2290 | if (!spage->page) | 2292 | if (!spage->page) |
2291 | goto leave_nomem; | 2293 | goto leave_nomem; |
2292 | len -= l; | 2294 | len -= l; |
@@ -2541,7 +2543,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity, | |||
2541 | struct scrub_block *sblock; | 2543 | struct scrub_block *sblock; |
2542 | int index; | 2544 | int index; |
2543 | 2545 | ||
2544 | sblock = kzalloc(sizeof(*sblock), GFP_NOFS); | 2546 | sblock = kzalloc(sizeof(*sblock), GFP_KERNEL); |
2545 | if (!sblock) { | 2547 | if (!sblock) { |
2546 | spin_lock(&sctx->stat_lock); | 2548 | spin_lock(&sctx->stat_lock); |
2547 | sctx->stat.malloc_errors++; | 2549 | sctx->stat.malloc_errors++; |
@@ -2561,7 +2563,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity, | |||
2561 | struct scrub_page *spage; | 2563 | struct scrub_page *spage; |
2562 | u64 l = min_t(u64, len, PAGE_SIZE); | 2564 | u64 l = min_t(u64, len, PAGE_SIZE); |
2563 | 2565 | ||
2564 | spage = kzalloc(sizeof(*spage), GFP_NOFS); | 2566 | spage = kzalloc(sizeof(*spage), GFP_KERNEL); |
2565 | if (!spage) { | 2567 | if (!spage) { |
2566 | leave_nomem: | 2568 | leave_nomem: |
2567 | spin_lock(&sctx->stat_lock); | 2569 | spin_lock(&sctx->stat_lock); |
@@ -2591,7 +2593,7 @@ leave_nomem: | |||
2591 | spage->have_csum = 0; | 2593 | spage->have_csum = 0; |
2592 | } | 2594 | } |
2593 | sblock->page_count++; | 2595 | sblock->page_count++; |
2594 | spage->page = alloc_page(GFP_NOFS); | 2596 | spage->page = alloc_page(GFP_KERNEL); |
2595 | if (!spage->page) | 2597 | if (!spage->page) |
2596 | goto leave_nomem; | 2598 | goto leave_nomem; |
2597 | len -= l; | 2599 | len -= l; |
@@ -3857,16 +3859,16 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, | |||
3857 | return -EIO; | 3859 | return -EIO; |
3858 | } | 3860 | } |
3859 | 3861 | ||
3860 | btrfs_dev_replace_lock(&fs_info->dev_replace); | 3862 | btrfs_dev_replace_lock(&fs_info->dev_replace, 0); |
3861 | if (dev->scrub_device || | 3863 | if (dev->scrub_device || |
3862 | (!is_dev_replace && | 3864 | (!is_dev_replace && |
3863 | btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) { | 3865 | btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) { |
3864 | btrfs_dev_replace_unlock(&fs_info->dev_replace); | 3866 | btrfs_dev_replace_unlock(&fs_info->dev_replace, 0); |
3865 | mutex_unlock(&fs_info->scrub_lock); | 3867 | mutex_unlock(&fs_info->scrub_lock); |
3866 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); | 3868 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); |
3867 | return -EINPROGRESS; | 3869 | return -EINPROGRESS; |
3868 | } | 3870 | } |
3869 | btrfs_dev_replace_unlock(&fs_info->dev_replace); | 3871 | btrfs_dev_replace_unlock(&fs_info->dev_replace, 0); |
3870 | 3872 | ||
3871 | ret = scrub_workers_get(fs_info, is_dev_replace); | 3873 | ret = scrub_workers_get(fs_info, is_dev_replace); |
3872 | if (ret) { | 3874 | if (ret) { |