author    David Sterba <dsterba@suse.com>    2016-02-11 04:49:42 -0500
committer David Sterba <dsterba@suse.com>    2016-02-11 09:19:39 -0500
commit    58c4e173847af8e63537e11be6c1c3fd4b6153fe
tree      bec9b814ff4a4e6d7d3bba91bcde1e2795841867
parent    ed0244faf59e33ff915b83acd15c903b81fd357b
btrfs: scrub: use GFP_KERNEL on the submission path
Scrub is not on the critical writeback path, so we don't need to use GFP_NOFS for all allocations. The failures are handled and stats are passed back to userspace.

Let's use GFP_KERNEL on the paths where everything is ok, i.e. setting up the global structures and the IO submission paths.

Functions that do the repair and fixups still use GFP_NOFS, as we might want to skip any other filesystem activity if we encounter an error. This could turn out to be unnecessary, but requires more review compared to the easy cases in this patch.

Signed-off-by: David Sterba <dsterba@suse.com>
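For readers not steeped in the allocator flags: GFP_NOFS forbids memory reclaim from calling back into filesystem code (needed when the caller holds filesystem locks or sits on the writeback path), while GFP_KERNEL permits full direct reclaim and is the preferred default in plain process context. A minimal sketch of the split this patch makes, using hypothetical names (example_ctx, example_setup and example_repair_buffer are illustrations, not btrfs code):

#include <linux/slab.h>
#include <linux/gfp.h>

/* Hypothetical stand-in for a context object such as struct scrub_ctx. */
struct example_ctx {
	int nr_bios;
};

/*
 * Setup/submission path: ordinary process context, no filesystem locks
 * held, so reclaim may safely recurse into the filesystem. GFP_KERNEL
 * lets the allocator use direct reclaim in full, making failure much
 * less likely than with GFP_NOFS.
 */
static struct example_ctx *example_setup(void)
{
	return kzalloc(sizeof(struct example_ctx), GFP_KERNEL);
}

/*
 * Repair/fixup path: we may be reacting to a filesystem error, so the
 * patch keeps GFP_NOFS here to prevent reclaim from re-entering
 * filesystem code.
 */
static void *example_repair_buffer(size_t len)
{
	return kmalloc(len, GFP_NOFS);
}

The trade-off: GFP_KERNEL allocations hold up much better under memory pressure, at the cost of being unsafe wherever reclaim could deadlock against the filesystem, which is why the repair paths stay on GFP_NOFS pending further review.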
 fs/btrfs/dev-replace.c |  2 +-
 fs/btrfs/scrub.c       | 24 +++++++++++++-----------
 2 files changed, 14 insertions(+), 12 deletions(-)
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index cbb7dbfb3fff..01ce5fcecc5c 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -802,7 +802,7 @@ static int btrfs_dev_replace_kthread(void *data)
 	struct btrfs_ioctl_dev_replace_args *status_args;
 	u64 progress;
 
-	status_args = kzalloc(sizeof(*status_args), GFP_NOFS);
+	status_args = kzalloc(sizeof(*status_args), GFP_KERNEL);
 	if (status_args) {
 		btrfs_dev_replace_status(fs_info, status_args);
 		progress = status_args->status.progress_1000;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 92bf5ee732fb..2de7817d0e1b 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -461,7 +461,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
 	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
 	int ret;
 
-	sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
+	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
 	if (!sctx)
 		goto nomem;
 	atomic_set(&sctx->refs, 1);
@@ -472,7 +472,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
 	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
 		struct scrub_bio *sbio;
 
-		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
+		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
 		if (!sbio)
 			goto nomem;
 		sctx->bios[i] = sbio;
@@ -1654,7 +1654,7 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 again:
 	if (!wr_ctx->wr_curr_bio) {
 		wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
-					      GFP_NOFS);
+					      GFP_KERNEL);
 		if (!wr_ctx->wr_curr_bio) {
 			mutex_unlock(&wr_ctx->wr_lock);
 			return -ENOMEM;
@@ -1671,7 +1671,8 @@ again:
 		sbio->dev = wr_ctx->tgtdev;
 		bio = sbio->bio;
 		if (!bio) {
-			bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
+			bio = btrfs_io_bio_alloc(GFP_KERNEL,
+					wr_ctx->pages_per_wr_bio);
 			if (!bio) {
 				mutex_unlock(&wr_ctx->wr_lock);
 				return -ENOMEM;
@@ -2076,7 +2077,8 @@ again:
 		sbio->dev = spage->dev;
 		bio = sbio->bio;
 		if (!bio) {
-			bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
+			bio = btrfs_io_bio_alloc(GFP_KERNEL,
+					sctx->pages_per_rd_bio);
 			if (!bio)
 				return -ENOMEM;
 			sbio->bio = bio;
@@ -2241,7 +2243,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 	struct scrub_block *sblock;
 	int index;
 
-	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
+	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
 	if (!sblock) {
 		spin_lock(&sctx->stat_lock);
 		sctx->stat.malloc_errors++;
@@ -2259,7 +2261,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 		struct scrub_page *spage;
 		u64 l = min_t(u64, len, PAGE_SIZE);
 
-		spage = kzalloc(sizeof(*spage), GFP_NOFS);
+		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
 		if (!spage) {
 leave_nomem:
 			spin_lock(&sctx->stat_lock);
@@ -2286,7 +2288,7 @@ leave_nomem:
 			spage->have_csum = 0;
 		}
 		sblock->page_count++;
-		spage->page = alloc_page(GFP_NOFS);
+		spage->page = alloc_page(GFP_KERNEL);
 		if (!spage->page)
 			goto leave_nomem;
 		len -= l;
@@ -2541,7 +2543,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity,
 	struct scrub_block *sblock;
 	int index;
 
-	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
+	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
 	if (!sblock) {
 		spin_lock(&sctx->stat_lock);
 		sctx->stat.malloc_errors++;
@@ -2561,7 +2563,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity,
 		struct scrub_page *spage;
 		u64 l = min_t(u64, len, PAGE_SIZE);
 
-		spage = kzalloc(sizeof(*spage), GFP_NOFS);
+		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
 		if (!spage) {
 leave_nomem:
 			spin_lock(&sctx->stat_lock);
@@ -2591,7 +2593,7 @@ leave_nomem:
 			spage->have_csum = 0;
 		}
 		sblock->page_count++;
-		spage->page = alloc_page(GFP_NOFS);
+		spage->page = alloc_page(GFP_KERNEL);
 		if (!spage->page)
 			goto leave_nomem;
 		len -= l;