author		Thomas Gleixner <tglx@linutronix.de>	2016-01-12 05:01:12 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2016-01-12 05:01:12 -0500
commit		1f16f116b01c110db20ab808562c8b8bc3ee3d6e (patch)
tree		44db563f64cf5f8d62af8f99a61e2b248c44ea3a /fs/btrfs/scrub.c
parent		03724ac3d48f8f0e3caf1d30fa134f8fd96c94e2 (diff)
parent		f9eccf24615672896dc13251410c3f2f33a14f95 (diff)
Merge branches 'clockevents/4.4-fixes' and 'clockevents/4.5-fixes' of http://git.linaro.org/people/daniel.lezcano/linux into timers/urgent
Pull in fixes from Daniel Lezcano:

 - Fix the vt8500 timer leading to a system lock up when dealing with a
   too small delta (Roman Volkov)

 - Select CLKSRC_MMIO when the fsl_ftm_timer is enabled with
   COMPILE_TEST (Daniel Lezcano)

 - Prevent timers that use the 'iomem' API from being compiled when the
   architecture does not have HAS_IOMEM set (Richard Weinberger)
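The first fix above addresses a common clockevents failure mode: a driver programs a one-shot match register with a delta smaller than the hardware can latch, the event is missed, and the CPU waits for a full counter wrap, which looks like a system lock up. The usual defence is to enforce a minimum delta and have the set_next_event callback return -ETIME for anything smaller, so the clockevents core retries with a larger value. The sketch below is a minimal, hypothetical illustration of that pattern; MIN_DELTA_TICKS, sample_timer_set_next_event and the register offsets are invented for illustration and are not the actual vt8500 driver code.

#include <linux/clockchips.h>
#include <linux/errno.h>
#include <linux/io.h>

/* Illustrative values, not taken from the vt8500 driver. */
#define MIN_DELTA_TICKS		16	/* smallest delta the hardware reliably latches */
#define TIMER_COUNT_OFF		0x00	/* free-running counter (hypothetical offset) */
#define TIMER_MATCH_OFF		0x04	/* match/compare register (hypothetical offset) */

static void __iomem *timer_base;

static int sample_timer_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	unsigned long now = readl(timer_base + TIMER_COUNT_OFF);

	/*
	 * Refuse deltas the match register cannot hit in time; the
	 * clockevents core will retry with a larger value instead of
	 * waiting for a counter wrap-around.
	 */
	if (cycles < MIN_DELTA_TICKS)
		return -ETIME;

	writel(now + cycles, timer_base + TIMER_MATCH_OFF);
	return 0;
}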
Diffstat (limited to 'fs/btrfs/scrub.c')
-rw-r--r--	fs/btrfs/scrub.c	62
1 file changed, 56 insertions(+), 6 deletions(-)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 2907a77fb1f6..b091d94ceef6 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3432,7 +3432,9 @@ out:
 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
 					  struct btrfs_device *scrub_dev,
 					  u64 chunk_offset, u64 length,
-					  u64 dev_offset, int is_dev_replace)
+					  u64 dev_offset,
+					  struct btrfs_block_group_cache *cache,
+					  int is_dev_replace)
 {
 	struct btrfs_mapping_tree *map_tree =
 		&sctx->dev_root->fs_info->mapping_tree;
@@ -3445,8 +3447,18 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
 	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
 	read_unlock(&map_tree->map_tree.lock);
 
-	if (!em)
-		return -EINVAL;
+	if (!em) {
+		/*
+		 * Might have been an unused block group deleted by the cleaner
+		 * kthread or relocation.
+		 */
+		spin_lock(&cache->lock);
+		if (!cache->removed)
+			ret = -EINVAL;
+		spin_unlock(&cache->lock);
+
+		return ret;
+	}
 
 	map = (struct map_lookup *)em->bdev;
 	if (em->start != chunk_offset)
@@ -3483,6 +3495,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 	u64 length;
 	u64 chunk_offset;
 	int ret = 0;
+	int ro_set;
 	int slot;
 	struct extent_buffer *l;
 	struct btrfs_key key;
@@ -3568,7 +3581,21 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		scrub_pause_on(fs_info);
 		ret = btrfs_inc_block_group_ro(root, cache);
 		scrub_pause_off(fs_info);
-		if (ret) {
+
+		if (ret == 0) {
+			ro_set = 1;
+		} else if (ret == -ENOSPC) {
+			/*
+			 * btrfs_inc_block_group_ro returns -ENOSPC when it
+			 * fails to create a new chunk for metadata.
+			 * It is not a problem for scrub/replace, because
+			 * metadata is always cowed, and our scrub paused
+			 * commit_transactions.
+			 */
+			ro_set = 0;
+		} else {
+			btrfs_warn(fs_info, "failed setting block group ro, ret=%d\n",
+				   ret);
 			btrfs_put_block_group(cache);
 			break;
 		}
@@ -3577,7 +3604,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		dev_replace->cursor_left = found_key.offset;
 		dev_replace->item_needs_writeback = 1;
 		ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
-				  found_key.offset, is_dev_replace);
+				  found_key.offset, cache, is_dev_replace);
 
 		/*
 		 * flush, submit all pending read and write bios, afterwards
@@ -3611,7 +3638,30 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 
 		scrub_pause_off(fs_info);
 
-		btrfs_dec_block_group_ro(root, cache);
+		if (ro_set)
+			btrfs_dec_block_group_ro(root, cache);
+
+		/*
+		 * We might have prevented the cleaner kthread from deleting
+		 * this block group if it was already unused because we raced
+		 * and set it to RO mode first. So add it back to the unused
+		 * list, otherwise it might not ever be deleted unless a manual
+		 * balance is triggered or it becomes used and unused again.
+		 */
+		spin_lock(&cache->lock);
+		if (!cache->removed && !cache->ro && cache->reserved == 0 &&
+		    btrfs_block_group_used(&cache->item) == 0) {
+			spin_unlock(&cache->lock);
+			spin_lock(&fs_info->unused_bgs_lock);
+			if (list_empty(&cache->bg_list)) {
+				btrfs_get_block_group(cache);
+				list_add_tail(&cache->bg_list,
+					      &fs_info->unused_bgs);
+			}
+			spin_unlock(&fs_info->unused_bgs_lock);
+		} else {
+			spin_unlock(&cache->lock);
+		}
 
 		btrfs_put_block_group(cache);
 		if (ret)