author     Linus Torvalds <torvalds@linux-foundation.org>   2015-05-01 10:46:21 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-05-01 10:46:21 -0400
commit     64887b6882de36069c18ef2d9623484d6db7cd3a
tree       f19394feffe73007751161c98bf44ecfac16f5d2   /fs/btrfs
parent     036f351e2566eaa5826581c8512dd55f6585ad01
parent     5d2361db48899789fb466ff62db5d5fc7b070e86
Merge branch 'for-linus-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs fixes from Chris Mason:
"A few more btrfs fixes.
These range from corners Filipe found in the new free space cache
writeback to a grab bag of fixes from the list"
* 'for-linus-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
Btrfs: btrfs_release_extent_buffer_page didn't free pages of dummy extent
Btrfs: fill ->last_trans for delayed inode in btrfs_fill_inode.
btrfs: unlock i_mutex after attempting to delete subvolume during send
btrfs: check io_ctl_prepare_pages return in __btrfs_write_out_cache
btrfs: fix race on ENOMEM in alloc_extent_buffer
btrfs: handle ENOMEM in btrfs_alloc_tree_block
Btrfs: fix find_free_dev_extent() malfunction in case device tree has hole
Btrfs: don't check for delalloc_bytes in cache_save_setup
Btrfs: fix deadlock when starting writeback of bg caches
Btrfs: fix race between start dirty bg cache writeout and bg deletion
Diffstat (limited to 'fs/btrfs')
-rw-r--r--   fs/btrfs/delayed-inode.c     |  2
-rw-r--r--   fs/btrfs/extent-tree.c       | 90
-rw-r--r--   fs/btrfs/extent_io.c         | 54
-rw-r--r--   fs/btrfs/free-space-cache.c  | 10
-rw-r--r--   fs/btrfs/inode.c             | 21
-rw-r--r--   fs/btrfs/ioctl.c             |  3
-rw-r--r--   fs/btrfs/volumes.c           | 15

7 files changed, 118 insertions, 77 deletions
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index cde698a07d21..a2ae42720a6a 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1802,6 +1802,8 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev)
 	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
 	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
 	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
+	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
+
 	inode->i_version = btrfs_stack_inode_sequence(inode_item);
 	inode->i_rdev = 0;
 	*rdev = btrfs_stack_inode_rdev(inode_item);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 1eef4ee01d1a..0ec8e228b89f 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3178,8 +3178,8 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
 	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
 	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
 	btrfs_mark_buffer_dirty(leaf);
-	btrfs_release_path(path);
 fail:
+	btrfs_release_path(path);
 	if (ret)
 		btrfs_abort_transaction(trans, root, ret);
 	return ret;
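The write_one_cache_group() hunk above is the classic shared-cleanup-label fix: btrfs_release_path() moves below the fail: label, so the error return no longer skips releasing the path. A minimal userspace sketch of the pattern (all names here are illustrative stand-ins, not kernel API):

	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical resource standing in for the btrfs path object. */
	struct res_path { int held; };

	static struct res_path *path_acquire(void)
	{
		struct res_path *p = calloc(1, sizeof(*p));

		if (p)
			p->held = 1;
		return p;
	}

	static void path_release(struct res_path *p)
	{
		if (p->held) {
			p->held = 0;
			printf("path released\n");
		}
	}

	/*
	 * Before the fix, the release sat above the error label, so the
	 * error path returned with the resource still held. Below the
	 * label, success and failure share one cleanup.
	 */
	static int update_item(struct res_path *p, int simulate_error)
	{
		int ret = 0;

		if (simulate_error) {
			ret = -1;
			goto fail;
		}
		printf("item written\n");
	fail:
		path_release(p);	/* runs on every exit path now */
		return ret;
	}

	int main(void)
	{
		struct res_path *p = path_acquire();

		if (!p)
			return 1;
		printf("update returned %d\n", update_item(p, 1));
		free(p);
		return 0;
	}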
@@ -3305,8 +3305,7 @@ again:
 
 	spin_lock(&block_group->lock);
 	if (block_group->cached != BTRFS_CACHE_FINISHED ||
-	    !btrfs_test_opt(root, SPACE_CACHE) ||
-	    block_group->delalloc_bytes) {
+	    !btrfs_test_opt(root, SPACE_CACHE)) {
 		/*
 		 * don't bother trying to write stuff out _if_
 		 * a) we're not cached,
@@ -3408,17 +3407,14 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
 	int loops = 0;
 
 	spin_lock(&cur_trans->dirty_bgs_lock);
-	if (!list_empty(&cur_trans->dirty_bgs)) {
-		list_splice_init(&cur_trans->dirty_bgs, &dirty);
+	if (list_empty(&cur_trans->dirty_bgs)) {
+		spin_unlock(&cur_trans->dirty_bgs_lock);
+		return 0;
 	}
+	list_splice_init(&cur_trans->dirty_bgs, &dirty);
 	spin_unlock(&cur_trans->dirty_bgs_lock);
 
 again:
-	if (list_empty(&dirty)) {
-		btrfs_free_path(path);
-		return 0;
-	}
-
 	/*
 	 * make sure all the block groups on our dirty list actually
 	 * exist
@@ -3431,18 +3427,16 @@ again:
 		return -ENOMEM;
 	}
 
+	/*
+	 * cache_write_mutex is here only to save us from balance or automatic
+	 * removal of empty block groups deleting this block group while we are
+	 * writing out the cache
+	 */
+	mutex_lock(&trans->transaction->cache_write_mutex);
 	while (!list_empty(&dirty)) {
 		cache = list_first_entry(&dirty,
 					 struct btrfs_block_group_cache,
 					 dirty_list);
-
-		/*
-		 * cache_write_mutex is here only to save us from balance
-		 * deleting this block group while we are writing out the
-		 * cache
-		 */
-		mutex_lock(&trans->transaction->cache_write_mutex);
-
 		/*
 		 * this can happen if something re-dirties a block
 		 * group that is already under IO. Just wait for it to
@@ -3495,7 +3489,6 @@ again:
 		}
 		if (!ret)
 			ret = write_one_cache_group(trans, root, path, cache);
-		mutex_unlock(&trans->transaction->cache_write_mutex);
 
 		/* if its not on the io list, we need to put the block group */
 		if (should_put)
@@ -3503,7 +3496,16 @@ again:
 
 		if (ret)
 			break;
+
+		/*
+		 * Avoid blocking other tasks for too long. It might even save
+		 * us from writing caches for block groups that are going to be
+		 * removed.
+		 */
+		mutex_unlock(&trans->transaction->cache_write_mutex);
+		mutex_lock(&trans->transaction->cache_write_mutex);
 	}
+	mutex_unlock(&trans->transaction->cache_write_mutex);
 
 	/*
 	 * go through delayed refs for all the stuff we've just kicked off
@@ -3514,8 +3516,15 @@ again:
 		loops++;
 		spin_lock(&cur_trans->dirty_bgs_lock);
 		list_splice_init(&cur_trans->dirty_bgs, &dirty);
+		/*
+		 * dirty_bgs_lock protects us from concurrent block group
+		 * deletes too (not just cache_write_mutex).
+		 */
+		if (!list_empty(&dirty)) {
+			spin_unlock(&cur_trans->dirty_bgs_lock);
+			goto again;
+		}
 		spin_unlock(&cur_trans->dirty_bgs_lock);
-		goto again;
 	}
 
 	btrfs_free_path(path);
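The btrfs_start_dirty_block_groups() changes above boil down to a splice-then-recheck loop: take the whole dirty list privately under dirty_bgs_lock, work through the copy (dropping and retaking cache_write_mutex each iteration so block-group deleters can make progress), then re-check the shared list under the lock before deciding to loop. A compressed pthread sketch of that shape, with a counter standing in for the lists (illustrative only, not the kernel locking primitives):

	#include <pthread.h>
	#include <stdio.h>

	/* Stand-ins for the transaction's dirty list and its spinlock. */
	static pthread_mutex_t dirty_lock = PTHREAD_MUTEX_INITIALIZER;
	static int dirty_count = 3;	/* pretend 3 block groups are dirty */

	static void write_one(void) { printf("wrote one dirty bg\n"); }

	static void flush_dirty(void)
	{
	again:
		/* take the whole batch privately, like list_splice_init() */
		pthread_mutex_lock(&dirty_lock);
		int batch = dirty_count;
		dirty_count = 0;
		pthread_mutex_unlock(&dirty_lock);

		while (batch--)
			write_one();

		/* re-check under the lock: anything re-dirtied meanwhile? */
		pthread_mutex_lock(&dirty_lock);
		if (dirty_count) {
			pthread_mutex_unlock(&dirty_lock);
			goto again;	/* same shape as the hunk above */
		}
		pthread_mutex_unlock(&dirty_lock);
	}

	int main(void)
	{
		flush_dirty();
		return 0;
	}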
@@ -7537,7 +7546,7 @@ static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
  * returns the key for the extent through ins, and a tree buffer for
  * the first block of the extent through buf.
  *
- * returns the tree buffer or NULL.
+ * returns the tree buffer or an ERR_PTR on error.
  */
 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
 					struct btrfs_root *root,
@@ -7548,6 +7557,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
 	struct btrfs_key ins;
 	struct btrfs_block_rsv *block_rsv;
 	struct extent_buffer *buf;
+	struct btrfs_delayed_extent_op *extent_op;
 	u64 flags = 0;
 	int ret;
 	u32 blocksize = root->nodesize;
@@ -7568,13 +7578,14 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
 
 	ret = btrfs_reserve_extent(root, blocksize, blocksize,
 				   empty_size, hint, &ins, 0, 0);
-	if (ret) {
-		unuse_block_rsv(root->fs_info, block_rsv, blocksize);
-		return ERR_PTR(ret);
-	}
+	if (ret)
+		goto out_unuse;
 
 	buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
-	BUG_ON(IS_ERR(buf)); /* -ENOMEM */
+	if (IS_ERR(buf)) {
+		ret = PTR_ERR(buf);
+		goto out_free_reserved;
+	}
 
 	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
 		if (parent == 0)
@@ -7584,9 +7595,11 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
 		BUG_ON(parent > 0);
 
 	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
-		struct btrfs_delayed_extent_op *extent_op;
 		extent_op = btrfs_alloc_delayed_extent_op();
-		BUG_ON(!extent_op); /* -ENOMEM */
+		if (!extent_op) {
+			ret = -ENOMEM;
+			goto out_free_buf;
+		}
 		if (key)
 			memcpy(&extent_op->key, key, sizeof(extent_op->key));
 		else
@@ -7601,13 +7614,24 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
 		extent_op->level = level;
 
 		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
-						 ins.objectid,
-						 ins.offset, parent, root_objectid,
-						 level, BTRFS_ADD_DELAYED_EXTENT,
+						 ins.objectid, ins.offset,
+						 parent, root_objectid, level,
+						 BTRFS_ADD_DELAYED_EXTENT,
 						 extent_op, 0);
-		BUG_ON(ret); /* -ENOMEM */
+		if (ret)
+			goto out_free_delayed;
 	}
 	return buf;
+
+out_free_delayed:
+	btrfs_free_delayed_extent_op(extent_op);
+out_free_buf:
+	free_extent_buffer(buf);
+out_free_reserved:
+	btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
+out_unuse:
+	unuse_block_rsv(root->fs_info, block_rsv, blocksize);
+	return ERR_PTR(ret);
 }
 
 struct walk_control {
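With these hunks, btrfs_alloc_tree_block() stops BUG()ing on allocation failure and instead unwinds through a ladder of goto labels in reverse acquisition order, so each failure point releases exactly what was taken before it. A self-contained sketch of the same ladder (reserve/alloc names are hypothetical stand-ins, and alloc_op() deliberately fails to exercise the out_free_buf path):

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Stand-ins for the reservation, buffer, and delayed-op steps. */
	static int   reserve(void)     { return 0; }
	static void  unreserve(void)   { puts("unreserve"); }
	static void *alloc_buf(void)   { return malloc(16); }
	static void  free_buf(void *b) { puts("free buf"); free(b); }
	static void *alloc_op(void)    { return NULL; /* simulate -ENOMEM */ }
	static void  free_op(void *o)  { puts("free op"); free(o); }

	static void *alloc_block(int *err)
	{
		void *buf = NULL, *op = NULL;
		int ret;

		ret = reserve();
		if (ret)
			goto out;
		buf = alloc_buf();
		if (!buf) {
			ret = -ENOMEM;
			goto out_unreserve;
		}
		op = alloc_op();
		if (!op) {
			ret = -ENOMEM;
			goto out_free_buf;
		}
		free_op(op);	/* real code would hand op off instead */
		*err = 0;
		return buf;

	/* labels in reverse acquisition order: each undoes one step */
	out_free_buf:
		free_buf(buf);
	out_unreserve:
		unreserve();
	out:
		*err = ret;
		return NULL;
	}

	int main(void)
	{
		int err;
		void *b = alloc_block(&err);

		printf("alloc_block: %p (err = %d)\n", b, err);
		free(b);
		return 0;
	}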
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 782f3bc4651d..43af5a61ad25 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4560,36 +4560,37 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
 	do {
 		index--;
 		page = eb->pages[index];
-		if (page && mapped) {
-			spin_lock(&page->mapping->private_lock);
-			/*
-			 * We do this since we'll remove the pages after we've
-			 * removed the eb from the radix tree, so we could race
-			 * and have this page now attached to the new eb. So
-			 * only clear page_private if it's still connected to
-			 * this eb.
-			 */
-			if (PagePrivate(page) &&
-			    page->private == (unsigned long)eb) {
-				BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
-				BUG_ON(PageDirty(page));
-				BUG_ON(PageWriteback(page));
-				/*
-				 * We need to make sure we haven't be attached
-				 * to a new eb.
-				 */
-				ClearPagePrivate(page);
-				set_page_private(page, 0);
-				/* One for the page private */
-				page_cache_release(page);
-			}
-			spin_unlock(&page->mapping->private_lock);
-
-		}
-		if (page) {
-			/* One for when we alloced the page */
-			page_cache_release(page);
-		}
+		if (!page)
+			continue;
+		if (mapped)
+			spin_lock(&page->mapping->private_lock);
+		/*
+		 * We do this since we'll remove the pages after we've
+		 * removed the eb from the radix tree, so we could race
+		 * and have this page now attached to the new eb. So
+		 * only clear page_private if it's still connected to
+		 * this eb.
+		 */
+		if (PagePrivate(page) &&
+		    page->private == (unsigned long)eb) {
+			BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
+			BUG_ON(PageDirty(page));
+			BUG_ON(PageWriteback(page));
+			/*
+			 * We need to make sure we haven't be attached
+			 * to a new eb.
+			 */
+			ClearPagePrivate(page);
+			set_page_private(page, 0);
+			/* One for the page private */
+			page_cache_release(page);
+		}
+
+		if (mapped)
+			spin_unlock(&page->mapping->private_lock);
+
+		/* One for when we alloced the page */
+		page_cache_release(page);
 	} while (index != 0);
 }
 
@@ -4870,6 +4871,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 			mark_extent_buffer_accessed(exists, p);
 			goto free_eb;
 		}
+		exists = NULL;
 
 		/*
 		 * Do this so attach doesn't complain and we need to
@@ -4933,12 +4935,12 @@ again:
 	return eb;
 
 free_eb:
+	WARN_ON(!atomic_dec_and_test(&eb->refs));
 	for (i = 0; i < num_pages; i++) {
 		if (eb->pages[i])
 			unlock_page(eb->pages[i]);
 	}
 
-	WARN_ON(!atomic_dec_and_test(&eb->refs));
 	btrfs_release_extent_buffer(eb);
 	return exists;
 }
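Both extent_io.c fixes orbit the same race in alloc_extent_buffer(): two tasks allocating the same extent buffer, where the loser must drop its own copy and take a reference on the winner, and must not return a stale exists pointer from a later ENOMEM path (hence exists = NULL after the lost-race branch). The shape of that lose-the-race pattern, as a toy registry guarded by a mutex in place of the radix tree (all names illustrative):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct buffer { long key; int refs; };

	/* Toy registry standing in for the radix tree of extent buffers. */
	#define SLOTS 64
	static struct buffer *registry[SLOTS];
	static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;

	/*
	 * Insert our freshly allocated buffer unless another task beat us
	 * to it; on a lost race, drop our copy and return the winner,
	 * mirroring the free_eb: path above.
	 */
	static struct buffer *get_buffer(long key)
	{
		struct buffer *eb = calloc(1, sizeof(*eb));
		struct buffer *exists = NULL;

		if (!eb)
			return NULL;
		eb->key = key;
		eb->refs = 1;

		pthread_mutex_lock(&reg_lock);
		if (registry[key % SLOTS]) {
			exists = registry[key % SLOTS];
			exists->refs++;		/* ref the winner */
		} else {
			registry[key % SLOTS] = eb;
		}
		pthread_mutex_unlock(&reg_lock);

		if (exists) {
			eb->refs--;	/* drop our only ref... */
			free(eb);	/* ...and free the loser */
			return exists;
		}
		return eb;
	}

	int main(void)
	{
		struct buffer *a = get_buffer(7);
		struct buffer *b = get_buffer(7); /* takes the exists path */

		printf("same buffer: %s, refs = %d\n",
		       a == b ? "yes" : "no", a->refs);
		return 0;
	}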
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 81fa75a8e1f3..41c510b7cc11 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1218,7 +1218,7 @@ out:
  *
  * This function writes out a free space cache struct to disk for quick recovery
  * on mount. This will return 0 if it was successfull in writing the cache out,
- * and -1 if it was not.
+ * or an errno if it was not.
  */
 static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 				   struct btrfs_free_space_ctl *ctl,
@@ -1235,12 +1235,12 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	int must_iput = 0;
 
 	if (!i_size_read(inode))
-		return -1;
+		return -EIO;
 
 	WARN_ON(io_ctl->pages);
 	ret = io_ctl_init(io_ctl, inode, root, 1);
 	if (ret)
-		return -1;
+		return ret;
 
 	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
 		down_write(&block_group->data_rwsem);
@@ -1258,7 +1258,9 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	}
 
 	/* Lock all pages first so we can lock the extent safely. */
-	io_ctl_prepare_pages(io_ctl, inode, 0);
+	ret = io_ctl_prepare_pages(io_ctl, inode, 0);
+	if (ret)
+		goto out;
 
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
 			 0, &cached_state);
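The free-space-cache change does two things: it stops ignoring the io_ctl_prepare_pages() return value, and it replaces blanket -1 returns with real error codes. Propagating the errno matters because the caller can then tell ENOMEM from EIO. A tiny sketch of the idea (init_ctl/prepare_pages are hypothetical stand-ins, not the kernel functions):

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	static int init_ctl(void)      { return -ENOMEM; } /* pretend OOM */
	static int prepare_pages(void) { return 0; }

	static int write_out_cache(void)
	{
		int ret;

		ret = init_ctl();
		if (ret)
			return ret;	/* was: return -1 */

		ret = prepare_pages(); /* was: return value ignored */
		if (ret)
			return ret;

		return 0;
	}

	int main(void)
	{
		int ret = write_out_cache();

		if (ret)
			printf("cache write failed: %s\n", strerror(-ret));
		return 0;
	}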
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index ada4d24ed11b..8bb013672aee 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3632,25 +3632,28 @@ static void btrfs_read_locked_inode(struct inode *inode)
 	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
 	BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
 
+	inode->i_version = btrfs_inode_sequence(leaf, inode_item);
+	inode->i_generation = BTRFS_I(inode)->generation;
+	inode->i_rdev = 0;
+	rdev = btrfs_inode_rdev(leaf, inode_item);
+
+	BTRFS_I(inode)->index_cnt = (u64)-1;
+	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
+
+cache_index:
 	/*
 	 * If we were modified in the current generation and evicted from memory
 	 * and then re-read we need to do a full sync since we don't have any
 	 * idea about which extents were modified before we were evicted from
 	 * cache.
+	 *
+	 * This is required for both inode re-read from disk and delayed inode
+	 * in delayed_nodes_tree.
 	 */
 	if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
 		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
 			&BTRFS_I(inode)->runtime_flags);
 
-	inode->i_version = btrfs_inode_sequence(leaf, inode_item);
-	inode->i_generation = BTRFS_I(inode)->generation;
-	inode->i_rdev = 0;
-	rdev = btrfs_inode_rdev(leaf, inode_item);
-
-	BTRFS_I(inode)->index_cnt = (u64)-1;
-	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
-
-cache_index:
 	path->slots[0]++;
 	if (inode->i_nlink != 1 ||
 	    path->slots[0] >= btrfs_header_nritems(leaf))
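The inode.c hunk moves the plain field initialization above the cache_index: label so the full-sync check, now reachable from the delayed-inode path too (per the extended comment), runs for both ways an inode can be re-read. The underlying idea, that both entry paths must flow through one shared check, in sketch form (a helper function here instead of a goto label; all names and values illustrative):

	#include <stdio.h>

	static long fs_generation = 42;

	/* everything in here is shared by both entry paths */
	static void check_full_sync(long last_trans)
	{
		if (last_trans == fs_generation)
			printf("flagging inode for full sync\n");
	}

	static void read_inode_from_disk(void)
	{
		long last_trans = 42;	/* parsed from the on-disk item */

		printf("init fields only the disk path needs\n");
		check_full_sync(last_trans);
	}

	static void read_inode_from_delayed_node(void)
	{
		long last_trans = 42;	/* now filled by btrfs_fill_inode() */

		check_full_sync(last_trans);
	}

	int main(void)
	{
		read_inode_from_disk();
		read_inode_from_delayed_node();
		return 0;
	}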
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index b05653f182c2..1c22c6518504 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2410,7 +2410,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
 			"Attempt to delete subvolume %llu during send",
 			dest->root_key.objectid);
 		err = -EPERM;
-		goto out_dput;
+		goto out_unlock_inode;
 	}
 
 	d_invalidate(dentry);
@@ -2505,6 +2505,7 @@ out_up_write:
 				root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
 		spin_unlock(&dest->root_item_lock);
 	}
+out_unlock_inode:
 	mutex_unlock(&inode->i_mutex);
 	if (!err) {
 		shrink_dcache_sb(root->fs_info->sb);
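The ioctl fix is another unlock-on-error-path bug: bailing out with -EPERM jumped to a label past mutex_unlock(&inode->i_mutex), leaving the mutex held forever. Sketched with a pthread mutex (the send_in_progress flag is a stand-in for the real check):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t i_mutex = PTHREAD_MUTEX_INITIALIZER;

	/*
	 * Jumping to a label *below* the unlock leaked the mutex; the fix
	 * adds a label just above it so the error path releases it too.
	 */
	static int snap_destroy(int send_in_progress)
	{
		int err = 0;

		pthread_mutex_lock(&i_mutex);
		if (send_in_progress) {
			err = -1;		/* -EPERM in the ioctl */
			goto out_unlock;	/* was: a label past the unlock */
		}
		printf("subvolume deleted\n");
	out_unlock:
		pthread_mutex_unlock(&i_mutex);
		return err;
	}

	int main(void)
	{
		snap_destroy(1);
		snap_destroy(0);
		return 0;
	}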
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 8bcd2a007517..96aebf3bcd5b 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1058,6 +1058,7 @@ static int contains_pending_extent(struct btrfs_trans_handle *trans,
 	struct extent_map *em;
 	struct list_head *search_list = &trans->transaction->pending_chunks;
 	int ret = 0;
+	u64 physical_start = *start;
 
 again:
 	list_for_each_entry(em, search_list, list) {
@@ -1068,9 +1069,9 @@ again:
 		for (i = 0; i < map->num_stripes; i++) {
 			if (map->stripes[i].dev != device)
 				continue;
-			if (map->stripes[i].physical >= *start + len ||
+			if (map->stripes[i].physical >= physical_start + len ||
 			    map->stripes[i].physical + em->orig_block_len <=
-			    *start)
+			    physical_start)
 				continue;
 			*start = map->stripes[i].physical +
 				em->orig_block_len;
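contains_pending_extent() both reads and advances *start, so once the loop updated it, later stripe comparisons were silently made against the moved value; caching physical_start up front pins every comparison to the caller's original start. A toy version of the stripe scan (layout and numbers invented):

	#include <stdio.h>

	struct stripe { unsigned long physical, len; };

	static int overlaps(const struct stripe *s, unsigned long *start,
			    unsigned long len)
	{
		unsigned long physical_start = *start; /* snapshot the input */
		int ret = 0;

		for (int i = 0; i < 3; i++, s++) {
			/* compare against the original, not the updated, start */
			if (s->physical >= physical_start + len ||
			    s->physical + s->len <= physical_start)
				continue;
			*start = s->physical + s->len; /* propose a new start */
			ret = 1;
		}
		return ret;
	}

	int main(void)
	{
		struct stripe map[3] = { {0, 16}, {100, 16}, {8, 16} };
		unsigned long start = 4;

		if (overlaps(map, &start, 8))
			printf("overlap; next candidate start = %lu\n", start);
		return 0;
	}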
@@ -1193,8 +1194,14 @@ again:
 			 */
 			if (contains_pending_extent(trans, device,
 						    &search_start,
-						    hole_size))
-				hole_size = 0;
+						    hole_size)) {
+				if (key.offset >= search_start) {
+					hole_size = key.offset - search_start;
+				} else {
+					WARN_ON_ONCE(1);
+					hole_size = 0;
+				}
+			}
 
 			if (hole_size > max_hole_size) {
 				max_hole_start = search_start;
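And the find_free_dev_extent() fix: when a pending chunk overlaps the candidate hole, the old code discarded the hole entirely (hole_size = 0), even though the stretch between the advanced search_start and the next on-disk extent may still be usable. A sketch of the recomputation (values invented):

	#include <stdio.h>

	/*
	 * If a pending chunk truncates a hole, the remainder up to the next
	 * on-disk extent may still be usable; recompute instead of zeroing.
	 */
	static unsigned long usable_hole(unsigned long search_start,
					 unsigned long next_extent_offset,
					 int hit_pending,
					 unsigned long advanced_start)
	{
		unsigned long hole_size;

		if (!hit_pending)
			return next_extent_offset - search_start;

		/* the pending chunk moved search_start forward */
		search_start = advanced_start;
		if (next_extent_offset >= search_start)
			hole_size = next_extent_offset - search_start;
		else
			hole_size = 0; /* fully consumed; WARNs in the kernel */
		return hole_size;
	}

	int main(void)
	{
		/* hole [100, 200) with a pending chunk ending at 150 */
		printf("remaining hole: %lu\n", usable_hole(100, 200, 1, 150));
		return 0;
	}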