diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-05-07 14:34:19 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-05-07 14:34:19 -0400 |
commit | 9f2e3a53f7ec9ef55e9d01bc29a6285d291c151e (patch) | |
tree | c25b0eb20dac1a39a6b55c521b2658dcceb7d532 /fs/btrfs/ref-verify.c | |
parent | 78438ce18f26dbcaa8993bb45d20ffb0cec3bc3e (diff) | |
parent | b1c16ac978fd40ae636e629bb69a652df7eebdc2 (diff) |
Merge tag 'for-5.2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux
Pull btrfs updates from David Sterba:
"This time the majority of changes are cleanups, though there's still a
number of changes of user interest.
User visible changes:
- better read time and write checks to catch errors early and before
writing data to disk (to catch potential memory corruption on data
that get checksummed)
- qgroups + metadata relocation: last speed up patch in the series
to address the slowness, there should be no overhead comparing
balance with and without qgroups
- FIEMAP ioctl does not start a transaction unnecessarily, this can
result in a speed up and less blocking due to IO
- LOGICAL_INO (v1, v2) does not start transaction unnecessarily, this
can speed up the mentioned ioctl and scrub as well
- fsync on files with many (but not too many) hardlinks is faster,
finer decision if the links should be fsynced individually or
completely
- send tries harder to find ranges to clone
- trim/discard will skip unallocated chunks that haven't been touched
since the last mount
Fixes:
- send flushes delayed allocation before start, otherwise it could
miss some changes in case of a very recent rw->ro switch of a
subvolume
- fix fallocate with qgroups that could lead to space accounting
underflow, reported as a warning
- trim/discard ioctl honours the requested range
- starting send and dedupe on a subvolume at the same time will let
only one of them succeed, this is to prevent changes that send
could miss due to dedupe; both operations are restartable
Core changes:
- more tree-checker validations, errors reported by fuzzing tools:
- device item
- inode item
- block group profiles
- tracepoints for extent buffer locking
- async cow preallocates memory to avoid errors happening too deep in
the call chain
- metadata reservations for delalloc reworked to better adapt in
many-writers/low-space scenarios
- improved space flushing logic for intense DIO vs buffered workloads
- lots of cleanups
- removed unused struct members
- redundant argument removal
- properties and xattrs
- extent buffer locking
- selftests
- use common file type conversions
- many-argument functions reduction"
* tag 'for-5.2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux: (227 commits)
btrfs: Use kvmalloc for allocating compressed path context
btrfs: Factor out common extent locking code in submit_compressed_extents
btrfs: Set io_tree only once in submit_compressed_extents
btrfs: Replace clear_extent_bit with unlock_extent
btrfs: Make compress_file_range take only struct async_chunk
btrfs: Remove fs_info from struct async_chunk
btrfs: Rename async_cow to async_chunk
btrfs: Preallocate chunks in cow_file_range_async
btrfs: reserve delalloc metadata differently
btrfs: track DIO bytes in flight
btrfs: merge calls of btrfs_setxattr and btrfs_setxattr_trans in btrfs_set_prop
btrfs: delete unused function btrfs_set_prop_trans
btrfs: start transaction in xattr_handler_set_prop
btrfs: drop local copy of inode i_mode
btrfs: drop old_fsflags in btrfs_ioctl_setflags
btrfs: modify local copy of btrfs_inode flags
btrfs: drop useless inode i_flags copy and restore
btrfs: start transaction in btrfs_ioctl_setflags()
btrfs: export btrfs_set_prop
btrfs: refactor btrfs_set_props to validate externally
...
Diffstat (limited to 'fs/btrfs/ref-verify.c')
-rw-r--r-- | fs/btrfs/ref-verify.c | 53 |
1 files changed, 30 insertions, 23 deletions
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c index b283d3a6e837..5cec2c6970f2 100644 --- a/fs/btrfs/ref-verify.c +++ b/fs/btrfs/ref-verify.c | |||
@@ -659,36 +659,43 @@ static void dump_block_entry(struct btrfs_fs_info *fs_info, | |||
659 | 659 | ||
660 | /* | 660 | /* |
661 | * btrfs_ref_tree_mod: called when we modify a ref for a bytenr | 661 | * btrfs_ref_tree_mod: called when we modify a ref for a bytenr |
662 | * @root: the root we are making this modification from. | ||
663 | * @bytenr: the bytenr we are modifying. | ||
664 | * @num_bytes: number of bytes. | ||
665 | * @parent: the parent bytenr. | ||
666 | * @ref_root: the original root owner of the bytenr. | ||
667 | * @owner: level in the case of metadata, inode in the case of data. | ||
668 | * @offset: 0 for metadata, file offset for data. | ||
669 | * @action: the action that we are doing, this is the same as the delayed ref | ||
670 | * action. | ||
671 | * | 662 | * |
672 | * This will add an action item to the given bytenr and do sanity checks to make | 663 | * This will add an action item to the given bytenr and do sanity checks to make |
673 | * sure we haven't messed something up. If we are making a new allocation and | 664 | * sure we haven't messed something up. If we are making a new allocation and |
674 | * this block entry has history we will delete all previous actions as long as | 665 | * this block entry has history we will delete all previous actions as long as |
675 | * our sanity checks pass as they are no longer needed. | 666 | * our sanity checks pass as they are no longer needed. |
676 | */ | 667 | */ |
677 | int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes, | 668 | int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info, |
678 | u64 parent, u64 ref_root, u64 owner, u64 offset, | 669 | struct btrfs_ref *generic_ref) |
679 | int action) | ||
680 | { | 670 | { |
681 | struct btrfs_fs_info *fs_info = root->fs_info; | ||
682 | struct ref_entry *ref = NULL, *exist; | 671 | struct ref_entry *ref = NULL, *exist; |
683 | struct ref_action *ra = NULL; | 672 | struct ref_action *ra = NULL; |
684 | struct block_entry *be = NULL; | 673 | struct block_entry *be = NULL; |
685 | struct root_entry *re = NULL; | 674 | struct root_entry *re = NULL; |
675 | int action = generic_ref->action; | ||
686 | int ret = 0; | 676 | int ret = 0; |
687 | bool metadata = owner < BTRFS_FIRST_FREE_OBJECTID; | 677 | bool metadata; |
678 | u64 bytenr = generic_ref->bytenr; | ||
679 | u64 num_bytes = generic_ref->len; | ||
680 | u64 parent = generic_ref->parent; | ||
681 | u64 ref_root; | ||
682 | u64 owner; | ||
683 | u64 offset; | ||
688 | 684 | ||
689 | if (!btrfs_test_opt(root->fs_info, REF_VERIFY)) | 685 | if (!btrfs_test_opt(fs_info, REF_VERIFY)) |
690 | return 0; | 686 | return 0; |
691 | 687 | ||
688 | if (generic_ref->type == BTRFS_REF_METADATA) { | ||
689 | ref_root = generic_ref->tree_ref.root; | ||
690 | owner = generic_ref->tree_ref.level; | ||
691 | offset = 0; | ||
692 | } else { | ||
693 | ref_root = generic_ref->data_ref.ref_root; | ||
694 | owner = generic_ref->data_ref.ino; | ||
695 | offset = generic_ref->data_ref.offset; | ||
696 | } | ||
697 | metadata = owner < BTRFS_FIRST_FREE_OBJECTID; | ||
698 | |||
692 | ref = kzalloc(sizeof(struct ref_entry), GFP_NOFS); | 699 | ref = kzalloc(sizeof(struct ref_entry), GFP_NOFS); |
693 | ra = kmalloc(sizeof(struct ref_action), GFP_NOFS); | 700 | ra = kmalloc(sizeof(struct ref_action), GFP_NOFS); |
694 | if (!ra || !ref) { | 701 | if (!ra || !ref) { |
@@ -721,7 +728,7 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes, | |||
721 | 728 | ||
722 | INIT_LIST_HEAD(&ra->list); | 729 | INIT_LIST_HEAD(&ra->list); |
723 | ra->action = action; | 730 | ra->action = action; |
724 | ra->root = root->root_key.objectid; | 731 | ra->root = generic_ref->real_root; |
725 | 732 | ||
726 | /* | 733 | /* |
727 | * This is an allocation, preallocate the block_entry in case we haven't | 734 | * This is an allocation, preallocate the block_entry in case we haven't |
@@ -734,7 +741,7 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes, | |||
734 | * is and the new root objectid, so let's not treat the passed | 741 | * is and the new root objectid, so let's not treat the passed |
735 | * in root as if it really has a ref for this bytenr. | 742 | * in root as if it really has a ref for this bytenr. |
736 | */ | 743 | */ |
737 | be = add_block_entry(root->fs_info, bytenr, num_bytes, ref_root); | 744 | be = add_block_entry(fs_info, bytenr, num_bytes, ref_root); |
738 | if (IS_ERR(be)) { | 745 | if (IS_ERR(be)) { |
739 | kfree(ra); | 746 | kfree(ra); |
740 | ret = PTR_ERR(be); | 747 | ret = PTR_ERR(be); |
@@ -776,13 +783,13 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes, | |||
776 | * one we want to lookup below when we modify the | 783 | * one we want to lookup below when we modify the |
777 | * re->num_refs. | 784 | * re->num_refs. |
778 | */ | 785 | */ |
779 | ref_root = root->root_key.objectid; | 786 | ref_root = generic_ref->real_root; |
780 | re->root_objectid = root->root_key.objectid; | 787 | re->root_objectid = generic_ref->real_root; |
781 | re->num_refs = 0; | 788 | re->num_refs = 0; |
782 | } | 789 | } |
783 | 790 | ||
784 | spin_lock(&root->fs_info->ref_verify_lock); | 791 | spin_lock(&fs_info->ref_verify_lock); |
785 | be = lookup_block_entry(&root->fs_info->block_tree, bytenr); | 792 | be = lookup_block_entry(&fs_info->block_tree, bytenr); |
786 | if (!be) { | 793 | if (!be) { |
787 | btrfs_err(fs_info, | 794 | btrfs_err(fs_info, |
788 | "trying to do action %d to bytenr %llu num_bytes %llu but there is no existing entry!", | 795 | "trying to do action %d to bytenr %llu num_bytes %llu but there is no existing entry!", |
@@ -851,7 +858,7 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes, | |||
851 | * didn't think of some other corner case. | 858 | * didn't think of some other corner case. |
852 | */ | 859 | */ |
853 | btrfs_err(fs_info, "failed to find root %llu for %llu", | 860 | btrfs_err(fs_info, "failed to find root %llu for %llu", |
854 | root->root_key.objectid, be->bytenr); | 861 | generic_ref->real_root, be->bytenr); |
855 | dump_block_entry(fs_info, be); | 862 | dump_block_entry(fs_info, be); |
856 | dump_ref_action(fs_info, ra); | 863 | dump_ref_action(fs_info, ra); |
857 | kfree(ra); | 864 | kfree(ra); |
@@ -870,7 +877,7 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes, | |||
870 | list_add_tail(&ra->list, &be->actions); | 877 | list_add_tail(&ra->list, &be->actions); |
871 | ret = 0; | 878 | ret = 0; |
872 | out_unlock: | 879 | out_unlock: |
873 | spin_unlock(&root->fs_info->ref_verify_lock); | 880 | spin_unlock(&fs_info->ref_verify_lock); |
874 | out: | 881 | out: |
875 | if (ret) | 882 | if (ret) |
876 | btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY); | 883 | btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY); |