Diffstat (limited to 'fs/btrfs/ioctl.c')
-rw-r--r--  fs/btrfs/ioctl.c  118
1 file changed, 67 insertions, 51 deletions
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 24b776c08d99..0e92e5763005 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -52,6 +52,7 @@
 #include "locking.h"
 #include "inode-map.h"
 #include "backref.h"
+#include "rcu-string.h"
 
 /* Mask out flags that are inappropriate for the given type of inode. */
 static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
@@ -785,39 +786,57 @@ none:
         return -ENOENT;
 }
 
-/*
- * Validaty check of prev em and next em:
- * 1) no prev/next em
- * 2) prev/next em is an hole/inline extent
- */
-static int check_adjacent_extents(struct inode *inode, struct extent_map *em)
+static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
 {
         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
-        struct extent_map *prev = NULL, *next = NULL;
-        int ret = 0;
+        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+        struct extent_map *em;
+        u64 len = PAGE_CACHE_SIZE;
 
+        /*
+         * hopefully we have this extent in the tree already, try without
+         * the full extent lock
+         */
         read_lock(&em_tree->lock);
-        prev = lookup_extent_mapping(em_tree, em->start - 1, (u64)-1);
-        next = lookup_extent_mapping(em_tree, em->start + em->len, (u64)-1);
+        em = lookup_extent_mapping(em_tree, start, len);
         read_unlock(&em_tree->lock);
 
-        if ((!prev || prev->block_start >= EXTENT_MAP_LAST_BYTE) &&
-            (!next || next->block_start >= EXTENT_MAP_LAST_BYTE))
-                ret = 1;
-        free_extent_map(prev);
-        free_extent_map(next);
+        if (!em) {
+                /* get the big lock and read metadata off disk */
+                lock_extent(io_tree, start, start + len - 1);
+                em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
+                unlock_extent(io_tree, start, start + len - 1);
+
+                if (IS_ERR(em))
+                        return NULL;
+        }
+
+        return em;
+}
+
+static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
+{
+        struct extent_map *next;
+        bool ret = true;
 
+        /* this is the last extent */
+        if (em->start + em->len >= i_size_read(inode))
+                return false;
+
+        next = defrag_lookup_extent(inode, em->start + em->len);
+        if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
+                ret = false;
+
+        free_extent_map(next);
         return ret;
 }
 
-static int should_defrag_range(struct inode *inode, u64 start, u64 len,
-                               int thresh, u64 *last_len, u64 *skip,
-                               u64 *defrag_end)
+static int should_defrag_range(struct inode *inode, u64 start, int thresh,
+                               u64 *last_len, u64 *skip, u64 *defrag_end)
 {
-        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
-        struct extent_map *em = NULL;
-        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+        struct extent_map *em;
         int ret = 1;
+        bool next_mergeable = true;
 
         /*
          * make sure that once we start defragging an extent, we keep on
@@ -828,23 +847,9 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
 
         *skip = 0;
 
-        /*
-         * hopefully we have this extent in the tree already, try without
-         * the full extent lock
-         */
-        read_lock(&em_tree->lock);
-        em = lookup_extent_mapping(em_tree, start, len);
-        read_unlock(&em_tree->lock);
-
-        if (!em) {
-                /* get the big lock and read metadata off disk */
-                lock_extent(io_tree, start, start + len - 1);
-                em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
-                unlock_extent(io_tree, start, start + len - 1);
-
-                if (IS_ERR(em))
-                        return 0;
-        }
+        em = defrag_lookup_extent(inode, start);
+        if (!em)
+                return 0;
 
         /* this will cover holes, and inline extents */
         if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
@@ -852,18 +857,15 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
                 goto out;
         }
 
-        /* If we have nothing to merge with us, just skip. */
-        if (check_adjacent_extents(inode, em)) {
-                ret = 0;
-                goto out;
-        }
+        next_mergeable = defrag_check_next_extent(inode, em);
 
         /*
-         * we hit a real extent, if it is big don't bother defragging it again
+         * we hit a real extent, if it is big or the next extent is not a
+         * real extent, don't bother defragging it
          */
-        if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh)
+        if ((*last_len == 0 || *last_len >= thresh) &&
+            (em->len >= thresh || !next_mergeable))
                 ret = 0;
-
 out:
         /*
          * last_len ends up being a counter of how many bytes we've defragged.
@@ -1142,8 +1144,8 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
                         break;
 
                 if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
-                                         PAGE_CACHE_SIZE, extent_thresh,
-                                         &last_len, &skip, &defrag_end)) {
+                                         extent_thresh, &last_len, &skip,
+                                         &defrag_end)) {
                         unsigned long next;
                         /*
                          * the should_defrag function tells us how much to skip
@@ -1304,6 +1306,14 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
                 ret = -EINVAL;
                 goto out_free;
         }
+        if (device->fs_devices && device->fs_devices->seeding) {
+                printk(KERN_INFO "btrfs: resizer unable to apply on "
+                       "seeding device %llu\n",
+                       (unsigned long long)devid);
+                ret = -EINVAL;
+                goto out_free;
+        }
+
         if (!strcmp(sizestr, "max"))
                 new_size = device->bdev->bd_inode->i_size;
         else {
@@ -1345,8 +1355,9 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
         do_div(new_size, root->sectorsize);
         new_size *= root->sectorsize;
 
-        printk(KERN_INFO "btrfs: new size for %s is %llu\n",
-                device->name, (unsigned long long)new_size);
+        printk_in_rcu(KERN_INFO "btrfs: new size for %s is %llu\n",
+                      rcu_str_deref(device->name),
+                      (unsigned long long)new_size);
 
         if (new_size > old_size) {
                 trans = btrfs_start_transaction(root, 0);
@@ -2264,7 +2275,12 @@ static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
         di_args->total_bytes = dev->total_bytes;
         memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
         if (dev->name) {
-                strncpy(di_args->path, dev->name, sizeof(di_args->path));
+                struct rcu_string *name;
+
+                rcu_read_lock();
+                name = rcu_dereference(dev->name);
+                strncpy(di_args->path, name->str, sizeof(di_args->path));
+                rcu_read_unlock();
                 di_args->path[sizeof(di_args->path) - 1] = 0;
         } else {
                 di_args->path[0] = '\0';
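
For context on the behavioural change in should_defrag_range() above: an extent is now left alone when nothing is being accumulated (or the accumulated length already meets the threshold) and the extent is either large or its neighbour is not mergeable. The sketch below is a minimal userspace model of that skip test, not kernel code; the function name, threshold, and sample lengths are illustrative only.

/* skip_defrag_sketch.c - illustrative userspace model of the new skip test */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* mirrors: (*last_len == 0 || *last_len >= thresh) &&
 *          (em->len >= thresh || !next_mergeable)            */
static bool skip_defrag(uint64_t last_len, uint64_t em_len,
                        uint64_t thresh, bool next_mergeable)
{
        return (last_len == 0 || last_len >= thresh) &&
               (em_len >= thresh || !next_mergeable);
}

int main(void)
{
        uint64_t thresh = 256 * 1024;   /* example threshold: 256 KiB */

        /* big extent, nothing accumulated yet -> skipped (1) */
        printf("%d\n", skip_defrag(0, 1024 * 1024, thresh, true));
        /* small extent with a mergeable neighbour -> defragged (0) */
        printf("%d\n", skip_defrag(0, 64 * 1024, thresh, true));
        /* small extent but neighbour is a hole/inline extent -> skipped (1) */
        printf("%d\n", skip_defrag(0, 64 * 1024, thresh, false));
        return 0;
}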