author     Linus Torvalds <torvalds@linux-foundation.org>	2010-03-18 19:50:55 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>	2010-03-18 19:50:55 -0400
commit     441f4058a04b2943685ff94e0f5f1992b0b3649e
tree       80a61f6dddcf7d5831a8bd3771ffa2b430af3935 /fs/btrfs
parent     7c34691abe23741bfc7d2514efd5a39f0e0ecb06
parent     8ad6fcab564c5bc956bdc3dfa440ab152b6e780f
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable
* git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable: (30 commits)
Btrfs: fix the inode ref searches done by btrfs_search_path_in_tree
Btrfs: allow treeid==0 in the inode lookup ioctl
Btrfs: return keys for large items to the search ioctl
Btrfs: fix key checks and advance in the search ioctl
Btrfs: buffer results in the space_info ioctl
Btrfs: use __u64 types in ioctl.h
Btrfs: fix search_ioctl key advance
Btrfs: fix gfp flags masking in the compression code
Btrfs: don't look at bio flags after submit_bio
btrfs: using btrfs_stack_device_id() get devid
btrfs: use memparse
Btrfs: add a "df" ioctl for btrfs
Btrfs: cache the extent state everywhere we possibly can V2
Btrfs: cache ordered extent when completing io
Btrfs: cache extent state in find_delalloc_range
Btrfs: change the ordered tree to use a spinlock instead of a mutex
Btrfs: finish read pages in the order they are submitted
btrfs: fix btrfs_mkdir goto for no free objectids
Btrfs: flush data on snapshot creation
Btrfs: make df be a little bit more understandable
...
Diffstat (limited to 'fs/btrfs')
 fs/btrfs/btrfs_inode.h  |   5
 fs/btrfs/compression.c  |   2
 fs/btrfs/ctree.h        |  13
 fs/btrfs/disk-io.c      |  15
 fs/btrfs/export.c       |   4
 fs/btrfs/extent-tree.c  |  11
 fs/btrfs/extent_io.c    |  79
 fs/btrfs/extent_io.h    |  10
 fs/btrfs/file.c         |  23
 fs/btrfs/inode.c        | 139
 fs/btrfs/ioctl.c        | 706
 fs/btrfs/ioctl.h        | 111
 fs/btrfs/ordered-data.c |  41
 fs/btrfs/ordered-data.h |   7
 fs/btrfs/relocation.c   |   4
 fs/btrfs/super.c        | 238
 fs/btrfs/transaction.c  |   5
 fs/btrfs/tree-log.c     |   2
 fs/btrfs/volumes.c      |  39
19 files changed, 1225 insertions, 229 deletions
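
Most of the churn in the diffs below follows one pattern: call sites that used to pair lock_extent()/unlock_extent() now use lock_extent_bits(), which hands back the struct extent_state it found through a **cached_state out-parameter; that cached state is then passed into the bit-setting/clearing helpers and finally dropped with unlock_extent_cached(), so repeated operations on the same range can skip the rb-tree search. A minimal sketch of the calling convention, using only helpers whose signatures appear in the hunks below; the wrapper function itself is hypothetical and for illustration only:

/*
 * Illustration only: a made-up caller showing how the cached
 * extent_state is threaded through the real btrfs helpers below.
 */
static int example_delalloc_range(struct inode *inode, u64 start, u64 end)
{
	struct extent_state *cached_state = NULL;
	int ret;

	/* lock the range and remember the extent_state that was found */
	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, end, 0,
			 &cached_state, GFP_NOFS);

	/* reuse the cached state instead of searching the tree again */
	ret = btrfs_set_extent_delalloc(inode, start, end, &cached_state);

	/* unlock and release the cached reference in one call */
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, end,
			     &cached_state, GFP_NOFS);
	return ret;
}

Note the matching change in clear_extent_bit() (extent_io.c hunk below): it now only invalidates the caller's cached state when the bits being cleared include EXTENT_IOBITS or EXTENT_BOUNDARY, so clearing unrelated bits no longer throws the cache away.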
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 3f1f50d9d916..7a4dee199832 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -153,6 +153,11 @@ struct btrfs_inode {
 	unsigned ordered_data_close:1;
 	unsigned dummy_inode:1;
 
+	/*
+	 * always compress this one file
+	 */
+	unsigned force_compress:1;
+
 	struct inode vfs_inode;
 };
 
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index a11a32058b50..28b92a7218ab 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -478,7 +478,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 			goto next;
 		}
 
-		page = alloc_page(mapping_gfp_mask(mapping) | GFP_NOFS);
+		page = alloc_page(mapping_gfp_mask(mapping) & ~__GFP_FS);
 		if (!page)
 			break;
 
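
The compression.c hunk above is a one-liner worth spelling out: GFP_NOFS works by omitting __GFP_FS from the allocation mask, so OR-ing it into mapping_gfp_mask() cannot forbid filesystem re-entry; the fix clears the bit explicitly. A hedged illustration, with an invented helper name:

/* Illustration only: deriving a mask that forbids FS re-entry. */
static inline gfp_t example_nofs_mask(struct address_space *mapping)
{
	/* wrong: GFP_NOFS has no bit that adds a restriction */
	/* return mapping_gfp_mask(mapping) | GFP_NOFS; */

	/* right: explicitly drop __GFP_FS from the mapping's mask */
	return mapping_gfp_mask(mapping) & ~__GFP_FS;
}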
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 8b5cfdd4bfc1..0af2e3868573 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -373,11 +373,13 @@ struct btrfs_super_block {
  * ones specified below then we will fail to mount
  */
 #define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF	(1ULL << 0)
+#define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL	(2ULL << 0)
 
 #define BTRFS_FEATURE_COMPAT_SUPP		0ULL
 #define BTRFS_FEATURE_COMPAT_RO_SUPP		0ULL
 #define BTRFS_FEATURE_INCOMPAT_SUPP		\
-	BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF
+	(BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF |	\
+	 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL)
 
 /*
  * A leaf is full of items. offset and size tell us where to find
@@ -1182,7 +1184,6 @@ struct btrfs_root {
 #define BTRFS_INODE_NOATIME		(1 << 9)
 #define BTRFS_INODE_DIRSYNC		(1 << 10)
 
-
 /* some macros to generate set/get funcs for the struct fields. This
  * assumes there is a lefoo_to_cpu for every type, so lets make a simple
  * one for u8:
@@ -1842,7 +1843,7 @@ BTRFS_SETGET_STACK_FUNCS(super_num_devices, struct btrfs_super_block,
 BTRFS_SETGET_STACK_FUNCS(super_compat_flags, struct btrfs_super_block,
			 compat_flags, 64);
 BTRFS_SETGET_STACK_FUNCS(super_compat_ro_flags, struct btrfs_super_block,
-			 compat_flags, 64);
+			 compat_ro_flags, 64);
 BTRFS_SETGET_STACK_FUNCS(super_incompat_flags, struct btrfs_super_block,
			 incompat_flags, 64);
 BTRFS_SETGET_STACK_FUNCS(super_csum_type, struct btrfs_super_block,
@@ -2310,7 +2311,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
			       u32 min_type);
 
 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
-int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end);
+int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
+			      struct extent_state **cached_state);
 int btrfs_writepages(struct address_space *mapping,
		     struct writeback_control *wbc);
 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
@@ -2335,7 +2337,7 @@ int btrfs_init_cachep(void);
 void btrfs_destroy_cachep(void);
 long btrfs_ioctl_trans_end(struct file *file);
 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
-			 struct btrfs_root *root);
+			 struct btrfs_root *root, int *was_new);
 int btrfs_commit_write(struct file *file, struct page *page,
		       unsigned from, unsigned to);
 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
@@ -2386,7 +2388,6 @@ void btrfs_sysfs_del_super(struct btrfs_fs_info *root);
 ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
 
 /* super.c */
-u64 btrfs_parse_size(char *str);
 int btrfs_parse_options(struct btrfs_root *root, char *options);
 int btrfs_sync_fs(struct super_block *sb, int wait);
 
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 0427183e3e05..11d0ad30e203 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -263,13 +263,15 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
 static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid)
 {
+	struct extent_state *cached_state = NULL;
 	int ret;
 
 	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
 		return 0;
 
-	lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS);
-	if (extent_buffer_uptodate(io_tree, eb) &&
+	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
+			 0, &cached_state, GFP_NOFS);
+	if (extent_buffer_uptodate(io_tree, eb, cached_state) &&
 	    btrfs_header_generation(eb) == parent_transid) {
 		ret = 0;
 		goto out;
@@ -282,10 +284,10 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
		       (unsigned long long)btrfs_header_generation(eb));
 	}
 	ret = 1;
-	clear_extent_buffer_uptodate(io_tree, eb);
+	clear_extent_buffer_uptodate(io_tree, eb, &cached_state);
 out:
-	unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
-		      GFP_NOFS);
+	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
+			     &cached_state, GFP_NOFS);
 	return ret;
 }
 
@@ -2497,7 +2499,8 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
 	int ret;
 	struct inode *btree_inode = buf->first_page->mapping->host;
 
-	ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf);
+	ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf,
+				     NULL);
 	if (!ret)
 		return ret;
 
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index ba5c3fd5ab8c..951ef09b82f4 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -95,7 +95,7 @@ static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
 	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
 	key.offset = 0;
 
-	inode = btrfs_iget(sb, &key, root);
+	inode = btrfs_iget(sb, &key, root, NULL);
 	if (IS_ERR(inode)) {
 		err = PTR_ERR(inode);
 		goto fail;
@@ -223,7 +223,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
 
 	key.type = BTRFS_INODE_ITEM_KEY;
 	key.offset = 0;
-	dentry = d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root));
+	dentry = d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL));
 	if (!IS_ERR(dentry))
 		dentry->d_op = &btrfs_dentry_operations;
 	return dentry;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 559f72489b3b..1727b26fb194 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -6561,6 +6561,7 @@ static noinline int invalidate_extent_cache(struct btrfs_root *root,
 	struct btrfs_key key;
 	struct inode *inode = NULL;
 	struct btrfs_file_extent_item *fi;
+	struct extent_state *cached_state = NULL;
 	u64 num_bytes;
 	u64 skip_objectid = 0;
 	u32 nritems;
@@ -6589,12 +6590,14 @@ static noinline int invalidate_extent_cache(struct btrfs_root *root,
 		}
 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
 
-		lock_extent(&BTRFS_I(inode)->io_tree, key.offset,
-			    key.offset + num_bytes - 1, GFP_NOFS);
+		lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset,
+				 key.offset + num_bytes - 1, 0, &cached_state,
+				 GFP_NOFS);
 		btrfs_drop_extent_cache(inode, key.offset,
					key.offset + num_bytes - 1, 1);
-		unlock_extent(&BTRFS_I(inode)->io_tree, key.offset,
-			      key.offset + num_bytes - 1, GFP_NOFS);
+		unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset,
+				     key.offset + num_bytes - 1, &cached_state,
+				     GFP_NOFS);
 		cond_resched();
 	}
 	iput(inode);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 7073cbb1b2d4..c99121ac5d6b 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -513,7 +513,10 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	u64 last_end;
 	int err;
 	int set = 0;
+	int clear = 0;
 
+	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
+		clear = 1;
 again:
 	if (!prealloc && (mask & __GFP_WAIT)) {
 		prealloc = alloc_extent_state(mask);
@@ -524,14 +527,20 @@ again:
 	spin_lock(&tree->lock);
 	if (cached_state) {
 		cached = *cached_state;
-		*cached_state = NULL;
-		cached_state = NULL;
+
+		if (clear) {
+			*cached_state = NULL;
+			cached_state = NULL;
+		}
+
 		if (cached && cached->tree && cached->start == start) {
-			atomic_dec(&cached->refs);
+			if (clear)
+				atomic_dec(&cached->refs);
 			state = cached;
 			goto hit_next;
 		}
-		free_extent_state(cached);
+		if (clear)
+			free_extent_state(cached);
 	}
 	/*
 	 * this search will find the extents that end after
@@ -946,11 +955,11 @@ int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 }
 
 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
-			gfp_t mask)
+			struct extent_state **cached_state, gfp_t mask)
 {
 	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
-			      0, NULL, NULL, mask);
+			      0, NULL, cached_state, mask);
 }
 
 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
@@ -984,10 +993,11 @@ int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
 }
 
 static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
-				 u64 end, gfp_t mask)
+				 u64 end, struct extent_state **cached_state,
+				 gfp_t mask)
 {
 	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
-				NULL, mask);
+				cached_state, mask);
 }
 
 int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
@@ -1171,7 +1181,8 @@ out:
  * 1 is returned if we find something, 0 if nothing was in the tree
  */
 static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
-					u64 *start, u64 *end, u64 max_bytes)
+					u64 *start, u64 *end, u64 max_bytes,
+					struct extent_state **cached_state)
 {
 	struct rb_node *node;
 	struct extent_state *state;
@@ -1203,8 +1214,11 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
 			*end = state->end;
 			goto out;
 		}
-		if (!found)
+		if (!found) {
 			*start = state->start;
+			*cached_state = state;
+			atomic_inc(&state->refs);
+		}
 		found++;
 		*end = state->end;
 		cur_start = state->end + 1;
@@ -1336,10 +1350,11 @@ again:
 	delalloc_start = *start;
 	delalloc_end = 0;
 	found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
-				    max_bytes);
+				    max_bytes, &cached_state);
 	if (!found || delalloc_end <= *start) {
 		*start = delalloc_start;
 		*end = delalloc_end;
+		free_extent_state(cached_state);
 		return found;
 	}
 
@@ -1722,7 +1737,7 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
 		}
 
 		if (!uptodate) {
-			clear_extent_uptodate(tree, start, end, GFP_NOFS);
+			clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
 			ClearPageUptodate(page);
 			SetPageError(page);
 		}
@@ -1750,7 +1765,8 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
 static void end_bio_extent_readpage(struct bio *bio, int err)
 {
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
+	struct bio_vec *bvec = bio->bi_io_vec;
 	struct extent_io_tree *tree;
 	u64 start;
 	u64 end;
@@ -1773,7 +1789,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 		else
 			whole_page = 0;
 
-		if (--bvec >= bio->bi_io_vec)
+		if (++bvec <= bvec_end)
 			prefetchw(&bvec->bv_page->flags);
 
 		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
@@ -1818,7 +1834,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 			}
 			check_page_locked(tree, page);
 		}
-	} while (bvec >= bio->bi_io_vec);
+	} while (bvec <= bvec_end);
 
 	bio_put(bio);
 }
@@ -2704,6 +2720,7 @@ int extent_readpages(struct extent_io_tree *tree,
 int extent_invalidatepage(struct extent_io_tree *tree,
			  struct page *page, unsigned long offset)
 {
+	struct extent_state *cached_state = NULL;
 	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
 	u64 end = start + PAGE_CACHE_SIZE - 1;
 	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
@@ -2712,12 +2729,12 @@ int extent_invalidatepage(struct extent_io_tree *tree,
 	if (start > end)
 		return 0;
 
-	lock_extent(tree, start, end, GFP_NOFS);
+	lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
 	wait_on_page_writeback(page);
 	clear_extent_bit(tree, start, end,
			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
			 EXTENT_DO_ACCOUNTING,
-			 1, 1, NULL, GFP_NOFS);
+			 1, 1, &cached_state, GFP_NOFS);
 	return 0;
 }
 
@@ -2920,16 +2937,17 @@ sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
		     get_extent_t *get_extent)
 {
 	struct inode *inode = mapping->host;
+	struct extent_state *cached_state = NULL;
 	u64 start = iblock << inode->i_blkbits;
 	sector_t sector = 0;
 	size_t blksize = (1 << inode->i_blkbits);
 	struct extent_map *em;
 
-	lock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
-		    GFP_NOFS);
+	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
+			 0, &cached_state, GFP_NOFS);
 	em = get_extent(inode, NULL, 0, start, blksize, 0);
-	unlock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
-		      GFP_NOFS);
+	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start,
+			     start + blksize - 1, &cached_state, GFP_NOFS);
 	if (!em || IS_ERR(em))
 		return 0;
 
@@ -2951,6 +2969,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	u32 flags = 0;
 	u64 disko = 0;
 	struct extent_map *em = NULL;
+	struct extent_state *cached_state = NULL;
 	int end = 0;
 	u64 em_start = 0, em_len = 0;
 	unsigned long emflags;
@@ -2959,8 +2978,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	if (len == 0)
 		return -EINVAL;
 
-	lock_extent(&BTRFS_I(inode)->io_tree, start, start + len,
-		    GFP_NOFS);
+	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
+			 &cached_state, GFP_NOFS);
 	em = get_extent(inode, NULL, 0, off, max - off, 0);
 	if (!em)
 		goto out;
@@ -3023,8 +3042,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 out_free:
 	free_extent_map(em);
 out:
-	unlock_extent(&BTRFS_I(inode)->io_tree, start, start + len,
-		      GFP_NOFS);
+	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
+			     &cached_state, GFP_NOFS);
 	return ret;
 }
 
@@ -3264,7 +3283,8 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree,
 }
 
 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
-				 struct extent_buffer *eb)
+				 struct extent_buffer *eb,
+				 struct extent_state **cached_state)
 {
 	unsigned long i;
 	struct page *page;
@@ -3274,7 +3294,7 @@ int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
 	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 
 	clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
-			      GFP_NOFS);
+			      cached_state, GFP_NOFS);
 	for (i = 0; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
 		if (page)
@@ -3334,7 +3354,8 @@ int extent_range_uptodate(struct extent_io_tree *tree,
 }
 
 int extent_buffer_uptodate(struct extent_io_tree *tree,
-			   struct extent_buffer *eb)
+			   struct extent_buffer *eb,
+			   struct extent_state *cached_state)
 {
 	int ret = 0;
 	unsigned long num_pages;
@@ -3346,7 +3367,7 @@ int extent_buffer_uptodate(struct extent_io_tree *tree,
 		return 1;
 
 	ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
-			     EXTENT_UPTODATE, 1, NULL);
+			     EXTENT_UPTODATE, 1, cached_state);
 	if (ret)
 		return ret;
 
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 36de250a7b2b..bbab4813646f 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -163,6 +163,8 @@ int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, struct extent_state **cached, gfp_t mask);
 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
+int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
+			 struct extent_state **cached, gfp_t mask);
 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		    gfp_t mask);
 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
@@ -196,7 +198,7 @@ int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
 int clear_extent_ordered_metadata(struct extent_io_tree *tree, u64 start,
				  u64 end, gfp_t mask);
 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
-			gfp_t mask);
+			struct extent_state **cached_state, gfp_t mask);
 int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask);
 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
@@ -281,9 +283,11 @@ int test_extent_buffer_dirty(struct extent_io_tree *tree,
 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
			       struct extent_buffer *eb);
 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
-				 struct extent_buffer *eb);
+				 struct extent_buffer *eb,
+				 struct extent_state **cached_state);
 int extent_buffer_uptodate(struct extent_io_tree *tree,
-			   struct extent_buffer *eb);
+			   struct extent_buffer *eb,
+			   struct extent_state *cached_state);
 int map_extent_buffer(struct extent_buffer *eb, unsigned long offset,
		      unsigned long min_len, char **token, char **map,
		      unsigned long *map_start,
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 6ed434ac037f..ee3323c7fc1c 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -123,7 +123,8 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
 
 	end_of_last_block = start_pos + num_bytes - 1;
-	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
+	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
+					NULL);
 	if (err)
 		return err;
 
@@ -753,6 +754,7 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 loff_t pos, unsigned long first_index,
			 unsigned long last_index, size_t write_bytes)
 {
+	struct extent_state *cached_state = NULL;
 	int i;
 	unsigned long index = pos >> PAGE_CACHE_SHIFT;
 	struct inode *inode = fdentry(file)->d_inode;
@@ -781,16 +783,18 @@ again:
 	}
 	if (start_pos < inode->i_size) {
 		struct btrfs_ordered_extent *ordered;
-		lock_extent(&BTRFS_I(inode)->io_tree,
-			    start_pos, last_pos - 1, GFP_NOFS);
+		lock_extent_bits(&BTRFS_I(inode)->io_tree,
+				 start_pos, last_pos - 1, 0, &cached_state,
+				 GFP_NOFS);
 		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
 		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
-			unlock_extent(&BTRFS_I(inode)->io_tree,
-				      start_pos, last_pos - 1, GFP_NOFS);
+			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+					     start_pos, last_pos - 1,
+					     &cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
@@ -802,12 +806,13 @@ again:
		if (ordered)
			btrfs_put_ordered_extent(ordered);
 
-		clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
+		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
-				  EXTENT_DO_ACCOUNTING,
+				  EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
				  GFP_NOFS);
-		unlock_extent(&BTRFS_I(inode)->io_tree,
-			      start_pos, last_pos - 1, GFP_NOFS);
+		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+				     start_pos, last_pos - 1, &cached_state,
+				     GFP_NOFS);
 	}
 	for (i = 0; i < num_pages; i++) {
 		clear_page_dirty_for_io(pages[i]);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index c41db6d45ab6..02bb099845fd 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -379,7 +379,8 @@ again:
 	 * change at any time if we discover bad compression ratios.
 	 */
 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
-	    btrfs_test_opt(root, COMPRESS)) {
+	    (btrfs_test_opt(root, COMPRESS) ||
+	     (BTRFS_I(inode)->force_compress))) {
 		WARN_ON(pages);
 		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
 
@@ -483,8 +484,10 @@ again:
 		nr_pages_ret = 0;
 
 		/* flag the file so we don't compress in the future */
-		if (!btrfs_test_opt(root, FORCE_COMPRESS))
+		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
+		    !(BTRFS_I(inode)->force_compress)) {
 			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
+		}
 	}
 	if (will_compress) {
 		*num_added += 1;
@@ -570,8 +573,8 @@ retry:
 			unsigned long nr_written = 0;
 
 			lock_extent(io_tree, async_extent->start,
-				    async_extent->start +
-				    async_extent->ram_size - 1, GFP_NOFS);
+					 async_extent->start +
+					 async_extent->ram_size - 1, GFP_NOFS);
 
 			/* allocate blocks */
 			ret = cow_file_range(inode, async_cow->locked_page,
@@ -1211,7 +1214,8 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
 	else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
 		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
-	else if (!btrfs_test_opt(root, COMPRESS))
+	else if (!btrfs_test_opt(root, COMPRESS) &&
+		 !(BTRFS_I(inode)->force_compress))
 		ret = cow_file_range(inode, locked_page, start, end,
				     page_started, nr_written, 1);
 	else
@@ -1508,12 +1512,13 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
 	return 0;
 }
 
-int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
+int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
+			      struct extent_state **cached_state)
 {
 	if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
 		WARN_ON(1);
 	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
-				   GFP_NOFS);
+				   cached_state, GFP_NOFS);
 }
 
 /* see btrfs_writepage_start_hook for details on why this is required */
@@ -1526,6 +1531,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
 {
 	struct btrfs_writepage_fixup *fixup;
 	struct btrfs_ordered_extent *ordered;
+	struct extent_state *cached_state = NULL;
 	struct page *page;
 	struct inode *inode;
 	u64 page_start;
@@ -1544,7 +1550,8 @@ again:
 	page_start = page_offset(page);
 	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
 
-	lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
+	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
+			 &cached_state, GFP_NOFS);
 
 	/* already ordered? We're done */
 	if (PagePrivate2(page))
@@ -1552,17 +1559,18 @@ again:
 
 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
 	if (ordered) {
-		unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
-			      page_end, GFP_NOFS);
+		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
+				     page_end, &cached_state, GFP_NOFS);
 		unlock_page(page);
 		btrfs_start_ordered_extent(inode, ordered, 1);
 		goto again;
 	}
 
-	btrfs_set_extent_delalloc(inode, page_start, page_end);
+	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
 	ClearPageChecked(page);
 out:
-	unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
+	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
+			     &cached_state, GFP_NOFS);
 out_page:
 	unlock_page(page);
 	page_cache_release(page);
@@ -1691,14 +1699,14 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
 	struct btrfs_trans_handle *trans;
 	struct btrfs_ordered_extent *ordered_extent = NULL;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+	struct extent_state *cached_state = NULL;
 	int compressed = 0;
 	int ret;
 
-	ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
+	ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
+					     end - start + 1);
 	if (!ret)
 		return 0;
-
-	ordered_extent = btrfs_lookup_ordered_extent(inode, start);
 	BUG_ON(!ordered_extent);
 
 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
@@ -1713,9 +1721,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
 		goto out;
 	}
 
-	lock_extent(io_tree, ordered_extent->file_offset,
+	lock_extent_bits(io_tree, ordered_extent->file_offset,
		    ordered_extent->file_offset + ordered_extent->len - 1,
-		    GFP_NOFS);
+		    0, &cached_state, GFP_NOFS);
 
 	trans = btrfs_join_transaction(root, 1);
 
@@ -1742,9 +1750,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
					   ordered_extent->len);
 		BUG_ON(ret);
 	}
-	unlock_extent(io_tree, ordered_extent->file_offset,
-		      ordered_extent->file_offset + ordered_extent->len - 1,
-		      GFP_NOFS);
+	unlock_extent_cached(io_tree, ordered_extent->file_offset,
+			     ordered_extent->file_offset +
+			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
+
 	add_pending_csums(trans, inode, ordered_extent->file_offset,
			  &ordered_extent->list);
 
@@ -2153,7 +2162,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
 		found_key.objectid = found_key.offset;
 		found_key.type = BTRFS_INODE_ITEM_KEY;
 		found_key.offset = 0;
-		inode = btrfs_iget(root->fs_info->sb, &found_key, root);
+		inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
 		if (IS_ERR(inode))
 			break;
 
@@ -3081,6 +3090,7 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct btrfs_ordered_extent *ordered;
+	struct extent_state *cached_state = NULL;
 	char *kaddr;
 	u32 blocksize = root->sectorsize;
 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
@@ -3127,12 +3137,14 @@ again:
 	}
 	wait_on_page_writeback(page);
 
-	lock_extent(io_tree, page_start, page_end, GFP_NOFS);
+	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
+			 GFP_NOFS);
 	set_page_extent_mapped(page);
 
 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
 	if (ordered) {
-		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+		unlock_extent_cached(io_tree, page_start, page_end,
+				     &cached_state, GFP_NOFS);
 		unlock_page(page);
 		page_cache_release(page);
 		btrfs_start_ordered_extent(inode, ordered, 1);
@@ -3140,13 +3152,15 @@ again:
 		goto again;
 	}
 
-	clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
+	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
-			  GFP_NOFS);
+			  0, 0, &cached_state, GFP_NOFS);
 
-	ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
+	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
+					&cached_state);
 	if (ret) {
-		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+		unlock_extent_cached(io_tree, page_start, page_end,
+				     &cached_state, GFP_NOFS);
 		goto out_unlock;
 	}
 
@@ -3159,7 +3173,8 @@ again:
 	}
 	ClearPageChecked(page);
 	set_page_dirty(page);
-	unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+	unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
+			     GFP_NOFS);
 
 out_unlock:
 	if (ret)
@@ -3177,6 +3192,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct extent_map *em;
+	struct extent_state *cached_state = NULL;
 	u64 mask = root->sectorsize - 1;
 	u64 hole_start = (inode->i_size + mask) & ~mask;
 	u64 block_end = (size + mask) & ~mask;
@@ -3192,11 +3208,13 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
 		struct btrfs_ordered_extent *ordered;
 		btrfs_wait_ordered_range(inode, hole_start,
					 block_end - hole_start);
-		lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
+		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
+				 &cached_state, GFP_NOFS);
 		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
 		if (!ordered)
 			break;
-		unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
+		unlock_extent_cached(io_tree, hole_start, block_end - 1,
+				     &cached_state, GFP_NOFS);
 		btrfs_put_ordered_extent(ordered);
 	}
 
@@ -3241,7 +3259,8 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
 			break;
 	}
 
-	unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
+	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
+			     GFP_NOFS);
 	return err;
 }
 
@@ -3639,6 +3658,7 @@ static noinline void init_btrfs_i(struct inode *inode)
 	bi->index_cnt = (u64)-1;
 	bi->last_unlink_trans = 0;
 	bi->ordered_data_close = 0;
+	bi->force_compress = 0;
 	extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
 	extent_io_tree_init(&BTRFS_I(inode)->io_tree,
			    inode->i_mapping, GFP_NOFS);
@@ -3687,7 +3707,7 @@ static struct inode *btrfs_iget_locked(struct super_block *s,
  * Returns in *is_new if the inode was read from disk
  */
 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
-			 struct btrfs_root *root)
+			 struct btrfs_root *root, int *new)
 {
 	struct inode *inode;
 
@@ -3702,6 +3722,8 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
 
 		inode_tree_add(inode);
 		unlock_new_inode(inode);
+		if (new)
+			*new = 1;
 	}
 
 	return inode;
@@ -3754,7 +3776,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
 		return NULL;
 
 	if (location.type == BTRFS_INODE_ITEM_KEY) {
-		inode = btrfs_iget(dir->i_sb, &location, root);
+		inode = btrfs_iget(dir->i_sb, &location, root, NULL);
 		return inode;
 	}
 
@@ -3769,7 +3791,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
 		else
 			inode = new_simple_dir(dir->i_sb, &location, sub_root);
 	} else {
-		inode = btrfs_iget(dir->i_sb, &location, sub_root);
+		inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
 	}
 	srcu_read_unlock(&root->fs_info->subvol_srcu, index);
 
@@ -4501,7 +4523,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 	err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
 	if (err) {
 		err = -ENOSPC;
-		goto out_unlock;
+		goto out_fail;
 	}
 
 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
@@ -4979,6 +5001,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
 {
 	struct extent_io_tree *tree;
 	struct btrfs_ordered_extent *ordered;
+	struct extent_state *cached_state = NULL;
 	u64 page_start = page_offset(page);
 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
 
@@ -4997,7 +5020,8 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
 		btrfs_releasepage(page, GFP_NOFS);
 		return;
 	}
-	lock_extent(tree, page_start, page_end, GFP_NOFS);
+	lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
+			 GFP_NOFS);
 	ordered = btrfs_lookup_ordered_extent(page->mapping->host,
					      page_offset(page));
 	if (ordered) {
@@ -5008,7 +5032,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
 		clear_extent_bit(tree, page_start, page_end,
				 EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
-				 NULL, GFP_NOFS);
+				 &cached_state, GFP_NOFS);
 		/*
		 * whoever cleared the private bit is responsible
		 * for the finish_ordered_io
@@ -5018,11 +5042,13 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
					 page_start, page_end);
 		}
 		btrfs_put_ordered_extent(ordered);
-		lock_extent(tree, page_start, page_end, GFP_NOFS);
+		cached_state = NULL;
+		lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
+				 GFP_NOFS);
 	}
 	clear_extent_bit(tree, page_start, page_end,
			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
-			 EXTENT_DO_ACCOUNTING, 1, 1, NULL, GFP_NOFS);
+			 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS);
 	__btrfs_releasepage(page, GFP_NOFS);
 
 	ClearPageChecked(page);
@@ -5055,6 +5081,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct btrfs_ordered_extent *ordered;
+	struct extent_state *cached_state = NULL;
 	char *kaddr;
 	unsigned long zero_start;
 	loff_t size;
@@ -5093,7 +5120,8 @@ again:
 	}
 	wait_on_page_writeback(page);
 
-	lock_extent(io_tree, page_start, page_end, GFP_NOFS);
+	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
+			 GFP_NOFS);
 	set_page_extent_mapped(page);
 
 	/*
@@ -5102,7 +5130,8 @@ again:
	 */
 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
 	if (ordered) {
-		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+		unlock_extent_cached(io_tree, page_start, page_end,
+				     &cached_state, GFP_NOFS);
 		unlock_page(page);
 		btrfs_start_ordered_extent(inode, ordered, 1);
 		btrfs_put_ordered_extent(ordered);
@@ -5116,13 +5145,15 @@ again:
	 * is probably a better way to do this, but for now keep consistent with
	 * prepare_pages in the normal write path.
	 */
-	clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
-			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
-			  GFP_NOFS);
+	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
+			 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
+			 0, 0, &cached_state, GFP_NOFS);
 
-	ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
+	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
+					&cached_state);
 	if (ret) {
-		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+		unlock_extent_cached(io_tree, page_start, page_end,
+				     &cached_state, GFP_NOFS);
 		ret = VM_FAULT_SIGBUS;
 		btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
 		goto out_unlock;
@@ -5148,7 +5179,7 @@ again:
 	BTRFS_I(inode)->last_trans = root->fs_info->generation;
 	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
 
-	unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+	unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
 
 out_unlock:
 	btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
@@ -5827,6 +5858,7 @@ stop_trans:
 static long btrfs_fallocate(struct inode *inode, int mode,
			    loff_t offset, loff_t len)
 {
+	struct extent_state *cached_state = NULL;
 	u64 cur_offset;
 	u64 last_byte;
 	u64 alloc_start;
@@ -5865,16 +5897,17 @@ static long btrfs_fallocate(struct inode *inode, int mode,
5865 | /* the extent lock is ordered inside the running | 5897 | /* the extent lock is ordered inside the running |
5866 | * transaction | 5898 | * transaction |
5867 | */ | 5899 | */ |
5868 | lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, | 5900 | lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start, |
5869 | GFP_NOFS); | 5901 | locked_end, 0, &cached_state, GFP_NOFS); |
5870 | ordered = btrfs_lookup_first_ordered_extent(inode, | 5902 | ordered = btrfs_lookup_first_ordered_extent(inode, |
5871 | alloc_end - 1); | 5903 | alloc_end - 1); |
5872 | if (ordered && | 5904 | if (ordered && |
5873 | ordered->file_offset + ordered->len > alloc_start && | 5905 | ordered->file_offset + ordered->len > alloc_start && |
5874 | ordered->file_offset < alloc_end) { | 5906 | ordered->file_offset < alloc_end) { |
5875 | btrfs_put_ordered_extent(ordered); | 5907 | btrfs_put_ordered_extent(ordered); |
5876 | unlock_extent(&BTRFS_I(inode)->io_tree, | 5908 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, |
5877 | alloc_start, locked_end, GFP_NOFS); | 5909 | alloc_start, locked_end, |
5910 | &cached_state, GFP_NOFS); | ||
5878 | /* | 5911 | /* |
5879 | * we can't wait on the range with the transaction | 5912 | * we can't wait on the range with the transaction |
5880 | * running or with the extent lock held | 5913 | * running or with the extent lock held |
@@ -5916,8 +5949,8 @@ static long btrfs_fallocate(struct inode *inode, int mode, | |||
5916 | break; | 5949 | break; |
5917 | } | 5950 | } |
5918 | } | 5951 | } |
5919 | unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, | 5952 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, |
5920 | GFP_NOFS); | 5953 | &cached_state, GFP_NOFS); |
5921 | 5954 | ||
5922 | btrfs_free_reserved_data_space(BTRFS_I(inode)->root, inode, | 5955 | btrfs_free_reserved_data_space(BTRFS_I(inode)->root, inode, |
5923 | alloc_end - alloc_start); | 5956 | alloc_end - alloc_start); |
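The inode.c changes above all follow one pattern from the extent-state caching work: lock_extent_bits() returns the locked extent_state through a cached_state pointer, that pointer is threaded into clear_extent_bit() and btrfs_set_extent_delalloc() so they can start from the cached state instead of searching the io tree again, and unlock_extent_cached() finally drops it. A minimal in-kernel sketch of the pattern, assuming start and end describe a page-aligned range the caller already serializes (error handling omitted):

	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
	struct extent_state *cached_state = NULL;

	/* lock the range and remember the extent_state that covers it */
	lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);

	/* the cached state is a hint that lets these calls skip a fresh rbtree walk */
	clear_extent_bit(tree, start, end,
			 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
			 0, 0, &cached_state, GFP_NOFS);
	btrfs_set_extent_delalloc(inode, start, end, &cached_state);

	/* unlock and release the cached reference in one step */
	unlock_extent_cached(tree, start, end, &cached_state, GFP_NOFS);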
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 645a17927a8f..2845c6ceecd2 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include "print-tree.h" | 48 | #include "print-tree.h" |
49 | #include "volumes.h" | 49 | #include "volumes.h" |
50 | #include "locking.h" | 50 | #include "locking.h" |
51 | #include "ctree.h" | ||
51 | 52 | ||
52 | /* Mask out flags that are inappropriate for the given type of inode. */ | 53 | /* Mask out flags that are inappropriate for the given type of inode. */ |
53 | static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) | 54 | static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) |
@@ -474,7 +475,79 @@ out_unlock: | |||
474 | return error; | 475 | return error; |
475 | } | 476 | } |
476 | 477 | ||
477 | static int btrfs_defrag_file(struct file *file) | 478 | static int should_defrag_range(struct inode *inode, u64 start, u64 len, |
479 | int thresh, u64 *last_len, u64 *skip, | ||
480 | u64 *defrag_end) | ||
481 | { | ||
482 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | ||
483 | struct extent_map *em = NULL; | ||
484 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | ||
485 | int ret = 1; | ||
486 | |||
487 | |||
488 | if (thresh == 0) | ||
489 | thresh = 256 * 1024; | ||
490 | |||
491 | /* | ||
492 | * make sure that once we start defragging an extent, we keep on | ||
493 | * defragging it | ||
494 | */ | ||
495 | if (start < *defrag_end) | ||
496 | return 1; | ||
497 | |||
498 | *skip = 0; | ||
499 | |||
500 | /* | ||
501 | * hopefully we have this extent in the tree already, try without | ||
502 | * the full extent lock | ||
503 | */ | ||
504 | read_lock(&em_tree->lock); | ||
505 | em = lookup_extent_mapping(em_tree, start, len); | ||
506 | read_unlock(&em_tree->lock); | ||
507 | |||
508 | if (!em) { | ||
509 | /* get the big lock and read metadata off disk */ | ||
510 | lock_extent(io_tree, start, start + len - 1, GFP_NOFS); | ||
511 | em = btrfs_get_extent(inode, NULL, 0, start, len, 0); | ||
512 | unlock_extent(io_tree, start, start + len - 1, GFP_NOFS); | ||
513 | |||
514 | if (!em) | ||
515 | return 0; | ||
516 | } | ||
517 | |||
518 | /* this will cover holes, and inline extents */ | ||
519 | if (em->block_start >= EXTENT_MAP_LAST_BYTE) | ||
520 | ret = 0; | ||
521 | |||
522 | /* | ||
523 | * we hit a real extent, if it is big don't bother defragging it again | ||
524 | */ | ||
525 | if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh) | ||
526 | ret = 0; | ||
527 | |||
528 | /* | ||
529 | * last_len ends up being a counter of how many bytes we've defragged. | ||
530 | * every time we choose not to defrag an extent, we reset *last_len | ||
531 | * so that the next tiny extent will force a defrag. | ||
532 | * | ||
533 | * The end result of this is that tiny extents before a single big | ||
534 | * extent will force at least part of that big extent to be defragged. | ||
535 | */ | ||
536 | if (ret) { | ||
537 | *last_len += len; | ||
538 | *defrag_end = extent_map_end(em); | ||
539 | } else { | ||
540 | *last_len = 0; | ||
541 | *skip = extent_map_end(em); | ||
542 | *defrag_end = 0; | ||
543 | } | ||
544 | |||
545 | free_extent_map(em); | ||
546 | return ret; | ||
547 | } | ||
548 | |||
549 | static int btrfs_defrag_file(struct file *file, | ||
550 | struct btrfs_ioctl_defrag_range_args *range) | ||
478 | { | 551 | { |
479 | struct inode *inode = fdentry(file)->d_inode; | 552 | struct inode *inode = fdentry(file)->d_inode; |
480 | struct btrfs_root *root = BTRFS_I(inode)->root; | 553 | struct btrfs_root *root = BTRFS_I(inode)->root; |
@@ -486,37 +559,96 @@ static int btrfs_defrag_file(struct file *file) | |||
486 | unsigned long total_read = 0; | 559 | unsigned long total_read = 0; |
487 | u64 page_start; | 560 | u64 page_start; |
488 | u64 page_end; | 561 | u64 page_end; |
562 | u64 last_len = 0; | ||
563 | u64 skip = 0; | ||
564 | u64 defrag_end = 0; | ||
489 | unsigned long i; | 565 | unsigned long i; |
490 | int ret; | 566 | int ret; |
491 | 567 | ||
492 | ret = btrfs_check_data_free_space(root, inode, inode->i_size); | 568 | if (inode->i_size == 0) |
493 | if (ret) | 569 | return 0; |
494 | return -ENOSPC; | 570 | |
571 | if (range->start + range->len > range->start) { | ||
572 | last_index = min_t(u64, inode->i_size - 1, | ||
573 | range->start + range->len - 1) >> PAGE_CACHE_SHIFT; | ||
574 | } else { | ||
575 | last_index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT; | ||
576 | } | ||
577 | |||
578 | i = range->start >> PAGE_CACHE_SHIFT; | ||
579 | while (i <= last_index) { | ||
580 | if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, | ||
581 | PAGE_CACHE_SIZE, | ||
582 | range->extent_thresh, | ||
583 | &last_len, &skip, | ||
584 | &defrag_end)) { | ||
585 | unsigned long next; | ||
586 | /* | ||
587 | * the should_defrag function tells us how much to skip; | ||
588 | * bump our counter by the suggested amount | ||
589 | */ | ||
590 | next = (skip + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; | ||
591 | i = max(i + 1, next); | ||
592 | continue; | ||
593 | } | ||
495 | 594 | ||
496 | mutex_lock(&inode->i_mutex); | ||
497 | last_index = inode->i_size >> PAGE_CACHE_SHIFT; | ||
498 | for (i = 0; i <= last_index; i++) { | ||
499 | if (total_read % ra_pages == 0) { | 595 | if (total_read % ra_pages == 0) { |
500 | btrfs_force_ra(inode->i_mapping, &file->f_ra, file, i, | 596 | btrfs_force_ra(inode->i_mapping, &file->f_ra, file, i, |
501 | min(last_index, i + ra_pages - 1)); | 597 | min(last_index, i + ra_pages - 1)); |
502 | } | 598 | } |
503 | total_read++; | 599 | total_read++; |
600 | mutex_lock(&inode->i_mutex); | ||
601 | if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) | ||
602 | BTRFS_I(inode)->force_compress = 1; | ||
603 | |||
604 | ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE); | ||
605 | if (ret) { | ||
606 | ret = -ENOSPC; | ||
607 | break; | ||
608 | } | ||
609 | |||
610 | ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1); | ||
611 | if (ret) { | ||
612 | btrfs_free_reserved_data_space(root, inode, | ||
613 | PAGE_CACHE_SIZE); | ||
614 | ret = -ENOSPC; | ||
615 | break; | ||
616 | } | ||
504 | again: | 617 | again: |
618 | if (inode->i_size == 0 || | ||
619 | i > ((inode->i_size - 1) >> PAGE_CACHE_SHIFT)) { | ||
620 | ret = 0; | ||
621 | goto err_reservations; | ||
622 | } | ||
623 | |||
505 | page = grab_cache_page(inode->i_mapping, i); | 624 | page = grab_cache_page(inode->i_mapping, i); |
506 | if (!page) | 625 | if (!page) |
507 | goto out_unlock; | 626 | goto err_reservations; |
627 | |||
508 | if (!PageUptodate(page)) { | 628 | if (!PageUptodate(page)) { |
509 | btrfs_readpage(NULL, page); | 629 | btrfs_readpage(NULL, page); |
510 | lock_page(page); | 630 | lock_page(page); |
511 | if (!PageUptodate(page)) { | 631 | if (!PageUptodate(page)) { |
512 | unlock_page(page); | 632 | unlock_page(page); |
513 | page_cache_release(page); | 633 | page_cache_release(page); |
514 | goto out_unlock; | 634 | goto err_reservations; |
515 | } | 635 | } |
516 | } | 636 | } |
517 | 637 | ||
638 | if (page->mapping != inode->i_mapping) { | ||
639 | unlock_page(page); | ||
640 | page_cache_release(page); | ||
641 | goto again; | ||
642 | } | ||
643 | |||
518 | wait_on_page_writeback(page); | 644 | wait_on_page_writeback(page); |
519 | 645 | ||
646 | if (PageDirty(page)) { | ||
647 | btrfs_free_reserved_data_space(root, inode, | ||
648 | PAGE_CACHE_SIZE); | ||
649 | goto loop_unlock; | ||
650 | } | ||
651 | |||
520 | page_start = (u64)page->index << PAGE_CACHE_SHIFT; | 652 | page_start = (u64)page->index << PAGE_CACHE_SHIFT; |
521 | page_end = page_start + PAGE_CACHE_SIZE - 1; | 653 | page_end = page_start + PAGE_CACHE_SIZE - 1; |
522 | lock_extent(io_tree, page_start, page_end, GFP_NOFS); | 654 | lock_extent(io_tree, page_start, page_end, GFP_NOFS); |
@@ -537,18 +669,54 @@ again: | |||
537 | * page if it is dirtied again later | 669 | * page if it is dirtied again later |
538 | */ | 670 | */ |
539 | clear_page_dirty_for_io(page); | 671 | clear_page_dirty_for_io(page); |
672 | clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, | ||
673 | page_end, EXTENT_DIRTY | EXTENT_DELALLOC | | ||
674 | EXTENT_DO_ACCOUNTING, GFP_NOFS); | ||
540 | 675 | ||
541 | btrfs_set_extent_delalloc(inode, page_start, page_end); | 676 | btrfs_set_extent_delalloc(inode, page_start, page_end, NULL); |
677 | ClearPageChecked(page); | ||
542 | set_page_dirty(page); | 678 | set_page_dirty(page); |
543 | unlock_extent(io_tree, page_start, page_end, GFP_NOFS); | 679 | unlock_extent(io_tree, page_start, page_end, GFP_NOFS); |
680 | |||
681 | loop_unlock: | ||
544 | unlock_page(page); | 682 | unlock_page(page); |
545 | page_cache_release(page); | 683 | page_cache_release(page); |
684 | mutex_unlock(&inode->i_mutex); | ||
685 | |||
686 | btrfs_unreserve_metadata_for_delalloc(root, inode, 1); | ||
546 | balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1); | 687 | balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1); |
688 | i++; | ||
689 | } | ||
690 | |||
691 | if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) | ||
692 | filemap_flush(inode->i_mapping); | ||
693 | |||
694 | if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) { | ||
695 | /* the filemap_flush will queue IO into the worker threads, but | ||
696 | * we have to make sure the IO is actually started and that | ||
697 | * ordered extents get created before we return | ||
698 | */ | ||
699 | atomic_inc(&root->fs_info->async_submit_draining); | ||
700 | while (atomic_read(&root->fs_info->nr_async_submits) || | ||
701 | atomic_read(&root->fs_info->async_delalloc_pages)) { | ||
702 | wait_event(root->fs_info->async_submit_wait, | ||
703 | (atomic_read(&root->fs_info->nr_async_submits) == 0 && | ||
704 | atomic_read(&root->fs_info->async_delalloc_pages) == 0)); | ||
705 | } | ||
706 | atomic_dec(&root->fs_info->async_submit_draining); | ||
707 | |||
708 | mutex_lock(&inode->i_mutex); | ||
709 | BTRFS_I(inode)->force_compress = 0; | ||
710 | mutex_unlock(&inode->i_mutex); | ||
547 | } | 711 | } |
548 | 712 | ||
549 | out_unlock: | ||
550 | mutex_unlock(&inode->i_mutex); | ||
551 | return 0; | 713 | return 0; |
714 | |||
715 | err_reservations: | ||
716 | mutex_unlock(&inode->i_mutex); | ||
717 | btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE); | ||
718 | btrfs_unreserve_metadata_for_delalloc(root, inode, 1); | ||
719 | return ret; | ||
552 | } | 720 | } |
553 | 721 | ||
554 | static noinline int btrfs_ioctl_resize(struct btrfs_root *root, | 722 | static noinline int btrfs_ioctl_resize(struct btrfs_root *root, |
@@ -608,7 +776,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, | |||
608 | mod = 1; | 776 | mod = 1; |
609 | sizestr++; | 777 | sizestr++; |
610 | } | 778 | } |
611 | new_size = btrfs_parse_size(sizestr); | 779 | new_size = memparse(sizestr, NULL); |
612 | if (new_size == 0) { | 780 | if (new_size == 0) { |
613 | ret = -EINVAL; | 781 | ret = -EINVAL; |
614 | goto out_unlock; | 782 | goto out_unlock; |
@@ -743,6 +911,327 @@ out: | |||
743 | return ret; | 911 | return ret; |
744 | } | 912 | } |
745 | 913 | ||
914 | static noinline int key_in_sk(struct btrfs_key *key, | ||
915 | struct btrfs_ioctl_search_key *sk) | ||
916 | { | ||
917 | struct btrfs_key test; | ||
918 | int ret; | ||
919 | |||
920 | test.objectid = sk->min_objectid; | ||
921 | test.type = sk->min_type; | ||
922 | test.offset = sk->min_offset; | ||
923 | |||
924 | ret = btrfs_comp_cpu_keys(key, &test); | ||
925 | if (ret < 0) | ||
926 | return 0; | ||
927 | |||
928 | test.objectid = sk->max_objectid; | ||
929 | test.type = sk->max_type; | ||
930 | test.offset = sk->max_offset; | ||
931 | |||
932 | ret = btrfs_comp_cpu_keys(key, &test); | ||
933 | if (ret > 0) | ||
934 | return 0; | ||
935 | return 1; | ||
936 | } | ||
937 | |||
938 | static noinline int copy_to_sk(struct btrfs_root *root, | ||
939 | struct btrfs_path *path, | ||
940 | struct btrfs_key *key, | ||
941 | struct btrfs_ioctl_search_key *sk, | ||
942 | char *buf, | ||
943 | unsigned long *sk_offset, | ||
944 | int *num_found) | ||
945 | { | ||
946 | u64 found_transid; | ||
947 | struct extent_buffer *leaf; | ||
948 | struct btrfs_ioctl_search_header sh; | ||
949 | unsigned long item_off; | ||
950 | unsigned long item_len; | ||
951 | int nritems; | ||
952 | int i; | ||
953 | int slot; | ||
954 | int found = 0; | ||
955 | int ret = 0; | ||
956 | |||
957 | leaf = path->nodes[0]; | ||
958 | slot = path->slots[0]; | ||
959 | nritems = btrfs_header_nritems(leaf); | ||
960 | |||
961 | if (btrfs_header_generation(leaf) > sk->max_transid) { | ||
962 | i = nritems; | ||
963 | goto advance_key; | ||
964 | } | ||
965 | found_transid = btrfs_header_generation(leaf); | ||
966 | |||
967 | for (i = slot; i < nritems; i++) { | ||
968 | item_off = btrfs_item_ptr_offset(leaf, i); | ||
969 | item_len = btrfs_item_size_nr(leaf, i); | ||
970 | |||
971 | if (item_len > BTRFS_SEARCH_ARGS_BUFSIZE) | ||
972 | item_len = 0; | ||
973 | |||
974 | if (sizeof(sh) + item_len + *sk_offset > | ||
975 | BTRFS_SEARCH_ARGS_BUFSIZE) { | ||
976 | ret = 1; | ||
977 | goto overflow; | ||
978 | } | ||
979 | |||
980 | btrfs_item_key_to_cpu(leaf, key, i); | ||
981 | if (!key_in_sk(key, sk)) | ||
982 | continue; | ||
983 | |||
984 | sh.objectid = key->objectid; | ||
985 | sh.offset = key->offset; | ||
986 | sh.type = key->type; | ||
987 | sh.len = item_len; | ||
988 | sh.transid = found_transid; | ||
989 | |||
990 | /* copy search result header */ | ||
991 | memcpy(buf + *sk_offset, &sh, sizeof(sh)); | ||
992 | *sk_offset += sizeof(sh); | ||
993 | |||
994 | if (item_len) { | ||
995 | char *p = buf + *sk_offset; | ||
996 | /* copy the item */ | ||
997 | read_extent_buffer(leaf, p, | ||
998 | item_off, item_len); | ||
999 | *sk_offset += item_len; | ||
1000 | } | ||
1001 | found++; | ||
1002 | |||
1003 | if (*num_found >= sk->nr_items) | ||
1004 | break; | ||
1005 | } | ||
1006 | advance_key: | ||
1007 | ret = 0; | ||
1008 | if (key->offset < (u64)-1 && key->offset < sk->max_offset) | ||
1009 | key->offset++; | ||
1010 | else if (key->type < (u8)-1 && key->type < sk->max_type) { | ||
1011 | key->offset = 0; | ||
1012 | key->type++; | ||
1013 | } else if (key->objectid < (u64)-1 && key->objectid < sk->max_objectid) { | ||
1014 | key->offset = 0; | ||
1015 | key->type = 0; | ||
1016 | key->objectid++; | ||
1017 | } else | ||
1018 | ret = 1; | ||
1019 | overflow: | ||
1020 | *num_found += found; | ||
1021 | return ret; | ||
1022 | } | ||
1023 | |||
1024 | static noinline int search_ioctl(struct inode *inode, | ||
1025 | struct btrfs_ioctl_search_args *args) | ||
1026 | { | ||
1027 | struct btrfs_root *root; | ||
1028 | struct btrfs_key key; | ||
1029 | struct btrfs_key max_key; | ||
1030 | struct btrfs_path *path; | ||
1031 | struct btrfs_ioctl_search_key *sk = &args->key; | ||
1032 | struct btrfs_fs_info *info = BTRFS_I(inode)->root->fs_info; | ||
1033 | int ret; | ||
1034 | int num_found = 0; | ||
1035 | unsigned long sk_offset = 0; | ||
1036 | |||
1037 | path = btrfs_alloc_path(); | ||
1038 | if (!path) | ||
1039 | return -ENOMEM; | ||
1040 | |||
1041 | if (sk->tree_id == 0) { | ||
1042 | /* search the root of the inode that was passed */ | ||
1043 | root = BTRFS_I(inode)->root; | ||
1044 | } else { | ||
1045 | key.objectid = sk->tree_id; | ||
1046 | key.type = BTRFS_ROOT_ITEM_KEY; | ||
1047 | key.offset = (u64)-1; | ||
1048 | root = btrfs_read_fs_root_no_name(info, &key); | ||
1049 | if (IS_ERR(root)) { | ||
1050 | printk(KERN_ERR "could not find root %llu\n", | ||
1051 | sk->tree_id); | ||
1052 | btrfs_free_path(path); | ||
1053 | return -ENOENT; | ||
1054 | } | ||
1055 | } | ||
1056 | |||
1057 | key.objectid = sk->min_objectid; | ||
1058 | key.type = sk->min_type; | ||
1059 | key.offset = sk->min_offset; | ||
1060 | |||
1061 | max_key.objectid = sk->max_objectid; | ||
1062 | max_key.type = sk->max_type; | ||
1063 | max_key.offset = sk->max_offset; | ||
1064 | |||
1065 | path->keep_locks = 1; | ||
1066 | |||
1067 | while(1) { | ||
1068 | ret = btrfs_search_forward(root, &key, &max_key, path, 0, | ||
1069 | sk->min_transid); | ||
1070 | if (ret != 0) { | ||
1071 | if (ret > 0) | ||
1072 | ret = 0; | ||
1073 | goto err; | ||
1074 | } | ||
1075 | ret = copy_to_sk(root, path, &key, sk, args->buf, | ||
1076 | &sk_offset, &num_found); | ||
1077 | btrfs_release_path(root, path); | ||
1078 | if (ret || num_found >= sk->nr_items) | ||
1079 | break; | ||
1080 | |||
1081 | } | ||
1082 | ret = 0; | ||
1083 | err: | ||
1084 | sk->nr_items = num_found; | ||
1085 | btrfs_free_path(path); | ||
1086 | return ret; | ||
1087 | } | ||
1088 | |||
1089 | static noinline int btrfs_ioctl_tree_search(struct file *file, | ||
1090 | void __user *argp) | ||
1091 | { | ||
1092 | struct btrfs_ioctl_search_args *args; | ||
1093 | struct inode *inode; | ||
1094 | int ret; | ||
1095 | |||
1096 | if (!capable(CAP_SYS_ADMIN)) | ||
1097 | return -EPERM; | ||
1098 | |||
1099 | args = kmalloc(sizeof(*args), GFP_KERNEL); | ||
1100 | if (!args) | ||
1101 | return -ENOMEM; | ||
1102 | |||
1103 | if (copy_from_user(args, argp, sizeof(*args))) { | ||
1104 | kfree(args); | ||
1105 | return -EFAULT; | ||
1106 | } | ||
1107 | inode = fdentry(file)->d_inode; | ||
1108 | ret = search_ioctl(inode, args); | ||
1109 | if (ret == 0 && copy_to_user(argp, args, sizeof(*args))) | ||
1110 | ret = -EFAULT; | ||
1111 | kfree(args); | ||
1112 | return ret; | ||
1113 | } | ||
1114 | |||
1115 | /* | ||
1116 | * Search INODE_REFs to identify path name of 'dirid' directory | ||
1117 | * in a 'tree_id' tree, and sets the path name to 'name'. | ||
1118 | */ | ||
1119 | static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info, | ||
1120 | u64 tree_id, u64 dirid, char *name) | ||
1121 | { | ||
1122 | struct btrfs_root *root; | ||
1123 | struct btrfs_key key; | ||
1124 | char *ptr; | ||
1125 | int ret = -1; | ||
1126 | int slot; | ||
1127 | int len; | ||
1128 | int total_len = 0; | ||
1129 | struct btrfs_inode_ref *iref; | ||
1130 | struct extent_buffer *l; | ||
1131 | struct btrfs_path *path; | ||
1132 | |||
1133 | if (dirid == BTRFS_FIRST_FREE_OBJECTID) { | ||
1134 | name[0]='\0'; | ||
1135 | return 0; | ||
1136 | } | ||
1137 | |||
1138 | path = btrfs_alloc_path(); | ||
1139 | if (!path) | ||
1140 | return -ENOMEM; | ||
1141 | |||
1142 | ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX]; | ||
1143 | |||
1144 | key.objectid = tree_id; | ||
1145 | key.type = BTRFS_ROOT_ITEM_KEY; | ||
1146 | key.offset = (u64)-1; | ||
1147 | root = btrfs_read_fs_root_no_name(info, &key); | ||
1148 | if (IS_ERR(root)) { | ||
1149 | printk(KERN_ERR "could not find root %llu\n", tree_id); | ||
1150 | ret = -ENOENT; | ||
1151 | goto out; | ||
1152 | } | ||
1153 | |||
1154 | key.objectid = dirid; | ||
1155 | key.type = BTRFS_INODE_REF_KEY; | ||
1156 | key.offset = (u64)-1; | ||
1157 | |||
1158 | while(1) { | ||
1159 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | ||
1160 | if (ret < 0) | ||
1161 | goto out; | ||
1162 | |||
1163 | l = path->nodes[0]; | ||
1164 | slot = path->slots[0]; | ||
1165 | if (ret > 0 && slot > 0) | ||
1166 | slot--; | ||
1167 | btrfs_item_key_to_cpu(l, &key, slot); | ||
1168 | |||
1169 | if (ret > 0 && (key.objectid != dirid || | ||
1170 | key.type != BTRFS_INODE_REF_KEY)) { | ||
1171 | ret = -ENOENT; | ||
1172 | goto out; | ||
1173 | } | ||
1174 | |||
1175 | iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref); | ||
1176 | len = btrfs_inode_ref_name_len(l, iref); | ||
1177 | ptr -= len + 1; | ||
1178 | total_len += len + 1; | ||
1179 | if (ptr < name) | ||
1180 | goto out; | ||
1181 | |||
1182 | *(ptr + len) = '/'; | ||
1183 | read_extent_buffer(l, ptr, (unsigned long)(iref + 1), len); | ||
1184 | |||
1185 | if (key.offset == BTRFS_FIRST_FREE_OBJECTID) | ||
1186 | break; | ||
1187 | |||
1188 | btrfs_release_path(root, path); | ||
1189 | key.objectid = key.offset; | ||
1190 | key.offset = (u64)-1; | ||
1191 | dirid = key.objectid; | ||
1192 | |||
1193 | } | ||
1194 | if (ptr < name) | ||
1195 | goto out; | ||
1196 | memcpy(name, ptr, total_len); | ||
1197 | name[total_len]='\0'; | ||
1198 | ret = 0; | ||
1199 | out: | ||
1200 | btrfs_free_path(path); | ||
1201 | return ret; | ||
1202 | } | ||
1203 | |||
1204 | static noinline int btrfs_ioctl_ino_lookup(struct file *file, | ||
1205 | void __user *argp) | ||
1206 | { | ||
1207 | struct btrfs_ioctl_ino_lookup_args *args; | ||
1208 | struct inode *inode; | ||
1209 | int ret; | ||
1210 | |||
1211 | if (!capable(CAP_SYS_ADMIN)) | ||
1212 | return -EPERM; | ||
1213 | |||
1214 | args = kmalloc(sizeof(*args), GFP_KERNEL); | ||
1215 | if (copy_from_user(args, argp, sizeof(*args))) { | ||
1216 | kfree(args); | ||
1217 | return -EFAULT; | ||
1218 | } | ||
1219 | inode = fdentry(file)->d_inode; | ||
1220 | |||
1221 | if (args->treeid == 0) | ||
1222 | args->treeid = BTRFS_I(inode)->root->root_key.objectid; | ||
1223 | |||
1224 | ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info, | ||
1225 | args->treeid, args->objectid, | ||
1226 | args->name); | ||
1227 | |||
1228 | if (ret == 0 && copy_to_user(argp, args, sizeof(*args))) | ||
1229 | ret = -EFAULT; | ||
1230 | |||
1231 | kfree(args); | ||
1232 | return ret; | ||
1233 | } | ||
1234 | |||
746 | static noinline int btrfs_ioctl_snap_destroy(struct file *file, | 1235 | static noinline int btrfs_ioctl_snap_destroy(struct file *file, |
747 | void __user *arg) | 1236 | void __user *arg) |
748 | { | 1237 | { |
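To make the new ino-lookup ioctl concrete, here is a rough user-space sketch; the include path is an assumption (a tool would carry its own copy of the structures from ioctl.h), and the lookup_path helper and fd handling are hypothetical. Passing treeid == 0 asks the kernel to use the subvolume the file descriptor belongs to, and on success args.name holds the '/'-separated path assembled by btrfs_search_path_in_tree() above:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include "btrfs/ioctl.h"	/* assumed local copy of the kernel header */

/* hypothetical helper: print the path of directory 'dirid' in fd's subvolume */
static int lookup_path(int fd, __u64 dirid)
{
	struct btrfs_ioctl_ino_lookup_args args;

	memset(&args, 0, sizeof(args));
	args.treeid = 0;	/* 0 == the subvolume this fd lives in */
	args.objectid = dirid;

	if (ioctl(fd, BTRFS_IOC_INO_LOOKUP, &args) < 0)
		return -1;

	printf("%s\n", args.name);
	return 0;
}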
@@ -849,10 +1338,11 @@ out: | |||
849 | return err; | 1338 | return err; |
850 | } | 1339 | } |
851 | 1340 | ||
852 | static int btrfs_ioctl_defrag(struct file *file) | 1341 | static int btrfs_ioctl_defrag(struct file *file, void __user *argp) |
853 | { | 1342 | { |
854 | struct inode *inode = fdentry(file)->d_inode; | 1343 | struct inode *inode = fdentry(file)->d_inode; |
855 | struct btrfs_root *root = BTRFS_I(inode)->root; | 1344 | struct btrfs_root *root = BTRFS_I(inode)->root; |
1345 | struct btrfs_ioctl_defrag_range_args *range; | ||
856 | int ret; | 1346 | int ret; |
857 | 1347 | ||
858 | ret = mnt_want_write(file->f_path.mnt); | 1348 | ret = mnt_want_write(file->f_path.mnt); |
@@ -873,7 +1363,30 @@ static int btrfs_ioctl_defrag(struct file *file) | |||
873 | ret = -EINVAL; | 1363 | ret = -EINVAL; |
874 | goto out; | 1364 | goto out; |
875 | } | 1365 | } |
876 | btrfs_defrag_file(file); | 1366 | |
1367 | range = kzalloc(sizeof(*range), GFP_KERNEL); | ||
1368 | if (!range) { | ||
1369 | ret = -ENOMEM; | ||
1370 | goto out; | ||
1371 | } | ||
1372 | |||
1373 | if (argp) { | ||
1374 | if (copy_from_user(range, argp, | ||
1375 | sizeof(*range))) { | ||
1376 | ret = -EFAULT; | ||
1377 | kfree(range); | ||
1378 | } | ||
1379 | /* compression requires us to start the IO */ | ||
1380 | if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) { | ||
1381 | range->flags |= BTRFS_DEFRAG_RANGE_START_IO; | ||
1382 | range->extent_thresh = (u32)-1; | ||
1383 | } | ||
1384 | } else { | ||
1385 | /* the rest are all set to zero by kzalloc */ | ||
1386 | range->len = (u64)-1; | ||
1387 | } | ||
1388 | btrfs_defrag_file(file, range); | ||
1389 | kfree(range); | ||
877 | break; | 1390 | break; |
878 | } | 1391 | } |
879 | out: | 1392 | out: |
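For reference, a user-space sketch of driving the new defrag-range ioctl handled above; the header include and the file path are placeholders (userland needs its own copy of btrfs_ioctl_defrag_range_args from ioctl.h), and a real tool would want better error handling:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "btrfs/ioctl.h"	/* assumed local copy of the kernel header */

int main(void)
{
	struct btrfs_ioctl_defrag_range_args range;
	int fd = open("/mnt/btrfs/file", O_RDWR);	/* hypothetical path */

	if (fd < 0)
		return 1;

	memset(&range, 0, sizeof(range));
	range.start = 0;
	range.len = (__u64)-1;				/* whole file */
	range.flags = BTRFS_DEFRAG_RANGE_COMPRESS;	/* kernel adds START_IO */

	if (ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &range) < 0)
		perror("BTRFS_IOC_DEFRAG_RANGE");

	close(fd);
	return 0;
}

Leaving range zeroed except for len mirrors what the plain BTRFS_IOC_DEFRAG path sets up in btrfs_ioctl_defrag() when no argument is passed.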
@@ -1274,6 +1787,157 @@ out: | |||
1274 | return ret; | 1787 | return ret; |
1275 | } | 1788 | } |
1276 | 1789 | ||
1790 | static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) | ||
1791 | { | ||
1792 | struct inode *inode = fdentry(file)->d_inode; | ||
1793 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
1794 | struct btrfs_root *new_root; | ||
1795 | struct btrfs_dir_item *di; | ||
1796 | struct btrfs_trans_handle *trans; | ||
1797 | struct btrfs_path *path; | ||
1798 | struct btrfs_key location; | ||
1799 | struct btrfs_disk_key disk_key; | ||
1800 | struct btrfs_super_block *disk_super; | ||
1801 | u64 features; | ||
1802 | u64 objectid = 0; | ||
1803 | u64 dir_id; | ||
1804 | |||
1805 | if (!capable(CAP_SYS_ADMIN)) | ||
1806 | return -EPERM; | ||
1807 | |||
1808 | if (copy_from_user(&objectid, argp, sizeof(objectid))) | ||
1809 | return -EFAULT; | ||
1810 | |||
1811 | if (!objectid) | ||
1812 | objectid = root->root_key.objectid; | ||
1813 | |||
1814 | location.objectid = objectid; | ||
1815 | location.type = BTRFS_ROOT_ITEM_KEY; | ||
1816 | location.offset = (u64)-1; | ||
1817 | |||
1818 | new_root = btrfs_read_fs_root_no_name(root->fs_info, &location); | ||
1819 | if (IS_ERR(new_root)) | ||
1820 | return PTR_ERR(new_root); | ||
1821 | |||
1822 | if (btrfs_root_refs(&new_root->root_item) == 0) | ||
1823 | return -ENOENT; | ||
1824 | |||
1825 | path = btrfs_alloc_path(); | ||
1826 | if (!path) | ||
1827 | return -ENOMEM; | ||
1828 | path->leave_spinning = 1; | ||
1829 | |||
1830 | trans = btrfs_start_transaction(root, 1); | ||
1831 | if (!trans) { | ||
1832 | btrfs_free_path(path); | ||
1833 | return -ENOMEM; | ||
1834 | } | ||
1835 | |||
1836 | dir_id = btrfs_super_root_dir(&root->fs_info->super_copy); | ||
1837 | di = btrfs_lookup_dir_item(trans, root->fs_info->tree_root, path, | ||
1838 | dir_id, "default", 7, 1); | ||
1839 | if (!di) { | ||
1840 | btrfs_free_path(path); | ||
1841 | btrfs_end_transaction(trans, root); | ||
1842 | printk(KERN_ERR "Umm, you don't have the default dir item, " | ||
1843 | "this isn't going to work\n"); | ||
1844 | return -ENOENT; | ||
1845 | } | ||
1846 | |||
1847 | btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key); | ||
1848 | btrfs_set_dir_item_key(path->nodes[0], di, &disk_key); | ||
1849 | btrfs_mark_buffer_dirty(path->nodes[0]); | ||
1850 | btrfs_free_path(path); | ||
1851 | |||
1852 | disk_super = &root->fs_info->super_copy; | ||
1853 | features = btrfs_super_incompat_flags(disk_super); | ||
1854 | if (!(features & BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL)) { | ||
1855 | features |= BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL; | ||
1856 | btrfs_set_super_incompat_flags(disk_super, features); | ||
1857 | } | ||
1858 | btrfs_end_transaction(trans, root); | ||
1859 | |||
1860 | return 0; | ||
1861 | } | ||
1862 | |||
1863 | long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) | ||
1864 | { | ||
1865 | struct btrfs_ioctl_space_args space_args; | ||
1866 | struct btrfs_ioctl_space_info space; | ||
1867 | struct btrfs_ioctl_space_info *dest; | ||
1868 | struct btrfs_ioctl_space_info *dest_orig; | ||
1869 | struct btrfs_ioctl_space_info *user_dest; | ||
1870 | struct btrfs_space_info *info; | ||
1871 | int alloc_size; | ||
1872 | int ret = 0; | ||
1873 | int slot_count = 0; | ||
1874 | |||
1875 | if (copy_from_user(&space_args, | ||
1876 | (struct btrfs_ioctl_space_args __user *)arg, | ||
1877 | sizeof(space_args))) | ||
1878 | return -EFAULT; | ||
1879 | |||
1880 | /* first we count slots */ | ||
1881 | rcu_read_lock(); | ||
1882 | list_for_each_entry_rcu(info, &root->fs_info->space_info, list) | ||
1883 | slot_count++; | ||
1884 | rcu_read_unlock(); | ||
1885 | |||
1886 | /* space_slots == 0 means they are asking for a count */ | ||
1887 | if (space_args.space_slots == 0) { | ||
1888 | space_args.total_spaces = slot_count; | ||
1889 | goto out; | ||
1890 | } | ||
1891 | alloc_size = sizeof(*dest) * slot_count; | ||
1892 | /* we generally have at most 6 or so space infos, one for each raid | ||
1893 | * level. So, a whole page should be more than enough for everyone | ||
1894 | */ | ||
1895 | if (alloc_size > PAGE_CACHE_SIZE) | ||
1896 | return -ENOMEM; | ||
1897 | |||
1898 | space_args.total_spaces = 0; | ||
1899 | dest = kmalloc(alloc_size, GFP_NOFS); | ||
1900 | if (!dest) | ||
1901 | return -ENOMEM; | ||
1902 | dest_orig = dest; | ||
1903 | |||
1904 | /* now we have a buffer to copy into */ | ||
1905 | rcu_read_lock(); | ||
1906 | list_for_each_entry_rcu(info, &root->fs_info->space_info, list) { | ||
1907 | /* make sure we don't copy more than we allocated | ||
1908 | * in our buffer | ||
1909 | */ | ||
1910 | if (slot_count == 0) | ||
1911 | break; | ||
1912 | slot_count--; | ||
1913 | |||
1914 | /* make sure userland has enough room in their buffer */ | ||
1915 | if (space_args.total_spaces >= space_args.space_slots) | ||
1916 | break; | ||
1917 | |||
1918 | space.flags = info->flags; | ||
1919 | space.total_bytes = info->total_bytes; | ||
1920 | space.used_bytes = info->bytes_used; | ||
1921 | memcpy(dest, &space, sizeof(space)); | ||
1922 | dest++; | ||
1923 | space_args.total_spaces++; | ||
1924 | } | ||
1925 | rcu_read_unlock(); | ||
1926 | |||
1927 | user_dest = (struct btrfs_ioctl_space_info *) | ||
1928 | (arg + sizeof(struct btrfs_ioctl_space_args)); | ||
1929 | |||
1930 | if (copy_to_user(user_dest, dest_orig, alloc_size)) | ||
1931 | ret = -EFAULT; | ||
1932 | |||
1933 | kfree(dest_orig); | ||
1934 | out: | ||
1935 | if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args))) | ||
1936 | ret = -EFAULT; | ||
1937 | |||
1938 | return ret; | ||
1939 | } | ||
1940 | |||
1277 | /* | 1941 | /* |
1278 | * there are many ways the trans_start and trans_end ioctls can lead | 1942 | * there are many ways the trans_start and trans_end ioctls can lead |
1279 | * to deadlocks. They should only be used by applications that | 1943 | * to deadlocks. They should only be used by applications that |
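btrfs_ioctl_space_info() above is written for a two-pass caller: pass space_slots == 0 to learn how many space infos exist, then allocate room for that many btrfs_ioctl_space_info entries after the args header and call again. A user-space sketch under those assumptions (the include path and helper name are hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include "btrfs/ioctl.h"	/* assumed local copy of the kernel header */

/* hypothetical helper: print usage for every space info of the fs behind fd */
static int print_spaces(int fd)
{
	struct btrfs_ioctl_space_args count = { .space_slots = 0 };
	struct btrfs_ioctl_space_args *args;
	__u64 i;

	/* pass 1: space_slots == 0 only asks for the slot count */
	if (ioctl(fd, BTRFS_IOC_SPACE_INFO, &count) < 0)
		return -1;

	args = calloc(1, sizeof(*args) + count.total_spaces *
		      sizeof(struct btrfs_ioctl_space_info));
	if (!args)
		return -1;
	args->space_slots = count.total_spaces;

	/* pass 2: the kernel copies up to space_slots entries into spaces[] */
	if (ioctl(fd, BTRFS_IOC_SPACE_INFO, args) < 0) {
		free(args);
		return -1;
	}

	for (i = 0; i < args->total_spaces; i++)
		printf("flags %llu total %llu used %llu\n",
		       (unsigned long long)args->spaces[i].flags,
		       (unsigned long long)args->spaces[i].total_bytes,
		       (unsigned long long)args->spaces[i].used_bytes);

	free(args);
	return 0;
}

The count-then-fetch split mirrors the slot_count bookkeeping in the kernel loop above.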
@@ -1320,8 +1984,12 @@ long btrfs_ioctl(struct file *file, unsigned int | |||
1320 | return btrfs_ioctl_snap_create(file, argp, 1); | 1984 | return btrfs_ioctl_snap_create(file, argp, 1); |
1321 | case BTRFS_IOC_SNAP_DESTROY: | 1985 | case BTRFS_IOC_SNAP_DESTROY: |
1322 | return btrfs_ioctl_snap_destroy(file, argp); | 1986 | return btrfs_ioctl_snap_destroy(file, argp); |
1987 | case BTRFS_IOC_DEFAULT_SUBVOL: | ||
1988 | return btrfs_ioctl_default_subvol(file, argp); | ||
1323 | case BTRFS_IOC_DEFRAG: | 1989 | case BTRFS_IOC_DEFRAG: |
1324 | return btrfs_ioctl_defrag(file); | 1990 | return btrfs_ioctl_defrag(file, NULL); |
1991 | case BTRFS_IOC_DEFRAG_RANGE: | ||
1992 | return btrfs_ioctl_defrag(file, argp); | ||
1325 | case BTRFS_IOC_RESIZE: | 1993 | case BTRFS_IOC_RESIZE: |
1326 | return btrfs_ioctl_resize(root, argp); | 1994 | return btrfs_ioctl_resize(root, argp); |
1327 | case BTRFS_IOC_ADD_DEV: | 1995 | case BTRFS_IOC_ADD_DEV: |
@@ -1338,6 +2006,12 @@ long btrfs_ioctl(struct file *file, unsigned int | |||
1338 | return btrfs_ioctl_trans_start(file); | 2006 | return btrfs_ioctl_trans_start(file); |
1339 | case BTRFS_IOC_TRANS_END: | 2007 | case BTRFS_IOC_TRANS_END: |
1340 | return btrfs_ioctl_trans_end(file); | 2008 | return btrfs_ioctl_trans_end(file); |
2009 | case BTRFS_IOC_TREE_SEARCH: | ||
2010 | return btrfs_ioctl_tree_search(file, argp); | ||
2011 | case BTRFS_IOC_INO_LOOKUP: | ||
2012 | return btrfs_ioctl_ino_lookup(file, argp); | ||
2013 | case BTRFS_IOC_SPACE_INFO: | ||
2014 | return btrfs_ioctl_space_info(root, argp); | ||
1341 | case BTRFS_IOC_SYNC: | 2015 | case BTRFS_IOC_SYNC: |
1342 | btrfs_sync_fs(file->f_dentry->d_sb, 1); | 2016 | btrfs_sync_fs(file->f_dentry->d_sb, 1); |
1343 | return 0; | 2017 | return 0; |
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index bc49914475eb..424694aa517f 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h | |||
@@ -30,12 +30,114 @@ struct btrfs_ioctl_vol_args { | |||
30 | char name[BTRFS_PATH_NAME_MAX + 1]; | 30 | char name[BTRFS_PATH_NAME_MAX + 1]; |
31 | }; | 31 | }; |
32 | 32 | ||
33 | #define BTRFS_INO_LOOKUP_PATH_MAX 4080 | ||
34 | struct btrfs_ioctl_ino_lookup_args { | ||
35 | __u64 treeid; | ||
36 | __u64 objectid; | ||
37 | char name[BTRFS_INO_LOOKUP_PATH_MAX]; | ||
38 | }; | ||
39 | |||
40 | struct btrfs_ioctl_search_key { | ||
41 | /* which root are we searching. 0 is the tree of tree roots */ | ||
42 | __u64 tree_id; | ||
43 | |||
44 | /* keys returned will be >= min and <= max */ | ||
45 | __u64 min_objectid; | ||
46 | __u64 max_objectid; | ||
47 | |||
48 | /* keys returned will be >= min and <= max */ | ||
49 | __u64 min_offset; | ||
50 | __u64 max_offset; | ||
51 | |||
52 | /* max and min transids to search for */ | ||
53 | __u64 min_transid; | ||
54 | __u64 max_transid; | ||
55 | |||
56 | /* keys returned will be >= min and <= max */ | ||
57 | __u32 min_type; | ||
58 | __u32 max_type; | ||
59 | |||
60 | /* | ||
61 | * how many items did userland ask for, and how many are we | ||
62 | * returning | ||
63 | */ | ||
64 | __u32 nr_items; | ||
65 | |||
66 | /* align to 64 bits */ | ||
67 | __u32 unused; | ||
68 | |||
69 | /* some extra for later */ | ||
70 | __u64 unused1; | ||
71 | __u64 unused2; | ||
72 | __u64 unused3; | ||
73 | __u64 unused4; | ||
74 | }; | ||
75 | |||
76 | struct btrfs_ioctl_search_header { | ||
77 | __u64 transid; | ||
78 | __u64 objectid; | ||
79 | __u64 offset; | ||
80 | __u32 type; | ||
81 | __u32 len; | ||
82 | }; | ||
83 | |||
84 | #define BTRFS_SEARCH_ARGS_BUFSIZE (4096 - sizeof(struct btrfs_ioctl_search_key)) | ||
85 | /* | ||
86 | * the buf is an array of search headers where | ||
87 | * each header is followed by the actual item. | ||
88 | * the type field is expanded to 32 bits for alignment. | ||
89 | */ | ||
90 | struct btrfs_ioctl_search_args { | ||
91 | struct btrfs_ioctl_search_key key; | ||
92 | char buf[BTRFS_SEARCH_ARGS_BUFSIZE]; | ||
93 | }; | ||
94 | |||
33 | struct btrfs_ioctl_clone_range_args { | 95 | struct btrfs_ioctl_clone_range_args { |
34 | __s64 src_fd; | 96 | __s64 src_fd; |
35 | __u64 src_offset, src_length; | 97 | __u64 src_offset, src_length; |
36 | __u64 dest_offset; | 98 | __u64 dest_offset; |
37 | }; | 99 | }; |
38 | 100 | ||
101 | /* flags for the defrag range ioctl */ | ||
102 | #define BTRFS_DEFRAG_RANGE_COMPRESS 1 | ||
103 | #define BTRFS_DEFRAG_RANGE_START_IO 2 | ||
104 | |||
105 | struct btrfs_ioctl_defrag_range_args { | ||
106 | /* start of the defrag operation */ | ||
107 | __u64 start; | ||
108 | |||
109 | /* number of bytes to defrag, use (u64)-1 to say all */ | ||
110 | __u64 len; | ||
111 | |||
112 | /* | ||
113 | * flags for the operation, which can include turning | ||
114 | * on compression for this one defrag | ||
115 | */ | ||
116 | __u64 flags; | ||
117 | |||
118 | /* | ||
119 | * any extent bigger than this will be considered | ||
120 | * already defragged. Use 0 to take the kernel default. | ||
121 | * Use 1 to say every single extent must be rewritten | ||
122 | */ | ||
123 | __u32 extent_thresh; | ||
124 | |||
125 | /* spare for later */ | ||
126 | __u32 unused[5]; | ||
127 | }; | ||
128 | |||
129 | struct btrfs_ioctl_space_info { | ||
130 | __u64 flags; | ||
131 | __u64 total_bytes; | ||
132 | __u64 used_bytes; | ||
133 | }; | ||
134 | |||
135 | struct btrfs_ioctl_space_args { | ||
136 | __u64 space_slots; | ||
137 | __u64 total_spaces; | ||
138 | struct btrfs_ioctl_space_info spaces[0]; | ||
139 | }; | ||
140 | |||
39 | #define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \ | 141 | #define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \ |
40 | struct btrfs_ioctl_vol_args) | 142 | struct btrfs_ioctl_vol_args) |
41 | #define BTRFS_IOC_DEFRAG _IOW(BTRFS_IOCTL_MAGIC, 2, \ | 143 | #define BTRFS_IOC_DEFRAG _IOW(BTRFS_IOCTL_MAGIC, 2, \ |
@@ -67,4 +169,13 @@ struct btrfs_ioctl_clone_range_args { | |||
67 | struct btrfs_ioctl_vol_args) | 169 | struct btrfs_ioctl_vol_args) |
68 | #define BTRFS_IOC_SNAP_DESTROY _IOW(BTRFS_IOCTL_MAGIC, 15, \ | 170 | #define BTRFS_IOC_SNAP_DESTROY _IOW(BTRFS_IOCTL_MAGIC, 15, \ |
69 | struct btrfs_ioctl_vol_args) | 171 | struct btrfs_ioctl_vol_args) |
172 | #define BTRFS_IOC_DEFRAG_RANGE _IOW(BTRFS_IOCTL_MAGIC, 16, \ | ||
173 | struct btrfs_ioctl_defrag_range_args) | ||
174 | #define BTRFS_IOC_TREE_SEARCH _IOWR(BTRFS_IOCTL_MAGIC, 17, \ | ||
175 | struct btrfs_ioctl_search_args) | ||
176 | #define BTRFS_IOC_INO_LOOKUP _IOWR(BTRFS_IOCTL_MAGIC, 18, \ | ||
177 | struct btrfs_ioctl_ino_lookup_args) | ||
178 | #define BTRFS_IOC_DEFAULT_SUBVOL _IOW(BTRFS_IOCTL_MAGIC, 19, u64) | ||
179 | #define BTRFS_IOC_SPACE_INFO _IOWR(BTRFS_IOCTL_MAGIC, 20, \ | ||
180 | struct btrfs_ioctl_space_args) | ||
70 | #endif | 181 | #endif |
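The search structures above define a packed result format: buf holds a btrfs_ioctl_search_header for each hit, immediately followed by sh.len bytes of raw item data (sh.len is set to 0 for items too large for the buffer). A user-space sketch of walking it, with the include path and the chosen key range as placeholder assumptions:

#include <string.h>
#include <sys/ioctl.h>
#include "btrfs/ioctl.h"	/* assumed local copy of this header */

/* hypothetical: visit every item with the given objectid in fd's own tree */
static int walk_items(int fd, __u64 objectid)
{
	struct btrfs_ioctl_search_args args;
	struct btrfs_ioctl_search_header sh;
	unsigned long off = 0;
	__u32 i;

	memset(&args, 0, sizeof(args));
	args.key.tree_id = 0;			/* 0 == tree of the inode behind fd */
	args.key.min_objectid = objectid;
	args.key.max_objectid = objectid;
	args.key.max_type = (__u32)-1;
	args.key.max_offset = (__u64)-1;
	args.key.max_transid = (__u64)-1;
	args.key.nr_items = 4096;		/* upper bound; kernel rewrites it */

	if (ioctl(fd, BTRFS_IOC_TREE_SEARCH, &args) < 0)
		return -1;

	/* nr_items now says how many header+item pairs landed in buf */
	for (i = 0; i < args.key.nr_items; i++) {
		memcpy(&sh, args.buf + off, sizeof(sh));
		off += sizeof(sh);
		/* sh.len bytes of item data start here when sh.len != 0 */
		off += sh.len;
	}
	return 0;
}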
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 5c2a9e78a949..a8ffecd0b491 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c | |||
@@ -174,7 +174,6 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, | |||
174 | if (!entry) | 174 | if (!entry) |
175 | return -ENOMEM; | 175 | return -ENOMEM; |
176 | 176 | ||
177 | mutex_lock(&tree->mutex); | ||
178 | entry->file_offset = file_offset; | 177 | entry->file_offset = file_offset; |
179 | entry->start = start; | 178 | entry->start = start; |
180 | entry->len = len; | 179 | entry->len = len; |
@@ -190,16 +189,17 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, | |||
190 | INIT_LIST_HEAD(&entry->list); | 189 | INIT_LIST_HEAD(&entry->list); |
191 | INIT_LIST_HEAD(&entry->root_extent_list); | 190 | INIT_LIST_HEAD(&entry->root_extent_list); |
192 | 191 | ||
192 | spin_lock(&tree->lock); | ||
193 | node = tree_insert(&tree->tree, file_offset, | 193 | node = tree_insert(&tree->tree, file_offset, |
194 | &entry->rb_node); | 194 | &entry->rb_node); |
195 | BUG_ON(node); | 195 | BUG_ON(node); |
196 | spin_unlock(&tree->lock); | ||
196 | 197 | ||
197 | spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); | 198 | spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); |
198 | list_add_tail(&entry->root_extent_list, | 199 | list_add_tail(&entry->root_extent_list, |
199 | &BTRFS_I(inode)->root->fs_info->ordered_extents); | 200 | &BTRFS_I(inode)->root->fs_info->ordered_extents); |
200 | spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); | 201 | spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); |
201 | 202 | ||
202 | mutex_unlock(&tree->mutex); | ||
203 | BUG_ON(node); | 203 | BUG_ON(node); |
204 | return 0; | 204 | return 0; |
205 | } | 205 | } |
@@ -216,9 +216,9 @@ int btrfs_add_ordered_sum(struct inode *inode, | |||
216 | struct btrfs_ordered_inode_tree *tree; | 216 | struct btrfs_ordered_inode_tree *tree; |
217 | 217 | ||
218 | tree = &BTRFS_I(inode)->ordered_tree; | 218 | tree = &BTRFS_I(inode)->ordered_tree; |
219 | mutex_lock(&tree->mutex); | 219 | spin_lock(&tree->lock); |
220 | list_add_tail(&sum->list, &entry->list); | 220 | list_add_tail(&sum->list, &entry->list); |
221 | mutex_unlock(&tree->mutex); | 221 | spin_unlock(&tree->lock); |
222 | return 0; | 222 | return 0; |
223 | } | 223 | } |
224 | 224 | ||
@@ -232,15 +232,16 @@ int btrfs_add_ordered_sum(struct inode *inode, | |||
232 | * to make sure this function only returns 1 once for a given ordered extent. | 232 | * to make sure this function only returns 1 once for a given ordered extent. |
233 | */ | 233 | */ |
234 | int btrfs_dec_test_ordered_pending(struct inode *inode, | 234 | int btrfs_dec_test_ordered_pending(struct inode *inode, |
235 | struct btrfs_ordered_extent **cached, | ||
235 | u64 file_offset, u64 io_size) | 236 | u64 file_offset, u64 io_size) |
236 | { | 237 | { |
237 | struct btrfs_ordered_inode_tree *tree; | 238 | struct btrfs_ordered_inode_tree *tree; |
238 | struct rb_node *node; | 239 | struct rb_node *node; |
239 | struct btrfs_ordered_extent *entry; | 240 | struct btrfs_ordered_extent *entry = NULL; |
240 | int ret; | 241 | int ret; |
241 | 242 | ||
242 | tree = &BTRFS_I(inode)->ordered_tree; | 243 | tree = &BTRFS_I(inode)->ordered_tree; |
243 | mutex_lock(&tree->mutex); | 244 | spin_lock(&tree->lock); |
244 | node = tree_search(tree, file_offset); | 245 | node = tree_search(tree, file_offset); |
245 | if (!node) { | 246 | if (!node) { |
246 | ret = 1; | 247 | ret = 1; |
@@ -264,7 +265,11 @@ int btrfs_dec_test_ordered_pending(struct inode *inode, | |||
264 | else | 265 | else |
265 | ret = 1; | 266 | ret = 1; |
266 | out: | 267 | out: |
267 | mutex_unlock(&tree->mutex); | 268 | if (!ret && cached && entry) { |
269 | *cached = entry; | ||
270 | atomic_inc(&entry->refs); | ||
271 | } | ||
272 | spin_unlock(&tree->lock); | ||
268 | return ret == 0; | 273 | return ret == 0; |
269 | } | 274 | } |
270 | 275 | ||
@@ -291,7 +296,7 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) | |||
291 | 296 | ||
292 | /* | 297 | /* |
293 | * remove an ordered extent from the tree. No references are dropped | 298 | * remove an ordered extent from the tree. No references are dropped |
294 | * and you must wake_up entry->wait. You must hold the tree mutex | 299 | * and you must wake_up entry->wait. You must hold the tree lock |
295 | * while you call this function. | 300 | * while you call this function. |
296 | */ | 301 | */ |
297 | static int __btrfs_remove_ordered_extent(struct inode *inode, | 302 | static int __btrfs_remove_ordered_extent(struct inode *inode, |
@@ -340,9 +345,9 @@ int btrfs_remove_ordered_extent(struct inode *inode, | |||
340 | int ret; | 345 | int ret; |
341 | 346 | ||
342 | tree = &BTRFS_I(inode)->ordered_tree; | 347 | tree = &BTRFS_I(inode)->ordered_tree; |
343 | mutex_lock(&tree->mutex); | 348 | spin_lock(&tree->lock); |
344 | ret = __btrfs_remove_ordered_extent(inode, entry); | 349 | ret = __btrfs_remove_ordered_extent(inode, entry); |
345 | mutex_unlock(&tree->mutex); | 350 | spin_unlock(&tree->lock); |
346 | wake_up(&entry->wait); | 351 | wake_up(&entry->wait); |
347 | 352 | ||
348 | return ret; | 353 | return ret; |
@@ -567,7 +572,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, | |||
567 | struct btrfs_ordered_extent *entry = NULL; | 572 | struct btrfs_ordered_extent *entry = NULL; |
568 | 573 | ||
569 | tree = &BTRFS_I(inode)->ordered_tree; | 574 | tree = &BTRFS_I(inode)->ordered_tree; |
570 | mutex_lock(&tree->mutex); | 575 | spin_lock(&tree->lock); |
571 | node = tree_search(tree, file_offset); | 576 | node = tree_search(tree, file_offset); |
572 | if (!node) | 577 | if (!node) |
573 | goto out; | 578 | goto out; |
@@ -578,7 +583,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, | |||
578 | if (entry) | 583 | if (entry) |
579 | atomic_inc(&entry->refs); | 584 | atomic_inc(&entry->refs); |
580 | out: | 585 | out: |
581 | mutex_unlock(&tree->mutex); | 586 | spin_unlock(&tree->lock); |
582 | return entry; | 587 | return entry; |
583 | } | 588 | } |
584 | 589 | ||
@@ -594,7 +599,7 @@ btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset) | |||
594 | struct btrfs_ordered_extent *entry = NULL; | 599 | struct btrfs_ordered_extent *entry = NULL; |
595 | 600 | ||
596 | tree = &BTRFS_I(inode)->ordered_tree; | 601 | tree = &BTRFS_I(inode)->ordered_tree; |
597 | mutex_lock(&tree->mutex); | 602 | spin_lock(&tree->lock); |
598 | node = tree_search(tree, file_offset); | 603 | node = tree_search(tree, file_offset); |
599 | if (!node) | 604 | if (!node) |
600 | goto out; | 605 | goto out; |
@@ -602,7 +607,7 @@ btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset) | |||
602 | entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); | 607 | entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); |
603 | atomic_inc(&entry->refs); | 608 | atomic_inc(&entry->refs); |
604 | out: | 609 | out: |
605 | mutex_unlock(&tree->mutex); | 610 | spin_unlock(&tree->lock); |
606 | return entry; | 611 | return entry; |
607 | } | 612 | } |
608 | 613 | ||
@@ -629,7 +634,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, | |||
629 | else | 634 | else |
630 | offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize); | 635 | offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize); |
631 | 636 | ||
632 | mutex_lock(&tree->mutex); | 637 | spin_lock(&tree->lock); |
633 | disk_i_size = BTRFS_I(inode)->disk_i_size; | 638 | disk_i_size = BTRFS_I(inode)->disk_i_size; |
634 | 639 | ||
635 | /* truncate file */ | 640 | /* truncate file */ |
@@ -735,7 +740,7 @@ out: | |||
735 | */ | 740 | */ |
736 | if (ordered) | 741 | if (ordered) |
737 | __btrfs_remove_ordered_extent(inode, ordered); | 742 | __btrfs_remove_ordered_extent(inode, ordered); |
738 | mutex_unlock(&tree->mutex); | 743 | spin_unlock(&tree->lock); |
739 | if (ordered) | 744 | if (ordered) |
740 | wake_up(&ordered->wait); | 745 | wake_up(&ordered->wait); |
741 | return ret; | 746 | return ret; |
@@ -762,7 +767,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, | |||
762 | if (!ordered) | 767 | if (!ordered) |
763 | return 1; | 768 | return 1; |
764 | 769 | ||
765 | mutex_lock(&tree->mutex); | 770 | spin_lock(&tree->lock); |
766 | list_for_each_entry_reverse(ordered_sum, &ordered->list, list) { | 771 | list_for_each_entry_reverse(ordered_sum, &ordered->list, list) { |
767 | if (disk_bytenr >= ordered_sum->bytenr) { | 772 | if (disk_bytenr >= ordered_sum->bytenr) { |
768 | num_sectors = ordered_sum->len / sectorsize; | 773 | num_sectors = ordered_sum->len / sectorsize; |
@@ -777,7 +782,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, | |||
777 | } | 782 | } |
778 | } | 783 | } |
779 | out: | 784 | out: |
780 | mutex_unlock(&tree->mutex); | 785 | spin_unlock(&tree->lock); |
781 | btrfs_put_ordered_extent(ordered); | 786 | btrfs_put_ordered_extent(ordered); |
782 | return ret; | 787 | return ret; |
783 | } | 788 | } |
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 9116c6d0c5a9..c82f76a9f040 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h | |||
@@ -21,7 +21,7 @@ | |||
21 | 21 | ||
22 | /* one of these per inode */ | 22 | /* one of these per inode */ |
23 | struct btrfs_ordered_inode_tree { | 23 | struct btrfs_ordered_inode_tree { |
24 | struct mutex mutex; | 24 | spinlock_t lock; |
25 | struct rb_root tree; | 25 | struct rb_root tree; |
26 | struct rb_node *last; | 26 | struct rb_node *last; |
27 | }; | 27 | }; |
@@ -128,7 +128,7 @@ static inline int btrfs_ordered_sum_size(struct btrfs_root *root, | |||
128 | static inline void | 128 | static inline void |
129 | btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t) | 129 | btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t) |
130 | { | 130 | { |
131 | mutex_init(&t->mutex); | 131 | spin_lock_init(&t->lock); |
132 | t->tree = RB_ROOT; | 132 | t->tree = RB_ROOT; |
133 | t->last = NULL; | 133 | t->last = NULL; |
134 | } | 134 | } |
@@ -137,7 +137,8 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry); | |||
137 | int btrfs_remove_ordered_extent(struct inode *inode, | 137 | int btrfs_remove_ordered_extent(struct inode *inode, |
138 | struct btrfs_ordered_extent *entry); | 138 | struct btrfs_ordered_extent *entry); |
139 | int btrfs_dec_test_ordered_pending(struct inode *inode, | 139 | int btrfs_dec_test_ordered_pending(struct inode *inode, |
140 | u64 file_offset, u64 io_size); | 140 | struct btrfs_ordered_extent **cached, |
141 | u64 file_offset, u64 io_size); | ||
141 | int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, | 142 | int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, |
142 | u64 start, u64 len, u64 disk_len, int tyep); | 143 | u64 start, u64 len, u64 disk_len, int tyep); |
143 | int btrfs_add_ordered_sum(struct inode *inode, | 144 | int btrfs_add_ordered_sum(struct inode *inode, |
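With the new cached argument, btrfs_dec_test_ordered_pending() can hand the completed ordered extent straight back to its caller, already referenced under tree->lock, so the completion path does not have to look it up a second time. A rough in-kernel caller sketch (the surrounding I/O-completion context and the work done on the extent are assumed):

	struct btrfs_ordered_extent *ordered = NULL;

	/* returns 1 only once the whole [start, start + len) range has completed */
	if (!btrfs_dec_test_ordered_pending(inode, &ordered, start, len))
		return 0;

	/* 'ordered' now carries an extra reference taken while the tree lock was held */
	/* ... finish the ordered extent: checksums, i_size update, and so on ... */

	btrfs_put_ordered_extent(ordered);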
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 0109e5606bad..0b23942cbc0d 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c | |||
@@ -2659,7 +2659,7 @@ static int relocate_file_extent_cluster(struct inode *inode, | |||
2659 | EXTENT_BOUNDARY, GFP_NOFS); | 2659 | EXTENT_BOUNDARY, GFP_NOFS); |
2660 | nr++; | 2660 | nr++; |
2661 | } | 2661 | } |
2662 | btrfs_set_extent_delalloc(inode, page_start, page_end); | 2662 | btrfs_set_extent_delalloc(inode, page_start, page_end, NULL); |
2663 | 2663 | ||
2664 | set_page_dirty(page); | 2664 | set_page_dirty(page); |
2665 | dirty_page++; | 2665 | dirty_page++; |
@@ -3487,7 +3487,7 @@ static struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, | |||
3487 | key.objectid = objectid; | 3487 | key.objectid = objectid; |
3488 | key.type = BTRFS_INODE_ITEM_KEY; | 3488 | key.type = BTRFS_INODE_ITEM_KEY; |
3489 | key.offset = 0; | 3489 | key.offset = 0; |
3490 | inode = btrfs_iget(root->fs_info->sb, &key, root); | 3490 | inode = btrfs_iget(root->fs_info->sb, &key, root, NULL); |
3491 | BUG_ON(IS_ERR(inode) || is_bad_inode(inode)); | 3491 | BUG_ON(IS_ERR(inode) || is_bad_inode(inode)); |
3492 | BTRFS_I(inode)->index_cnt = group->key.objectid; | 3492 | BTRFS_I(inode)->index_cnt = group->key.objectid; |
3493 | 3493 | ||
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index f8b4521de907..9ac612e6ca60 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
@@ -63,10 +63,10 @@ static void btrfs_put_super(struct super_block *sb) | |||
63 | } | 63 | } |
64 | 64 | ||
65 | enum { | 65 | enum { |
66 | Opt_degraded, Opt_subvol, Opt_device, Opt_nodatasum, Opt_nodatacow, | 66 | Opt_degraded, Opt_subvol, Opt_subvolid, Opt_device, Opt_nodatasum, |
67 | Opt_max_extent, Opt_max_inline, Opt_alloc_start, Opt_nobarrier, | 67 | Opt_nodatacow, Opt_max_extent, Opt_max_inline, Opt_alloc_start, |
68 | Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, | 68 | Opt_nobarrier, Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool, |
69 | Opt_compress, Opt_compress_force, Opt_notreelog, Opt_ratio, | 69 | Opt_noacl, Opt_compress, Opt_compress_force, Opt_notreelog, Opt_ratio, |
70 | Opt_flushoncommit, | 70 | Opt_flushoncommit, |
71 | Opt_discard, Opt_err, | 71 | Opt_discard, Opt_err, |
72 | }; | 72 | }; |
@@ -74,6 +74,7 @@ enum { | |||
74 | static match_table_t tokens = { | 74 | static match_table_t tokens = { |
75 | {Opt_degraded, "degraded"}, | 75 | {Opt_degraded, "degraded"}, |
76 | {Opt_subvol, "subvol=%s"}, | 76 | {Opt_subvol, "subvol=%s"}, |
77 | {Opt_subvolid, "subvolid=%d"}, | ||
77 | {Opt_device, "device=%s"}, | 78 | {Opt_device, "device=%s"}, |
78 | {Opt_nodatasum, "nodatasum"}, | 79 | {Opt_nodatasum, "nodatasum"}, |
79 | {Opt_nodatacow, "nodatacow"}, | 80 | {Opt_nodatacow, "nodatacow"}, |
@@ -95,31 +96,6 @@ static match_table_t tokens = { | |||
95 | {Opt_err, NULL}, | 96 | {Opt_err, NULL}, |
96 | }; | 97 | }; |
97 | 98 | ||
98 | u64 btrfs_parse_size(char *str) | ||
99 | { | ||
100 | u64 res; | ||
101 | int mult = 1; | ||
102 | char *end; | ||
103 | char last; | ||
104 | |||
105 | res = simple_strtoul(str, &end, 10); | ||
106 | |||
107 | last = end[0]; | ||
108 | if (isalpha(last)) { | ||
109 | last = tolower(last); | ||
110 | switch (last) { | ||
111 | case 'g': | ||
112 | mult *= 1024; | ||
113 | case 'm': | ||
114 | mult *= 1024; | ||
115 | case 'k': | ||
116 | mult *= 1024; | ||
117 | } | ||
118 | res = res * mult; | ||
119 | } | ||
120 | return res; | ||
121 | } | ||
122 | |||
123 | /* | 99 | /* |
124 | * Regular mount options parser. Everything that is needed only when | 100 | * Regular mount options parser. Everything that is needed only when |
125 | * reading in a new superblock is parsed here. | 101 | * reading in a new superblock is parsed here. |
@@ -157,6 +133,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) | |||
157 | btrfs_set_opt(info->mount_opt, DEGRADED); | 133 | btrfs_set_opt(info->mount_opt, DEGRADED); |
158 | break; | 134 | break; |
159 | case Opt_subvol: | 135 | case Opt_subvol: |
136 | case Opt_subvolid: | ||
160 | case Opt_device: | 137 | case Opt_device: |
161 | /* | 138 | /* |
162 | * These are parsed by btrfs_parse_early_options | 139 | * These are parsed by btrfs_parse_early_options |
@@ -214,7 +191,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) | |||
214 | case Opt_max_extent: | 191 | case Opt_max_extent: |
215 | num = match_strdup(&args[0]); | 192 | num = match_strdup(&args[0]); |
216 | if (num) { | 193 | if (num) { |
217 | info->max_extent = btrfs_parse_size(num); | 194 | info->max_extent = memparse(num, NULL); |
218 | kfree(num); | 195 | kfree(num); |
219 | 196 | ||
220 | info->max_extent = max_t(u64, | 197 | info->max_extent = max_t(u64, |
@@ -226,7 +203,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) | |||
226 | case Opt_max_inline: | 203 | case Opt_max_inline: |
227 | num = match_strdup(&args[0]); | 204 | num = match_strdup(&args[0]); |
228 | if (num) { | 205 | if (num) { |
229 | info->max_inline = btrfs_parse_size(num); | 206 | info->max_inline = memparse(num, NULL); |
230 | kfree(num); | 207 | kfree(num); |
231 | 208 | ||
232 | if (info->max_inline) { | 209 | if (info->max_inline) { |
@@ -241,7 +218,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) | |||
241 | case Opt_alloc_start: | 218 | case Opt_alloc_start: |
242 | num = match_strdup(&args[0]); | 219 | num = match_strdup(&args[0]); |
243 | if (num) { | 220 | if (num) { |
244 | info->alloc_start = btrfs_parse_size(num); | 221 | info->alloc_start = memparse(num, NULL); |
245 | kfree(num); | 222 | kfree(num); |
246 | printk(KERN_INFO | 223 | printk(KERN_INFO |
247 | "btrfs: allocations start at %llu\n", | 224 | "btrfs: allocations start at %llu\n", |
@@ -292,12 +269,13 @@ out: | |||
292 | * only when we need to allocate a new super block. | 269 | * only when we need to allocate a new super block. |
293 | */ | 270 | */ |
294 | static int btrfs_parse_early_options(const char *options, fmode_t flags, | 271 | static int btrfs_parse_early_options(const char *options, fmode_t flags, |
295 | void *holder, char **subvol_name, | 272 | void *holder, char **subvol_name, u64 *subvol_objectid, |
296 | struct btrfs_fs_devices **fs_devices) | 273 | struct btrfs_fs_devices **fs_devices) |
297 | { | 274 | { |
298 | substring_t args[MAX_OPT_ARGS]; | 275 | substring_t args[MAX_OPT_ARGS]; |
299 | char *opts, *p; | 276 | char *opts, *p; |
300 | int error = 0; | 277 | int error = 0; |
278 | int intarg; | ||
301 | 279 | ||
302 | if (!options) | 280 | if (!options) |
303 | goto out; | 281 | goto out; |
@@ -320,6 +298,18 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, | |||
320 | case Opt_subvol: | 298 | case Opt_subvol: |
321 | *subvol_name = match_strdup(&args[0]); | 299 | *subvol_name = match_strdup(&args[0]); |
322 | break; | 300 | break; |
301 | case Opt_subvolid: | ||
302 | intarg = 0; | ||
303 | error = match_int(&args[0], &intarg); | ||
304 | if (!error) { | ||
305 | /* we want the original fs_tree */ | ||
306 | if (!intarg) | ||
307 | *subvol_objectid = | ||
308 | BTRFS_FS_TREE_OBJECTID; | ||
309 | else | ||
310 | *subvol_objectid = intarg; | ||
311 | } | ||
312 | break; | ||
323 | case Opt_device: | 313 | case Opt_device: |
324 | error = btrfs_scan_one_device(match_strdup(&args[0]), | 314 | error = btrfs_scan_one_device(match_strdup(&args[0]), |
325 | flags, holder, fs_devices); | 315 | flags, holder, fs_devices); |
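The new subvolid= handling stores the parsed id in *subvol_objectid, remapping 0 to BTRFS_FS_TREE_OBJECTID. It is consumed by get_default_root() in the next hunk; condensed from that hunk, the non-zero case boils down to a direct root-item lookup that bypasses the "default" dir item:

	struct btrfs_key location;
	struct btrfs_root *new_root;

	/* condensed from get_default_root(): a non-zero subvolid= is
	 * looked up straight away as a root item */
	location.objectid = subvol_objectid;
	location.type = BTRFS_ROOT_ITEM_KEY;
	location.offset = (u64)-1;
	new_root = btrfs_read_fs_root_no_name(root->fs_info, &location);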
@@ -347,6 +337,110 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, | |||
347 | return error; | 337 | return error; |
348 | } | 338 | } |
349 | 339 | ||
340 | static struct dentry *get_default_root(struct super_block *sb, | ||
341 | u64 subvol_objectid) | ||
342 | { | ||
343 | struct btrfs_root *root = sb->s_fs_info; | ||
344 | struct btrfs_root *new_root; | ||
345 | struct btrfs_dir_item *di; | ||
346 | struct btrfs_path *path; | ||
347 | struct btrfs_key location; | ||
348 | struct inode *inode; | ||
349 | struct dentry *dentry; | ||
350 | u64 dir_id; | ||
351 | int new = 0; | ||
352 | |||
353 | /* | ||
354 | * We have a specific subvol we want to mount, just setup location and | ||
355 | * go look up the root. | ||
356 | */ | ||
357 | if (subvol_objectid) { | ||
358 | location.objectid = subvol_objectid; | ||
359 | location.type = BTRFS_ROOT_ITEM_KEY; | ||
360 | location.offset = (u64)-1; | ||
361 | goto find_root; | ||
362 | } | ||
363 | |||
364 | path = btrfs_alloc_path(); | ||
365 | if (!path) | ||
366 | return ERR_PTR(-ENOMEM); | ||
367 | path->leave_spinning = 1; | ||
368 | |||
369 | /* | ||
370 | * Find the "default" dir item which points to the root item that we | ||
371 | * will mount by default if we haven't been given a specific subvolume | ||
372 | * to mount. | ||
373 | */ | ||
374 | dir_id = btrfs_super_root_dir(&root->fs_info->super_copy); | ||
375 | di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0); | ||
376 | if (!di) { | ||
377 | /* | ||
378 | * Ok the default dir item isn't there. This is weird since | ||
379 | * it's always been there, but don't freak out, just try and | ||
380 | * mount to root most subvolume. | ||
381 | */ | ||
382 | btrfs_free_path(path); | ||
383 | dir_id = BTRFS_FIRST_FREE_OBJECTID; | ||
384 | new_root = root->fs_info->fs_root; | ||
385 | goto setup_root; | ||
386 | } | ||
387 | |||
388 | btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); | ||
389 | btrfs_free_path(path); | ||
390 | |||
391 | find_root: | ||
392 | new_root = btrfs_read_fs_root_no_name(root->fs_info, &location); | ||
393 | if (IS_ERR(new_root)) | ||
394 | return ERR_PTR(PTR_ERR(new_root)); | ||
395 | |||
396 | if (btrfs_root_refs(&new_root->root_item) == 0) | ||
397 | return ERR_PTR(-ENOENT); | ||
398 | |||
399 | dir_id = btrfs_root_dirid(&new_root->root_item); | ||
400 | setup_root: | ||
401 | location.objectid = dir_id; | ||
402 | location.type = BTRFS_INODE_ITEM_KEY; | ||
403 | location.offset = 0; | ||
404 | |||
405 | inode = btrfs_iget(sb, &location, new_root, &new); | ||
406 | if (!inode) | ||
407 | return ERR_PTR(-ENOMEM); | ||
408 | |||
409 | /* | ||
410 | * If we're just mounting the root most subvol put the inode and return | ||
411 | * a reference to the dentry. We will have already gotten a reference | ||
412 | * to the inode in btrfs_fill_super so we're good to go. | ||
413 | */ | ||
414 | if (!new && sb->s_root->d_inode == inode) { | ||
415 | iput(inode); | ||
416 | return dget(sb->s_root); | ||
417 | } | ||
418 | |||
419 | if (new) { | ||
420 | const struct qstr name = { .name = "/", .len = 1 }; | ||
421 | |||
422 | /* | ||
423 | * New inode, we need to make the dentry a sibling of s_root so | ||
424 | * everything gets cleaned up properly on unmount. | ||
425 | */ | ||
426 | dentry = d_alloc(sb->s_root, &name); | ||
427 | if (!dentry) { | ||
428 | iput(inode); | ||
429 | return ERR_PTR(-ENOMEM); | ||
430 | } | ||
431 | d_splice_alias(inode, dentry); | ||
432 | } else { | ||
433 | /* | ||
434 | * We found the inode in cache, just find a dentry for it and | ||
435 | * put the reference to the inode we just got. | ||
436 | */ | ||
437 | dentry = d_find_alias(inode); | ||
438 | iput(inode); | ||
439 | } | ||
440 | |||
441 | return dentry; | ||
442 | } | ||
443 | |||
350 | static int btrfs_fill_super(struct super_block *sb, | 444 | static int btrfs_fill_super(struct super_block *sb, |
351 | struct btrfs_fs_devices *fs_devices, | 445 | struct btrfs_fs_devices *fs_devices, |
352 | void *data, int silent) | 446 | void *data, int silent) |
@@ -380,7 +474,7 @@ static int btrfs_fill_super(struct super_block *sb, | |||
380 | key.objectid = BTRFS_FIRST_FREE_OBJECTID; | 474 | key.objectid = BTRFS_FIRST_FREE_OBJECTID; |
381 | key.type = BTRFS_INODE_ITEM_KEY; | 475 | key.type = BTRFS_INODE_ITEM_KEY; |
382 | key.offset = 0; | 476 | key.offset = 0; |
383 | inode = btrfs_iget(sb, &key, tree_root->fs_info->fs_root); | 477 | inode = btrfs_iget(sb, &key, tree_root->fs_info->fs_root, NULL); |
384 | if (IS_ERR(inode)) { | 478 | if (IS_ERR(inode)) { |
385 | err = PTR_ERR(inode); | 479 | err = PTR_ERR(inode); |
386 | goto fail_close; | 480 | goto fail_close; |
@@ -392,12 +486,6 @@ static int btrfs_fill_super(struct super_block *sb, | |||
392 | err = -ENOMEM; | 486 | err = -ENOMEM; |
393 | goto fail_close; | 487 | goto fail_close; |
394 | } | 488 | } |
395 | #if 0 | ||
396 | /* this does the super kobj at the same time */ | ||
397 | err = btrfs_sysfs_add_super(tree_root->fs_info); | ||
398 | if (err) | ||
399 | goto fail_close; | ||
400 | #endif | ||
401 | 489 | ||
402 | sb->s_root = root_dentry; | 490 | sb->s_root = root_dentry; |
403 | 491 | ||
@@ -489,19 +577,22 @@ static int btrfs_test_super(struct super_block *s, void *data) | |||
489 | static int btrfs_get_sb(struct file_system_type *fs_type, int flags, | 577 | static int btrfs_get_sb(struct file_system_type *fs_type, int flags, |
490 | const char *dev_name, void *data, struct vfsmount *mnt) | 578 | const char *dev_name, void *data, struct vfsmount *mnt) |
491 | { | 579 | { |
492 | char *subvol_name = NULL; | ||
493 | struct block_device *bdev = NULL; | 580 | struct block_device *bdev = NULL; |
494 | struct super_block *s; | 581 | struct super_block *s; |
495 | struct dentry *root; | 582 | struct dentry *root; |
496 | struct btrfs_fs_devices *fs_devices = NULL; | 583 | struct btrfs_fs_devices *fs_devices = NULL; |
497 | fmode_t mode = FMODE_READ; | 584 | fmode_t mode = FMODE_READ; |
585 | char *subvol_name = NULL; | ||
586 | u64 subvol_objectid = 0; | ||
498 | int error = 0; | 587 | int error = 0; |
588 | int found = 0; | ||
499 | 589 | ||
500 | if (!(flags & MS_RDONLY)) | 590 | if (!(flags & MS_RDONLY)) |
501 | mode |= FMODE_WRITE; | 591 | mode |= FMODE_WRITE; |
502 | 592 | ||
503 | error = btrfs_parse_early_options(data, mode, fs_type, | 593 | error = btrfs_parse_early_options(data, mode, fs_type, |
504 | &subvol_name, &fs_devices); | 594 | &subvol_name, &subvol_objectid, |
595 | &fs_devices); | ||
505 | if (error) | 596 | if (error) |
506 | return error; | 597 | return error; |
507 | 598 | ||
@@ -530,6 +621,7 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags, | |||
530 | goto error_close_devices; | 621 | goto error_close_devices; |
531 | } | 622 | } |
532 | 623 | ||
624 | found = 1; | ||
533 | btrfs_close_devices(fs_devices); | 625 | btrfs_close_devices(fs_devices); |
534 | } else { | 626 | } else { |
535 | char b[BDEVNAME_SIZE]; | 627 | char b[BDEVNAME_SIZE]; |
@@ -547,25 +639,35 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags, | |||
547 | s->s_flags |= MS_ACTIVE; | 639 | s->s_flags |= MS_ACTIVE; |
548 | } | 640 | } |
549 | 641 | ||
550 | if (!strcmp(subvol_name, ".")) | 642 | root = get_default_root(s, subvol_objectid); |
551 | root = dget(s->s_root); | 643 | if (IS_ERR(root)) { |
552 | else { | 644 | error = PTR_ERR(root); |
553 | mutex_lock(&s->s_root->d_inode->i_mutex); | 645 | deactivate_locked_super(s); |
554 | root = lookup_one_len(subvol_name, s->s_root, | 646 | goto error; |
647 | } | ||
648 | /* if they gave us a subvolume name bind mount into that */ | ||
649 | if (strcmp(subvol_name, ".")) { | ||
650 | struct dentry *new_root; | ||
651 | mutex_lock(&root->d_inode->i_mutex); | ||
652 | new_root = lookup_one_len(subvol_name, root, | ||
555 | strlen(subvol_name)); | 653 | strlen(subvol_name)); |
556 | mutex_unlock(&s->s_root->d_inode->i_mutex); | 654 | mutex_unlock(&root->d_inode->i_mutex); |
557 | 655 | ||
558 | if (IS_ERR(root)) { | 656 | if (IS_ERR(new_root)) { |
559 | deactivate_locked_super(s); | 657 | deactivate_locked_super(s); |
560 | error = PTR_ERR(root); | 658 | error = PTR_ERR(new_root); |
561 | goto error_free_subvol_name; | 659 | dput(root); |
660 | goto error_close_devices; | ||
562 | } | 661 | } |
563 | if (!root->d_inode) { | 662 | if (!new_root->d_inode) { |
564 | dput(root); | 663 | dput(root); |
664 | dput(new_root); | ||
565 | deactivate_locked_super(s); | 665 | deactivate_locked_super(s); |
566 | error = -ENXIO; | 666 | error = -ENXIO; |
567 | goto error_free_subvol_name; | 667 | goto error_close_devices; |
568 | } | 668 | } |
669 | dput(root); | ||
670 | root = new_root; | ||
569 | } | 671 | } |
570 | 672 | ||
571 | mnt->mnt_sb = s; | 673 | mnt->mnt_sb = s; |
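Putting the pieces of this hunk together: the mount path now always resolves a starting dentry via get_default_root() (honouring subvolid= or the stored default subvolume) and only then, if subvol= names something other than ".", looks that name up under the resulting dentry. Condensed flow, with the error paths (deactivate_locked_super(), dput()) elided:

	root = get_default_root(s, subvol_objectid);

	/* if a subvolume *name* was also given, bind into it */
	if (strcmp(subvol_name, ".")) {
		struct dentry *new_root;

		mutex_lock(&root->d_inode->i_mutex);
		new_root = lookup_one_len(subvol_name, root,
					  strlen(subvol_name));
		mutex_unlock(&root->d_inode->i_mutex);

		dput(root);
		root = new_root;
	}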
@@ -580,6 +682,7 @@ error_close_devices: | |||
580 | btrfs_close_devices(fs_devices); | 682 | btrfs_close_devices(fs_devices); |
581 | error_free_subvol_name: | 683 | error_free_subvol_name: |
582 | kfree(subvol_name); | 684 | kfree(subvol_name); |
685 | error: | ||
583 | return error; | 686 | return error; |
584 | } | 687 | } |
585 | 688 | ||
@@ -624,14 +727,37 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
624 | { | 727 | { |
625 | struct btrfs_root *root = btrfs_sb(dentry->d_sb); | 728 | struct btrfs_root *root = btrfs_sb(dentry->d_sb); |
626 | struct btrfs_super_block *disk_super = &root->fs_info->super_copy; | 729 | struct btrfs_super_block *disk_super = &root->fs_info->super_copy; |
730 | struct list_head *head = &root->fs_info->space_info; | ||
731 | struct btrfs_space_info *found; | ||
732 | u64 total_used = 0; | ||
733 | u64 data_used = 0; | ||
627 | int bits = dentry->d_sb->s_blocksize_bits; | 734 | int bits = dentry->d_sb->s_blocksize_bits; |
628 | __be32 *fsid = (__be32 *)root->fs_info->fsid; | 735 | __be32 *fsid = (__be32 *)root->fs_info->fsid; |
629 | 736 | ||
737 | rcu_read_lock(); | ||
738 | list_for_each_entry_rcu(found, head, list) { | ||
739 | if (found->flags & (BTRFS_BLOCK_GROUP_DUP| | ||
740 | BTRFS_BLOCK_GROUP_RAID10| | ||
741 | BTRFS_BLOCK_GROUP_RAID1)) { | ||
742 | total_used += found->bytes_used; | ||
743 | if (found->flags & BTRFS_BLOCK_GROUP_DATA) | ||
744 | data_used += found->bytes_used; | ||
745 | else | ||
746 | data_used += found->total_bytes; | ||
747 | } | ||
748 | |||
749 | total_used += found->bytes_used; | ||
750 | if (found->flags & BTRFS_BLOCK_GROUP_DATA) | ||
751 | data_used += found->bytes_used; | ||
752 | else | ||
753 | data_used += found->total_bytes; | ||
754 | } | ||
755 | rcu_read_unlock(); | ||
756 | |||
630 | buf->f_namelen = BTRFS_NAME_LEN; | 757 | buf->f_namelen = BTRFS_NAME_LEN; |
631 | buf->f_blocks = btrfs_super_total_bytes(disk_super) >> bits; | 758 | buf->f_blocks = btrfs_super_total_bytes(disk_super) >> bits; |
632 | buf->f_bfree = buf->f_blocks - | 759 | buf->f_bfree = buf->f_blocks - (total_used >> bits); |
633 | (btrfs_super_bytes_used(disk_super) >> bits); | 760 | buf->f_bavail = buf->f_blocks - (data_used >> bits); |
634 | buf->f_bavail = buf->f_bfree; | ||
635 | buf->f_bsize = dentry->d_sb->s_blocksize; | 761 | buf->f_bsize = dentry->d_sb->s_blocksize; |
636 | buf->f_type = BTRFS_SUPER_MAGIC; | 762 | buf->f_type = BTRFS_SUPER_MAGIC; |
637 | 763 | ||
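With the space_info totals gathered above (where DUP/RAID1/RAID10 usage is added twice, apparently to charge for the extra raw copy those profiles store), the df numbers are now derived per allocation class instead of from the single bytes_used counter in the super block. A rough sketch of the arithmetic; the helper name is illustrative, not from the patch:

#include <linux/statfs.h>

/* illustrative helper: how the new statfs fields relate to the
 * totals gathered from the space_info list */
static void btrfs_fill_df(struct kstatfs *buf, u64 total_bytes,
			  u64 total_used, u64 data_used, int bits)
{
	buf->f_blocks = total_bytes >> bits;			/* fs size      */
	buf->f_bfree  = buf->f_blocks - (total_used >> bits);	/* raw free     */
	buf->f_bavail = buf->f_blocks - (data_used  >> bits);	/* free for data */
}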
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 2a36e236a492..2d654c1c794d 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -997,13 +997,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | |||
997 | 997 | ||
998 | mutex_unlock(&root->fs_info->trans_mutex); | 998 | mutex_unlock(&root->fs_info->trans_mutex); |
999 | 999 | ||
1000 | if (flush_on_commit) { | 1000 | if (flush_on_commit || snap_pending) { |
1001 | btrfs_start_delalloc_inodes(root, 1); | 1001 | btrfs_start_delalloc_inodes(root, 1); |
1002 | ret = btrfs_wait_ordered_extents(root, 0, 1); | 1002 | ret = btrfs_wait_ordered_extents(root, 0, 1); |
1003 | BUG_ON(ret); | 1003 | BUG_ON(ret); |
1004 | } else if (snap_pending) { | ||
1005 | ret = btrfs_wait_ordered_extents(root, 0, 1); | ||
1006 | BUG_ON(ret); | ||
1007 | } | 1004 | } |
1008 | 1005 | ||
1009 | /* | 1006 | /* |
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 4a9434b622ec..1255fcc8ade5 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
@@ -445,7 +445,7 @@ static noinline struct inode *read_one_inode(struct btrfs_root *root, | |||
445 | key.objectid = objectid; | 445 | key.objectid = objectid; |
446 | key.type = BTRFS_INODE_ITEM_KEY; | 446 | key.type = BTRFS_INODE_ITEM_KEY; |
447 | key.offset = 0; | 447 | key.offset = 0; |
448 | inode = btrfs_iget(root->fs_info->sb, &key, root); | 448 | inode = btrfs_iget(root->fs_info->sb, &key, root, NULL); |
449 | if (IS_ERR(inode)) { | 449 | if (IS_ERR(inode)) { |
450 | inode = NULL; | 450 | inode = NULL; |
451 | } else if (is_bad_inode(inode)) { | 451 | } else if (is_bad_inode(inode)) { |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 41ecbb2347f2..9df8e3f1ccab 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -256,13 +256,13 @@ loop_lock: | |||
256 | wake_up(&fs_info->async_submit_wait); | 256 | wake_up(&fs_info->async_submit_wait); |
257 | 257 | ||
258 | BUG_ON(atomic_read(&cur->bi_cnt) == 0); | 258 | BUG_ON(atomic_read(&cur->bi_cnt) == 0); |
259 | submit_bio(cur->bi_rw, cur); | ||
260 | num_run++; | ||
261 | batch_run++; | ||
262 | 259 | ||
263 | if (bio_rw_flagged(cur, BIO_RW_SYNCIO)) | 260 | if (bio_rw_flagged(cur, BIO_RW_SYNCIO)) |
264 | num_sync_run++; | 261 | num_sync_run++; |
265 | 262 | ||
263 | submit_bio(cur->bi_rw, cur); | ||
264 | num_run++; | ||
265 | batch_run++; | ||
266 | if (need_resched()) { | 266 | if (need_resched()) { |
267 | if (num_sync_run) { | 267 | if (num_sync_run) { |
268 | blk_run_backing_dev(bdi, NULL); | 268 | blk_run_backing_dev(bdi, NULL); |
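This reordering is the "don't look at bio flags after submit_bio" item from the changelog: once submit_bio() is called the bio can complete and be freed by the endio path at any moment, so anything the scheduler loop still needs must be read first. The general form of the safe pattern, sketched with a latched local:

	/* latch any bio state you still need *before* submitting; the
	 * endio path may free the bio as soon as it is submitted */
	int sync = bio_rw_flagged(cur, BIO_RW_SYNCIO);

	submit_bio(cur->bi_rw, cur);
	num_run++;
	batch_run++;

	if (sync)
		num_sync_run++;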
@@ -325,16 +325,6 @@ loop_lock: | |||
325 | num_sync_run = 0; | 325 | num_sync_run = 0; |
326 | blk_run_backing_dev(bdi, NULL); | 326 | blk_run_backing_dev(bdi, NULL); |
327 | } | 327 | } |
328 | |||
329 | cond_resched(); | ||
330 | if (again) | ||
331 | goto loop; | ||
332 | |||
333 | spin_lock(&device->io_lock); | ||
334 | if (device->pending_bios.head || device->pending_sync_bios.head) | ||
335 | goto loop_lock; | ||
336 | spin_unlock(&device->io_lock); | ||
337 | |||
338 | /* | 328 | /* |
339 | * IO has already been through a long path to get here. Checksumming, | 329 | * IO has already been through a long path to get here. Checksumming, |
340 | * async helper threads, perhaps compression. We've done a pretty | 330 | * async helper threads, perhaps compression. We've done a pretty |
@@ -346,6 +336,16 @@ loop_lock: | |||
346 | * cared about found its way down here. | 336 | * cared about found its way down here. |
347 | */ | 337 | */ |
348 | blk_run_backing_dev(bdi, NULL); | 338 | blk_run_backing_dev(bdi, NULL); |
339 | |||
340 | cond_resched(); | ||
341 | if (again) | ||
342 | goto loop; | ||
343 | |||
344 | spin_lock(&device->io_lock); | ||
345 | if (device->pending_bios.head || device->pending_sync_bios.head) | ||
346 | goto loop_lock; | ||
347 | spin_unlock(&device->io_lock); | ||
348 | |||
349 | done: | 349 | done: |
350 | return 0; | 350 | return 0; |
351 | } | 351 | } |
@@ -365,6 +365,7 @@ static noinline int device_list_add(const char *path, | |||
365 | struct btrfs_device *device; | 365 | struct btrfs_device *device; |
366 | struct btrfs_fs_devices *fs_devices; | 366 | struct btrfs_fs_devices *fs_devices; |
367 | u64 found_transid = btrfs_super_generation(disk_super); | 367 | u64 found_transid = btrfs_super_generation(disk_super); |
368 | char *name; | ||
368 | 369 | ||
369 | fs_devices = find_fsid(disk_super->fsid); | 370 | fs_devices = find_fsid(disk_super->fsid); |
370 | if (!fs_devices) { | 371 | if (!fs_devices) { |
@@ -411,6 +412,12 @@ static noinline int device_list_add(const char *path, | |||
411 | 412 | ||
412 | device->fs_devices = fs_devices; | 413 | device->fs_devices = fs_devices; |
413 | fs_devices->num_devices++; | 414 | fs_devices->num_devices++; |
415 | } else if (strcmp(device->name, path)) { | ||
416 | name = kstrdup(path, GFP_NOFS); | ||
417 | if (!name) | ||
418 | return -ENOMEM; | ||
419 | kfree(device->name); | ||
420 | device->name = name; | ||
414 | } | 421 | } |
415 | 422 | ||
416 | if (found_transid > fs_devices->latest_trans) { | 423 | if (found_transid > fs_devices->latest_trans) { |
@@ -592,7 +599,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, | |||
592 | goto error_close; | 599 | goto error_close; |
593 | 600 | ||
594 | disk_super = (struct btrfs_super_block *)bh->b_data; | 601 | disk_super = (struct btrfs_super_block *)bh->b_data; |
595 | devid = le64_to_cpu(disk_super->dev_item.devid); | 602 | devid = btrfs_stack_device_id(&disk_super->dev_item); |
596 | if (devid != device->devid) | 603 | if (devid != device->devid) |
597 | goto error_brelse; | 604 | goto error_brelse; |
598 | 605 | ||
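btrfs_stack_device_id() replaces the open-coded le64_to_cpu() here and in the two hunks below. The ctree.h hunk is not shown in this part of the diff, but it is presumably generated with the usual stack accessor macro, along the lines of:

/* in ctree.h -- assumed shape, based on the existing
 * BTRFS_SETGET_STACK_FUNCS() accessors; expands to roughly
 *   static inline u64 btrfs_stack_device_id(struct btrfs_dev_item *s)
 *   { return le64_to_cpu(s->devid); }
 */
BTRFS_SETGET_STACK_FUNCS(stack_device_id, struct btrfs_dev_item, devid, 64);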
@@ -694,7 +701,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, | |||
694 | goto error_close; | 701 | goto error_close; |
695 | } | 702 | } |
696 | disk_super = (struct btrfs_super_block *)bh->b_data; | 703 | disk_super = (struct btrfs_super_block *)bh->b_data; |
697 | devid = le64_to_cpu(disk_super->dev_item.devid); | 704 | devid = btrfs_stack_device_id(&disk_super->dev_item); |
698 | transid = btrfs_super_generation(disk_super); | 705 | transid = btrfs_super_generation(disk_super); |
699 | if (disk_super->label[0]) | 706 | if (disk_super->label[0]) |
700 | printk(KERN_INFO "device label %s ", disk_super->label); | 707 | printk(KERN_INFO "device label %s ", disk_super->label); |
@@ -1187,7 +1194,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) | |||
1187 | goto error_close; | 1194 | goto error_close; |
1188 | } | 1195 | } |
1189 | disk_super = (struct btrfs_super_block *)bh->b_data; | 1196 | disk_super = (struct btrfs_super_block *)bh->b_data; |
1190 | devid = le64_to_cpu(disk_super->dev_item.devid); | 1197 | devid = btrfs_stack_device_id(&disk_super->dev_item); |
1191 | dev_uuid = disk_super->dev_item.uuid; | 1198 | dev_uuid = disk_super->dev_item.uuid; |
1192 | device = btrfs_find_device(root, devid, dev_uuid, | 1199 | device = btrfs_find_device(root, devid, dev_uuid, |
1193 | disk_super->fsid); | 1200 | disk_super->fsid); |