author     Linus Torvalds <torvalds@linux-foundation.org>   2012-04-28 12:30:07 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-04-28 12:30:07 -0400
commit     f7b006931751f029620ad2f8310ac7a1484fbdb4
tree       71120f4c4c51752902317fbf853e3b0316c2adb0 /fs
parent     b990f9b3cb068578b8aefd3a34f8c8555661ef95
parent     dc7fdde39e4962b1a88741f7eba2a6b3be1285d8
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs fixes from Chris Mason:
"This has our collection of bug fixes. I missed the last rc because I
thought our patches were making NFS crash during my xfs test runs.
Turns out it was an NFS client bug fixed by someone else while I tried
to bisect it.
All of these fixes are small, but some are fairly high impact. The
biggest are fixes for our mount -o remount handling, a deadlock due to
GFP_KERNEL allocations in readdir, and a RAID10 error handling bug.
This was tested against both 3.3 and Linus' master as of this morning."
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (26 commits)
Btrfs: reduce lock contention during extent insertion
Btrfs: avoid deadlocks from GFP_KERNEL allocations during btrfs_real_readdir
Btrfs: Fix space checking during fs resize
Btrfs: fix block_rsv and space_info lock ordering
Btrfs: Prevent root_list corruption
Btrfs: fix repair code for RAID10
Btrfs: do not start delalloc inodes during sync
Btrfs: fix that check_int_data mount option was ignored
Btrfs: don't count CRC or header errors twice while scrubbing
Btrfs: fix btrfs_ioctl_dev_info() crash on missing device
btrfs: don't return EINTR
Btrfs: double unlock bug in error handling
Btrfs: always store the mirror we read the eb from
fs/btrfs/volumes.c: add missing free_fs_devices
btrfs: fix early abort in 'remount'
Btrfs: fix max chunk size check in chunk allocator
Btrfs: add missing read locks in backref.c
Btrfs: don't call free_extent_buffer twice in iterate_irefs
Btrfs: Make free_ipath() deal gracefully with NULL pointers
Btrfs: avoid possible use-after-free in clear_extent_bit()
...
Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/backref.c      |  27
-rw-r--r--  fs/btrfs/ctree.h        |   2
-rw-r--r--  fs/btrfs/disk-io.c      |  22
-rw-r--r--  fs/btrfs/extent-tree.c  |  15
-rw-r--r--  fs/btrfs/extent_io.c    |  56
-rw-r--r--  fs/btrfs/extent_io.h    |   4
-rw-r--r--  fs/btrfs/file.c         |   9
-rw-r--r--  fs/btrfs/inode.c        |  54
-rw-r--r--  fs/btrfs/ioctl.c        |   5
-rw-r--r--  fs/btrfs/reada.c        |  48
-rw-r--r--  fs/btrfs/relocation.c   |   4
-rw-r--r--  fs/btrfs/scrub.c        |  15
-rw-r--r--  fs/btrfs/super.c        |   7
-rw-r--r--  fs/btrfs/transaction.c  |   6
-rw-r--r--  fs/btrfs/volumes.c      |  13
15 files changed, 148 insertions, 139 deletions
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index f4e90748940a..bcec06750232 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -22,6 +22,7 @@
 #include "ulist.h"
 #include "transaction.h"
 #include "delayed-ref.h"
+#include "locking.h"
 
 /*
  * this structure records all encountered refs on the way up to the root
@@ -893,18 +894,22 @@ static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
 	s64 bytes_left = size - 1;
 	struct extent_buffer *eb = eb_in;
 	struct btrfs_key found_key;
+	int leave_spinning = path->leave_spinning;
 
 	if (bytes_left >= 0)
 		dest[bytes_left] = '\0';
 
+	path->leave_spinning = 1;
 	while (1) {
 		len = btrfs_inode_ref_name_len(eb, iref);
 		bytes_left -= len;
 		if (bytes_left >= 0)
 			read_extent_buffer(eb, dest + bytes_left,
 					   (unsigned long)(iref + 1), len);
-		if (eb != eb_in)
+		if (eb != eb_in) {
+			btrfs_tree_read_unlock_blocking(eb);
 			free_extent_buffer(eb);
+		}
 		ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
 		if (ret > 0)
 			ret = -ENOENT;
@@ -919,8 +924,11 @@ static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
 		slot = path->slots[0];
 		eb = path->nodes[0];
 		/* make sure we can use eb after releasing the path */
-		if (eb != eb_in)
+		if (eb != eb_in) {
 			atomic_inc(&eb->refs);
+			btrfs_tree_read_lock(eb);
+			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+		}
 		btrfs_release_path(path);
 
 		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
@@ -931,6 +939,7 @@ static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
 	}
 
 	btrfs_release_path(path);
+	path->leave_spinning = leave_spinning;
 
 	if (ret)
 		return ERR_PTR(ret);
@@ -1247,7 +1256,7 @@ static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
 			 struct btrfs_path *path,
 			 iterate_irefs_t *iterate, void *ctx)
 {
-	int ret;
+	int ret = 0;
 	int slot;
 	u32 cur;
 	u32 len;
@@ -1259,7 +1268,8 @@ static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
 	struct btrfs_inode_ref *iref;
 	struct btrfs_key found_key;
 
-	while (1) {
+	while (!ret) {
+		path->leave_spinning = 1;
 		ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
 				     &found_key);
 		if (ret < 0)
@@ -1275,6 +1285,8 @@ static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
 		eb = path->nodes[0];
 		/* make sure we can use eb after releasing the path */
 		atomic_inc(&eb->refs);
+		btrfs_tree_read_lock(eb);
+		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
 		btrfs_release_path(path);
 
 		item = btrfs_item_nr(eb, slot);
@@ -1288,13 +1300,12 @@ static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
 				 (unsigned long long)found_key.objectid,
 				 (unsigned long long)fs_root->objectid);
 			ret = iterate(parent, iref, eb, ctx);
-			if (ret) {
-				free_extent_buffer(eb);
+			if (ret)
 				break;
-			}
 			len = sizeof(*iref) + name_len;
 			iref = (struct btrfs_inode_ref *)((char *)iref + len);
 		}
+		btrfs_tree_read_unlock_blocking(eb);
 		free_extent_buffer(eb);
 	}
 
@@ -1414,6 +1425,8 @@ struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
 
 void free_ipath(struct inode_fs_paths *ipath)
 {
+	if (!ipath)
+		return;
 	kfree(ipath->fspath);
 	kfree(ipath);
 }
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 3f65a812e282..8fd72331d600 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1078,7 +1078,7 @@ struct btrfs_fs_info {
 	 * is required instead of the faster short fsync log commits
 	 */
 	u64 last_trans_log_full_commit;
-	unsigned long mount_opt:21;
+	unsigned long mount_opt;
 	unsigned long compress_type:4;
 	u64 max_inline;
 	u64 alloc_start;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 20196f411206..d0c969beaad4 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -383,17 +383,16 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
 		if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
 			break;
 
-		if (!failed_mirror) {
-			failed = 1;
-			printk(KERN_ERR "failed mirror was %d\n", eb->failed_mirror);
-			failed_mirror = eb->failed_mirror;
-		}
-
 		num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
 					      eb->start, eb->len);
 		if (num_copies == 1)
 			break;
 
+		if (!failed_mirror) {
+			failed = 1;
+			failed_mirror = eb->read_mirror;
+		}
+
 		mirror_num++;
 		if (mirror_num == failed_mirror)
 			mirror_num++;
@@ -564,7 +563,7 @@ struct extent_buffer *find_eb_for_page(struct extent_io_tree *tree,
 }
 
 static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
-			       struct extent_state *state)
+			       struct extent_state *state, int mirror)
 {
 	struct extent_io_tree *tree;
 	u64 found_start;
@@ -589,6 +588,7 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 	if (!reads_done)
 		goto err;
 
+	eb->read_mirror = mirror;
 	if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
 		ret = -EIO;
 		goto err;
@@ -652,7 +652,7 @@ static int btree_io_failed_hook(struct page *page, int failed_mirror)
 
 	eb = (struct extent_buffer *)page->private;
 	set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
-	eb->failed_mirror = failed_mirror;
+	eb->read_mirror = failed_mirror;
 	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
 		btree_readahead_hook(root, eb, eb->start, -EIO);
 	return -EIO;	/* we fixed nothing */
@@ -2254,9 +2254,9 @@ int open_ctree(struct super_block *sb,
 		goto fail_sb_buffer;
 	}
 
-	if (sectorsize < PAGE_SIZE) {
-		printk(KERN_WARNING "btrfs: Incompatible sector size "
-		       "found on %s\n", sb->s_id);
+	if (sectorsize != PAGE_SIZE) {
+		printk(KERN_WARNING "btrfs: Incompatible sector size(%lu) "
+		       "found on %s\n", (unsigned long)sectorsize, sb->s_id);
 		goto fail_sb_buffer;
 	}
 
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 2b35f8d14bb9..6fc2e6f5aab8 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2301,6 +2301,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
 
 			if (ret) {
 				printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret);
+				spin_lock(&delayed_refs->lock);
 				return ret;
 			}
 
@@ -2331,6 +2332,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
 
 		if (ret) {
 			printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret);
+			spin_lock(&delayed_refs->lock);
 			return ret;
 		}
 
@@ -3769,13 +3771,10 @@ again:
 		 */
 		if (current->journal_info)
 			return -EAGAIN;
-		ret = wait_event_interruptible(space_info->wait,
-					       !space_info->flush);
-		/* Must have been interrupted, return */
-		if (ret) {
-			printk(KERN_DEBUG "btrfs: %s returning -EINTR\n", __func__);
+		ret = wait_event_killable(space_info->wait, !space_info->flush);
+		/* Must have been killed, return */
+		if (ret)
 			return -EINTR;
-		}
 
 		spin_lock(&space_info->lock);
 	}
@@ -4215,8 +4214,8 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
 
 	num_bytes = calc_global_metadata_size(fs_info);
 
-	spin_lock(&block_rsv->lock);
 	spin_lock(&sinfo->lock);
+	spin_lock(&block_rsv->lock);
 
 	block_rsv->size = num_bytes;
 
@@ -4242,8 +4241,8 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
 		block_rsv->full = 1;
 	}
 
-	spin_unlock(&sinfo->lock);
 	spin_unlock(&block_rsv->lock);
+	spin_unlock(&sinfo->lock);
 }
 
 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index cd4b5e400221..198c2ba2fa40 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -402,20 +402,28 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
 	return 0;
 }
 
+static struct extent_state *next_state(struct extent_state *state)
+{
+	struct rb_node *next = rb_next(&state->rb_node);
+	if (next)
+		return rb_entry(next, struct extent_state, rb_node);
+	else
+		return NULL;
+}
+
 /*
  * utility function to clear some bits in an extent state struct.
- * it will optionally wake up any one waiting on this state (wake == 1), or
- * forcibly remove the state from the tree (delete == 1).
+ * it will optionally wake up any one waiting on this state (wake == 1)
  *
  * If no bits are set on the state struct after clearing things, the
  * struct is freed and removed from the tree
  */
-static int clear_state_bit(struct extent_io_tree *tree,
+static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
 			   struct extent_state *state,
 			   int *bits, int wake)
 {
+	struct extent_state *next;
 	int bits_to_clear = *bits & ~EXTENT_CTLBITS;
-	int ret = state->state & bits_to_clear;
 
 	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
 		u64 range = state->end - state->start + 1;
@@ -427,6 +435,7 @@ static int clear_state_bit(struct extent_io_tree *tree,
 	if (wake)
 		wake_up(&state->wq);
 	if (state->state == 0) {
+		next = next_state(state);
 		if (state->tree) {
 			rb_erase(&state->rb_node, &tree->state);
 			state->tree = NULL;
@@ -436,8 +445,9 @@ static int clear_state_bit(struct extent_io_tree *tree,
 		}
 	} else {
 		merge_state(tree, state);
+		next = next_state(state);
 	}
-	return ret;
+	return next;
 }
 
 static struct extent_state *
@@ -476,7 +486,6 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	struct extent_state *state;
 	struct extent_state *cached;
 	struct extent_state *prealloc = NULL;
-	struct rb_node *next_node;
 	struct rb_node *node;
 	u64 last_end;
 	int err;
@@ -528,14 +537,11 @@ hit_next:
 	WARN_ON(state->end < start);
 	last_end = state->end;
 
-	if (state->end < end && !need_resched())
-		next_node = rb_next(&state->rb_node);
-	else
-		next_node = NULL;
-
 	/* the state doesn't have the wanted bits, go ahead */
-	if (!(state->state & bits))
+	if (!(state->state & bits)) {
+		state = next_state(state);
 		goto next;
+	}
 
 	/*
 	 * | ---- desired range ---- |
@@ -593,16 +599,13 @@ hit_next:
 		goto out;
 	}
 
-	clear_state_bit(tree, state, &bits, wake);
+	state = clear_state_bit(tree, state, &bits, wake);
 next:
 	if (last_end == (u64)-1)
 		goto out;
 	start = last_end + 1;
-	if (start <= end && next_node) {
-		state = rb_entry(next_node, struct extent_state,
-				 rb_node);
+	if (start <= end && state && !need_resched())
 		goto hit_next;
-	}
 	goto search_again;
 
 out:
@@ -2301,7 +2304,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 	u64 start;
 	u64 end;
 	int whole_page;
-	int failed_mirror;
+	int mirror;
 	int ret;
 
 	if (err)
@@ -2340,20 +2343,18 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 		}
 		spin_unlock(&tree->lock);
 
+		mirror = (int)(unsigned long)bio->bi_bdev;
 		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
 			ret = tree->ops->readpage_end_io_hook(page, start, end,
-							      state);
+							      state, mirror);
 			if (ret)
 				uptodate = 0;
 			else
 				clean_io_failure(start, page);
 		}
 
-		if (!uptodate)
-			failed_mirror = (int)(unsigned long)bio->bi_bdev;
-
 		if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
-			ret = tree->ops->readpage_io_failed_hook(page, failed_mirror);
+			ret = tree->ops->readpage_io_failed_hook(page, mirror);
 			if (!ret && !err &&
 			    test_bit(BIO_UPTODATE, &bio->bi_flags))
 				uptodate = 1;
@@ -2368,8 +2369,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 			 * can't handle the error it will return -EIO and we
 			 * remain responsible for that page.
 			 */
-			ret = bio_readpage_error(bio, page, start, end,
-						 failed_mirror, NULL);
+			ret = bio_readpage_error(bio, page, start, end, mirror, NULL);
 			if (ret == 0) {
 				uptodate =
 					test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -4462,7 +4462,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 	}
 
 	clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
-	eb->failed_mirror = 0;
+	eb->read_mirror = 0;
 	atomic_set(&eb->io_pages, num_reads);
 	for (i = start_i; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index faf10eb57f75..b516c3b8dec6 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -79,7 +79,7 @@ struct extent_io_ops {
 			      u64 start, u64 end,
 			      struct extent_state *state);
 	int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
-				    struct extent_state *state);
+				    struct extent_state *state, int mirror);
 	int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
 				      struct extent_state *state, int uptodate);
 	void (*set_bit_hook)(struct inode *inode, struct extent_state *state,
@@ -135,7 +135,7 @@ struct extent_buffer {
 	spinlock_t refs_lock;
 	atomic_t refs;
 	atomic_t io_pages;
-	int failed_mirror;
+	int read_mirror;
 	struct list_head leak_list;
 	struct rcu_head rcu_head;
 	pid_t lock_owner;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index d83260d7498f..53bf2d764bbc 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -567,6 +567,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
 	int extent_type;
 	int recow;
 	int ret;
+	int modify_tree = -1;
 
 	if (drop_cache)
 		btrfs_drop_extent_cache(inode, start, end - 1, 0);
@@ -575,10 +576,13 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
 	if (!path)
 		return -ENOMEM;
 
+	if (start >= BTRFS_I(inode)->disk_i_size)
+		modify_tree = 0;
+
 	while (1) {
 		recow = 0;
 		ret = btrfs_lookup_file_extent(trans, root, path, ino,
-					       search_start, -1);
+					       search_start, modify_tree);
 		if (ret < 0)
 			break;
 		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
@@ -634,7 +638,8 @@ next_slot:
 		}
 
 		search_start = max(key.offset, start);
-		if (recow) {
+		if (recow || !modify_tree) {
+			modify_tree = -1;
 			btrfs_release_path(path);
 			continue;
 		}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 115bc05e42b0..61b16c641ce0 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1947,7 +1947,7 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
  * extent_io.c will try to find good copies for us.
  */
 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
-			       struct extent_state *state)
+			       struct extent_state *state, int mirror)
 {
 	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
 	struct inode *inode = page->mapping->host;
@@ -4069,7 +4069,7 @@ static struct inode *new_simple_dir(struct super_block *s,
 	BTRFS_I(inode)->dummy_inode = 1;
 
 	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
-	inode->i_op = &simple_dir_inode_operations;
+	inode->i_op = &btrfs_dir_ro_inode_operations;
 	inode->i_fop = &simple_dir_operations;
 	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
@@ -4140,14 +4140,18 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
 static int btrfs_dentry_delete(const struct dentry *dentry)
 {
 	struct btrfs_root *root;
+	struct inode *inode = dentry->d_inode;
 
-	if (!dentry->d_inode && !IS_ROOT(dentry))
-		dentry = dentry->d_parent;
+	if (!inode && !IS_ROOT(dentry))
+		inode = dentry->d_parent->d_inode;
 
-	if (dentry->d_inode) {
-		root = BTRFS_I(dentry->d_inode)->root;
+	if (inode) {
+		root = BTRFS_I(inode)->root;
 		if (btrfs_root_refs(&root->root_item) == 0)
 			return 1;
+
+		if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
+			return 1;
 	}
 	return 0;
 }
@@ -4188,7 +4192,6 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
 	struct btrfs_path *path;
 	struct list_head ins_list;
 	struct list_head del_list;
-	struct qstr q;
 	int ret;
 	struct extent_buffer *leaf;
 	int slot;
@@ -4279,7 +4282,6 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
 
 		while (di_cur < di_total) {
 			struct btrfs_key location;
-			struct dentry *tmp;
 
 			if (verify_dir_item(root, leaf, di))
 				break;
@@ -4300,35 +4302,15 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
 			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
 			btrfs_dir_item_key_to_cpu(leaf, di, &location);
 
-			q.name = name_ptr;
-			q.len = name_len;
-			q.hash = full_name_hash(q.name, q.len);
-			tmp = d_lookup(filp->f_dentry, &q);
-			if (!tmp) {
-				struct btrfs_key *newkey;
-
-				newkey = kzalloc(sizeof(struct btrfs_key),
-						 GFP_NOFS);
-				if (!newkey)
-					goto no_dentry;
-				tmp = d_alloc(filp->f_dentry, &q);
-				if (!tmp) {
-					kfree(newkey);
-					dput(tmp);
-					goto no_dentry;
-				}
-				memcpy(newkey, &location,
-				       sizeof(struct btrfs_key));
-				tmp->d_fsdata = newkey;
-				tmp->d_flags |= DCACHE_NEED_LOOKUP;
-				d_rehash(tmp);
-				dput(tmp);
-			} else {
-				dput(tmp);
-			}
-no_dentry:
+
 			/* is this a reference to our own snapshot? If so
-			 * skip it
+			 * skip it.
+			 *
+			 * In contrast to old kernels, we insert the snapshot's
+			 * dir item and dir index after it has been created, so
+			 * we won't find a reference to our own snapshot. We
+			 * still keep the following code for backward
+			 * compatibility.
 			 */
 			if (location.type == BTRFS_ROOT_ITEM_KEY &&
 			    location.objectid == root->root_key.objectid) {
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 18cc23d164a8..14f8e1faa46e 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2262,7 +2262,10 @@ static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
 	di_args->bytes_used = dev->bytes_used;
 	di_args->total_bytes = dev->total_bytes;
 	memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
-	strncpy(di_args->path, dev->name, sizeof(di_args->path));
+	if (dev->name)
+		strncpy(di_args->path, dev->name, sizeof(di_args->path));
+	else
+		di_args->path[0] = '\0';
 
 out:
 	if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index dc5d33146fdb..ac5d01085884 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -250,14 +250,12 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
 					  struct btrfs_bio *bbio)
 {
 	int ret;
-	int looped = 0;
 	struct reada_zone *zone;
 	struct btrfs_block_group_cache *cache = NULL;
 	u64 start;
 	u64 end;
 	int i;
 
-again:
 	zone = NULL;
 	spin_lock(&fs_info->reada_lock);
 	ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
@@ -274,9 +272,6 @@ again:
 		spin_unlock(&fs_info->reada_lock);
 	}
 
-	if (looped)
-		return NULL;
-
 	cache = btrfs_lookup_block_group(fs_info, logical);
 	if (!cache)
 		return NULL;
@@ -307,13 +302,15 @@ again:
 	ret = radix_tree_insert(&dev->reada_zones,
 				(unsigned long)(zone->end >> PAGE_CACHE_SHIFT),
 				zone);
-	spin_unlock(&fs_info->reada_lock);
 
-	if (ret) {
+	if (ret == -EEXIST) {
 		kfree(zone);
-		looped = 1;
-		goto again;
+		ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
+					     logical >> PAGE_CACHE_SHIFT, 1);
+		if (ret == 1)
+			kref_get(&zone->refcnt);
 	}
+	spin_unlock(&fs_info->reada_lock);
 
 	return zone;
 }
@@ -323,26 +320,26 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
 					      struct btrfs_key *top, int level)
 {
 	int ret;
-	int looped = 0;
 	struct reada_extent *re = NULL;
+	struct reada_extent *re_exist = NULL;
 	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
 	struct btrfs_bio *bbio = NULL;
 	struct btrfs_device *dev;
+	struct btrfs_device *prev_dev;
 	u32 blocksize;
 	u64 length;
 	int nzones = 0;
 	int i;
 	unsigned long index = logical >> PAGE_CACHE_SHIFT;
 
-again:
 	spin_lock(&fs_info->reada_lock);
 	re = radix_tree_lookup(&fs_info->reada_tree, index);
 	if (re)
 		kref_get(&re->refcnt);
 	spin_unlock(&fs_info->reada_lock);
 
-	if (re || looped)
+	if (re)
 		return re;
 
 	re = kzalloc(sizeof(*re), GFP_NOFS);
@@ -398,16 +395,31 @@ again:
 	/* insert extent in reada_tree + all per-device trees, all or nothing */
 	spin_lock(&fs_info->reada_lock);
 	ret = radix_tree_insert(&fs_info->reada_tree, index, re);
+	if (ret == -EEXIST) {
+		re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
+		BUG_ON(!re_exist);
+		kref_get(&re_exist->refcnt);
+		spin_unlock(&fs_info->reada_lock);
+		goto error;
+	}
 	if (ret) {
 		spin_unlock(&fs_info->reada_lock);
-		if (ret != -ENOMEM) {
-			/* someone inserted the extent in the meantime */
-			looped = 1;
-		}
 		goto error;
 	}
+	prev_dev = NULL;
 	for (i = 0; i < nzones; ++i) {
 		dev = bbio->stripes[i].dev;
+		if (dev == prev_dev) {
+			/*
+			 * in case of DUP, just add the first zone. As both
+			 * are on the same device, there's nothing to gain
+			 * from adding both.
+			 * Also, it wouldn't work, as the tree is per device
+			 * and adding would fail with EEXIST
+			 */
+			continue;
+		}
+		prev_dev = dev;
 		ret = radix_tree_insert(&dev->reada_extents, index, re);
 		if (ret) {
 			while (--i >= 0) {
@@ -450,9 +462,7 @@ error:
 	}
 	kfree(bbio);
 	kfree(re);
-	if (looped)
-		goto again;
-	return NULL;
+	return re_exist;
 }
 
 static void reada_kref_dummy(struct kref *kr)
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 017281dbb2a7..646ee21bb035 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1279,7 +1279,9 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
 		if (rb_node)
 			backref_tree_panic(rb_node, -EEXIST, node->bytenr);
 	} else {
+		spin_lock(&root->fs_info->trans_lock);
 		list_del_init(&root->root_list);
+		spin_unlock(&root->fs_info->trans_lock);
 		kfree(node);
 	}
 	return 0;
@@ -3811,7 +3813,7 @@ restart:
 
 	ret = btrfs_block_rsv_check(rc->extent_root, rc->block_rsv, 5);
 	if (ret < 0) {
-		if (ret != -EAGAIN) {
+		if (ret != -ENOSPC) {
 			err = ret;
 			WARN_ON(1);
 			break;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index bc015f77f3ea..4f76fc3f8e89 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1257,12 +1257,6 @@ static int scrub_checksum_data(struct scrub_block *sblock)
 	if (memcmp(csum, on_disk_csum, sdev->csum_size))
 		fail = 1;
 
-	if (fail) {
-		spin_lock(&sdev->stat_lock);
-		++sdev->stat.csum_errors;
-		spin_unlock(&sdev->stat_lock);
-	}
-
 	return fail;
 }
 
@@ -1335,15 +1329,6 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
 	if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
 		++crc_fail;
 
-	if (crc_fail || fail) {
-		spin_lock(&sdev->stat_lock);
-		if (crc_fail)
-			++sdev->stat.csum_errors;
-		if (fail)
-			++sdev->stat.verify_errors;
-		spin_unlock(&sdev->stat_lock);
-	}
-
 	return fail || crc_fail;
 }
 
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 8d5d380f7bdb..c5f8fca4195f 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -815,7 +815,6 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
 		return 0;
 	}
 
-	btrfs_start_delalloc_inodes(root, 0);
 	btrfs_wait_ordered_extents(root, 0, 0);
 
 	trans = btrfs_start_transaction(root, 0);
@@ -1148,13 +1147,15 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
 		if (ret)
 			goto restore;
 	} else {
-		if (fs_info->fs_devices->rw_devices == 0)
+		if (fs_info->fs_devices->rw_devices == 0) {
 			ret = -EACCES;
 			goto restore;
+		}
 
-		if (btrfs_super_log_root(fs_info->super_copy) != 0)
+		if (btrfs_super_log_root(fs_info->super_copy) != 0) {
 			ret = -EINVAL;
 			goto restore;
+		}
 
 		ret = btrfs_cleanup_fs_roots(fs_info);
 		if (ret)
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 11b77a59db62..36422254ef67 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -73,8 +73,10 @@ loop:
 
 	cur_trans = root->fs_info->running_transaction;
 	if (cur_trans) {
-		if (cur_trans->aborted)
+		if (cur_trans->aborted) {
+			spin_unlock(&root->fs_info->trans_lock);
 			return cur_trans->aborted;
+		}
 		atomic_inc(&cur_trans->use_count);
 		atomic_inc(&cur_trans->num_writers);
 		cur_trans->num_joined++;
@@ -1400,6 +1402,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 	ret = commit_fs_roots(trans, root);
 	if (ret) {
 		mutex_unlock(&root->fs_info->tree_log_mutex);
+		mutex_unlock(&root->fs_info->reloc_mutex);
 		goto cleanup_transaction;
 	}
 
@@ -1411,6 +1414,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 	ret = commit_cowonly_roots(trans, root);
 	if (ret) {
 		mutex_unlock(&root->fs_info->tree_log_mutex);
+		mutex_unlock(&root->fs_info->reloc_mutex);
 		goto cleanup_transaction;
 	}
 
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 759d02486d7c..1411b99555a4 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3324,12 +3324,14 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 	stripe_size = devices_info[ndevs-1].max_avail;
 	num_stripes = ndevs * dev_stripes;
 
-	if (stripe_size * num_stripes > max_chunk_size * ncopies) {
+	if (stripe_size * ndevs > max_chunk_size * ncopies) {
 		stripe_size = max_chunk_size * ncopies;
-		do_div(stripe_size, num_stripes);
+		do_div(stripe_size, ndevs);
 	}
 
 	do_div(stripe_size, dev_stripes);
+
+	/* align to BTRFS_STRIPE_LEN */
 	do_div(stripe_size, BTRFS_STRIPE_LEN);
 	stripe_size *= BTRFS_STRIPE_LEN;
 
@@ -3805,10 +3807,11 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 		else if (mirror_num)
 			stripe_index += mirror_num - 1;
 		else {
+			int old_stripe_index = stripe_index;
 			stripe_index = find_live_mirror(map, stripe_index,
 					      map->sub_stripes, stripe_index +
 					      current->pid % map->sub_stripes);
-			mirror_num = stripe_index + 1;
+			mirror_num = stripe_index - old_stripe_index + 1;
 		}
 	} else {
 		/*
@@ -4350,8 +4353,10 @@ static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
 
 	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
 				   root->fs_info->bdev_holder);
-	if (ret)
+	if (ret) {
+		free_fs_devices(fs_devices);
 		goto out;
+	}
 
 	if (!fs_devices->seeding) {
 		__btrfs_close_devices(fs_devices);