Diffstat (limited to 'fs/btrfs/disk-io.c')
-rw-r--r--	fs/btrfs/disk-io.c	656
1 file changed, 331 insertions, 325 deletions
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 3a57f99d96aa..08b74daf35d0 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -64,19 +64,18 @@
 static const struct extent_io_ops btree_extent_io_ops;
 static void end_workqueue_fn(struct btrfs_work *work);
 static void free_fs_root(struct btrfs_root *root);
-static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
-                                   int read_only);
+static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info);
 static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
-                                      struct btrfs_root *root);
+                                      struct btrfs_fs_info *fs_info);
 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
-static int btrfs_destroy_marked_extents(struct btrfs_root *root,
+static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
                                         struct extent_io_tree *dirty_pages,
                                         int mark);
-static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
+static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
                                        struct extent_io_tree *pinned_extents);
-static int btrfs_cleanup_transaction(struct btrfs_root *root);
-static void btrfs_error_commit_super(struct btrfs_root *root);
+static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
+static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);
 
 /*
  * btrfs_end_io_wq structs are used to do processing in task context when an IO
@@ -220,19 +219,19 @@ void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
  * extents on the btree inode are pretty simple, there's one extent
  * that covers the entire device
  */
-static struct extent_map *btree_get_extent(struct inode *inode,
+static struct extent_map *btree_get_extent(struct btrfs_inode *inode,
                struct page *page, size_t pg_offset, u64 start, u64 len,
                int create)
 {
-        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+        struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+        struct extent_map_tree *em_tree = &inode->extent_tree;
         struct extent_map *em;
         int ret;
 
         read_lock(&em_tree->lock);
         em = lookup_extent_mapping(em_tree, start, len);
         if (em) {
-                em->bdev =
-                        BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+                em->bdev = fs_info->fs_devices->latest_bdev;
                 read_unlock(&em_tree->lock);
                 goto out;
         }
@@ -247,7 +246,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
         em->len = (u64)-1;
         em->block_len = (u64)-1;
         em->block_start = 0;
-        em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+        em->bdev = fs_info->fs_devices->latest_bdev;
 
         write_lock(&em_tree->lock);
         ret = add_extent_mapping(em_tree, em, 0);
@@ -266,12 +265,12 @@ out:
         return em;
 }
 
-u32 btrfs_csum_data(char *data, u32 seed, size_t len)
+u32 btrfs_csum_data(const char *data, u32 seed, size_t len)
 {
         return btrfs_crc32c(seed, data, len);
 }
 
-void btrfs_csum_final(u32 crc, char *result)
+void btrfs_csum_final(u32 crc, u8 *result)
 {
         put_unaligned_le32(~crc, result);
 }
@@ -440,7 +439,7 @@ static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
  * helper to read a given tree block, doing retries as required when
  * the checksums don't match and we have alternate mirrors to try.
  */
-static int btree_read_extent_buffer_pages(struct btrfs_root *root,
+static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
                                           struct extent_buffer *eb,
                                           u64 parent_transid)
 {
@@ -452,7 +451,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
         int failed_mirror = 0;
 
         clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
-        io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
+        io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
         while (1) {
                 ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
                                                btree_get_extent, mirror_num);
@@ -472,7 +471,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
                 if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
                         break;
 
-                num_copies = btrfs_num_copies(root->fs_info,
+                num_copies = btrfs_num_copies(fs_info,
                                               eb->start, eb->len);
                 if (num_copies == 1)
                         break;
@@ -491,7 +490,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
         }
 
         if (failed && !ret && failed_mirror)
-                repair_eb_io_failure(root, eb, failed_mirror);
+                repair_eb_io_failure(fs_info, eb, failed_mirror);
 
         return ret;
 }
@@ -545,47 +544,63 @@ static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
         return ret;
 }
 
 #define CORRUPT(reason, eb, root, slot)                                \
-        btrfs_crit(root->fs_info, "corrupt %s, %s: block=%llu,"        \
-                   " root=%llu, slot=%d",                              \
-                   btrfs_header_level(eb) == 0 ? "leaf" : "node",\
+        btrfs_crit(root->fs_info,                                      \
+                   "corrupt %s, %s: block=%llu, root=%llu, slot=%d",   \
+                   btrfs_header_level(eb) == 0 ? "leaf" : "node",      \
                    reason, btrfs_header_bytenr(eb), root->objectid, slot)
 
 static noinline int check_leaf(struct btrfs_root *root,
                                struct extent_buffer *leaf)
 {
+        struct btrfs_fs_info *fs_info = root->fs_info;
         struct btrfs_key key;
         struct btrfs_key leaf_key;
         u32 nritems = btrfs_header_nritems(leaf);
         int slot;
 
-        if (nritems == 0) {
+        /*
+         * Extent buffers from a relocation tree have a owner field that
+         * corresponds to the subvolume tree they are based on. So just from an
+         * extent buffer alone we can not find out what is the id of the
+         * corresponding subvolume tree, so we can not figure out if the extent
+         * buffer corresponds to the root of the relocation tree or not. So skip
+         * this check for relocation trees.
+         */
+        if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
                 struct btrfs_root *check_root;
 
                 key.objectid = btrfs_header_owner(leaf);
                 key.type = BTRFS_ROOT_ITEM_KEY;
                 key.offset = (u64)-1;
 
-                check_root = btrfs_get_fs_root(root->fs_info, &key, false);
+                check_root = btrfs_get_fs_root(fs_info, &key, false);
                 /*
                  * The only reason we also check NULL here is that during
                  * open_ctree() some roots has not yet been set up.
                  */
                 if (!IS_ERR_OR_NULL(check_root)) {
+                        struct extent_buffer *eb;
+
+                        eb = btrfs_root_node(check_root);
                         /* if leaf is the root, then it's fine */
-                        if (leaf->start !=
-                            btrfs_root_bytenr(&check_root->root_item)) {
+                        if (leaf != eb) {
                                 CORRUPT("non-root leaf's nritems is 0",
-                                        leaf, root, 0);
+                                        leaf, check_root, 0);
+                                free_extent_buffer(eb);
                                 return -EIO;
                         }
+                        free_extent_buffer(eb);
                 }
                 return 0;
         }
 
+        if (nritems == 0)
+                return 0;
+
         /* Check the 0 item */
         if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
-            BTRFS_LEAF_DATA_SIZE(root)) {
+            BTRFS_LEAF_DATA_SIZE(fs_info)) {
                 CORRUPT("invalid item offset size pair", leaf, root, 0);
                 return -EIO;
         }
@@ -624,7 +639,7 @@ static noinline int check_leaf(struct btrfs_root *root,
          * all point outside of the leaf.
          */
         if (btrfs_item_end_nr(leaf, slot) >
-            BTRFS_LEAF_DATA_SIZE(root)) {
+            BTRFS_LEAF_DATA_SIZE(fs_info)) {
                 CORRUPT("slot end outside of leaf", leaf, root, slot);
                 return -EIO;
         }
@@ -641,7 +656,7 @@ static int check_node(struct btrfs_root *root, struct extent_buffer *node)
         u64 bytenr;
         int ret = 0;
 
-        if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root)) {
+        if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root->fs_info)) {
                 btrfs_crit(root->fs_info,
                            "corrupt node: block %llu root %llu nritems %lu",
                            node->start, root->objectid, nr);
@@ -747,7 +762,7 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
 err:
         if (reads_done &&
             test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
-                btree_readahead_hook(fs_info, eb, eb->start, ret);
+                btree_readahead_hook(fs_info, eb, ret);
 
         if (ret) {
                 /*
@@ -772,7 +787,7 @@ static int btree_io_failed_hook(struct page *page, int failed_mirror)
         eb->read_mirror = failed_mirror;
         atomic_dec(&eb->io_pages);
         if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
-                btree_readahead_hook(eb->fs_info, eb, eb->start, -EIO);
+                btree_readahead_hook(eb->fs_info, eb, -EIO);
         return -EIO;    /* we fixed nothing */
 }
 
@@ -930,7 +945,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 
         atomic_inc(&fs_info->nr_async_submits);
 
-        if (bio->bi_opf & REQ_SYNC)
+        if (op_is_sync(bio->bi_opf))
                 btrfs_set_work_high_priority(&async->work);
 
         btrfs_queue_work(fs_info->workers, &async->work);
@@ -981,7 +996,7 @@ static int __btree_submit_bio_done(struct inode *inode, struct bio *bio,
          * when we're called for a write, we're already in the async
          * submission context. Just jump into btrfs_map_bio
          */
-        ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 1);
+        ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1);
         if (ret) {
                 bio->bi_error = ret;
                 bio_endio(bio);
@@ -989,7 +1004,7 @@ static int __btree_submit_bio_done(struct inode *inode, struct bio *bio,
         return ret;
 }
 
-static int check_async_write(struct inode *inode, unsigned long bio_flags)
+static int check_async_write(unsigned long bio_flags)
 {
         if (bio_flags & EXTENT_BIO_TREE_LOG)
                 return 0;
@@ -1004,7 +1019,8 @@ static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
                                  int mirror_num, unsigned long bio_flags,
                                  u64 bio_offset)
 {
-        int async = check_async_write(inode, bio_flags);
+        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+        int async = check_async_write(bio_flags);
         int ret;
 
         if (bio_op(bio) != REQ_OP_WRITE) {
@@ -1012,23 +1028,22 @@ static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
                  * called for a read, do the setup so that checksum validation
                  * can happen in the async kernel threads
                  */
-                ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
-                                          bio, BTRFS_WQ_ENDIO_METADATA);
+                ret = btrfs_bio_wq_end_io(fs_info, bio,
+                                          BTRFS_WQ_ENDIO_METADATA);
                 if (ret)
                         goto out_w_error;
-                ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 0);
+                ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
         } else if (!async) {
                 ret = btree_csum_one_bio(bio);
                 if (ret)
                         goto out_w_error;
-                ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 0);
+                ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
         } else {
                 /*
                  * kthread helpers are used to submit writes so that
                  * checksumming can happen in parallel across all CPUs
                  */
-                ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
-                                          inode, bio, mirror_num, 0,
+                ret = btrfs_wq_submit_bio(fs_info, inode, bio, mirror_num, 0,
                                           bio_offset,
                                           __btree_submit_bio_start,
                                           __btree_submit_bio_done);
@@ -1146,12 +1161,12 @@ static const struct address_space_operations btree_aops = {
         .set_page_dirty = btree_set_page_dirty,
 };
 
-void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
+void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
 {
         struct extent_buffer *buf = NULL;
-        struct inode *btree_inode = root->fs_info->btree_inode;
+        struct inode *btree_inode = fs_info->btree_inode;
 
-        buf = btrfs_find_create_tree_block(root, bytenr);
+        buf = btrfs_find_create_tree_block(fs_info, bytenr);
         if (IS_ERR(buf))
                 return;
         read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
@@ -1159,15 +1174,15 @@ void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
         free_extent_buffer(buf);
 }
 
-int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
+int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
                              int mirror_num, struct extent_buffer **eb)
 {
         struct extent_buffer *buf = NULL;
-        struct inode *btree_inode = root->fs_info->btree_inode;
+        struct inode *btree_inode = fs_info->btree_inode;
         struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
         int ret;
 
-        buf = btrfs_find_create_tree_block(root, bytenr);
+        buf = btrfs_find_create_tree_block(fs_info, bytenr);
         if (IS_ERR(buf))
                 return 0;
 
@@ -1191,19 +1206,13 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
         return 0;
 }
 
-struct extent_buffer *btrfs_find_tree_block(struct btrfs_fs_info *fs_info,
-                                            u64 bytenr)
+struct extent_buffer *btrfs_find_create_tree_block(
+                                                struct btrfs_fs_info *fs_info,
+                                                u64 bytenr)
 {
-        return find_extent_buffer(fs_info, bytenr);
-}
-
-struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
-                                                   u64 bytenr)
-{
-        if (btrfs_is_testing(root->fs_info))
-                return alloc_test_extent_buffer(root->fs_info, bytenr,
-                                root->nodesize);
-        return alloc_extent_buffer(root->fs_info, bytenr);
+        if (btrfs_is_testing(fs_info))
+                return alloc_test_extent_buffer(fs_info, bytenr);
+        return alloc_extent_buffer(fs_info, bytenr);
 }
 
 
@@ -1219,17 +1228,17 @@ int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
                                        buf->start, buf->start + buf->len - 1);
 }
 
-struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
+struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
                                       u64 parent_transid)
 {
         struct extent_buffer *buf = NULL;
         int ret;
 
-        buf = btrfs_find_create_tree_block(root, bytenr);
+        buf = btrfs_find_create_tree_block(fs_info, bytenr);
         if (IS_ERR(buf))
                 return buf;
 
-        ret = btree_read_extent_buffer_pages(root, buf, parent_transid);
+        ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
         if (ret) {
                 free_extent_buffer(buf);
                 return ERR_PTR(ret);
@@ -1238,8 +1247,7 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
 
 }
 
-void clean_tree_block(struct btrfs_trans_handle *trans,
-                      struct btrfs_fs_info *fs_info,
+void clean_tree_block(struct btrfs_fs_info *fs_info,
                       struct extent_buffer *buf)
 {
         if (btrfs_header_generation(buf) ==
@@ -1283,16 +1291,12 @@ btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
         kfree(writers);
 }
 
-static void __setup_root(u32 nodesize, u32 sectorsize, u32 stripesize,
-                         struct btrfs_root *root, struct btrfs_fs_info *fs_info,
+static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
                          u64 objectid)
 {
         bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
         root->node = NULL;
         root->commit_root = NULL;
-        root->sectorsize = sectorsize;
-        root->nodesize = nodesize;
-        root->stripesize = stripesize;
         root->state = 0;
         root->orphan_cleanup_state = 0;
 
@@ -1370,8 +1374,7 @@ static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 /* Should only be used by the testing infrastructure */
-struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info,
-                                          u32 sectorsize, u32 nodesize)
+struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
 {
         struct btrfs_root *root;
 
@@ -1381,9 +1384,9 @@ struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info,
         root = btrfs_alloc_root(fs_info, GFP_KERNEL);
         if (!root)
                 return ERR_PTR(-ENOMEM);
+
         /* We don't use the stripesize in selftest, set it as sectorsize */
-        __setup_root(nodesize, sectorsize, sectorsize, root, fs_info,
-                     BTRFS_ROOT_TREE_OBJECTID);
+        __setup_root(root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
         root->alloc_bytenr = 0;
 
         return root;
@@ -1405,8 +1408,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
         if (!root)
                 return ERR_PTR(-ENOMEM);
 
-        __setup_root(tree_root->nodesize, tree_root->sectorsize,
-                     tree_root->stripesize, root, fs_info, objectid);
+        __setup_root(root, fs_info, objectid);
         root->root_key.objectid = objectid;
         root->root_key.type = BTRFS_ROOT_ITEM_KEY;
         root->root_key.offset = 0;
@@ -1418,18 +1420,15 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
                 goto fail;
         }
 
-        memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
+        memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
         btrfs_set_header_bytenr(leaf, leaf->start);
         btrfs_set_header_generation(leaf, trans->transid);
         btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
         btrfs_set_header_owner(leaf, objectid);
         root->node = leaf;
 
-        write_extent_buffer(leaf, fs_info->fsid, btrfs_header_fsid(),
-                            BTRFS_FSID_SIZE);
-        write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
-                            btrfs_header_chunk_tree_uuid(leaf),
-                            BTRFS_UUID_SIZE);
+        write_extent_buffer_fsid(leaf, fs_info->fsid);
+        write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
         btrfs_mark_buffer_dirty(leaf);
 
         root->commit_root = btrfs_root_node(root);
@@ -1474,16 +1473,13 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
                                          struct btrfs_fs_info *fs_info)
 {
         struct btrfs_root *root;
-        struct btrfs_root *tree_root = fs_info->tree_root;
         struct extent_buffer *leaf;
 
         root = btrfs_alloc_root(fs_info, GFP_NOFS);
         if (!root)
                 return ERR_PTR(-ENOMEM);
 
-        __setup_root(tree_root->nodesize, tree_root->sectorsize,
-                     tree_root->stripesize, root, fs_info,
-                     BTRFS_TREE_LOG_OBJECTID);
+        __setup_root(root, fs_info, BTRFS_TREE_LOG_OBJECTID);
 
         root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
         root->root_key.type = BTRFS_ROOT_ITEM_KEY;
@@ -1505,15 +1501,14 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
                 return ERR_CAST(leaf);
         }
 
-        memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
+        memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
         btrfs_set_header_bytenr(leaf, leaf->start);
         btrfs_set_header_generation(leaf, trans->transid);
         btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
         btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
         root->node = leaf;
 
-        write_extent_buffer(root->node, root->fs_info->fsid,
-                            btrfs_header_fsid(), BTRFS_FSID_SIZE);
+        write_extent_buffer_fsid(root->node, fs_info->fsid);
         btrfs_mark_buffer_dirty(root->node);
         btrfs_tree_unlock(root->node);
         return root;
@@ -1535,10 +1530,11 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
                        struct btrfs_root *root)
 {
+        struct btrfs_fs_info *fs_info = root->fs_info;
         struct btrfs_root *log_root;
         struct btrfs_inode_item *inode_item;
 
-        log_root = alloc_log_tree(trans, root->fs_info);
+        log_root = alloc_log_tree(trans, fs_info);
         if (IS_ERR(log_root))
                 return PTR_ERR(log_root);
 
@@ -1549,7 +1545,8 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
         btrfs_set_stack_inode_generation(inode_item, 1);
         btrfs_set_stack_inode_size(inode_item, 3);
         btrfs_set_stack_inode_nlink(inode_item, 1);
-        btrfs_set_stack_inode_nbytes(inode_item, root->nodesize);
+        btrfs_set_stack_inode_nbytes(inode_item,
+                                     fs_info->nodesize);
         btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
 
         btrfs_set_root_node(&log_root->root_item, log_root->node);
@@ -1581,8 +1578,7 @@ static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
                 goto alloc_fail;
         }
 
-        __setup_root(tree_root->nodesize, tree_root->sectorsize,
-                     tree_root->stripesize, root, fs_info, key->objectid);
+        __setup_root(root, fs_info, key->objectid);
 
         ret = btrfs_find_root(tree_root, key, path,
                               &root->root_item, &root->root_key);
@@ -1593,7 +1589,8 @@ static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
         }
 
         generation = btrfs_root_generation(&root->root_item);
-        root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
+        root->node = read_tree_block(fs_info,
+                                     btrfs_root_bytenr(&root->root_item),
                                      generation);
         if (IS_ERR(root->node)) {
                 ret = PTR_ERR(root->node);
@@ -1801,7 +1798,7 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
         list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
                 if (!device->bdev)
                         continue;
-                bdi = blk_get_backing_dev_info(device->bdev);
+                bdi = device->bdev->bd_bdi;
                 if (bdi_congested(bdi, bdi_bits)) {
                         ret = 1;
                         break;
@@ -1848,6 +1845,7 @@ static void end_workqueue_fn(struct btrfs_work *work)
 static int cleaner_kthread(void *arg)
 {
         struct btrfs_root *root = arg;
+        struct btrfs_fs_info *fs_info = root->fs_info;
         int again;
         struct btrfs_trans_handle *trans;
 
@@ -1855,40 +1853,40 @@ static int cleaner_kthread(void *arg)
                 again = 0;
 
                 /* Make the cleaner go to sleep early. */
-                if (btrfs_need_cleaner_sleep(root))
+                if (btrfs_need_cleaner_sleep(fs_info))
                         goto sleep;
 
                 /*
                  * Do not do anything if we might cause open_ctree() to block
                  * before we have finished mounting the filesystem.
                  */
-                if (!test_bit(BTRFS_FS_OPEN, &root->fs_info->flags))
+                if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
                         goto sleep;
 
-                if (!mutex_trylock(&root->fs_info->cleaner_mutex))
+                if (!mutex_trylock(&fs_info->cleaner_mutex))
                         goto sleep;
 
                 /*
                  * Avoid the problem that we change the status of the fs
                  * during the above check and trylock.
                  */
-                if (btrfs_need_cleaner_sleep(root)) {
-                        mutex_unlock(&root->fs_info->cleaner_mutex);
+                if (btrfs_need_cleaner_sleep(fs_info)) {
+                        mutex_unlock(&fs_info->cleaner_mutex);
                         goto sleep;
                 }
 
-                mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
-                btrfs_run_delayed_iputs(root);
-                mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
+                mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
+                btrfs_run_delayed_iputs(fs_info);
+                mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
 
                 again = btrfs_clean_one_deleted_snapshot(root);
-                mutex_unlock(&root->fs_info->cleaner_mutex);
+                mutex_unlock(&fs_info->cleaner_mutex);
 
                 /*
                  * The defragger has dealt with the R/O remount and umount,
                  * needn't do anything special here.
                  */
-                btrfs_run_defrag_inodes(root->fs_info);
+                btrfs_run_defrag_inodes(fs_info);
 
                 /*
                  * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
@@ -1898,7 +1896,7 @@ static int cleaner_kthread(void *arg)
                  * can't hold, nor need to, fs_info->cleaner_mutex when deleting
                  * unused block groups.
                  */
-                btrfs_delete_unused_bgs(root->fs_info);
+                btrfs_delete_unused_bgs(fs_info);
 sleep:
                 if (!again) {
                         set_current_state(TASK_INTERRUPTIBLE);
@@ -1922,15 +1920,15 @@ sleep:
         trans = btrfs_attach_transaction(root);
         if (IS_ERR(trans)) {
                 if (PTR_ERR(trans) != -ENOENT)
-                        btrfs_err(root->fs_info,
+                        btrfs_err(fs_info,
                                   "cleaner transaction attach returned %ld",
                                   PTR_ERR(trans));
         } else {
                 int ret;
 
-                ret = btrfs_commit_transaction(trans, root);
+                ret = btrfs_commit_transaction(trans);
                 if (ret)
-                        btrfs_err(root->fs_info,
+                        btrfs_err(fs_info,
                                   "cleaner open transaction commit returned %d",
                                   ret);
         }
@@ -1941,6 +1939,7 @@ sleep:
 static int transaction_kthread(void *arg)
 {
         struct btrfs_root *root = arg;
+        struct btrfs_fs_info *fs_info = root->fs_info;
         struct btrfs_trans_handle *trans;
         struct btrfs_transaction *cur;
         u64 transid;
@@ -1950,26 +1949,26 @@ static int transaction_kthread(void *arg)
 
         do {
                 cannot_commit = false;
-                delay = HZ * root->fs_info->commit_interval;
-                mutex_lock(&root->fs_info->transaction_kthread_mutex);
+                delay = HZ * fs_info->commit_interval;
+                mutex_lock(&fs_info->transaction_kthread_mutex);
 
-                spin_lock(&root->fs_info->trans_lock);
-                cur = root->fs_info->running_transaction;
+                spin_lock(&fs_info->trans_lock);
+                cur = fs_info->running_transaction;
                 if (!cur) {
-                        spin_unlock(&root->fs_info->trans_lock);
+                        spin_unlock(&fs_info->trans_lock);
                         goto sleep;
                 }
 
                 now = get_seconds();
                 if (cur->state < TRANS_STATE_BLOCKED &&
                     (now < cur->start_time ||
-                     now - cur->start_time < root->fs_info->commit_interval)) {
-                        spin_unlock(&root->fs_info->trans_lock);
+                     now - cur->start_time < fs_info->commit_interval)) {
+                        spin_unlock(&fs_info->trans_lock);
                         delay = HZ * 5;
                         goto sleep;
                 }
                 transid = cur->transid;
-                spin_unlock(&root->fs_info->trans_lock);
+                spin_unlock(&fs_info->trans_lock);
 
                 /* If the file system is aborted, this will always fail. */
                 trans = btrfs_attach_transaction(root);
@@ -1979,20 +1978,20 @@ static int transaction_kthread(void *arg)
                         goto sleep;
                 }
                 if (transid == trans->transid) {
-                        btrfs_commit_transaction(trans, root);
+                        btrfs_commit_transaction(trans);
                 } else {
-                        btrfs_end_transaction(trans, root);
+                        btrfs_end_transaction(trans);
                 }
 sleep:
-                wake_up_process(root->fs_info->cleaner_kthread);
-                mutex_unlock(&root->fs_info->transaction_kthread_mutex);
+                wake_up_process(fs_info->cleaner_kthread);
+                mutex_unlock(&fs_info->transaction_kthread_mutex);
 
                 if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
-                                      &root->fs_info->fs_state)))
-                        btrfs_cleanup_transaction(root);
+                                      &fs_info->fs_state)))
+                        btrfs_cleanup_transaction(fs_info);
                 set_current_state(TASK_INTERRUPTIBLE);
                 if (!kthread_should_stop() &&
-                    (!btrfs_transaction_blocked(root->fs_info) ||
+                    (!btrfs_transaction_blocked(fs_info) ||
                      cannot_commit))
                         schedule_timeout(delay);
                 __set_current_state(TASK_RUNNING);
@@ -2206,11 +2205,9 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
         btrfs_destroy_workqueue(fs_info->delalloc_workers);
         btrfs_destroy_workqueue(fs_info->workers);
         btrfs_destroy_workqueue(fs_info->endio_workers);
-        btrfs_destroy_workqueue(fs_info->endio_meta_workers);
         btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
         btrfs_destroy_workqueue(fs_info->endio_repair_workers);
         btrfs_destroy_workqueue(fs_info->rmw_workers);
-        btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
         btrfs_destroy_workqueue(fs_info->endio_write_workers);
         btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
         btrfs_destroy_workqueue(fs_info->submit_workers);
@@ -2220,6 +2217,13 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
         btrfs_destroy_workqueue(fs_info->flush_workers);
         btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
         btrfs_destroy_workqueue(fs_info->extent_workers);
+        /*
+         * Now that all other work queues are destroyed, we can safely destroy
+         * the queues used for metadata I/O, since tasks from those other work
+         * queues can do metadata I/O operations.
+         */
+        btrfs_destroy_workqueue(fs_info->endio_meta_workers);
+        btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
 }
 
 static void free_root_extent_buffers(struct btrfs_root *root)
@@ -2279,8 +2283,7 @@ void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
 
         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
                 btrfs_free_log_root_tree(NULL, fs_info);
-                btrfs_destroy_pinned_extent(fs_info->tree_root,
-                                            fs_info->pinned_extents);
+                btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
         }
 }
 
@@ -2306,33 +2309,31 @@ static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
         init_waitqueue_head(&fs_info->balance_wait_q);
 }
 
-static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info,
-                                   struct btrfs_root *tree_root)
+static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
 {
-        fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
-        set_nlink(fs_info->btree_inode, 1);
+        struct inode *inode = fs_info->btree_inode;
+
+        inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
+        set_nlink(inode, 1);
         /*
          * we set the i_size on the btree inode to the max possible int.
          * the real end of the address space is determined by all of
          * the devices in the system
         */
-        fs_info->btree_inode->i_size = OFFSET_MAX;
-        fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
+        inode->i_size = OFFSET_MAX;
+        inode->i_mapping->a_ops = &btree_aops;
 
-        RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
-        extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
-                            fs_info->btree_inode->i_mapping);
-        BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
-        extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
+        RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
+        extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping);
+        BTRFS_I(inode)->io_tree.track_uptodate = 0;
+        extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
 
-        BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
+        BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops;
 
-        BTRFS_I(fs_info->btree_inode)->root = tree_root;
-        memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
-               sizeof(struct btrfs_key));
-        set_bit(BTRFS_INODE_DUMMY,
-                &BTRFS_I(fs_info->btree_inode)->runtime_flags);
-        btrfs_insert_inode_hash(fs_info->btree_inode);
+        BTRFS_I(inode)->root = fs_info->tree_root;
+        memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
+        set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
+        btrfs_insert_inode_hash(inode);
 }
 
 static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
@@ -2453,7 +2454,6 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
                             struct btrfs_fs_devices *fs_devices)
 {
         int ret;
-        struct btrfs_root *tree_root = fs_info->tree_root;
         struct btrfs_root *log_tree_root;
         struct btrfs_super_block *disk_super = fs_info->super_copy;
         u64 bytenr = btrfs_super_log_root(disk_super);
@@ -2467,12 +2467,10 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
         if (!log_tree_root)
                 return -ENOMEM;
 
-        __setup_root(tree_root->nodesize, tree_root->sectorsize,
-                     tree_root->stripesize, log_tree_root, fs_info,
-                     BTRFS_TREE_LOG_OBJECTID);
+        __setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
 
-        log_tree_root->node = read_tree_block(tree_root, bytenr,
+        log_tree_root->node = read_tree_block(fs_info, bytenr,
                                               fs_info->generation + 1);
         if (IS_ERR(log_tree_root->node)) {
                 btrfs_warn(fs_info, "failed to read log tree");
                 ret = PTR_ERR(log_tree_root->node);
@@ -2487,15 +2485,15 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
         /* returns with log_tree_root freed on success */
         ret = btrfs_recover_log_trees(log_tree_root);
         if (ret) {
-                btrfs_handle_fs_error(tree_root->fs_info, ret,
+                btrfs_handle_fs_error(fs_info, ret,
                                       "Failed to recover log tree");
                 free_extent_buffer(log_tree_root->node);
                 kfree(log_tree_root);
                 return ret;
         }
 
         if (fs_info->sb->s_flags & MS_RDONLY) {
-                ret = btrfs_commit_super(tree_root);
+                ret = btrfs_commit_super(fs_info);
                 if (ret)
                         return ret;
         }
@@ -2503,13 +2501,15 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
         return 0;
 }
 
-static int btrfs_read_roots(struct btrfs_fs_info *fs_info,
-                            struct btrfs_root *tree_root)
+static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
 {
+        struct btrfs_root *tree_root = fs_info->tree_root;
         struct btrfs_root *root;
         struct btrfs_key location;
         int ret;
 
+        BUG_ON(!fs_info->tree_root);
+
         location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
         location.type = BTRFS_ROOT_ITEM_KEY;
         location.offset = 0;
@@ -2720,7 +2720,7 @@ int open_ctree(struct super_block *sb,
         sb->s_blocksize_bits = blksize_bits(4096);
         sb->s_bdi = &fs_info->bdi;
 
-        btrfs_init_btree_inode(fs_info, tree_root);
+        btrfs_init_btree_inode(fs_info);
 
         spin_lock_init(&fs_info->block_group_cache_lock);
         fs_info->block_group_cache_tree = RB_ROOT;
@@ -2758,14 +2758,18 @@ int open_ctree(struct super_block *sb,
 
         INIT_LIST_HEAD(&fs_info->pinned_chunks);
 
+        /* Usable values until the real ones are cached from the superblock */
+        fs_info->nodesize = 4096;
+        fs_info->sectorsize = 4096;
+        fs_info->stripesize = 4096;
+
         ret = btrfs_alloc_stripe_hash_table(fs_info);
         if (ret) {
                 err = ret;
                 goto fail_alloc;
         }
 
-        __setup_root(4096, 4096, 4096, tree_root,
-                     fs_info, BTRFS_ROOT_TREE_OBJECTID);
+        __setup_root(tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
 
         invalidate_bdev(fs_devices->latest_bdev);
 
@@ -2801,7 +2805,7 @@ int open_ctree(struct super_block *sb,
 
         memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
 
-        ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
+        ret = btrfs_check_super_valid(fs_info);
         if (ret) {
                 btrfs_err(fs_info, "superblock contains fatal errors");
                 err = -EINVAL;
@@ -2829,7 +2833,7 @@ int open_ctree(struct super_block *sb,
          */
         fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
 
-        ret = btrfs_parse_options(tree_root, options, sb->s_flags);
+        ret = btrfs_parse_options(fs_info, options, sb->s_flags);
         if (ret) {
                 err = ret;
                 goto fail_alloc;
@@ -2847,7 +2851,7 @@ int open_ctree(struct super_block *sb,
 
         features = btrfs_super_incompat_flags(disk_super);
         features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
-        if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
+        if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
                 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
 
         if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
@@ -2870,6 +2874,11 @@ int open_ctree(struct super_block *sb,
         fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
         fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
 
+        /* Cache block sizes */
+        fs_info->nodesize = nodesize;
+        fs_info->sectorsize = sectorsize;
+        fs_info->stripesize = stripesize;
+
         /*
          * mixed block groups end up with duplicate but slightly offset
          * extent buffers for the same range. It leads to corruptions
@@ -2910,15 +2919,11 @@ int open_ctree(struct super_block *sb, | |||
2910 | fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, | 2919 | fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, |
2911 | SZ_4M / PAGE_SIZE); | 2920 | SZ_4M / PAGE_SIZE); |
2912 | 2921 | ||
2913 | tree_root->nodesize = nodesize; | ||
2914 | tree_root->sectorsize = sectorsize; | ||
2915 | tree_root->stripesize = stripesize; | ||
2916 | |||
2917 | sb->s_blocksize = sectorsize; | 2922 | sb->s_blocksize = sectorsize; |
2918 | sb->s_blocksize_bits = blksize_bits(sectorsize); | 2923 | sb->s_blocksize_bits = blksize_bits(sectorsize); |
2919 | 2924 | ||
2920 | mutex_lock(&fs_info->chunk_mutex); | 2925 | mutex_lock(&fs_info->chunk_mutex); |
2921 | ret = btrfs_read_sys_array(tree_root); | 2926 | ret = btrfs_read_sys_array(fs_info); |
2922 | mutex_unlock(&fs_info->chunk_mutex); | 2927 | mutex_unlock(&fs_info->chunk_mutex); |
2923 | if (ret) { | 2928 | if (ret) { |
2924 | btrfs_err(fs_info, "failed to read the system array: %d", ret); | 2929 | btrfs_err(fs_info, "failed to read the system array: %d", ret); |
@@ -2927,10 +2932,9 @@ int open_ctree(struct super_block *sb, | |||
2927 | 2932 | ||
2928 | generation = btrfs_super_chunk_root_generation(disk_super); | 2933 | generation = btrfs_super_chunk_root_generation(disk_super); |
2929 | 2934 | ||
2930 | __setup_root(nodesize, sectorsize, stripesize, chunk_root, | 2935 | __setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID); |
2931 | fs_info, BTRFS_CHUNK_TREE_OBJECTID); | ||
2932 | 2936 | ||
2933 | chunk_root->node = read_tree_block(chunk_root, | 2937 | chunk_root->node = read_tree_block(fs_info, |
2934 | btrfs_super_chunk_root(disk_super), | 2938 | btrfs_super_chunk_root(disk_super), |
2935 | generation); | 2939 | generation); |
2936 | if (IS_ERR(chunk_root->node) || | 2940 | if (IS_ERR(chunk_root->node) || |
@@ -2947,7 +2951,7 @@ int open_ctree(struct super_block *sb, | |||
2947 | read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid, | 2951 | read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid, |
2948 | btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE); | 2952 | btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE); |
2949 | 2953 | ||
2950 | ret = btrfs_read_chunk_tree(chunk_root); | 2954 | ret = btrfs_read_chunk_tree(fs_info); |
2951 | if (ret) { | 2955 | if (ret) { |
2952 | btrfs_err(fs_info, "failed to read chunk tree: %d", ret); | 2956 | btrfs_err(fs_info, "failed to read chunk tree: %d", ret); |
2953 | goto fail_tree_roots; | 2957 | goto fail_tree_roots; |
@@ -2967,7 +2971,7 @@ int open_ctree(struct super_block *sb, | |||
2967 | retry_root_backup: | 2971 | retry_root_backup: |
2968 | generation = btrfs_super_generation(disk_super); | 2972 | generation = btrfs_super_generation(disk_super); |
2969 | 2973 | ||
2970 | tree_root->node = read_tree_block(tree_root, | 2974 | tree_root->node = read_tree_block(fs_info, |
2971 | btrfs_super_root(disk_super), | 2975 | btrfs_super_root(disk_super), |
2972 | generation); | 2976 | generation); |
2973 | if (IS_ERR(tree_root->node) || | 2977 | if (IS_ERR(tree_root->node) || |
@@ -2995,7 +2999,7 @@ retry_root_backup: | |||
2995 | 2999 | ||
2996 | mutex_unlock(&tree_root->objectid_mutex); | 3000 | mutex_unlock(&tree_root->objectid_mutex); |
2997 | 3001 | ||
2998 | ret = btrfs_read_roots(fs_info, tree_root); | 3002 | ret = btrfs_read_roots(fs_info); |
2999 | if (ret) | 3003 | if (ret) |
3000 | goto recovery_tree_root; | 3004 | goto recovery_tree_root; |
3001 | 3005 | ||
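Calls such as btrfs_read_sys_array(), btrfs_read_chunk_tree(), read_tree_block() and btrfs_read_roots() above now take the fs_info directly. A small sketch of the parameter refactor this series applies throughout, using stand-in types (not the kernel definitions):

/* Sketch: a helper that only needs filesystem-wide state takes an
 * fs_info-like pointer; callers that still hold a root pass root->fs_info. */
#include <stdio.h>

struct sketch_fs_info { unsigned long generation; };
struct sketch_root { struct sketch_fs_info *fs_info; };

/* previously: static int read_roots(struct sketch_root *root) */
static int read_roots(struct sketch_fs_info *fs_info)
{
	printf("reading roots at generation %lu\n", fs_info->generation);
	return 0;
}

int main(void)
{
	struct sketch_fs_info fs_info = { .generation = 42 };
	struct sketch_root tree_root = { .fs_info = &fs_info };

	return read_roots(tree_root.fs_info);
}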
@@ -3048,7 +3052,7 @@ retry_root_backup: | |||
3048 | goto fail_sysfs; | 3052 | goto fail_sysfs; |
3049 | } | 3053 | } |
3050 | 3054 | ||
3051 | ret = btrfs_read_block_groups(fs_info->extent_root); | 3055 | ret = btrfs_read_block_groups(fs_info); |
3052 | if (ret) { | 3056 | if (ret) { |
3053 | btrfs_err(fs_info, "failed to read block groups: %d", ret); | 3057 | btrfs_err(fs_info, "failed to read block groups: %d", ret); |
3054 | goto fail_sysfs; | 3058 | goto fail_sysfs; |
@@ -3076,8 +3080,8 @@ retry_root_backup: | |||
3076 | if (IS_ERR(fs_info->transaction_kthread)) | 3080 | if (IS_ERR(fs_info->transaction_kthread)) |
3077 | goto fail_cleaner; | 3081 | goto fail_cleaner; |
3078 | 3082 | ||
3079 | if (!btrfs_test_opt(tree_root->fs_info, SSD) && | 3083 | if (!btrfs_test_opt(fs_info, SSD) && |
3080 | !btrfs_test_opt(tree_root->fs_info, NOSSD) && | 3084 | !btrfs_test_opt(fs_info, NOSSD) && |
3081 | !fs_info->fs_devices->rotating) { | 3085 | !fs_info->fs_devices->rotating) { |
3082 | btrfs_info(fs_info, "detected SSD devices, enabling SSD mode"); | 3086 | btrfs_info(fs_info, "detected SSD devices, enabling SSD mode"); |
3083 | btrfs_set_opt(fs_info->mount_opt, SSD); | 3087 | btrfs_set_opt(fs_info->mount_opt, SSD); |
@@ -3090,9 +3094,9 @@ retry_root_backup: | |||
3090 | btrfs_apply_pending_changes(fs_info); | 3094 | btrfs_apply_pending_changes(fs_info); |
3091 | 3095 | ||
3092 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY | 3096 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY |
3093 | if (btrfs_test_opt(tree_root->fs_info, CHECK_INTEGRITY)) { | 3097 | if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) { |
3094 | ret = btrfsic_mount(tree_root, fs_devices, | 3098 | ret = btrfsic_mount(fs_info, fs_devices, |
3095 | btrfs_test_opt(tree_root->fs_info, | 3099 | btrfs_test_opt(fs_info, |
3096 | CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ? | 3100 | CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ? |
3097 | 1 : 0, | 3101 | 1 : 0, |
3098 | fs_info->check_integrity_print_mask); | 3102 | fs_info->check_integrity_print_mask); |
@@ -3108,7 +3112,7 @@ retry_root_backup: | |||
3108 | 3112 | ||
3109 | /* do not make disk changes in broken FS or nologreplay is given */ | 3113 | /* do not make disk changes in broken FS or nologreplay is given */ |
3110 | if (btrfs_super_log_root(disk_super) != 0 && | 3114 | if (btrfs_super_log_root(disk_super) != 0 && |
3111 | !btrfs_test_opt(tree_root->fs_info, NOLOGREPLAY)) { | 3115 | !btrfs_test_opt(fs_info, NOLOGREPLAY)) { |
3112 | ret = btrfs_replay_log(fs_info, fs_devices); | 3116 | ret = btrfs_replay_log(fs_info, fs_devices); |
3113 | if (ret) { | 3117 | if (ret) { |
3114 | err = ret; | 3118 | err = ret; |
@@ -3116,7 +3120,7 @@ retry_root_backup: | |||
3116 | } | 3120 | } |
3117 | } | 3121 | } |
3118 | 3122 | ||
3119 | ret = btrfs_find_orphan_roots(tree_root); | 3123 | ret = btrfs_find_orphan_roots(fs_info); |
3120 | if (ret) | 3124 | if (ret) |
3121 | goto fail_qgroup; | 3125 | goto fail_qgroup; |
3122 | 3126 | ||
@@ -3164,19 +3168,19 @@ retry_root_backup: | |||
3164 | if (ret) { | 3168 | if (ret) { |
3165 | btrfs_warn(fs_info, | 3169 | btrfs_warn(fs_info, |
3166 | "failed to clear free space tree: %d", ret); | 3170 | "failed to clear free space tree: %d", ret); |
3167 | close_ctree(tree_root); | 3171 | close_ctree(fs_info); |
3168 | return ret; | 3172 | return ret; |
3169 | } | 3173 | } |
3170 | } | 3174 | } |
3171 | 3175 | ||
3172 | if (btrfs_test_opt(tree_root->fs_info, FREE_SPACE_TREE) && | 3176 | if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) && |
3173 | !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { | 3177 | !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { |
3174 | btrfs_info(fs_info, "creating free space tree"); | 3178 | btrfs_info(fs_info, "creating free space tree"); |
3175 | ret = btrfs_create_free_space_tree(fs_info); | 3179 | ret = btrfs_create_free_space_tree(fs_info); |
3176 | if (ret) { | 3180 | if (ret) { |
3177 | btrfs_warn(fs_info, | 3181 | btrfs_warn(fs_info, |
3178 | "failed to create free space tree: %d", ret); | 3182 | "failed to create free space tree: %d", ret); |
3179 | close_ctree(tree_root); | 3183 | close_ctree(fs_info); |
3180 | return ret; | 3184 | return ret; |
3181 | } | 3185 | } |
3182 | } | 3186 | } |
@@ -3185,7 +3189,7 @@ retry_root_backup: | |||
3185 | if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) || | 3189 | if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) || |
3186 | (ret = btrfs_orphan_cleanup(fs_info->tree_root))) { | 3190 | (ret = btrfs_orphan_cleanup(fs_info->tree_root))) { |
3187 | up_read(&fs_info->cleanup_work_sem); | 3191 | up_read(&fs_info->cleanup_work_sem); |
3188 | close_ctree(tree_root); | 3192 | close_ctree(fs_info); |
3189 | return ret; | 3193 | return ret; |
3190 | } | 3194 | } |
3191 | up_read(&fs_info->cleanup_work_sem); | 3195 | up_read(&fs_info->cleanup_work_sem); |
@@ -3193,14 +3197,14 @@ retry_root_backup: | |||
3193 | ret = btrfs_resume_balance_async(fs_info); | 3197 | ret = btrfs_resume_balance_async(fs_info); |
3194 | if (ret) { | 3198 | if (ret) { |
3195 | btrfs_warn(fs_info, "failed to resume balance: %d", ret); | 3199 | btrfs_warn(fs_info, "failed to resume balance: %d", ret); |
3196 | close_ctree(tree_root); | 3200 | close_ctree(fs_info); |
3197 | return ret; | 3201 | return ret; |
3198 | } | 3202 | } |
3199 | 3203 | ||
3200 | ret = btrfs_resume_dev_replace_async(fs_info); | 3204 | ret = btrfs_resume_dev_replace_async(fs_info); |
3201 | if (ret) { | 3205 | if (ret) { |
3202 | btrfs_warn(fs_info, "failed to resume device replace: %d", ret); | 3206 | btrfs_warn(fs_info, "failed to resume device replace: %d", ret); |
3203 | close_ctree(tree_root); | 3207 | close_ctree(fs_info); |
3204 | return ret; | 3208 | return ret; |
3205 | } | 3209 | } |
3206 | 3210 | ||
@@ -3212,10 +3216,10 @@ retry_root_backup: | |||
3212 | if (ret) { | 3216 | if (ret) { |
3213 | btrfs_warn(fs_info, | 3217 | btrfs_warn(fs_info, |
3214 | "failed to create the UUID tree: %d", ret); | 3218 | "failed to create the UUID tree: %d", ret); |
3215 | close_ctree(tree_root); | 3219 | close_ctree(fs_info); |
3216 | return ret; | 3220 | return ret; |
3217 | } | 3221 | } |
3218 | } else if (btrfs_test_opt(tree_root->fs_info, RESCAN_UUID_TREE) || | 3222 | } else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) || |
3219 | fs_info->generation != | 3223 | fs_info->generation != |
3220 | btrfs_super_uuid_tree_generation(disk_super)) { | 3224 | btrfs_super_uuid_tree_generation(disk_super)) { |
3221 | btrfs_info(fs_info, "checking UUID tree"); | 3225 | btrfs_info(fs_info, "checking UUID tree"); |
@@ -3223,7 +3227,7 @@ retry_root_backup: | |||
3223 | if (ret) { | 3227 | if (ret) { |
3224 | btrfs_warn(fs_info, | 3228 | btrfs_warn(fs_info, |
3225 | "failed to check the UUID tree: %d", ret); | 3229 | "failed to check the UUID tree: %d", ret); |
3226 | close_ctree(tree_root); | 3230 | close_ctree(fs_info); |
3227 | return ret; | 3231 | return ret; |
3228 | } | 3232 | } |
3229 | } else { | 3233 | } else { |
@@ -3243,7 +3247,7 @@ fail_qgroup: | |||
3243 | btrfs_free_qgroup_config(fs_info); | 3247 | btrfs_free_qgroup_config(fs_info); |
3244 | fail_trans_kthread: | 3248 | fail_trans_kthread: |
3245 | kthread_stop(fs_info->transaction_kthread); | 3249 | kthread_stop(fs_info->transaction_kthread); |
3246 | btrfs_cleanup_transaction(fs_info->tree_root); | 3250 | btrfs_cleanup_transaction(fs_info); |
3247 | btrfs_free_fs_roots(fs_info); | 3251 | btrfs_free_fs_roots(fs_info); |
3248 | fail_cleaner: | 3252 | fail_cleaner: |
3249 | kthread_stop(fs_info->cleaner_kthread); | 3253 | kthread_stop(fs_info->cleaner_kthread); |
@@ -3262,7 +3266,6 @@ fail_fsdev_sysfs: | |||
3262 | 3266 | ||
3263 | fail_block_groups: | 3267 | fail_block_groups: |
3264 | btrfs_put_block_group_cache(fs_info); | 3268 | btrfs_put_block_group_cache(fs_info); |
3265 | btrfs_free_block_groups(fs_info); | ||
3266 | 3269 | ||
3267 | fail_tree_roots: | 3270 | fail_tree_roots: |
3268 | free_root_pointers(fs_info, 1); | 3271 | free_root_pointers(fs_info, 1); |
@@ -3270,6 +3273,7 @@ fail_tree_roots: | |||
3270 | 3273 | ||
3271 | fail_sb_buffer: | 3274 | fail_sb_buffer: |
3272 | btrfs_stop_all_workers(fs_info); | 3275 | btrfs_stop_all_workers(fs_info); |
3276 | btrfs_free_block_groups(fs_info); | ||
3273 | fail_alloc: | 3277 | fail_alloc: |
3274 | fail_iput: | 3278 | fail_iput: |
3275 | btrfs_mapping_tree_free(&fs_info->mapping_tree); | 3279 | btrfs_mapping_tree_free(&fs_info->mapping_tree); |
@@ -3291,7 +3295,7 @@ fail: | |||
3291 | return err; | 3295 | return err; |
3292 | 3296 | ||
3293 | recovery_tree_root: | 3297 | recovery_tree_root: |
3294 | if (!btrfs_test_opt(tree_root->fs_info, USEBACKUPROOT)) | 3298 | if (!btrfs_test_opt(fs_info, USEBACKUPROOT)) |
3295 | goto fail_tree_roots; | 3299 | goto fail_tree_roots; |
3296 | 3300 | ||
3297 | free_root_pointers(fs_info, 0); | 3301 | free_root_pointers(fs_info, 0); |
@@ -3317,7 +3321,7 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate) | |||
3317 | struct btrfs_device *device = (struct btrfs_device *) | 3321 | struct btrfs_device *device = (struct btrfs_device *) |
3318 | bh->b_private; | 3322 | bh->b_private; |
3319 | 3323 | ||
3320 | btrfs_warn_rl_in_rcu(device->dev_root->fs_info, | 3324 | btrfs_warn_rl_in_rcu(device->fs_info, |
3321 | "lost page write due to IO error on %s", | 3325 | "lost page write due to IO error on %s", |
3322 | rcu_str_deref(device->name)); | 3326 | rcu_str_deref(device->name)); |
3323 | /* note, we don't set_buffer_write_io_error because we have | 3327 | /* note, we don't set_buffer_write_io_error because we have |
@@ -3410,7 +3414,7 @@ struct buffer_head *btrfs_read_dev_super(struct block_device *bdev) | |||
3410 | */ | 3414 | */ |
3411 | static int write_dev_supers(struct btrfs_device *device, | 3415 | static int write_dev_supers(struct btrfs_device *device, |
3412 | struct btrfs_super_block *sb, | 3416 | struct btrfs_super_block *sb, |
3413 | int do_barriers, int wait, int max_mirrors) | 3417 | int wait, int max_mirrors) |
3414 | { | 3418 | { |
3415 | struct buffer_head *bh; | 3419 | struct buffer_head *bh; |
3416 | int i; | 3420 | int i; |
@@ -3449,7 +3453,7 @@ static int write_dev_supers(struct btrfs_device *device, | |||
3449 | btrfs_set_super_bytenr(sb, bytenr); | 3453 | btrfs_set_super_bytenr(sb, bytenr); |
3450 | 3454 | ||
3451 | crc = ~(u32)0; | 3455 | crc = ~(u32)0; |
3452 | crc = btrfs_csum_data((char *)sb + | 3456 | crc = btrfs_csum_data((const char *)sb + |
3453 | BTRFS_CSUM_SIZE, crc, | 3457 | BTRFS_CSUM_SIZE, crc, |
3454 | BTRFS_SUPER_INFO_SIZE - | 3458 | BTRFS_SUPER_INFO_SIZE - |
3455 | BTRFS_CSUM_SIZE); | 3459 | BTRFS_CSUM_SIZE); |
@@ -3462,7 +3466,7 @@ static int write_dev_supers(struct btrfs_device *device, | |||
3462 | bh = __getblk(device->bdev, bytenr / 4096, | 3466 | bh = __getblk(device->bdev, bytenr / 4096, |
3463 | BTRFS_SUPER_INFO_SIZE); | 3467 | BTRFS_SUPER_INFO_SIZE); |
3464 | if (!bh) { | 3468 | if (!bh) { |
3465 | btrfs_err(device->dev_root->fs_info, | 3469 | btrfs_err(device->fs_info, |
3466 | "couldn't get super buffer head for bytenr %llu", | 3470 | "couldn't get super buffer head for bytenr %llu", |
3467 | bytenr); | 3471 | bytenr); |
3468 | errors++; | 3472 | errors++; |
@@ -3485,9 +3489,9 @@ static int write_dev_supers(struct btrfs_device *device, | |||
3485 | * to go down lazy. | 3489 | * to go down lazy. |
3486 | */ | 3490 | */ |
3487 | if (i == 0) | 3491 | if (i == 0) |
3488 | ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_FUA, bh); | 3492 | ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_FUA, bh); |
3489 | else | 3493 | else |
3490 | ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh); | 3494 | ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_SYNC, bh); |
3491 | if (ret) | 3495 | if (ret) |
3492 | errors++; | 3496 | errors++; |
3493 | } | 3497 | } |
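The branch above writes the first superblock copy with REQ_FUA and the remaining mirrors with a plain synchronous write, now that the do_barriers argument is gone from write_dev_supers(). A user-space sketch of that per-copy policy (a model only, not the kernel block layer):

/* Sketch: copy 0 gets the "force unit access" treatment, later mirrors a
 * plain sync write; any failure is counted rather than immediately fatal. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_MIRRORS 3

static int submit_super_copy(int copy, bool fua)
{
	printf("copy %d: %s write\n", copy, fua ? "FUA" : "sync");
	return 0;	/* pretend the submission succeeded */
}

int main(void)
{
	int errors = 0;

	for (int i = 0; i < MAX_MIRRORS; i++)
		if (submit_super_copy(i, i == 0))
			errors++;

	return errors ? 1 : 0;
}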
@@ -3551,7 +3555,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait) | |||
3551 | 3555 | ||
3552 | bio->bi_end_io = btrfs_end_empty_barrier; | 3556 | bio->bi_end_io = btrfs_end_empty_barrier; |
3553 | bio->bi_bdev = device->bdev; | 3557 | bio->bi_bdev = device->bdev; |
3554 | bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH); | 3558 | bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; |
3555 | init_completion(&device->flush_wait); | 3559 | init_completion(&device->flush_wait); |
3556 | bio->bi_private = &device->flush_wait; | 3560 | bio->bi_private = &device->flush_wait; |
3557 | device->flush_bio = bio; | 3561 | device->flush_bio = bio; |
@@ -3695,7 +3699,7 @@ int btrfs_calc_num_tolerated_disk_barrier_failures( | |||
3695 | return num_tolerated_disk_barrier_failures; | 3699 | return num_tolerated_disk_barrier_failures; |
3696 | } | 3700 | } |
3697 | 3701 | ||
3698 | static int write_all_supers(struct btrfs_root *root, int max_mirrors) | 3702 | int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors) |
3699 | { | 3703 | { |
3700 | struct list_head *head; | 3704 | struct list_head *head; |
3701 | struct btrfs_device *dev; | 3705 | struct btrfs_device *dev; |
@@ -3707,23 +3711,23 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors) | |||
3707 | int total_errors = 0; | 3711 | int total_errors = 0; |
3708 | u64 flags; | 3712 | u64 flags; |
3709 | 3713 | ||
3710 | do_barriers = !btrfs_test_opt(root->fs_info, NOBARRIER); | 3714 | do_barriers = !btrfs_test_opt(fs_info, NOBARRIER); |
3711 | backup_super_roots(root->fs_info); | 3715 | backup_super_roots(fs_info); |
3712 | 3716 | ||
3713 | sb = root->fs_info->super_for_commit; | 3717 | sb = fs_info->super_for_commit; |
3714 | dev_item = &sb->dev_item; | 3718 | dev_item = &sb->dev_item; |
3715 | 3719 | ||
3716 | mutex_lock(&root->fs_info->fs_devices->device_list_mutex); | 3720 | mutex_lock(&fs_info->fs_devices->device_list_mutex); |
3717 | head = &root->fs_info->fs_devices->devices; | 3721 | head = &fs_info->fs_devices->devices; |
3718 | max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1; | 3722 | max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1; |
3719 | 3723 | ||
3720 | if (do_barriers) { | 3724 | if (do_barriers) { |
3721 | ret = barrier_all_devices(root->fs_info); | 3725 | ret = barrier_all_devices(fs_info); |
3722 | if (ret) { | 3726 | if (ret) { |
3723 | mutex_unlock( | 3727 | mutex_unlock( |
3724 | &root->fs_info->fs_devices->device_list_mutex); | 3728 | &fs_info->fs_devices->device_list_mutex); |
3725 | btrfs_handle_fs_error(root->fs_info, ret, | 3729 | btrfs_handle_fs_error(fs_info, ret, |
3726 | "errors while submitting device barriers."); | 3730 | "errors while submitting device barriers."); |
3727 | return ret; | 3731 | return ret; |
3728 | } | 3732 | } |
3729 | } | 3733 | } |
@@ -3752,18 +3756,19 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors) | |||
3752 | flags = btrfs_super_flags(sb); | 3756 | flags = btrfs_super_flags(sb); |
3753 | btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN); | 3757 | btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN); |
3754 | 3758 | ||
3755 | ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors); | 3759 | ret = write_dev_supers(dev, sb, 0, max_mirrors); |
3756 | if (ret) | 3760 | if (ret) |
3757 | total_errors++; | 3761 | total_errors++; |
3758 | } | 3762 | } |
3759 | if (total_errors > max_errors) { | 3763 | if (total_errors > max_errors) { |
3760 | btrfs_err(root->fs_info, "%d errors while writing supers", | 3764 | btrfs_err(fs_info, "%d errors while writing supers", |
3761 | total_errors); | 3765 | total_errors); |
3762 | mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); | 3766 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); |
3763 | 3767 | ||
3764 | /* FUA is masked off if unsupported and can't be the reason */ | 3768 | /* FUA is masked off if unsupported and can't be the reason */ |
3765 | btrfs_handle_fs_error(root->fs_info, -EIO, | 3769 | btrfs_handle_fs_error(fs_info, -EIO, |
3766 | "%d errors while writing supers", total_errors); | 3770 | "%d errors while writing supers", |
3771 | total_errors); | ||
3767 | return -EIO; | 3772 | return -EIO; |
3768 | } | 3773 | } |
3769 | 3774 | ||
@@ -3774,25 +3779,20 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors) | |||
3774 | if (!dev->in_fs_metadata || !dev->writeable) | 3779 | if (!dev->in_fs_metadata || !dev->writeable) |
3775 | continue; | 3780 | continue; |
3776 | 3781 | ||
3777 | ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors); | 3782 | ret = write_dev_supers(dev, sb, 1, max_mirrors); |
3778 | if (ret) | 3783 | if (ret) |
3779 | total_errors++; | 3784 | total_errors++; |
3780 | } | 3785 | } |
3781 | mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); | 3786 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); |
3782 | if (total_errors > max_errors) { | 3787 | if (total_errors > max_errors) { |
3783 | btrfs_handle_fs_error(root->fs_info, -EIO, | 3788 | btrfs_handle_fs_error(fs_info, -EIO, |
3784 | "%d errors while writing supers", total_errors); | 3789 | "%d errors while writing supers", |
3790 | total_errors); | ||
3785 | return -EIO; | 3791 | return -EIO; |
3786 | } | 3792 | } |
3787 | return 0; | 3793 | return 0; |
3788 | } | 3794 | } |
3789 | 3795 | ||
3790 | int write_ctree_super(struct btrfs_trans_handle *trans, | ||
3791 | struct btrfs_root *root, int max_mirrors) | ||
3792 | { | ||
3793 | return write_all_supers(root, max_mirrors); | ||
3794 | } | ||
3795 | |||
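write_all_supers() loses its static marker and replaces the write_ctree_super() wrapper deleted above; callers now invoke it with an fs_info. Its structure is a two-pass protocol: submit the super block to every writeable device first, then wait for completion, tolerating at most max_errors failures per pass. A compact user-space sketch of that flow (stand-in names, simplified error handling):

/* Sketch: pass 0 submits the super block to every device, pass 1 waits for
 * completion; each pass allows up to max_errors failed devices. */
#include <stdio.h>

#define NDEVS 3

static int write_dev_super(int dev, int wait)
{
	printf("dev %d: %s\n", dev, wait ? "wait for completion" : "submit");
	return 0;	/* 0 == success */
}

int main(void)
{
	int max_errors = NDEVS - 1;

	for (int pass = 0; pass <= 1; pass++) {
		int total_errors = 0;

		for (int dev = 0; dev < NDEVS; dev++)
			if (write_dev_super(dev, pass))
				total_errors++;
		if (total_errors > max_errors) {
			fprintf(stderr, "%d errors while writing supers\n",
				total_errors);
			return 1;
		}
	}
	return 0;
}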
3796 | /* Drop a fs root from the radix tree and free it. */ | 3796 | /* Drop a fs root from the radix tree and free it. */ |
3797 | void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info, | 3797 | void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info, |
3798 | struct btrfs_root *root) | 3798 | struct btrfs_root *root) |
@@ -3826,7 +3826,7 @@ static void free_fs_root(struct btrfs_root *root) | |||
3826 | { | 3826 | { |
3827 | iput(root->ino_cache_inode); | 3827 | iput(root->ino_cache_inode); |
3828 | WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); | 3828 | WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); |
3829 | btrfs_free_block_rsv(root, root->orphan_block_rsv); | 3829 | btrfs_free_block_rsv(root->fs_info, root->orphan_block_rsv); |
3830 | root->orphan_block_rsv = NULL; | 3830 | root->orphan_block_rsv = NULL; |
3831 | if (root->anon_dev) | 3831 | if (root->anon_dev) |
3832 | free_anon_bdev(root->anon_dev); | 3832 | free_anon_bdev(root->anon_dev); |
@@ -3896,28 +3896,29 @@ int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info) | |||
3896 | return err; | 3896 | return err; |
3897 | } | 3897 | } |
3898 | 3898 | ||
3899 | int btrfs_commit_super(struct btrfs_root *root) | 3899 | int btrfs_commit_super(struct btrfs_fs_info *fs_info) |
3900 | { | 3900 | { |
3901 | struct btrfs_root *root = fs_info->tree_root; | ||
3901 | struct btrfs_trans_handle *trans; | 3902 | struct btrfs_trans_handle *trans; |
3902 | 3903 | ||
3903 | mutex_lock(&root->fs_info->cleaner_mutex); | 3904 | mutex_lock(&fs_info->cleaner_mutex); |
3904 | btrfs_run_delayed_iputs(root); | 3905 | btrfs_run_delayed_iputs(fs_info); |
3905 | mutex_unlock(&root->fs_info->cleaner_mutex); | 3906 | mutex_unlock(&fs_info->cleaner_mutex); |
3906 | wake_up_process(root->fs_info->cleaner_kthread); | 3907 | wake_up_process(fs_info->cleaner_kthread); |
3907 | 3908 | ||
3908 | /* wait until ongoing cleanup work done */ | 3909 | /* wait until ongoing cleanup work done */ |
3909 | down_write(&root->fs_info->cleanup_work_sem); | 3910 | down_write(&fs_info->cleanup_work_sem); |
3910 | up_write(&root->fs_info->cleanup_work_sem); | 3911 | up_write(&fs_info->cleanup_work_sem); |
3911 | 3912 | ||
3912 | trans = btrfs_join_transaction(root); | 3913 | trans = btrfs_join_transaction(root); |
3913 | if (IS_ERR(trans)) | 3914 | if (IS_ERR(trans)) |
3914 | return PTR_ERR(trans); | 3915 | return PTR_ERR(trans); |
3915 | return btrfs_commit_transaction(trans, root); | 3916 | return btrfs_commit_transaction(trans); |
3916 | } | 3917 | } |
3917 | 3918 | ||
3918 | void close_ctree(struct btrfs_root *root) | 3919 | void close_ctree(struct btrfs_fs_info *fs_info) |
3919 | { | 3920 | { |
3920 | struct btrfs_fs_info *fs_info = root->fs_info; | 3921 | struct btrfs_root *root = fs_info->tree_root; |
3921 | int ret; | 3922 | int ret; |
3922 | 3923 | ||
3923 | set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags); | 3924 | set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags); |
@@ -3952,15 +3953,15 @@ void close_ctree(struct btrfs_root *root) | |||
3952 | * block groups queued for removal, the deletion will be | 3953 | * block groups queued for removal, the deletion will be |
3953 | * skipped when we quit the cleaner thread. | 3954 | * skipped when we quit the cleaner thread. |
3954 | */ | 3955 | */ |
3955 | btrfs_delete_unused_bgs(root->fs_info); | 3956 | btrfs_delete_unused_bgs(fs_info); |
3956 | 3957 | ||
3957 | ret = btrfs_commit_super(root); | 3958 | ret = btrfs_commit_super(fs_info); |
3958 | if (ret) | 3959 | if (ret) |
3959 | btrfs_err(fs_info, "commit super ret %d", ret); | 3960 | btrfs_err(fs_info, "commit super ret %d", ret); |
3960 | } | 3961 | } |
3961 | 3962 | ||
3962 | if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) | 3963 | if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) |
3963 | btrfs_error_commit_super(root); | 3964 | btrfs_error_commit_super(fs_info); |
3964 | 3965 | ||
3965 | kthread_stop(fs_info->transaction_kthread); | 3966 | kthread_stop(fs_info->transaction_kthread); |
3966 | kthread_stop(fs_info->cleaner_kthread); | 3967 | kthread_stop(fs_info->cleaner_kthread); |
@@ -3981,8 +3982,6 @@ void close_ctree(struct btrfs_root *root) | |||
3981 | 3982 | ||
3982 | btrfs_put_block_group_cache(fs_info); | 3983 | btrfs_put_block_group_cache(fs_info); |
3983 | 3984 | ||
3984 | btrfs_free_block_groups(fs_info); | ||
3985 | |||
3986 | /* | 3985 | /* |
3987 | * we must make sure there is not any read request to | 3986 | * we must make sure there is not any read request to |
3988 | * submit after we stopping all workers. | 3987 | * submit after we stopping all workers. |
@@ -3990,14 +3989,16 @@ void close_ctree(struct btrfs_root *root) | |||
3990 | invalidate_inode_pages2(fs_info->btree_inode->i_mapping); | 3989 | invalidate_inode_pages2(fs_info->btree_inode->i_mapping); |
3991 | btrfs_stop_all_workers(fs_info); | 3990 | btrfs_stop_all_workers(fs_info); |
3992 | 3991 | ||
3992 | btrfs_free_block_groups(fs_info); | ||
3993 | |||
3993 | clear_bit(BTRFS_FS_OPEN, &fs_info->flags); | 3994 | clear_bit(BTRFS_FS_OPEN, &fs_info->flags); |
3994 | free_root_pointers(fs_info, 1); | 3995 | free_root_pointers(fs_info, 1); |
3995 | 3996 | ||
3996 | iput(fs_info->btree_inode); | 3997 | iput(fs_info->btree_inode); |
3997 | 3998 | ||
3998 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY | 3999 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY |
3999 | if (btrfs_test_opt(root->fs_info, CHECK_INTEGRITY)) | 4000 | if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) |
4000 | btrfsic_unmount(root, fs_info->fs_devices); | 4001 | btrfsic_unmount(fs_info->fs_devices); |
4001 | #endif | 4002 | #endif |
4002 | 4003 | ||
4003 | btrfs_close_devices(fs_info->fs_devices); | 4004 | btrfs_close_devices(fs_info->fs_devices); |
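Note the ordering change in this teardown path: btrfs_free_block_groups() now runs after btrfs_stop_all_workers() (and, in the open_ctree error labels earlier, after fail_sb_buffer rather than fail_block_groups), so no worker can touch block-group data while it is being freed. A small pthread sketch of that stop-then-free ordering, with simplified stand-ins:

/* Sketch: signal and join every worker before freeing the shared data. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct block_groups { int count; };

static atomic_bool stop_requested;
static struct block_groups *bgs;

static void *worker(void *arg)
{
	(void)arg;
	while (!atomic_load(&stop_requested))
		(void)bgs->count;	/* workers may still read the data */
	return NULL;
}

int main(void)
{
	pthread_t t;

	bgs = calloc(1, sizeof(*bgs));
	if (!bgs)
		return 1;
	pthread_create(&t, NULL, worker, NULL);

	atomic_store(&stop_requested, true);	/* stop all workers ... */
	pthread_join(t, NULL);

	free(bgs);				/* ... then free block groups */
	puts("teardown done");
	return 0;
}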
@@ -4014,7 +4015,7 @@ void close_ctree(struct btrfs_root *root) | |||
4014 | __btrfs_free_block_rsv(root->orphan_block_rsv); | 4015 | __btrfs_free_block_rsv(root->orphan_block_rsv); |
4015 | root->orphan_block_rsv = NULL; | 4016 | root->orphan_block_rsv = NULL; |
4016 | 4017 | ||
4017 | lock_chunks(root); | 4018 | mutex_lock(&fs_info->chunk_mutex); |
4018 | while (!list_empty(&fs_info->pinned_chunks)) { | 4019 | while (!list_empty(&fs_info->pinned_chunks)) { |
4019 | struct extent_map *em; | 4020 | struct extent_map *em; |
4020 | 4021 | ||
@@ -4023,7 +4024,7 @@ void close_ctree(struct btrfs_root *root) | |||
4023 | list_del_init(&em->list); | 4024 | list_del_init(&em->list); |
4024 | free_extent_map(em); | 4025 | free_extent_map(em); |
4025 | } | 4026 | } |
4026 | unlock_chunks(root); | 4027 | mutex_unlock(&fs_info->chunk_mutex); |
4027 | } | 4028 | } |
4028 | 4029 | ||
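The lock_chunks()/unlock_chunks() wrappers are gone; close_ctree() takes fs_info->chunk_mutex directly while it pops and frees every pinned chunk mapping. A user-space sketch of that drain loop (simplified list and types, not the kernel extent_map):

/* Sketch: drain a list of pinned mappings under the chunk mutex. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct em_sketch {
	unsigned long start;
	struct em_sketch *next;
};

static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct em_sketch *pinned_chunks;

int main(void)
{
	for (unsigned long i = 0; i < 3; i++) {	/* pin a few dummy mappings */
		struct em_sketch *em = malloc(sizeof(*em));

		if (!em)
			break;
		em->start = i * 4096;
		em->next = pinned_chunks;
		pinned_chunks = em;
	}

	pthread_mutex_lock(&chunk_mutex);
	while (pinned_chunks) {
		struct em_sketch *em = pinned_chunks;

		pinned_chunks = em->next;
		printf("freeing pinned chunk at %lu\n", em->start);
		free(em);
	}
	pthread_mutex_unlock(&chunk_mutex);
	return 0;
}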
4029 | int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid, | 4030 | int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid, |
@@ -4045,6 +4046,7 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid, | |||
4045 | 4046 | ||
4046 | void btrfs_mark_buffer_dirty(struct extent_buffer *buf) | 4047 | void btrfs_mark_buffer_dirty(struct extent_buffer *buf) |
4047 | { | 4048 | { |
4049 | struct btrfs_fs_info *fs_info; | ||
4048 | struct btrfs_root *root; | 4050 | struct btrfs_root *root; |
4049 | u64 transid = btrfs_header_generation(buf); | 4051 | u64 transid = btrfs_header_generation(buf); |
4050 | int was_dirty; | 4052 | int was_dirty; |
@@ -4059,24 +4061,25 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf) | |||
4059 | return; | 4061 | return; |
4060 | #endif | 4062 | #endif |
4061 | root = BTRFS_I(buf->pages[0]->mapping->host)->root; | 4063 | root = BTRFS_I(buf->pages[0]->mapping->host)->root; |
4064 | fs_info = root->fs_info; | ||
4062 | btrfs_assert_tree_locked(buf); | 4065 | btrfs_assert_tree_locked(buf); |
4063 | if (transid != root->fs_info->generation) | 4066 | if (transid != fs_info->generation) |
4064 | WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n", | 4067 | WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n", |
4065 | buf->start, transid, root->fs_info->generation); | 4068 | buf->start, transid, fs_info->generation); |
4066 | was_dirty = set_extent_buffer_dirty(buf); | 4069 | was_dirty = set_extent_buffer_dirty(buf); |
4067 | if (!was_dirty) | 4070 | if (!was_dirty) |
4068 | __percpu_counter_add(&root->fs_info->dirty_metadata_bytes, | 4071 | __percpu_counter_add(&fs_info->dirty_metadata_bytes, |
4069 | buf->len, | 4072 | buf->len, |
4070 | root->fs_info->dirty_metadata_batch); | 4073 | fs_info->dirty_metadata_batch); |
4071 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY | 4074 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY |
4072 | if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) { | 4075 | if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) { |
4073 | btrfs_print_leaf(root, buf); | 4076 | btrfs_print_leaf(fs_info, buf); |
4074 | ASSERT(0); | 4077 | ASSERT(0); |
4075 | } | 4078 | } |
4076 | #endif | 4079 | #endif |
4077 | } | 4080 | } |
4078 | 4081 | ||
4079 | static void __btrfs_btree_balance_dirty(struct btrfs_root *root, | 4082 | static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info, |
4080 | int flush_delayed) | 4083 | int flush_delayed) |
4081 | { | 4084 | { |
4082 | /* | 4085 | /* |
@@ -4089,34 +4092,34 @@ static void __btrfs_btree_balance_dirty(struct btrfs_root *root, | |||
4089 | return; | 4092 | return; |
4090 | 4093 | ||
4091 | if (flush_delayed) | 4094 | if (flush_delayed) |
4092 | btrfs_balance_delayed_items(root); | 4095 | btrfs_balance_delayed_items(fs_info); |
4093 | 4096 | ||
4094 | ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes, | 4097 | ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes, |
4095 | BTRFS_DIRTY_METADATA_THRESH); | 4098 | BTRFS_DIRTY_METADATA_THRESH); |
4096 | if (ret > 0) { | 4099 | if (ret > 0) { |
4097 | balance_dirty_pages_ratelimited( | 4100 | balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping); |
4098 | root->fs_info->btree_inode->i_mapping); | ||
4099 | } | 4101 | } |
4100 | } | 4102 | } |
4101 | 4103 | ||
4102 | void btrfs_btree_balance_dirty(struct btrfs_root *root) | 4104 | void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info) |
4103 | { | 4105 | { |
4104 | __btrfs_btree_balance_dirty(root, 1); | 4106 | __btrfs_btree_balance_dirty(fs_info, 1); |
4105 | } | 4107 | } |
4106 | 4108 | ||
4107 | void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root) | 4109 | void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info) |
4108 | { | 4110 | { |
4109 | __btrfs_btree_balance_dirty(root, 0); | 4111 | __btrfs_btree_balance_dirty(fs_info, 0); |
4110 | } | 4112 | } |
4111 | 4113 | ||
4112 | int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid) | 4114 | int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid) |
4113 | { | 4115 | { |
4114 | struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root; | 4116 | struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root; |
4115 | return btree_read_extent_buffer_pages(root, buf, parent_transid); | 4117 | struct btrfs_fs_info *fs_info = root->fs_info; |
4118 | |||
4119 | return btree_read_extent_buffer_pages(fs_info, buf, parent_transid); | ||
4116 | } | 4120 | } |
4117 | 4121 | ||
4118 | static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, | 4122 | static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info) |
4119 | int read_only) | ||
4120 | { | 4123 | { |
4121 | struct btrfs_super_block *sb = fs_info->super_copy; | 4124 | struct btrfs_super_block *sb = fs_info->super_copy; |
4122 | u64 nodesize = btrfs_super_nodesize(sb); | 4125 | u64 nodesize = btrfs_super_nodesize(sb); |
@@ -4263,17 +4266,17 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, | |||
4263 | return ret; | 4266 | return ret; |
4264 | } | 4267 | } |
4265 | 4268 | ||
4266 | static void btrfs_error_commit_super(struct btrfs_root *root) | 4269 | static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info) |
4267 | { | 4270 | { |
4268 | mutex_lock(&root->fs_info->cleaner_mutex); | 4271 | mutex_lock(&fs_info->cleaner_mutex); |
4269 | btrfs_run_delayed_iputs(root); | 4272 | btrfs_run_delayed_iputs(fs_info); |
4270 | mutex_unlock(&root->fs_info->cleaner_mutex); | 4273 | mutex_unlock(&fs_info->cleaner_mutex); |
4271 | 4274 | ||
4272 | down_write(&root->fs_info->cleanup_work_sem); | 4275 | down_write(&fs_info->cleanup_work_sem); |
4273 | up_write(&root->fs_info->cleanup_work_sem); | 4276 | up_write(&fs_info->cleanup_work_sem); |
4274 | 4277 | ||
4275 | /* cleanup FS via transaction */ | 4278 | /* cleanup FS via transaction */ |
4276 | btrfs_cleanup_transaction(root); | 4279 | btrfs_cleanup_transaction(fs_info); |
4277 | } | 4280 | } |
4278 | 4281 | ||
4279 | static void btrfs_destroy_ordered_extents(struct btrfs_root *root) | 4282 | static void btrfs_destroy_ordered_extents(struct btrfs_root *root) |
@@ -4316,7 +4319,7 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info) | |||
4316 | } | 4319 | } |
4317 | 4320 | ||
4318 | static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, | 4321 | static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, |
4319 | struct btrfs_root *root) | 4322 | struct btrfs_fs_info *fs_info) |
4320 | { | 4323 | { |
4321 | struct rb_node *node; | 4324 | struct rb_node *node; |
4322 | struct btrfs_delayed_ref_root *delayed_refs; | 4325 | struct btrfs_delayed_ref_root *delayed_refs; |
@@ -4328,7 +4331,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, | |||
4328 | spin_lock(&delayed_refs->lock); | 4331 | spin_lock(&delayed_refs->lock); |
4329 | if (atomic_read(&delayed_refs->num_entries) == 0) { | 4332 | if (atomic_read(&delayed_refs->num_entries) == 0) { |
4330 | spin_unlock(&delayed_refs->lock); | 4333 | spin_unlock(&delayed_refs->lock); |
4331 | btrfs_info(root->fs_info, "delayed_refs has NO entry"); | 4334 | btrfs_info(fs_info, "delayed_refs has NO entry"); |
4332 | return ret; | 4335 | return ret; |
4333 | } | 4336 | } |
4334 | 4337 | ||
@@ -4354,6 +4357,8 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, | |||
4354 | list) { | 4357 | list) { |
4355 | ref->in_tree = 0; | 4358 | ref->in_tree = 0; |
4356 | list_del(&ref->list); | 4359 | list_del(&ref->list); |
4360 | if (!list_empty(&ref->add_list)) | ||
4361 | list_del(&ref->add_list); | ||
4357 | atomic_dec(&delayed_refs->num_entries); | 4362 | atomic_dec(&delayed_refs->num_entries); |
4358 | btrfs_put_delayed_ref(ref); | 4363 | btrfs_put_delayed_ref(ref); |
4359 | } | 4364 | } |
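The two added lines above unlink a delayed ref from its add_list, but only when the node is actually linked. A minimal re-implementation of the self-pointing list_head idiom behind that list_empty() check (sketch only; the kernel's list helpers live in <linux/list.h>):

/* Sketch: an unlinked node points at itself, so list_empty() on the node
 * tells whether it sits on the secondary list before deleting it. */
#include <stdbool.h>
#include <stdio.h>

struct list_node { struct list_node *next, *prev; };

static void list_init(struct list_node *n) { n->next = n->prev = n; }
static bool list_empty(const struct list_node *n) { return n->next == n; }

static void list_add(struct list_node *n, struct list_node *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void list_del(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);	/* leave the node "empty" again */
}

struct delayed_ref { struct list_node list, add_list; };

int main(void)
{
	struct list_node refs, add_pending;
	struct delayed_ref ref;

	list_init(&refs);
	list_init(&add_pending);
	list_init(&ref.list);
	list_init(&ref.add_list);

	list_add(&ref.list, &refs);		/* always on the main list */
	list_add(&ref.add_list, &add_pending);	/* only sometimes on this one */

	list_del(&ref.list);
	if (!list_empty(&ref.add_list))		/* mirrors the hunk above */
		list_del(&ref.add_list);

	printf("add list drained: %s\n", list_empty(&add_pending) ? "yes" : "no");
	return 0;
}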
@@ -4371,7 +4376,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, | |||
4371 | mutex_unlock(&head->mutex); | 4376 | mutex_unlock(&head->mutex); |
4372 | 4377 | ||
4373 | if (pin_bytes) | 4378 | if (pin_bytes) |
4374 | btrfs_pin_extent(root, head->node.bytenr, | 4379 | btrfs_pin_extent(fs_info, head->node.bytenr, |
4375 | head->node.num_bytes, 1); | 4380 | head->node.num_bytes, 1); |
4376 | btrfs_put_delayed_ref(&head->node); | 4381 | btrfs_put_delayed_ref(&head->node); |
4377 | cond_resched(); | 4382 | cond_resched(); |
@@ -4435,7 +4440,7 @@ static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info) | |||
4435 | spin_unlock(&fs_info->delalloc_root_lock); | 4440 | spin_unlock(&fs_info->delalloc_root_lock); |
4436 | } | 4441 | } |
4437 | 4442 | ||
4438 | static int btrfs_destroy_marked_extents(struct btrfs_root *root, | 4443 | static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info, |
4439 | struct extent_io_tree *dirty_pages, | 4444 | struct extent_io_tree *dirty_pages, |
4440 | int mark) | 4445 | int mark) |
4441 | { | 4446 | { |
@@ -4452,8 +4457,8 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root, | |||
4452 | 4457 | ||
4453 | clear_extent_bits(dirty_pages, start, end, mark); | 4458 | clear_extent_bits(dirty_pages, start, end, mark); |
4454 | while (start <= end) { | 4459 | while (start <= end) { |
4455 | eb = btrfs_find_tree_block(root->fs_info, start); | 4460 | eb = find_extent_buffer(fs_info, start); |
4456 | start += root->nodesize; | 4461 | start += fs_info->nodesize; |
4457 | if (!eb) | 4462 | if (!eb) |
4458 | continue; | 4463 | continue; |
4459 | wait_on_extent_buffer_writeback(eb); | 4464 | wait_on_extent_buffer_writeback(eb); |
@@ -4468,7 +4473,7 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root, | |||
4468 | return ret; | 4473 | return ret; |
4469 | } | 4474 | } |
4470 | 4475 | ||
4471 | static int btrfs_destroy_pinned_extent(struct btrfs_root *root, | 4476 | static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info, |
4472 | struct extent_io_tree *pinned_extents) | 4477 | struct extent_io_tree *pinned_extents) |
4473 | { | 4478 | { |
4474 | struct extent_io_tree *unpin; | 4479 | struct extent_io_tree *unpin; |
@@ -4486,15 +4491,15 @@ again: | |||
4486 | break; | 4491 | break; |
4487 | 4492 | ||
4488 | clear_extent_dirty(unpin, start, end); | 4493 | clear_extent_dirty(unpin, start, end); |
4489 | btrfs_error_unpin_extent_range(root, start, end); | 4494 | btrfs_error_unpin_extent_range(fs_info, start, end); |
4490 | cond_resched(); | 4495 | cond_resched(); |
4491 | } | 4496 | } |
4492 | 4497 | ||
4493 | if (loop) { | 4498 | if (loop) { |
4494 | if (unpin == &root->fs_info->freed_extents[0]) | 4499 | if (unpin == &fs_info->freed_extents[0]) |
4495 | unpin = &root->fs_info->freed_extents[1]; | 4500 | unpin = &fs_info->freed_extents[1]; |
4496 | else | 4501 | else |
4497 | unpin = &root->fs_info->freed_extents[0]; | 4502 | unpin = &fs_info->freed_extents[0]; |
4498 | loop = false; | 4503 | loop = false; |
4499 | goto again; | 4504 | goto again; |
4500 | } | 4505 | } |
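btrfs_destroy_pinned_extent() drains one of the two freed_extents trees, flips to the other, and makes exactly one more pass. A tiny sketch of that ping-pong control flow with stand-in data:

/* Sketch: drain freed_extents[0], switch to [1] (or vice versa), run once more. */
#include <stdio.h>

static int freed_extents[2] = { 3, 5 };	/* pretend entry counts */

static void drain(int *tree, int idx)
{
	printf("draining freed_extents[%d]: %d entries\n", idx, *tree);
	*tree = 0;
}

int main(void)
{
	int *unpin = &freed_extents[0];
	int loop = 1;

again:
	drain(unpin, unpin == &freed_extents[0] ? 0 : 1);
	if (loop) {
		unpin = (unpin == &freed_extents[0]) ? &freed_extents[1]
						     : &freed_extents[0];
		loop = 0;
		goto again;
	}
	return 0;
}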
@@ -4517,7 +4522,7 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache) | |||
4517 | } | 4522 | } |
4518 | 4523 | ||
4519 | void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans, | 4524 | void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans, |
4520 | struct btrfs_root *root) | 4525 | struct btrfs_fs_info *fs_info) |
4521 | { | 4526 | { |
4522 | struct btrfs_block_group_cache *cache; | 4527 | struct btrfs_block_group_cache *cache; |
4523 | 4528 | ||
@@ -4527,8 +4532,7 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans, | |||
4527 | struct btrfs_block_group_cache, | 4532 | struct btrfs_block_group_cache, |
4528 | dirty_list); | 4533 | dirty_list); |
4529 | if (!cache) { | 4534 | if (!cache) { |
4530 | btrfs_err(root->fs_info, | 4535 | btrfs_err(fs_info, "orphan block group dirty_bgs list"); |
4531 | "orphan block group dirty_bgs list"); | ||
4532 | spin_unlock(&cur_trans->dirty_bgs_lock); | 4536 | spin_unlock(&cur_trans->dirty_bgs_lock); |
4533 | return; | 4537 | return; |
4534 | } | 4538 | } |
@@ -4556,8 +4560,7 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans, | |||
4556 | struct btrfs_block_group_cache, | 4560 | struct btrfs_block_group_cache, |
4557 | io_list); | 4561 | io_list); |
4558 | if (!cache) { | 4562 | if (!cache) { |
4559 | btrfs_err(root->fs_info, | 4563 | btrfs_err(fs_info, "orphan block group on io_bgs list"); |
4560 | "orphan block group on io_bgs list"); | ||
4561 | return; | 4564 | return; |
4562 | } | 4565 | } |
4563 | 4566 | ||
@@ -4570,27 +4573,27 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans, | |||
4570 | } | 4573 | } |
4571 | 4574 | ||
4572 | void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans, | 4575 | void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans, |
4573 | struct btrfs_root *root) | 4576 | struct btrfs_fs_info *fs_info) |
4574 | { | 4577 | { |
4575 | btrfs_cleanup_dirty_bgs(cur_trans, root); | 4578 | btrfs_cleanup_dirty_bgs(cur_trans, fs_info); |
4576 | ASSERT(list_empty(&cur_trans->dirty_bgs)); | 4579 | ASSERT(list_empty(&cur_trans->dirty_bgs)); |
4577 | ASSERT(list_empty(&cur_trans->io_bgs)); | 4580 | ASSERT(list_empty(&cur_trans->io_bgs)); |
4578 | 4581 | ||
4579 | btrfs_destroy_delayed_refs(cur_trans, root); | 4582 | btrfs_destroy_delayed_refs(cur_trans, fs_info); |
4580 | 4583 | ||
4581 | cur_trans->state = TRANS_STATE_COMMIT_START; | 4584 | cur_trans->state = TRANS_STATE_COMMIT_START; |
4582 | wake_up(&root->fs_info->transaction_blocked_wait); | 4585 | wake_up(&fs_info->transaction_blocked_wait); |
4583 | 4586 | ||
4584 | cur_trans->state = TRANS_STATE_UNBLOCKED; | 4587 | cur_trans->state = TRANS_STATE_UNBLOCKED; |
4585 | wake_up(&root->fs_info->transaction_wait); | 4588 | wake_up(&fs_info->transaction_wait); |
4586 | 4589 | ||
4587 | btrfs_destroy_delayed_inodes(root); | 4590 | btrfs_destroy_delayed_inodes(fs_info); |
4588 | btrfs_assert_delayed_root_empty(root); | 4591 | btrfs_assert_delayed_root_empty(fs_info); |
4589 | 4592 | ||
4590 | btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages, | 4593 | btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages, |
4591 | EXTENT_DIRTY); | 4594 | EXTENT_DIRTY); |
4592 | btrfs_destroy_pinned_extent(root, | 4595 | btrfs_destroy_pinned_extent(fs_info, |
4593 | root->fs_info->pinned_extents); | 4596 | fs_info->pinned_extents); |
4594 | 4597 | ||
4595 | cur_trans->state = TRANS_STATE_COMPLETED; | 4598 | cur_trans->state = TRANS_STATE_COMPLETED; |
4596 | wake_up(&cur_trans->commit_wait); | 4599 | wake_up(&cur_trans->commit_wait); |
@@ -4601,27 +4604,27 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans, | |||
4601 | */ | 4604 | */ |
4602 | } | 4605 | } |
4603 | 4606 | ||
4604 | static int btrfs_cleanup_transaction(struct btrfs_root *root) | 4607 | static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info) |
4605 | { | 4608 | { |
4606 | struct btrfs_transaction *t; | 4609 | struct btrfs_transaction *t; |
4607 | 4610 | ||
4608 | mutex_lock(&root->fs_info->transaction_kthread_mutex); | 4611 | mutex_lock(&fs_info->transaction_kthread_mutex); |
4609 | 4612 | ||
4610 | spin_lock(&root->fs_info->trans_lock); | 4613 | spin_lock(&fs_info->trans_lock); |
4611 | while (!list_empty(&root->fs_info->trans_list)) { | 4614 | while (!list_empty(&fs_info->trans_list)) { |
4612 | t = list_first_entry(&root->fs_info->trans_list, | 4615 | t = list_first_entry(&fs_info->trans_list, |
4613 | struct btrfs_transaction, list); | 4616 | struct btrfs_transaction, list); |
4614 | if (t->state >= TRANS_STATE_COMMIT_START) { | 4617 | if (t->state >= TRANS_STATE_COMMIT_START) { |
4615 | atomic_inc(&t->use_count); | 4618 | atomic_inc(&t->use_count); |
4616 | spin_unlock(&root->fs_info->trans_lock); | 4619 | spin_unlock(&fs_info->trans_lock); |
4617 | btrfs_wait_for_commit(root, t->transid); | 4620 | btrfs_wait_for_commit(fs_info, t->transid); |
4618 | btrfs_put_transaction(t); | 4621 | btrfs_put_transaction(t); |
4619 | spin_lock(&root->fs_info->trans_lock); | 4622 | spin_lock(&fs_info->trans_lock); |
4620 | continue; | 4623 | continue; |
4621 | } | 4624 | } |
4622 | if (t == root->fs_info->running_transaction) { | 4625 | if (t == fs_info->running_transaction) { |
4623 | t->state = TRANS_STATE_COMMIT_DOING; | 4626 | t->state = TRANS_STATE_COMMIT_DOING; |
4624 | spin_unlock(&root->fs_info->trans_lock); | 4627 | spin_unlock(&fs_info->trans_lock); |
4625 | /* | 4628 | /* |
4626 | * We wait for 0 num_writers since we don't hold a trans | 4629 | * We wait for 0 num_writers since we don't hold a trans |
4627 | * handle open currently for this transaction. | 4630 | * handle open currently for this transaction. |
@@ -4629,35 +4632,38 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root) | |||
4629 | wait_event(t->writer_wait, | 4632 | wait_event(t->writer_wait, |
4630 | atomic_read(&t->num_writers) == 0); | 4633 | atomic_read(&t->num_writers) == 0); |
4631 | } else { | 4634 | } else { |
4632 | spin_unlock(&root->fs_info->trans_lock); | 4635 | spin_unlock(&fs_info->trans_lock); |
4633 | } | 4636 | } |
4634 | btrfs_cleanup_one_transaction(t, root); | 4637 | btrfs_cleanup_one_transaction(t, fs_info); |
4635 | 4638 | ||
4636 | spin_lock(&root->fs_info->trans_lock); | 4639 | spin_lock(&fs_info->trans_lock); |
4637 | if (t == root->fs_info->running_transaction) | 4640 | if (t == fs_info->running_transaction) |
4638 | root->fs_info->running_transaction = NULL; | 4641 | fs_info->running_transaction = NULL; |
4639 | list_del_init(&t->list); | 4642 | list_del_init(&t->list); |
4640 | spin_unlock(&root->fs_info->trans_lock); | 4643 | spin_unlock(&fs_info->trans_lock); |
4641 | 4644 | ||
4642 | btrfs_put_transaction(t); | 4645 | btrfs_put_transaction(t); |
4643 | trace_btrfs_transaction_commit(root); | 4646 | trace_btrfs_transaction_commit(fs_info->tree_root); |
4644 | spin_lock(&root->fs_info->trans_lock); | 4647 | spin_lock(&fs_info->trans_lock); |
4645 | } | 4648 | } |
4646 | spin_unlock(&root->fs_info->trans_lock); | 4649 | spin_unlock(&fs_info->trans_lock); |
4647 | btrfs_destroy_all_ordered_extents(root->fs_info); | 4650 | btrfs_destroy_all_ordered_extents(fs_info); |
4648 | btrfs_destroy_delayed_inodes(root); | 4651 | btrfs_destroy_delayed_inodes(fs_info); |
4649 | btrfs_assert_delayed_root_empty(root); | 4652 | btrfs_assert_delayed_root_empty(fs_info); |
4650 | btrfs_destroy_pinned_extent(root, root->fs_info->pinned_extents); | 4653 | btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents); |
4651 | btrfs_destroy_all_delalloc_inodes(root->fs_info); | 4654 | btrfs_destroy_all_delalloc_inodes(fs_info); |
4652 | mutex_unlock(&root->fs_info->transaction_kthread_mutex); | 4655 | mutex_unlock(&fs_info->transaction_kthread_mutex); |
4653 | 4656 | ||
4654 | return 0; | 4657 | return 0; |
4655 | } | 4658 | } |
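btrfs_cleanup_transaction() walks trans_list under trans_lock, but always drops the lock before any blocking wait and re-takes it afterwards. A pthread sketch of that lock-drop pattern (a mutex standing in for the spinlock, simplified transaction states):

/* Sketch: never sleep while holding the list lock; release it around the
 * wait, then re-acquire and continue the walk. */
#include <pthread.h>
#include <stdio.h>

struct txn { int id; int committing; struct txn *next; };

static pthread_mutex_t trans_lock = PTHREAD_MUTEX_INITIALIZER;
static struct txn t2 = { .id = 2, .committing = 0, .next = NULL };
static struct txn t1 = { .id = 1, .committing = 1, .next = &t2 };
static struct txn *trans_list = &t1;

static void wait_for_commit(struct txn *t)
{
	printf("waiting for transaction %d to commit\n", t->id);
}

int main(void)
{
	pthread_mutex_lock(&trans_lock);
	while (trans_list) {
		struct txn *t = trans_list;

		if (t->committing) {
			pthread_mutex_unlock(&trans_lock);
			wait_for_commit(t);
			pthread_mutex_lock(&trans_lock);
			t->committing = 0;
			continue;
		}
		printf("cleaning up transaction %d\n", t->id);
		trans_list = t->next;
	}
	pthread_mutex_unlock(&trans_lock);
	return 0;
}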
4656 | 4659 | ||
4657 | static const struct extent_io_ops btree_extent_io_ops = { | 4660 | static const struct extent_io_ops btree_extent_io_ops = { |
4658 | .readpage_end_io_hook = btree_readpage_end_io_hook, | 4661 | /* mandatory callbacks */ |
4659 | .readpage_io_failed_hook = btree_io_failed_hook, | ||
4660 | .submit_bio_hook = btree_submit_bio_hook, | 4662 | .submit_bio_hook = btree_submit_bio_hook, |
4663 | .readpage_end_io_hook = btree_readpage_end_io_hook, | ||
4661 | /* note we're sharing with inode.c for the merge bio hook */ | 4664 | /* note we're sharing with inode.c for the merge bio hook */ |
4662 | .merge_bio_hook = btrfs_merge_bio_hook, | 4665 | .merge_bio_hook = btrfs_merge_bio_hook, |
4666 | .readpage_io_failed_hook = btree_io_failed_hook, | ||
4667 | |||
4668 | /* optional callbacks */ | ||
4663 | }; | 4669 | }; |
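The reordered btree_extent_io_ops table now groups its hooks under "mandatory callbacks" and "optional callbacks". A short sketch of what that split implies for callers (hypothetical hook names, not the kernel's extent_io_ops):

/* Sketch: mandatory hooks are called unconditionally; optional hooks may be
 * NULL and are checked before use. */
#include <stdio.h>

struct io_ops_sketch {
	/* mandatory callbacks */
	int (*submit)(int bio_id);
	/* optional callbacks */
	void (*on_error)(int bio_id);	/* may be NULL */
};

static int btree_submit(int bio_id)
{
	printf("submitting bio %d\n", bio_id);
	return 0;
}

static const struct io_ops_sketch btree_ops = {
	.submit = btree_submit,
	/* .on_error intentionally left unset */
};

int main(void)
{
	int ret = btree_ops.submit(7);		/* mandatory: call directly */

	if (ret && btree_ops.on_error)		/* optional: NULL-check first */
		btree_ops.on_error(7);
	return ret;
}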