Diffstat (limited to 'fs/btrfs/disk-io.c')
 fs/btrfs/disk-io.c | 733 ++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 575 insertions(+), 158 deletions(-)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 87b25543d7d1..b531c36455d8 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -27,6 +27,8 @@
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 #include <linux/crc32c.h>
+#include <linux/slab.h>
+#include <linux/migrate.h>
 #include "compat.h"
 #include "ctree.h"
 #include "disk-io.h"
@@ -42,8 +44,20 @@
 static struct extent_io_ops btree_extent_io_ops;
 static void end_workqueue_fn(struct btrfs_work *work);
 static void free_fs_root(struct btrfs_root *root);
-
-static atomic_t btrfs_bdi_num = ATOMIC_INIT(0);
+static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
+				    int read_only);
+static int btrfs_destroy_ordered_operations(struct btrfs_root *root);
+static int btrfs_destroy_ordered_extents(struct btrfs_root *root);
+static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+				      struct btrfs_root *root);
+static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t);
+static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
+static int btrfs_destroy_marked_extents(struct btrfs_root *root,
+					struct extent_io_tree *dirty_pages,
+					int mark);
+static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
+					struct extent_io_tree *pinned_extents);
+static int btrfs_cleanup_transaction(struct btrfs_root *root);
 
 /*
  * end_io_wq structs are used to do processing in task context when an IO is
@@ -75,6 +89,11 @@ struct async_submit_bio {
 	int rw;
 	int mirror_num;
 	unsigned long bio_flags;
+	/*
+	 * bio_offset is optional, can be used if the pages in the bio
+	 * can't tell us where in the file the bio should go
+	 */
+	u64 bio_offset;
 	struct btrfs_work work;
 };
 
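
The struct change above threads a new bio_offset value through the async submission machinery; later hunks extend every submit hook with the same parameter. A minimal userspace sketch of the pattern the work item follows (all names here are illustrative stand-ins, not kernel API):

/*
 * Userspace sketch (not kernel code): an async submit work item captures
 * every argument of the eventual submit call, so the new bio_offset
 * member simply rides along until a worker thread runs the hook.
 */
#include <stdio.h>

typedef unsigned long long u64;

struct sketch_bio {
	const char *payload;		/* stands in for the real bio pages */
};

/* mirrors extent_submit_bio_hook_t gaining a u64 bio_offset parameter */
typedef int (*submit_hook_t)(struct sketch_bio *bio, u64 bio_offset);

struct sketch_async_submit {
	struct sketch_bio *bio;
	u64 bio_offset;			/* the field added by this diff */
	submit_hook_t submit_bio_start;
};

static int start_hook(struct sketch_bio *bio, u64 bio_offset)
{
	printf("checksum '%s' destined for file offset %llu\n",
	       bio->payload, bio_offset);
	return 0;
}

/* like the kernel's run_one_async_start(): forward the saved offset */
static void run_one_async_start(struct sketch_async_submit *async)
{
	async->submit_bio_start(async->bio, async->bio_offset);
}

int main(void)
{
	struct sketch_bio bio = { "metadata block" };
	struct sketch_async_submit work = { &bio, 4096, start_hook };

	run_one_async_start(&work);
	return 0;
}
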
@@ -263,13 +282,15 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
 static int verify_parent_transid(struct extent_io_tree *io_tree,
 				 struct extent_buffer *eb, u64 parent_transid)
 {
+	struct extent_state *cached_state = NULL;
 	int ret;
 
 	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
 		return 0;
 
-	lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS);
-	if (extent_buffer_uptodate(io_tree, eb) &&
+	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
+			 0, &cached_state, GFP_NOFS);
+	if (extent_buffer_uptodate(io_tree, eb, cached_state) &&
 	    btrfs_header_generation(eb) == parent_transid) {
 		ret = 0;
 		goto out;
@@ -282,10 +303,10 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
 		       (unsigned long long)btrfs_header_generation(eb));
 	}
 	ret = 1;
-	clear_extent_buffer_uptodate(io_tree, eb);
+	clear_extent_buffer_uptodate(io_tree, eb, &cached_state);
 out:
-	unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
-		      GFP_NOFS);
+	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
+			     &cached_state, GFP_NOFS);
 	return ret;
 }
 
@@ -332,7 +353,6 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
 	struct extent_io_tree *tree;
 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
 	u64 found_start;
-	int found_level;
 	unsigned long len;
 	struct extent_buffer *eb;
 	int ret;
@@ -347,9 +367,15 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
 	WARN_ON(len == 0);
 
 	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
+	if (eb == NULL) {
+		WARN_ON(1);
+		goto out;
+	}
 	ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
 					     btrfs_header_generation(eb));
 	BUG_ON(ret);
+	WARN_ON(!btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN));
+
 	found_start = btrfs_header_bytenr(eb);
 	if (found_start != start) {
 		WARN_ON(1);
@@ -363,8 +389,6 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
 		WARN_ON(1);
 		goto err;
 	}
-	found_level = btrfs_header_level(eb);
-
 	csum_tree_block(root, eb, 0);
 err:
 	free_extent_buffer(eb);
@@ -421,6 +445,10 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 	WARN_ON(len == 0);
 
 	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
+	if (eb == NULL) {
+		ret = -EIO;
+		goto out;
+	}
 
 	found_start = btrfs_header_bytenr(eb);
 	if (found_start != start) {
@@ -474,10 +502,13 @@ static void end_workqueue_bio(struct bio *bio, int err)
 	end_io_wq->work.func = end_workqueue_fn;
 	end_io_wq->work.flags = 0;
 
-	if (bio->bi_rw & (1 << BIO_RW)) {
-		if (end_io_wq->metadata)
+	if (bio->bi_rw & REQ_WRITE) {
+		if (end_io_wq->metadata == 1)
 			btrfs_queue_worker(&fs_info->endio_meta_write_workers,
 					   &end_io_wq->work);
+		else if (end_io_wq->metadata == 2)
+			btrfs_queue_worker(&fs_info->endio_freespace_worker,
+					   &end_io_wq->work);
 		else
 			btrfs_queue_worker(&fs_info->endio_write_workers,
 					   &end_io_wq->work);
@@ -491,6 +522,13 @@ static void end_workqueue_bio(struct bio *bio, int err)
 	}
 }
 
+/*
+ * For the metadata arg you want
+ *
+ * 0 - if data
+ * 1 - if normal metadata
+ * 2 - if writing to the free space cache area
+ */
 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
 			int metadata)
 {
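
The new comment spells out the metadata argument's three-way contract, and the earlier end_workqueue_bio() hunk dispatches write completions on exactly that value. A compilable userspace model of the routing, with hypothetical pool tags standing in for the real btrfs_queue_worker() targets:

/*
 * Userspace model of the write-side routing in end_workqueue_bio(),
 * keyed on the metadata contract documented above.  The enum values are
 * hypothetical tags for the real worker pools.
 */
#include <stdio.h>

enum wq_target {
	ENDIO_WRITE,		/* endio_write_workers: plain data */
	ENDIO_META_WRITE,	/* endio_meta_write_workers: metadata == 1 */
	ENDIO_FREESPACE,	/* endio_freespace_worker: metadata == 2 */
	ENDIO_READ		/* read completions take a different path */
};

static enum wq_target route_write_endio(int is_write, int metadata)
{
	if (!is_write)
		return ENDIO_READ;
	if (metadata == 1)
		return ENDIO_META_WRITE;
	if (metadata == 2)
		return ENDIO_FREESPACE;
	return ENDIO_WRITE;		/* metadata == 0: data */
}

int main(void)
{
	printf("data=%d meta=%d freespace=%d read=%d\n",
	       route_write_endio(1, 0), route_write_endio(1, 1),
	       route_write_endio(1, 2), route_write_endio(0, 1));
	return 0;
}
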
@@ -527,13 +565,12 @@ int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
 
 static void run_one_async_start(struct btrfs_work *work)
 {
-	struct btrfs_fs_info *fs_info;
 	struct async_submit_bio *async;
 
 	async = container_of(work, struct async_submit_bio, work);
-	fs_info = BTRFS_I(async->inode)->root->fs_info;
 	async->submit_bio_start(async->inode, async->rw, async->bio,
-				async->mirror_num, async->bio_flags);
+				async->mirror_num, async->bio_flags,
+				async->bio_offset);
 }
 
 static void run_one_async_done(struct btrfs_work *work)
@@ -555,7 +592,8 @@ static void run_one_async_done(struct btrfs_work *work)
 	wake_up(&fs_info->async_submit_wait);
 
 	async->submit_bio_done(async->inode, async->rw, async->bio,
-			       async->mirror_num, async->bio_flags);
+			       async->mirror_num, async->bio_flags,
+			       async->bio_offset);
 }
 
 static void run_one_async_free(struct btrfs_work *work)
@@ -569,6 +607,7 @@ static void run_one_async_free(struct btrfs_work *work)
 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 			int rw, struct bio *bio, int mirror_num,
 			unsigned long bio_flags,
+			u64 bio_offset,
 			extent_submit_bio_hook_t *submit_bio_start,
 			extent_submit_bio_hook_t *submit_bio_done)
 {
@@ -591,10 +630,11 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 
 	async->work.flags = 0;
 	async->bio_flags = bio_flags;
+	async->bio_offset = bio_offset;
 
 	atomic_inc(&fs_info->nr_async_submits);
 
-	if (rw & (1 << BIO_RW_SYNCIO))
+	if (rw & REQ_SYNC)
 		btrfs_set_work_high_prio(&async->work);
 
 	btrfs_queue_worker(&fs_info->workers, &async->work);
@@ -626,7 +666,8 @@ static int btree_csum_one_bio(struct bio *bio)
 
 static int __btree_submit_bio_start(struct inode *inode, int rw,
 				    struct bio *bio, int mirror_num,
-				    unsigned long bio_flags)
+				    unsigned long bio_flags,
+				    u64 bio_offset)
 {
 	/*
 	 * when we're called for a write, we're already in the async
@@ -637,7 +678,8 @@ static int __btree_submit_bio_start(struct inode *inode, int rw,
 }
 
 static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
-				   int mirror_num, unsigned long bio_flags)
+				   int mirror_num, unsigned long bio_flags,
+				   u64 bio_offset)
 {
 	/*
 	 * when we're called for a write, we're already in the async
@@ -647,7 +689,8 @@ static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
 }
 
 static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
-				 int mirror_num, unsigned long bio_flags)
+				 int mirror_num, unsigned long bio_flags,
+				 u64 bio_offset)
 {
 	int ret;
 
@@ -655,7 +698,7 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 			       bio, 1);
 	BUG_ON(ret);
 
-	if (!(rw & (1 << BIO_RW))) {
+	if (!(rw & REQ_WRITE)) {
 		/*
 		 * called for a read, do the setup so that checksum validation
 		 * can happen in the async kernel threads
@@ -670,10 +713,32 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 	 */
 	return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
 				   inode, rw, bio, mirror_num, 0,
+				   bio_offset,
 				   __btree_submit_bio_start,
 				   __btree_submit_bio_done);
 }
 
+#ifdef CONFIG_MIGRATION
+static int btree_migratepage(struct address_space *mapping,
+			struct page *newpage, struct page *page)
+{
+	/*
+	 * we can't safely write a btree page from here,
+	 * we haven't done the locking hook
+	 */
+	if (PageDirty(page))
+		return -EAGAIN;
+	/*
+	 * Buffers may be managed in a filesystem specific way.
+	 * We must have no buffers or drop them.
+	 */
+	if (page_has_private(page) &&
+	    !try_to_release_page(page, GFP_KERNEL))
+		return -EAGAIN;
+	return migrate_page(mapping, newpage, page);
+}
+#endif
+
 static int btree_writepage(struct page *page, struct writeback_control *wbc)
 {
 	struct extent_io_tree *tree;
@@ -688,8 +753,7 @@ static int btree_writepage(struct page *page, struct writeback_control *wbc)
 	}
 
 	redirty_page_for_writepage(wbc, page);
-	eb = btrfs_find_tree_block(root, page_offset(page),
-				   PAGE_CACHE_SIZE);
+	eb = btrfs_find_tree_block(root, page_offset(page), PAGE_CACHE_SIZE);
 	WARN_ON(!eb);
 
 	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
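
The new btree_migratepage() above boils down to two guard checks in front of the generic migrate_page() call: dirty btree pages are refused because writeback needs the tree locking hook, and fs-private buffers must be droppable first. A userspace rendering of that predicate under stand-in types (illustrative only; the real checks are PageDirty(), page_has_private() and try_to_release_page()):

/*
 * Userspace rendering of the btree_migratepage() guards.  Types and the
 * EAGAIN constant are local stand-ins, not kernel definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_EAGAIN 11

struct sketch_page {
	bool dirty;		/* PageDirty()                     */
	bool has_private;	/* page_has_private()              */
	bool releasable;	/* try_to_release_page() succeeds? */
};

static int can_migrate(const struct sketch_page *page)
{
	if (page->dirty)
		return -SKETCH_EAGAIN;
	if (page->has_private && !page->releasable)
		return -SKETCH_EAGAIN;
	return 0;			/* fall through to migrate_page() */
}

int main(void)
{
	struct sketch_page clean  = { false, false, false };
	struct sketch_page dirty  = { true,  false, false };
	struct sketch_page pinned = { false, true,  false };

	printf("clean=%d dirty=%d pinned=%d\n",
	       can_migrate(&clean), can_migrate(&dirty), can_migrate(&pinned));
	return 0;
}
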
@@ -780,6 +844,9 @@ static const struct address_space_operations btree_aops = {
 	.releasepage	= btree_releasepage,
 	.invalidatepage = btree_invalidatepage,
 	.sync_page	= block_sync_page,
+#ifdef CONFIG_MIGRATION
+	.migratepage	= btree_migratepage,
+#endif
 };
 
 int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
@@ -836,12 +903,8 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
 			 u32 blocksize, u64 parent_transid)
 {
 	struct extent_buffer *buf = NULL;
-	struct inode *btree_inode = root->fs_info->btree_inode;
-	struct extent_io_tree *io_tree;
 	int ret;
 
-	io_tree = &BTRFS_I(btree_inode)->io_tree;
-
 	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
 	if (!buf)
 		return NULL;
@@ -893,7 +956,8 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
 	root->ref_cows = 0;
 	root->track_dirty = 0;
 	root->in_radix = 0;
-	root->clean_orphans = 0;
+	root->orphan_item_inserted = 0;
+	root->orphan_cleanup_state = 0;
 
 	root->fs_info = fs_info;
 	root->objectid = objectid;
@@ -901,14 +965,17 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
 	root->highest_objectid = 0;
 	root->name = NULL;
 	root->in_sysfs = 0;
-	root->inode_tree.rb_node = NULL;
+	root->inode_tree = RB_ROOT;
+	root->block_rsv = NULL;
+	root->orphan_block_rsv = NULL;
 
 	INIT_LIST_HEAD(&root->dirty_list);
 	INIT_LIST_HEAD(&root->orphan_list);
 	INIT_LIST_HEAD(&root->root_list);
 	spin_lock_init(&root->node_lock);
-	spin_lock_init(&root->list_lock);
+	spin_lock_init(&root->orphan_lock);
 	spin_lock_init(&root->inode_lock);
+	spin_lock_init(&root->accounting_lock);
 	mutex_init(&root->objectid_mutex);
 	mutex_init(&root->log_mutex);
 	init_waitqueue_head(&root->log_writer_wait);
@@ -962,44 +1029,11 @@ static int find_and_setup_root(struct btrfs_root *tree_root,
 	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
 	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
 				     blocksize, generation);
-	BUG_ON(!root->node);
-	root->commit_root = btrfs_root_node(root);
-	return 0;
-}
-
-int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
-			     struct btrfs_fs_info *fs_info)
-{
-	struct extent_buffer *eb;
-	struct btrfs_root *log_root_tree = fs_info->log_root_tree;
-	u64 start = 0;
-	u64 end = 0;
-	int ret;
-
-	if (!log_root_tree)
-		return 0;
-
-	while (1) {
-		ret = find_first_extent_bit(&log_root_tree->dirty_log_pages,
-				0, &start, &end, EXTENT_DIRTY | EXTENT_NEW);
-		if (ret)
-			break;
-
-		clear_extent_bits(&log_root_tree->dirty_log_pages, start, end,
-				  EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
-	}
-	eb = fs_info->log_root_tree->node;
-
-	WARN_ON(btrfs_header_level(eb) != 0);
-	WARN_ON(btrfs_header_nritems(eb) != 0);
-
-	ret = btrfs_free_reserved_extent(fs_info->tree_root,
-					 eb->start, eb->len);
-	BUG_ON(ret);
-
-	free_extent_buffer(eb);
-	kfree(fs_info->log_root_tree);
-	fs_info->log_root_tree = NULL;
+	if (!root->node || !btrfs_buffer_uptodate(root->node, generation)) {
+		free_extent_buffer(root->node);
+		return -EIO;
+	}
+	root->commit_root = btrfs_root_node(root);
 	return 0;
 }
 
@@ -1133,6 +1167,7 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
 	}
 	btrfs_free_path(path);
 	if (ret) {
+		kfree(root);
 		if (ret > 0)
 			ret = -ENOENT;
 		return ERR_PTR(ret);
@@ -1190,19 +1225,23 @@ again:
 	if (root)
 		return root;
 
-	ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
-	if (ret == 0)
-		ret = -ENOENT;
-	if (ret < 0)
-		return ERR_PTR(ret);
-
 	root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
 	if (IS_ERR(root))
 		return root;
 
-	WARN_ON(btrfs_root_refs(&root->root_item) == 0);
 	set_anon_super(&root->anon_super, NULL);
 
+	if (btrfs_root_refs(&root->root_item) == 0) {
+		ret = -ENOENT;
+		goto fail;
+	}
+
+	ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
+	if (ret < 0)
+		goto fail;
+	if (ret == 0)
+		root->orphan_item_inserted = 1;
+
 	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
 	if (ret)
 		goto fail;
@@ -1211,10 +1250,9 @@ again:
 	ret = radix_tree_insert(&fs_info->fs_roots_radix,
 				(unsigned long)root->root_key.objectid,
 				root);
-	if (ret == 0) {
+	if (ret == 0)
 		root->in_radix = 1;
-		root->clean_orphans = 1;
-	}
+
 	spin_unlock(&fs_info->fs_roots_radix_lock);
 	radix_tree_preload_end();
 	if (ret) {
@@ -1372,19 +1410,11 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
 {
 	int err;
 
-	bdi->name = "btrfs";
 	bdi->capabilities = BDI_CAP_MAP_COPY;
-	err = bdi_init(bdi);
+	err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
 	if (err)
 		return err;
 
-	err = bdi_register(bdi, NULL, "btrfs-%d",
-			   atomic_inc_return(&btrfs_bdi_num));
-	if (err) {
-		bdi_destroy(bdi);
-		return err;
-	}
-
 	bdi->ra_pages	= default_backing_dev_info.ra_pages;
 	bdi->unplug_io_fn	= btrfs_unplug_io_fn;
 	bdi->unplug_io_data	= info;
@@ -1400,7 +1430,6 @@ static int bio_ready_for_csum(struct bio *bio)
 	u64 start = 0;
 	struct page *page;
 	struct extent_io_tree *io_tree = NULL;
-	struct btrfs_fs_info *info = NULL;
 	struct bio_vec *bvec;
 	int i;
 	int ret;
@@ -1419,7 +1448,6 @@ static int bio_ready_for_csum(struct bio *bio)
 		buf_len = page->private >> 2;
 		start = page_offset(page) + bvec->bv_offset;
 		io_tree = &BTRFS_I(page->mapping->host)->io_tree;
-		info = BTRFS_I(page->mapping->host)->root->fs_info;
 	}
 	/* are we fully contained in this bio? */
 	if (buf_len <= length)
@@ -1450,7 +1478,7 @@ static void end_workqueue_fn(struct btrfs_work *work)
 	 * ram and up to date before trying to verify things. For
 	 * blocksize <= pagesize, it is basically a noop
 	 */
-	if (!(bio->bi_rw & (1 << BIO_RW)) && end_io_wq->metadata &&
+	if (!(bio->bi_rw & REQ_WRITE) && end_io_wq->metadata &&
 	    !bio_ready_for_csum(bio)) {
 		btrfs_queue_worker(&fs_info->endio_meta_workers,
 				   &end_io_wq->work);
@@ -1468,10 +1496,6 @@ static int cleaner_kthread(void *arg)
 	struct btrfs_root *root = arg;
 
 	do {
-		smp_mb();
-		if (root->fs_info->closing)
-			break;
-
 		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
 
 		if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
@@ -1484,11 +1508,9 @@ static int cleaner_kthread(void *arg)
 		if (freezing(current)) {
 			refrigerator();
 		} else {
-			smp_mb();
-			if (root->fs_info->closing)
-				break;
 			set_current_state(TASK_INTERRUPTIBLE);
-			schedule();
+			if (!kthread_should_stop())
+				schedule();
 			__set_current_state(TASK_RUNNING);
 		}
 	} while (!kthread_should_stop());
@@ -1500,36 +1522,40 @@ static int transaction_kthread(void *arg)
 	struct btrfs_root *root = arg;
 	struct btrfs_trans_handle *trans;
 	struct btrfs_transaction *cur;
+	u64 transid;
 	unsigned long now;
 	unsigned long delay;
 	int ret;
 
 	do {
-		smp_mb();
-		if (root->fs_info->closing)
-			break;
-
 		delay = HZ * 30;
 		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
 		mutex_lock(&root->fs_info->transaction_kthread_mutex);
 
-		mutex_lock(&root->fs_info->trans_mutex);
+		spin_lock(&root->fs_info->new_trans_lock);
 		cur = root->fs_info->running_transaction;
 		if (!cur) {
-			mutex_unlock(&root->fs_info->trans_mutex);
+			spin_unlock(&root->fs_info->new_trans_lock);
			goto sleep;
 		}
 
 		now = get_seconds();
-		if (now < cur->start_time || now - cur->start_time < 30) {
-			mutex_unlock(&root->fs_info->trans_mutex);
+		if (!cur->blocked &&
+		    (now < cur->start_time || now - cur->start_time < 30)) {
+			spin_unlock(&root->fs_info->new_trans_lock);
 			delay = HZ * 5;
 			goto sleep;
 		}
-		mutex_unlock(&root->fs_info->trans_mutex);
-		trans = btrfs_start_transaction(root, 1);
-		ret = btrfs_commit_transaction(trans, root);
+		transid = cur->transid;
+		spin_unlock(&root->fs_info->new_trans_lock);
 
+		trans = btrfs_join_transaction(root, 1);
+		if (transid == trans->transid) {
+			ret = btrfs_commit_transaction(trans, root);
+			BUG_ON(ret);
+		} else {
+			btrfs_end_transaction(trans, root);
+		}
 sleep:
 		wake_up_process(root->fs_info->cleaner_kthread);
 		mutex_unlock(&root->fs_info->transaction_kthread_mutex);
@@ -1537,10 +1563,10 @@ sleep:
 		if (freezing(current)) {
 			refrigerator();
 		} else {
-			if (root->fs_info->closing)
-				break;
 			set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout(delay);
+			if (!kthread_should_stop() &&
+			    !btrfs_transaction_blocked(root->fs_info))
+				schedule_timeout(delay);
 			__set_current_state(TASK_RUNNING);
 		}
 	} while (!kthread_should_stop());
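
The reworked transaction_kthread above samples the running transaction under the new_trans_lock spinlock instead of taking trans_mutex, commits a blocked transaction immediately rather than aging it, and re-checks the transid after joining so it never commits a transaction someone else already finished. The skip-or-commit test reduces to a small predicate; a userspace sketch (the 30-second threshold comes from the hunk, everything else is illustrative):

/*
 * Userspace sketch of the commit decision: skip (and poll again in 5s)
 * while the transaction is younger than 30 seconds *and* nobody has
 * marked it blocked; otherwise commit.
 */
#include <stdbool.h>
#include <stdio.h>

static bool commit_now(unsigned long now, unsigned long start_time,
		       bool blocked)
{
	if (!blocked && (now < start_time || now - start_time < 30))
		return false;	/* young and unblocked: wait */
	return true;
}

int main(void)
{
	printf("%d %d %d\n",
	       commit_now(100, 90, false),	/* 10s old, unblocked -> 0 */
	       commit_now(100, 90, true),	/* blocked -> 1            */
	       commit_now(140, 90, false));	/* 50s old -> 1            */
	return 0;
}
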
@@ -1564,10 +1590,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 							 GFP_NOFS);
 	struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
 					       GFP_NOFS);
-	struct btrfs_root *tree_root = kzalloc(sizeof(struct btrfs_root),
-					       GFP_NOFS);
-	struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
-						GFP_NOFS);
+	struct btrfs_root *tree_root = btrfs_sb(sb);
+	struct btrfs_fs_info *fs_info = tree_root->fs_info;
 	struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
 					       GFP_NOFS);
 	struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
@@ -1627,12 +1651,18 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
 	INIT_LIST_HEAD(&fs_info->space_info);
 	btrfs_mapping_init(&fs_info->mapping_tree);
+	btrfs_init_block_rsv(&fs_info->global_block_rsv);
+	btrfs_init_block_rsv(&fs_info->delalloc_block_rsv);
+	btrfs_init_block_rsv(&fs_info->trans_block_rsv);
+	btrfs_init_block_rsv(&fs_info->chunk_block_rsv);
+	btrfs_init_block_rsv(&fs_info->empty_block_rsv);
+	INIT_LIST_HEAD(&fs_info->durable_block_rsv_list);
+	mutex_init(&fs_info->durable_block_rsv_mutex);
 	atomic_set(&fs_info->nr_async_submits, 0);
 	atomic_set(&fs_info->async_delalloc_pages, 0);
 	atomic_set(&fs_info->async_submit_draining, 0);
 	atomic_set(&fs_info->nr_async_bios, 0);
 	fs_info->sb = sb;
-	fs_info->max_extent = (u64)-1;
 	fs_info->max_inline = 8192 * 1024;
 	fs_info->metadata_ratio = 0;
 
@@ -1673,7 +1703,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	insert_inode_hash(fs_info->btree_inode);
 
 	spin_lock_init(&fs_info->block_group_cache_lock);
-	fs_info->block_group_cache_tree.rb_node = NULL;
+	fs_info->block_group_cache_tree = RB_ROOT;
 
 	extent_io_tree_init(&fs_info->freed_extents[0],
 			    fs_info->btree_inode->i_mapping, GFP_NOFS);
@@ -1699,15 +1729,17 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 
 	init_waitqueue_head(&fs_info->transaction_throttle);
 	init_waitqueue_head(&fs_info->transaction_wait);
+	init_waitqueue_head(&fs_info->transaction_blocked_wait);
 	init_waitqueue_head(&fs_info->async_submit_wait);
 
 	__setup_root(4096, 4096, 4096, 4096, tree_root,
 		     fs_info, BTRFS_ROOT_TREE_OBJECTID);
 
-
 	bh = btrfs_read_dev_super(fs_devices->latest_bdev);
-	if (!bh)
+	if (!bh) {
+		err = -EINVAL;
 		goto fail_iput;
+	}
 
 	memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
 	memcpy(&fs_info->super_for_commit, &fs_info->super_copy,
@@ -1720,6 +1752,11 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	if (!btrfs_super_root(disk_super))
 		goto fail_iput;
 
+	/* check FS state, whether FS is broken. */
+	fs_info->fs_state |= btrfs_super_flags(disk_super);
+
+	btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
+
 	ret = btrfs_parse_options(tree_root, options);
 	if (ret) {
 		err = ret;
@@ -1737,10 +1774,10 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	}
 
 	features = btrfs_super_incompat_flags(disk_super);
-	if (!(features & BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF)) {
-		features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
-		btrfs_set_super_incompat_flags(disk_super, features);
-	}
+	features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
+	if (tree_root->fs_info->compress_type & BTRFS_COMPRESS_LZO)
+		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
+	btrfs_set_super_incompat_flags(disk_super, features);
 
 	features = btrfs_super_compat_ro_flags(disk_super) &
 		~BTRFS_FEATURE_COMPAT_RO_SUPP;
@@ -1767,9 +1804,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 			   min_t(u64, fs_devices->num_devices,
 			   fs_info->thread_pool_size),
 			   &fs_info->generic_worker);
-	btrfs_init_workers(&fs_info->enospc_workers, "enospc",
-			   fs_info->thread_pool_size,
-			   &fs_info->generic_worker);
 
 	/* a higher idle thresh on the submit workers makes it much more
 	 * likely that bios will be send down in a sane order to the
@@ -1797,6 +1831,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
 			   fs_info->thread_pool_size,
 			   &fs_info->generic_worker);
+	btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
+			   1, &fs_info->generic_worker);
 
 	/*
 	 * endios are largely parallel and should have a very
@@ -1817,7 +1853,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	btrfs_start_workers(&fs_info->endio_meta_workers, 1);
 	btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
 	btrfs_start_workers(&fs_info->endio_write_workers, 1);
-	btrfs_start_workers(&fs_info->enospc_workers, 1);
+	btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
 
 	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
 	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
@@ -1920,17 +1956,22 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 
 	csum_root->track_dirty = 1;
 
-	btrfs_read_block_groups(extent_root);
-
 	fs_info->generation = generation;
 	fs_info->last_trans_committed = generation;
 	fs_info->data_alloc_profile = (u64)-1;
 	fs_info->metadata_alloc_profile = (u64)-1;
 	fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
+
+	ret = btrfs_read_block_groups(extent_root);
+	if (ret) {
+		printk(KERN_ERR "Failed to read block groups: %d\n", ret);
+		goto fail_block_groups;
+	}
+
 	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
 					       "btrfs-cleaner");
 	if (IS_ERR(fs_info->cleaner_kthread))
-		goto fail_csum_root;
+		goto fail_block_groups;
 
 	fs_info->transaction_kthread = kthread_run(transaction_kthread,
 						   tree_root,
@@ -1946,7 +1987,9 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 		btrfs_set_opt(fs_info->mount_opt, SSD);
 	}
 
-	if (btrfs_super_log_root(disk_super) != 0) {
+	/* do not make disk changes in broken FS */
+	if (btrfs_super_log_root(disk_super) != 0 &&
+	    !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) {
 		u64 bytenr = btrfs_super_log_root(disk_super);
 
 		if (fs_devices->rw_devices == 0) {
@@ -1959,8 +2002,11 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 			btrfs_level_size(tree_root,
 					 btrfs_super_log_root_level(disk_super));
 
-		log_tree_root = kzalloc(sizeof(struct btrfs_root),
-					GFP_NOFS);
+		log_tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
+		if (!log_tree_root) {
+			err = -ENOMEM;
+			goto fail_trans_kthread;
+		}
 
 		__setup_root(nodesize, leafsize, sectorsize, stripesize,
 			     log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
@@ -1981,8 +2027,16 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 		BUG_ON(ret);
 
 	if (!(sb->s_flags & MS_RDONLY)) {
-		ret = btrfs_recover_relocation(tree_root);
+		ret = btrfs_cleanup_fs_roots(fs_info);
 		BUG_ON(ret);
+
+		ret = btrfs_recover_relocation(tree_root);
+		if (ret < 0) {
+			printk(KERN_WARNING
+			       "btrfs: failed to recover relocation\n");
+			err = -EINVAL;
+			goto fail_trans_kthread;
+		}
 	}
 
 	location.objectid = BTRFS_FS_TREE_OBJECTID;
@@ -1992,10 +2046,15 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
 	if (!fs_info->fs_root)
 		goto fail_trans_kthread;
+	if (IS_ERR(fs_info->fs_root)) {
+		err = PTR_ERR(fs_info->fs_root);
+		goto fail_trans_kthread;
+	}
 
 	if (!(sb->s_flags & MS_RDONLY)) {
 		down_read(&fs_info->cleanup_work_sem);
 		btrfs_orphan_cleanup(fs_info->fs_root);
+		btrfs_orphan_cleanup(fs_info->tree_root);
 		up_read(&fs_info->cleanup_work_sem);
 	}
@@ -2013,7 +2072,8 @@ fail_cleaner:
 	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
 
-fail_csum_root:
+fail_block_groups:
+	btrfs_free_block_groups(fs_info);
 	free_extent_buffer(csum_root->node);
 	free_extent_buffer(csum_root->commit_root);
 fail_dev_root:
@@ -2037,8 +2097,8 @@ fail_sb_buffer:
 	btrfs_stop_workers(&fs_info->endio_meta_workers);
 	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
 	btrfs_stop_workers(&fs_info->endio_write_workers);
+	btrfs_stop_workers(&fs_info->endio_freespace_worker);
 	btrfs_stop_workers(&fs_info->submit_workers);
-	btrfs_stop_workers(&fs_info->enospc_workers);
fail_iput:
 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
 	iput(fs_info->btree_inode);
@@ -2066,7 +2126,7 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 	if (uptodate) {
 		set_buffer_uptodate(bh);
 	} else {
-		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
+		if (printk_ratelimit()) {
 			printk(KERN_WARNING "lost page write due to "
 					"I/O error on %s\n",
 				       bdevname(bh->b_bdev, b));
@@ -2203,21 +2263,10 @@ static int write_dev_supers(struct btrfs_device *device,
 			bh->b_end_io = btrfs_end_buffer_write_sync;
 		}
 
-		if (i == last_barrier && do_barriers && device->barriers) {
-			ret = submit_bh(WRITE_BARRIER, bh);
-			if (ret == -EOPNOTSUPP) {
-				printk("btrfs: disabling barriers on dev %s\n",
-				       device->name);
-				set_buffer_uptodate(bh);
-				device->barriers = 0;
-				/* one reference for submit_bh */
-				get_bh(bh);
-				lock_buffer(bh);
-				ret = submit_bh(WRITE_SYNC, bh);
-			}
-		} else {
+		if (i == last_barrier && do_barriers)
+			ret = submit_bh(WRITE_FLUSH_FUA, bh);
+		else
 			ret = submit_bh(WRITE_SYNC, bh);
-		}
 
 		if (ret)
 			errors++;
@@ -2403,11 +2452,11 @@ int btrfs_commit_super(struct btrfs_root *root)
 	down_write(&root->fs_info->cleanup_work_sem);
 	up_write(&root->fs_info->cleanup_work_sem);
 
-	trans = btrfs_start_transaction(root, 1);
+	trans = btrfs_join_transaction(root, 1);
 	ret = btrfs_commit_transaction(trans, root);
 	BUG_ON(ret);
 	/* run commit again to drop the original snapshot */
-	trans = btrfs_start_transaction(root, 1);
+	trans = btrfs_join_transaction(root, 1);
 	btrfs_commit_transaction(trans, root);
 	ret = btrfs_write_and_wait_transaction(NULL, root);
 	BUG_ON(ret);
@@ -2424,15 +2473,36 @@ int close_ctree(struct btrfs_root *root)
 	fs_info->closing = 1;
 	smp_mb();
 
-	kthread_stop(root->fs_info->transaction_kthread);
-	kthread_stop(root->fs_info->cleaner_kthread);
+	btrfs_put_block_group_cache(fs_info);
 
+	/*
+	 * Here come 2 situations when btrfs is broken to flip readonly:
+	 *
+	 * 1. when btrfs flips readonly somewhere else before
+	 * btrfs_commit_super, sb->s_flags has MS_RDONLY flag,
+	 * and btrfs will skip to write sb directly to keep
+	 * ERROR state on disk.
+	 *
+	 * 2. when btrfs flips readonly just in btrfs_commit_super,
+	 * and in such case, btrfs cannot write sb via btrfs_commit_super,
+	 * and since fs_state has been set BTRFS_SUPER_FLAG_ERROR flag,
+	 * btrfs will cleanup all FS resources first and write sb then.
+	 */
 	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
 		ret = btrfs_commit_super(root);
 		if (ret)
 			printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
 	}
 
+	if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+		ret = btrfs_error_commit_super(root);
+		if (ret)
+			printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
+	}
+
+	kthread_stop(root->fs_info->transaction_kthread);
+	kthread_stop(root->fs_info->cleaner_kthread);
+
 	fs_info->closing = 2;
 	smp_mb();
 
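
The comment block added to close_ctree() explains why the superblock writes now happen before the kthreads are stopped: a filesystem that flipped read-only on error must still get its ERROR state to disk, either by skipping the normal commit (case 1) or via the new btrfs_error_commit_super() (case 2). A toy model of the resulting control flow, using stand-in names rather than kernel API:

/*
 * Toy model of the reordered shutdown: superblock writes happen first,
 * kthreads are stopped afterwards.  Flags and names are stand-ins.
 */
#include <stdio.h>

enum { SB_RDONLY_FLAG = 1, FS_STATE_ERROR = 2 };

static void close_ctree_order(unsigned sb_flags, unsigned fs_state)
{
	if (!(sb_flags & SB_RDONLY_FLAG))
		puts("btrfs_commit_super");	  /* normal commit path */
	if (fs_state & FS_STATE_ERROR)
		puts("btrfs_error_commit_super"); /* cleanup, then write sb */
	puts("kthread_stop(transaction_kthread)");
	puts("kthread_stop(cleaner_kthread)");
}

int main(void)
{
	/* an errored filesystem that was not remounted read-only */
	close_ctree_order(0, FS_STATE_ERROR);
	return 0;
}
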
@@ -2470,8 +2540,8 @@ int close_ctree(struct btrfs_root *root)
 	btrfs_stop_workers(&fs_info->endio_meta_workers);
 	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
 	btrfs_stop_workers(&fs_info->endio_write_workers);
+	btrfs_stop_workers(&fs_info->endio_freespace_worker);
 	btrfs_stop_workers(&fs_info->submit_workers);
-	btrfs_stop_workers(&fs_info->enospc_workers);
 
 	btrfs_close_devices(fs_info->fs_devices);
 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
@@ -2492,7 +2562,8 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
 	int ret;
 	struct inode *btree_inode = buf->first_page->mapping->host;
 
-	ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf);
+	ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf,
+				     NULL);
 	if (!ret)
 		return ret;
 
@@ -2600,6 +2671,352 @@ out:
 	return 0;
 }
 
+static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
+				    int read_only)
+{
+	if (read_only)
+		return;
+
+	if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
+		printk(KERN_WARNING "warning: mount fs with errors, "
+		       "running btrfsck is recommended\n");
+}
+
+int btrfs_error_commit_super(struct btrfs_root *root)
+{
+	int ret;
+
+	mutex_lock(&root->fs_info->cleaner_mutex);
+	btrfs_run_delayed_iputs(root);
+	mutex_unlock(&root->fs_info->cleaner_mutex);
+
+	down_write(&root->fs_info->cleanup_work_sem);
+	up_write(&root->fs_info->cleanup_work_sem);
+
+	/* cleanup FS via transaction */
+	btrfs_cleanup_transaction(root);
+
+	ret = write_ctree_super(NULL, root, 0);
+
+	return ret;
+}
+
+static int btrfs_destroy_ordered_operations(struct btrfs_root *root)
+{
+	struct btrfs_inode *btrfs_inode;
+	struct list_head splice;
+
+	INIT_LIST_HEAD(&splice);
+
+	mutex_lock(&root->fs_info->ordered_operations_mutex);
+	spin_lock(&root->fs_info->ordered_extent_lock);
+
+	list_splice_init(&root->fs_info->ordered_operations, &splice);
+	while (!list_empty(&splice)) {
+		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
+					 ordered_operations);
+
+		list_del_init(&btrfs_inode->ordered_operations);
+
+		btrfs_invalidate_inodes(btrfs_inode->root);
+	}
+
+	spin_unlock(&root->fs_info->ordered_extent_lock);
+	mutex_unlock(&root->fs_info->ordered_operations_mutex);
+
+	return 0;
+}
+
+static int btrfs_destroy_ordered_extents(struct btrfs_root *root)
+{
+	struct list_head splice;
+	struct btrfs_ordered_extent *ordered;
+	struct inode *inode;
+
+	INIT_LIST_HEAD(&splice);
+
+	spin_lock(&root->fs_info->ordered_extent_lock);
+
+	list_splice_init(&root->fs_info->ordered_extents, &splice);
+	while (!list_empty(&splice)) {
+		ordered = list_entry(splice.next, struct btrfs_ordered_extent,
+				     root_extent_list);
+
+		list_del_init(&ordered->root_extent_list);
+		atomic_inc(&ordered->refs);
+
+		/* the inode may be getting freed (in sys_unlink path). */
+		inode = igrab(ordered->inode);
+
+		spin_unlock(&root->fs_info->ordered_extent_lock);
+		if (inode)
+			iput(inode);
+
+		atomic_set(&ordered->refs, 1);
+		btrfs_put_ordered_extent(ordered);
+
+		spin_lock(&root->fs_info->ordered_extent_lock);
+	}
+
+	spin_unlock(&root->fs_info->ordered_extent_lock);
+
+	return 0;
+}
+
+static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+				      struct btrfs_root *root)
+{
+	struct rb_node *node;
+	struct btrfs_delayed_ref_root *delayed_refs;
+	struct btrfs_delayed_ref_node *ref;
+	int ret = 0;
+
+	delayed_refs = &trans->delayed_refs;
+
+	spin_lock(&delayed_refs->lock);
+	if (delayed_refs->num_entries == 0) {
+		printk(KERN_INFO "delayed_refs has NO entry\n");
+		return ret;
+	}
+
+	node = rb_first(&delayed_refs->root);
+	while (node) {
+		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
+		node = rb_next(node);
+
+		ref->in_tree = 0;
+		rb_erase(&ref->rb_node, &delayed_refs->root);
+		delayed_refs->num_entries--;
+
+		atomic_set(&ref->refs, 1);
+		if (btrfs_delayed_ref_is_head(ref)) {
+			struct btrfs_delayed_ref_head *head;
+
+			head = btrfs_delayed_node_to_head(ref);
+			mutex_lock(&head->mutex);
+			kfree(head->extent_op);
+			delayed_refs->num_heads--;
+			if (list_empty(&head->cluster))
+				delayed_refs->num_heads_ready--;
+			list_del_init(&head->cluster);
+			mutex_unlock(&head->mutex);
+		}
+
+		spin_unlock(&delayed_refs->lock);
+		btrfs_put_delayed_ref(ref);
+
+		cond_resched();
+		spin_lock(&delayed_refs->lock);
+	}
+
+	spin_unlock(&delayed_refs->lock);
+
+	return ret;
+}
+
+static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)
+{
+	struct btrfs_pending_snapshot *snapshot;
+	struct list_head splice;
+
+	INIT_LIST_HEAD(&splice);
+
+	list_splice_init(&t->pending_snapshots, &splice);
+
+	while (!list_empty(&splice)) {
+		snapshot = list_entry(splice.next,
+				      struct btrfs_pending_snapshot,
+				      list);
+
+		list_del_init(&snapshot->list);
+
+		kfree(snapshot);
+	}
+
+	return 0;
+}
+
+static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
+{
+	struct btrfs_inode *btrfs_inode;
+	struct list_head splice;
+
+	INIT_LIST_HEAD(&splice);
+
+	list_splice_init(&root->fs_info->delalloc_inodes, &splice);
+
+	spin_lock(&root->fs_info->delalloc_lock);
+
+	while (!list_empty(&splice)) {
+		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
+					 delalloc_inodes);
+
+		list_del_init(&btrfs_inode->delalloc_inodes);
+
+		btrfs_invalidate_inodes(btrfs_inode->root);
+	}
+
+	spin_unlock(&root->fs_info->delalloc_lock);
+
+	return 0;
+}
+
+static int btrfs_destroy_marked_extents(struct btrfs_root *root,
+					struct extent_io_tree *dirty_pages,
+					int mark)
+{
+	int ret;
+	struct page *page;
+	struct inode *btree_inode = root->fs_info->btree_inode;
+	struct extent_buffer *eb;
+	u64 start = 0;
+	u64 end;
+	u64 offset;
+	unsigned long index;
+
+	while (1) {
+		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
+					    mark);
+		if (ret)
+			break;
+
+		clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
+		while (start <= end) {
+			index = start >> PAGE_CACHE_SHIFT;
+			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
+			page = find_get_page(btree_inode->i_mapping, index);
+			if (!page)
+				continue;
+			offset = page_offset(page);
+
+			spin_lock(&dirty_pages->buffer_lock);
+			eb = radix_tree_lookup(
+			     &(&BTRFS_I(page->mapping->host)->io_tree)->buffer,
+					       offset >> PAGE_CACHE_SHIFT);
+			spin_unlock(&dirty_pages->buffer_lock);
+			if (eb) {
+				ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY,
+							 &eb->bflags);
+				atomic_set(&eb->refs, 1);
+			}
+			if (PageWriteback(page))
+				end_page_writeback(page);
+
+			lock_page(page);
+			if (PageDirty(page)) {
+				clear_page_dirty_for_io(page);
+				spin_lock_irq(&page->mapping->tree_lock);
+				radix_tree_tag_clear(&page->mapping->page_tree,
+						     page_index(page),
+						     PAGECACHE_TAG_DIRTY);
+				spin_unlock_irq(&page->mapping->tree_lock);
+			}
+
+			page->mapping->a_ops->invalidatepage(page, 0);
+			unlock_page(page);
+		}
+	}
+
+	return ret;
+}
+
+static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
+				       struct extent_io_tree *pinned_extents)
+{
+	struct extent_io_tree *unpin;
+	u64 start;
+	u64 end;
+	int ret;
+
+	unpin = pinned_extents;
+	while (1) {
+		ret = find_first_extent_bit(unpin, 0, &start, &end,
+					    EXTENT_DIRTY);
+		if (ret)
+			break;
+
+		/* opt_discard */
+		ret = btrfs_error_discard_extent(root, start, end + 1 - start);
+
+		clear_extent_dirty(unpin, start, end, GFP_NOFS);
+		btrfs_error_unpin_extent_range(root, start, end);
+		cond_resched();
+	}
+
+	return 0;
+}
+
+static int btrfs_cleanup_transaction(struct btrfs_root *root)
+{
+	struct btrfs_transaction *t;
+	LIST_HEAD(list);
+
+	WARN_ON(1);
+
+	mutex_lock(&root->fs_info->trans_mutex);
+	mutex_lock(&root->fs_info->transaction_kthread_mutex);
+
+	list_splice_init(&root->fs_info->trans_list, &list);
+	while (!list_empty(&list)) {
+		t = list_entry(list.next, struct btrfs_transaction, list);
+		if (!t)
+			break;
+
+		btrfs_destroy_ordered_operations(root);
+
+		btrfs_destroy_ordered_extents(root);
+
+		btrfs_destroy_delayed_refs(t, root);
+
+		btrfs_block_rsv_release(root,
+					&root->fs_info->trans_block_rsv,
+					t->dirty_pages.dirty_bytes);
+
+		/* FIXME: cleanup wait for commit */
+		t->in_commit = 1;
+		t->blocked = 1;
+		if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
+			wake_up(&root->fs_info->transaction_blocked_wait);
+
+		t->blocked = 0;
+		if (waitqueue_active(&root->fs_info->transaction_wait))
+			wake_up(&root->fs_info->transaction_wait);
+		mutex_unlock(&root->fs_info->trans_mutex);
+
+		mutex_lock(&root->fs_info->trans_mutex);
+		t->commit_done = 1;
+		if (waitqueue_active(&t->commit_wait))
+			wake_up(&t->commit_wait);
+		mutex_unlock(&root->fs_info->trans_mutex);
+
+		mutex_lock(&root->fs_info->trans_mutex);
+
+		btrfs_destroy_pending_snapshots(t);
+
+		btrfs_destroy_delalloc_inodes(root);
+
+		spin_lock(&root->fs_info->new_trans_lock);
+		root->fs_info->running_transaction = NULL;
+		spin_unlock(&root->fs_info->new_trans_lock);
+
+		btrfs_destroy_marked_extents(root, &t->dirty_pages,
+					     EXTENT_DIRTY);
+
+		btrfs_destroy_pinned_extent(root,
+					    root->fs_info->pinned_extents);
+
+		t->use_count = 0;
+		list_del_init(&t->list);
+		memset(t, 0, sizeof(*t));
+		kmem_cache_free(btrfs_transaction_cachep, t);
+	}
+
+	mutex_unlock(&root->fs_info->transaction_kthread_mutex);
+	mutex_unlock(&root->fs_info->trans_mutex);
+
+	return 0;
+}
+
 static struct extent_io_ops btree_extent_io_ops = {
 	.write_cache_pages_lock_hook = btree_lock_page_hook,
 	.readpage_end_io_hook = btree_readpage_end_io_hook,
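
One detail worth noting in btrfs_destroy_marked_extents() above: the inner loop visits every page backing a dirty byte range by recomputing the page index from a running start offset, bumping the offset to the first byte of the next page before each lookup, so no page is handled twice. A userspace rendering of just that walk, assuming 4K pages for the demo:

/*
 * Userspace rendering of the page walk in btrfs_destroy_marked_extents():
 * derive the page index from the running offset, then advance the offset
 * to the start of the next page before looking the page up.
 */
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12	/* 4K pages for the demo */

static void walk_pages(unsigned long long start, unsigned long long end)
{
	while (start <= end) {
		unsigned long long index = start >> SKETCH_PAGE_SHIFT;

		start = (index + 1) << SKETCH_PAGE_SHIFT;
		printf("visit page index %llu\n", index);
	}
}

int main(void)
{
	walk_pages(5000, 20000);	/* visits page indexes 1 through 4 */
	return 0;
}
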