author    Steve French <sfrench@us.ibm.com>	2011-12-16 01:39:20 -0500
committer Steve French <sfrench@us.ibm.com>	2011-12-16 01:39:20 -0500
commit    aaf015890754d58dcb71a4aa44ed246bb082bcf6
tree      17b51ff707fd1b3efec3a3ab872f0d7a7416aca5 /fs/btrfs
parent    9c32c63bb70b2fafc3b18bee29959c3bf245ceba
parent    8def5f51b012efb00e77ba2d04696cc0aadd0609

    Merge branch 'master' of git+ssh://git.samba.org/data/git/sfrench/cifs-2.6
Diffstat (limited to 'fs/btrfs')
-rw-r--r--	fs/btrfs/backref.c		  2
-rw-r--r--	fs/btrfs/btrfs_inode.h		  4
-rw-r--r--	fs/btrfs/ctree.c		 17
-rw-r--r--	fs/btrfs/ctree.h		  8
-rw-r--r--	fs/btrfs/delayed-inode.c	 58
-rw-r--r--	fs/btrfs/disk-io.c		189
-rw-r--r--	fs/btrfs/extent-tree.c		295
-rw-r--r--	fs/btrfs/extent_io.c		 60
-rw-r--r--	fs/btrfs/extent_io.h		  2
-rw-r--r--	fs/btrfs/free-space-cache.c	 82
-rw-r--r--	fs/btrfs/inode-map.c		 28
-rw-r--r--	fs/btrfs/inode.c		 92
-rw-r--r--	fs/btrfs/ioctl.c		 17
-rw-r--r--	fs/btrfs/relocation.c		  2
-rw-r--r--	fs/btrfs/scrub.c		 71
-rw-r--r--	fs/btrfs/super.c		 93
-rw-r--r--	fs/btrfs/transaction.c		 12
-rw-r--r--	fs/btrfs/volumes.c		  7
-rw-r--r--	fs/btrfs/volumes.h		  6
19 files changed, 678 insertions(+), 367 deletions(-)
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 8855aad3929c..22c64fff1bd5 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -683,7 +683,7 @@ static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
 		return PTR_ERR(fspath);
 
 	if (fspath > fspath_min) {
-		ipath->fspath->val[i] = (u64)fspath;
+		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
 		++ipath->fspath->elem_cnt;
 		ipath->fspath->bytes_left = fspath - fspath_min;
 	} else {
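
The only functional change in backref.c is the cast: on 32-bit kernels a direct pointer-to-u64 cast makes gcc warn about a pointer-to-integer conversion of a different size, so the pointer goes through unsigned long first, which always matches the pointer width in the kernel's data models. A minimal userspace sketch of the same idiom (plain C, not the btrfs code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	char buf[16];
	char *p = buf;

	/* uint64_t v = (uint64_t)p; warns on 32-bit builds:
	 * "cast from pointer to integer of different size" */
	uint64_t v = (uint64_t)(unsigned long)p;	/* clean on 32- and 64-bit */

	printf("%llu\n", (unsigned long long)v);
	return 0;
}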
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 5a5d325a3935..634608d2a6d0 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -147,14 +147,12 @@ struct btrfs_inode {
 	 * the btrfs file release call will add this inode to the
 	 * ordered operations list so that we make sure to flush out any
 	 * new data the application may have written before commit.
-	 *
-	 * yes, its silly to have a single bitflag, but we might grow more
-	 * of these.
 	 */
 	unsigned ordered_data_close:1;
 	unsigned orphan_meta_reserved:1;
 	unsigned dummy_inode:1;
 	unsigned in_defrag:1;
+	unsigned delalloc_meta_reserved:1;
 
 	/*
 	 * always compress this one file
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 0fe615e4ea38..dede441bdeee 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -514,10 +514,25 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
 				   struct btrfs_root *root,
 				   struct extent_buffer *buf)
 {
+	/* ensure we can see the force_cow */
+	smp_rmb();
+
+	/*
+	 * We do not need to cow a block if
+	 * 1) this block is not created or changed in this transaction;
+	 * 2) this block does not belong to TREE_RELOC tree;
+	 * 3) the root is not forced COW.
+	 *
+	 * What is forced COW:
+	 *    when we create snapshot during committing the transaction,
+	 *    after we've finished copying src root, we must COW the shared
+	 *    block to ensure the metadata consistency.
+	 */
 	if (btrfs_header_generation(buf) == trans->transid &&
 	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
 	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
-	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
+	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
+	    !root->force_cow)
 		return 0;
 	return 1;
 }
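
The smp_rmb() here only works because the code that sets root->force_cow publishes the flag behind a matching write barrier. A rough userspace analogue of that pairing, using C11 release/acquire atomics in place of the kernel's smp_wmb()/smp_rmb(); the struct and function names are illustrative, not the btrfs API:

#include <stdatomic.h>
#include <stdbool.h>

struct root {
	atomic_bool force_cow;
};

/* writer side, roughly what snapshot creation does before commit:
 * the release store plays the role of "set the flag, then smp_wmb()" */
static void publish_force_cow(struct root *r)
{
	atomic_store_explicit(&r->force_cow, true, memory_order_release);
}

/* reader side, roughly should_cow_block(): the acquire load plays
 * the role of the smp_rmb() before reading the flag */
static bool must_force_cow(struct root *r)
{
	return atomic_load_explicit(&r->force_cow, memory_order_acquire);
}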
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index b9ba59ff9292..50634abef9b4 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -848,7 +848,8 @@ struct btrfs_free_cluster {
 enum btrfs_caching_type {
 	BTRFS_CACHE_NO = 0,
 	BTRFS_CACHE_STARTED = 1,
-	BTRFS_CACHE_FINISHED = 2,
+	BTRFS_CACHE_FAST = 2,
+	BTRFS_CACHE_FINISHED = 3,
 };
 
 enum btrfs_disk_cache_state {
@@ -1271,6 +1272,8 @@ struct btrfs_root {
 	 * for stat. It may be used for more later
 	 */
 	dev_t anon_dev;
+
+	int force_cow;
 };
 
 struct btrfs_ioctl_defrag_range_args {
@@ -2366,6 +2369,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root,
 int btrfs_block_rsv_refill(struct btrfs_root *root,
 			   struct btrfs_block_rsv *block_rsv,
 			   u64 min_reserved);
+int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
+				   struct btrfs_block_rsv *block_rsv,
+				   u64 min_reserved);
 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
 			    struct btrfs_block_rsv *dst_rsv,
 			    u64 num_bytes);
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 3a1b939c9ae2..5b163572e0ca 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -617,12 +617,14 @@ static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
 static int btrfs_delayed_inode_reserve_metadata(
 					struct btrfs_trans_handle *trans,
 					struct btrfs_root *root,
+					struct inode *inode,
 					struct btrfs_delayed_node *node)
 {
 	struct btrfs_block_rsv *src_rsv;
 	struct btrfs_block_rsv *dst_rsv;
 	u64 num_bytes;
 	int ret;
+	int release = false;
 
 	src_rsv = trans->block_rsv;
 	dst_rsv = &root->fs_info->delayed_block_rsv;
@@ -652,12 +654,65 @@ static int btrfs_delayed_inode_reserve_metadata(
 		if (!ret)
 			node->bytes_reserved = num_bytes;
 		return ret;
+	} else if (src_rsv == &root->fs_info->delalloc_block_rsv) {
+		spin_lock(&BTRFS_I(inode)->lock);
+		if (BTRFS_I(inode)->delalloc_meta_reserved) {
+			BTRFS_I(inode)->delalloc_meta_reserved = 0;
+			spin_unlock(&BTRFS_I(inode)->lock);
+			release = true;
+			goto migrate;
+		}
+		spin_unlock(&BTRFS_I(inode)->lock);
+
+		/* Ok we didn't have space pre-reserved.  This shouldn't happen
+		 * too often but it can happen if we do delalloc to an existing
+		 * inode which gets dirtied because of the time update, and then
+		 * isn't touched again until after the transaction commits and
+		 * then we try to write out the data.  First try to be nice and
+		 * reserve something strictly for us.  If not be a pain and try
+		 * to steal from the delalloc block rsv.
+		 */
+		ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
+		if (!ret)
+			goto out;
+
+		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
+		if (!ret)
+			goto out;
+
+		/*
+		 * Ok this is a problem, let's just steal from the global rsv
+		 * since this really shouldn't happen that often.
+		 */
+		WARN_ON(1);
+		ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
+					      dst_rsv, num_bytes);
+		goto out;
 	}
 
+migrate:
 	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
+
+out:
+	/*
+	 * Migrate only takes a reservation, it doesn't touch the size of the
+	 * block_rsv. This is to simplify people who don't normally have things
+	 * migrated from their block rsv. If they go to release their
+	 * reservation, that will decrease the size as well, so if migrate
+	 * reduced size we'd end up with a negative size. But for the
+	 * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
+	 * but we could in fact do this reserve/migrate dance several times
+	 * between the time we did the original reservation and we'd clean it
+	 * up. So to take care of this, release the space for the meta
+	 * reservation here. I think it may be time for a documentation page on
+	 * how block rsvs work.
+	 */
 	if (!ret)
 		node->bytes_reserved = num_bytes;
 
+	if (release)
+		btrfs_block_rsv_release(root, src_rsv, num_bytes);
+
 	return ret;
 }
 
@@ -1708,7 +1763,8 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
 		goto release_node;
 	}
 
-	ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
+	ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
+						   delayed_node);
 	if (ret)
 		goto release_node;
 
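
The new else-if branch gives the inode-update reservation a fallback chain: use the space the delalloc path pre-reserved if it is still marked, otherwise try a fresh no-flush reservation, then a migration from the source rsv, and only then (behind a WARN_ON) steal from the global rsv. A toy userspace model of that try-in-order pattern; pool names and sizes are made up for illustration:

#include <stdio.h>

struct pool {
	const char *name;
	long avail;
};

/* walk the candidate pools in priority order until one can cover us */
static struct pool *reserve(struct pool **order, int n, long bytes)
{
	for (int i = 0; i < n; i++) {
		if (order[i]->avail >= bytes) {
			order[i]->avail -= bytes;
			return order[i];
		}
	}
	return NULL;
}

int main(void)
{
	struct pool fresh  = { "dedicated (noflush)", 0 };	/* often empty under pressure */
	struct pool src    = { "source rsv", 4096 };
	struct pool global = { "global rsv", 1 << 20 };
	struct pool *order[] = { &fresh, &src, &global };

	struct pool *got = reserve(order, 3, 8192);
	printf("reserved from: %s\n", got ? got->name : "nowhere");
	return 0;
}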
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 102c176fc29c..632f8f3cc9db 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -620,7 +620,7 @@ out:
 
 static int btree_io_failed_hook(struct bio *failed_bio,
 			 struct page *page, u64 start, u64 end,
-			 u64 mirror_num, struct extent_state *state)
+			 int mirror_num, struct extent_state *state)
 {
 	struct extent_io_tree *tree;
 	unsigned long len;
@@ -1890,31 +1890,32 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	u64 features;
 	struct btrfs_key location;
 	struct buffer_head *bh;
-	struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root),
-						 GFP_NOFS);
-	struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
-					       GFP_NOFS);
+	struct btrfs_super_block *disk_super;
 	struct btrfs_root *tree_root = btrfs_sb(sb);
-	struct btrfs_fs_info *fs_info = NULL;
-	struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
-						GFP_NOFS);
-	struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
-					      GFP_NOFS);
+	struct btrfs_fs_info *fs_info = tree_root->fs_info;
+	struct btrfs_root *extent_root;
+	struct btrfs_root *csum_root;
+	struct btrfs_root *chunk_root;
+	struct btrfs_root *dev_root;
 	struct btrfs_root *log_tree_root;
-
 	int ret;
 	int err = -EINVAL;
 	int num_backups_tried = 0;
 	int backup_index = 0;
 
-	struct btrfs_super_block *disk_super;
+	extent_root = fs_info->extent_root =
+			kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
+	csum_root = fs_info->csum_root =
+			kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
+	chunk_root = fs_info->chunk_root =
+			kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
+	dev_root = fs_info->dev_root =
+			kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
 
-	if (!extent_root || !tree_root || !tree_root->fs_info ||
-	    !chunk_root || !dev_root || !csum_root) {
+	if (!extent_root || !csum_root || !chunk_root || !dev_root) {
 		err = -ENOMEM;
 		goto fail;
 	}
-	fs_info = tree_root->fs_info;
 
 	ret = init_srcu_struct(&fs_info->subvol_srcu);
 	if (ret) {
@@ -1954,12 +1955,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	mutex_init(&fs_info->reloc_mutex);
 
 	init_completion(&fs_info->kobj_unregister);
-	fs_info->tree_root = tree_root;
-	fs_info->extent_root = extent_root;
-	fs_info->csum_root = csum_root;
-	fs_info->chunk_root = chunk_root;
-	fs_info->dev_root = dev_root;
-	fs_info->fs_devices = fs_devices;
 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
 	INIT_LIST_HEAD(&fs_info->space_info);
 	btrfs_mapping_init(&fs_info->mapping_tree);
@@ -2465,21 +2460,20 @@ fail_sb_buffer:
 	btrfs_stop_workers(&fs_info->caching_workers);
 fail_alloc:
 fail_iput:
+	btrfs_mapping_tree_free(&fs_info->mapping_tree);
+
 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
 	iput(fs_info->btree_inode);
-
-	btrfs_close_devices(fs_info->fs_devices);
-	btrfs_mapping_tree_free(&fs_info->mapping_tree);
 fail_bdi:
 	bdi_destroy(&fs_info->bdi);
 fail_srcu:
 	cleanup_srcu_struct(&fs_info->subvol_srcu);
 fail:
+	btrfs_close_devices(fs_info->fs_devices);
 	free_fs_info(fs_info);
 	return ERR_PTR(err);
 
 recovery_tree_root:
-
 	if (!btrfs_test_opt(tree_root, RECOVERY))
 		goto fail_tree_roots;
 
@@ -2579,22 +2573,10 @@ static int write_dev_supers(struct btrfs_device *device,
 	int errors = 0;
 	u32 crc;
 	u64 bytenr;
-	int last_barrier = 0;
 
 	if (max_mirrors == 0)
 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
 
-	/* make sure only the last submit_bh does a barrier */
-	if (do_barriers) {
-		for (i = 0; i < max_mirrors; i++) {
-			bytenr = btrfs_sb_offset(i);
-			if (bytenr + BTRFS_SUPER_INFO_SIZE >=
-			    device->total_bytes)
-				break;
-			last_barrier = i;
-		}
-	}
-
 	for (i = 0; i < max_mirrors; i++) {
 		bytenr = btrfs_sb_offset(i);
 		if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
@@ -2640,17 +2622,136 @@ static int write_dev_supers(struct btrfs_device *device,
 			bh->b_end_io = btrfs_end_buffer_write_sync;
 		}
 
-		if (i == last_barrier && do_barriers)
-			ret = submit_bh(WRITE_FLUSH_FUA, bh);
-		else
-			ret = submit_bh(WRITE_SYNC, bh);
-
+		/*
+		 * we fua the first super.  The others we allow
+		 * to go down lazy.
+		 */
+		ret = submit_bh(WRITE_FUA, bh);
 		if (ret)
 			errors++;
 	}
 	return errors < i ? 0 : -1;
 }
 
+/*
+ * endio for the write_dev_flush, this will wake anyone waiting
+ * for the barrier when it is done
+ */
+static void btrfs_end_empty_barrier(struct bio *bio, int err)
+{
+	if (err) {
+		if (err == -EOPNOTSUPP)
+			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
+		clear_bit(BIO_UPTODATE, &bio->bi_flags);
+	}
+	if (bio->bi_private)
+		complete(bio->bi_private);
+	bio_put(bio);
+}
+
+/*
+ * trigger flushes for one of the devices.  If you pass wait == 0, the flushes
+ * are sent down.  With wait == 1, it waits for the previous flush.
+ *
+ * any device where the flush fails with eopnotsupp is flagged as not-barrier
+ * capable
+ */
+static int write_dev_flush(struct btrfs_device *device, int wait)
+{
+	struct bio *bio;
+	int ret = 0;
+
+	if (device->nobarriers)
+		return 0;
+
+	if (wait) {
+		bio = device->flush_bio;
+		if (!bio)
+			return 0;
+
+		wait_for_completion(&device->flush_wait);
+
+		if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
+			printk("btrfs: disabling barriers on dev %s\n",
+			       device->name);
+			device->nobarriers = 1;
+		}
+		if (!bio_flagged(bio, BIO_UPTODATE)) {
+			ret = -EIO;
+		}
+
+		/* drop the reference from the wait == 0 run */
+		bio_put(bio);
+		device->flush_bio = NULL;
+
+		return ret;
+	}
+
+	/*
+	 * one reference for us, and we leave it for the
+	 * caller
+	 */
+	device->flush_bio = NULL;
+	bio = bio_alloc(GFP_NOFS, 0);
+	if (!bio)
+		return -ENOMEM;
+
+	bio->bi_end_io = btrfs_end_empty_barrier;
+	bio->bi_bdev = device->bdev;
+	init_completion(&device->flush_wait);
+	bio->bi_private = &device->flush_wait;
+	device->flush_bio = bio;
+
+	bio_get(bio);
+	submit_bio(WRITE_FLUSH, bio);
+
+	return 0;
+}
+
+/*
+ * send an empty flush down to each device in parallel,
+ * then wait for them
+ */
+static int barrier_all_devices(struct btrfs_fs_info *info)
+{
+	struct list_head *head;
+	struct btrfs_device *dev;
+	int errors = 0;
+	int ret;
+
+	/* send down all the barriers */
+	head = &info->fs_devices->devices;
+	list_for_each_entry_rcu(dev, head, dev_list) {
+		if (!dev->bdev) {
+			errors++;
+			continue;
+		}
+		if (!dev->in_fs_metadata || !dev->writeable)
+			continue;
+
+		ret = write_dev_flush(dev, 0);
+		if (ret)
+			errors++;
+	}
+
+	/* wait for all the barriers */
+	list_for_each_entry_rcu(dev, head, dev_list) {
+		if (!dev->bdev) {
+			errors++;
+			continue;
+		}
+		if (!dev->in_fs_metadata || !dev->writeable)
+			continue;
+
+		ret = write_dev_flush(dev, 1);
+		if (ret)
+			errors++;
+	}
+	if (errors)
+		return -EIO;
+	return 0;
+}
+
 int write_all_supers(struct btrfs_root *root, int max_mirrors)
 {
 	struct list_head *head;
@@ -2672,6 +2773,10 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
 
 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
 	head = &root->fs_info->fs_devices->devices;
+
+	if (do_barriers)
+		barrier_all_devices(root->fs_info);
+
 	list_for_each_entry_rcu(dev, head, dev_list) {
 		if (!dev->bdev) {
 			total_errors++;
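
barrier_all_devices() gets its speed from the two-pass shape: the first loop starts a flush on every device, the second waits, so the flushes overlap instead of running one after another. A userspace sketch of the same submit-then-wait pattern, using POSIX AIO's aio_fsync() as a stand-in for submit_bio(WRITE_FLUSH, ...); link with -lrt on glibc:

#include <aio.h>
#include <errno.h>
#include <fcntl.h>

#define MAX_DEVS 16

static int flush_all(int *fds, int nr)
{
	struct aiocb cbs[MAX_DEVS];
	int submitted[MAX_DEVS] = { 0 };
	int i, errors = 0;

	if (nr > MAX_DEVS)
		return -EINVAL;

	/* pass 1: send down all the flushes */
	for (i = 0; i < nr; i++) {
		cbs[i] = (struct aiocb){ .aio_fildes = fds[i] };
		if (aio_fsync(O_SYNC, &cbs[i]) == 0)
			submitted[i] = 1;
		else
			errors++;
	}

	/* pass 2: wait for each one to finish and collect errors */
	for (i = 0; i < nr; i++) {
		const struct aiocb *one[1] = { &cbs[i] };

		if (!submitted[i])
			continue;
		while (aio_error(&cbs[i]) == EINPROGRESS)
			aio_suspend(one, 1, NULL);
		if (aio_return(&cbs[i]) != 0)
			errors++;
	}
	return errors ? -EIO : 0;
}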
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9879bd474632..2ad813674d77 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -467,13 +467,59 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 			    struct btrfs_root *root,
 			    int load_cache_only)
 {
+	DEFINE_WAIT(wait);
 	struct btrfs_fs_info *fs_info = cache->fs_info;
 	struct btrfs_caching_control *caching_ctl;
 	int ret = 0;
 
-	smp_mb();
-	if (cache->cached != BTRFS_CACHE_NO)
+	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
+	BUG_ON(!caching_ctl);
+
+	INIT_LIST_HEAD(&caching_ctl->list);
+	mutex_init(&caching_ctl->mutex);
+	init_waitqueue_head(&caching_ctl->wait);
+	caching_ctl->block_group = cache;
+	caching_ctl->progress = cache->key.objectid;
+	atomic_set(&caching_ctl->count, 1);
+	caching_ctl->work.func = caching_thread;
+
+	spin_lock(&cache->lock);
+	/*
+	 * This should be a rare occasion, but this could happen I think in the
+	 * case where one thread starts to load the space cache info, and then
+	 * some other thread starts a transaction commit which tries to do an
+	 * allocation while the other thread is still loading the space cache
+	 * info.  The previous loop should have kept us from choosing this block
+	 * group, but if we've moved to the state where we will wait on caching
+	 * block groups we need to first check if we're doing a fast load here,
+	 * so we can wait for it to finish, otherwise we could end up allocating
+	 * from a block group whose cache gets evicted for one reason or
+	 * another.
+	 */
+	while (cache->cached == BTRFS_CACHE_FAST) {
+		struct btrfs_caching_control *ctl;
+
+		ctl = cache->caching_ctl;
+		atomic_inc(&ctl->count);
+		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
+		spin_unlock(&cache->lock);
+
+		schedule();
+
+		finish_wait(&ctl->wait, &wait);
+		put_caching_control(ctl);
+		spin_lock(&cache->lock);
+	}
+
+	if (cache->cached != BTRFS_CACHE_NO) {
+		spin_unlock(&cache->lock);
+		kfree(caching_ctl);
 		return 0;
+	}
+	WARN_ON(cache->caching_ctl);
+	cache->caching_ctl = caching_ctl;
+	cache->cached = BTRFS_CACHE_FAST;
+	spin_unlock(&cache->lock);
 
 	/*
 	 * We can't do the read from on-disk cache during a commit since we need
@@ -484,56 +530,51 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 	if (trans && (!trans->transaction->in_commit) &&
 	    (root && root != root->fs_info->tree_root) &&
 	    btrfs_test_opt(root, SPACE_CACHE)) {
-		spin_lock(&cache->lock);
-		if (cache->cached != BTRFS_CACHE_NO) {
-			spin_unlock(&cache->lock);
-			return 0;
-		}
-		cache->cached = BTRFS_CACHE_STARTED;
-		spin_unlock(&cache->lock);
-
 		ret = load_free_space_cache(fs_info, cache);
 
 		spin_lock(&cache->lock);
 		if (ret == 1) {
+			cache->caching_ctl = NULL;
 			cache->cached = BTRFS_CACHE_FINISHED;
 			cache->last_byte_to_unpin = (u64)-1;
 		} else {
-			cache->cached = BTRFS_CACHE_NO;
+			if (load_cache_only) {
+				cache->caching_ctl = NULL;
+				cache->cached = BTRFS_CACHE_NO;
+			} else {
+				cache->cached = BTRFS_CACHE_STARTED;
+			}
 		}
 		spin_unlock(&cache->lock);
+		wake_up(&caching_ctl->wait);
 		if (ret == 1) {
+			put_caching_control(caching_ctl);
 			free_excluded_extents(fs_info->extent_root, cache);
 			return 0;
 		}
+	} else {
+		/*
+		 * We are not going to do the fast caching, set cached to the
+		 * appropriate value and wakeup any waiters.
+		 */
+		spin_lock(&cache->lock);
+		if (load_cache_only) {
+			cache->caching_ctl = NULL;
+			cache->cached = BTRFS_CACHE_NO;
+		} else {
+			cache->cached = BTRFS_CACHE_STARTED;
+		}
+		spin_unlock(&cache->lock);
+		wake_up(&caching_ctl->wait);
 	}
 
-	if (load_cache_only)
-		return 0;
-
-	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
-	BUG_ON(!caching_ctl);
-
-	INIT_LIST_HEAD(&caching_ctl->list);
-	mutex_init(&caching_ctl->mutex);
-	init_waitqueue_head(&caching_ctl->wait);
-	caching_ctl->block_group = cache;
-	caching_ctl->progress = cache->key.objectid;
-	/* one for caching kthread, one for caching block group list */
-	atomic_set(&caching_ctl->count, 2);
-	caching_ctl->work.func = caching_thread;
-
-	spin_lock(&cache->lock);
-	if (cache->cached != BTRFS_CACHE_NO) {
-		spin_unlock(&cache->lock);
-		kfree(caching_ctl);
+	if (load_cache_only) {
+		put_caching_control(caching_ctl);
 		return 0;
 	}
-	cache->caching_ctl = caching_ctl;
-	cache->cached = BTRFS_CACHE_STARTED;
-	spin_unlock(&cache->lock);
 
 	down_write(&fs_info->extent_commit_sem);
+	atomic_inc(&caching_ctl->count);
 	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
 	up_write(&fs_info->extent_commit_sem);
 
@@ -3797,16 +3838,16 @@ void btrfs_free_block_rsv(struct btrfs_root *root,
 	kfree(rsv);
 }
 
-int btrfs_block_rsv_add(struct btrfs_root *root,
-			struct btrfs_block_rsv *block_rsv,
-			u64 num_bytes)
+static inline int __block_rsv_add(struct btrfs_root *root,
+				  struct btrfs_block_rsv *block_rsv,
+				  u64 num_bytes, int flush)
 {
 	int ret;
 
 	if (num_bytes == 0)
 		return 0;
 
-	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1);
+	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
 	if (!ret) {
 		block_rsv_add_bytes(block_rsv, num_bytes, 1);
 		return 0;
@@ -3815,22 +3856,18 @@ int btrfs_block_rsv_add(struct btrfs_root *root,
 	return ret;
 }
 
+int btrfs_block_rsv_add(struct btrfs_root *root,
+			struct btrfs_block_rsv *block_rsv,
+			u64 num_bytes)
+{
+	return __block_rsv_add(root, block_rsv, num_bytes, 1);
+}
+
 int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
 				struct btrfs_block_rsv *block_rsv,
 				u64 num_bytes)
 {
-	int ret;
-
-	if (num_bytes == 0)
-		return 0;
-
-	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 0);
-	if (!ret) {
-		block_rsv_add_bytes(block_rsv, num_bytes, 1);
-		return 0;
-	}
-
-	return ret;
+	return __block_rsv_add(root, block_rsv, num_bytes, 0);
 }
 
 int btrfs_block_rsv_check(struct btrfs_root *root,
@@ -3851,9 +3888,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root,
 	return ret;
 }
 
-int btrfs_block_rsv_refill(struct btrfs_root *root,
-			   struct btrfs_block_rsv *block_rsv,
-			   u64 min_reserved)
+static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
+					   struct btrfs_block_rsv *block_rsv,
+					   u64 min_reserved, int flush)
 {
 	u64 num_bytes = 0;
 	int ret = -ENOSPC;
@@ -3872,7 +3909,7 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
 	if (!ret)
 		return 0;
 
-	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1);
+	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
 	if (!ret) {
 		block_rsv_add_bytes(block_rsv, num_bytes, 0);
 		return 0;
@@ -3881,6 +3918,20 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
 	return ret;
 }
 
+int btrfs_block_rsv_refill(struct btrfs_root *root,
+			   struct btrfs_block_rsv *block_rsv,
+			   u64 min_reserved)
+{
+	return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
+}
+
+int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
+				   struct btrfs_block_rsv *block_rsv,
+				   u64 min_reserved)
+{
+	return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
+}
+
 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
 			    struct btrfs_block_rsv *dst_rsv,
 			    u64 num_bytes)
@@ -4064,23 +4115,30 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
  */
 static unsigned drop_outstanding_extent(struct inode *inode)
 {
+	unsigned drop_inode_space = 0;
 	unsigned dropped_extents = 0;
 
 	BUG_ON(!BTRFS_I(inode)->outstanding_extents);
 	BTRFS_I(inode)->outstanding_extents--;
 
+	if (BTRFS_I(inode)->outstanding_extents == 0 &&
+	    BTRFS_I(inode)->delalloc_meta_reserved) {
+		drop_inode_space = 1;
+		BTRFS_I(inode)->delalloc_meta_reserved = 0;
+	}
+
 	/*
 	 * If we have more or the same amount of outstanding extents than we have
 	 * reserved then we need to leave the reserved extents count alone.
 	 */
 	if (BTRFS_I(inode)->outstanding_extents >=
 	    BTRFS_I(inode)->reserved_extents)
-		return 0;
+		return drop_inode_space;
 
 	dropped_extents = BTRFS_I(inode)->reserved_extents -
 		BTRFS_I(inode)->outstanding_extents;
 	BTRFS_I(inode)->reserved_extents -= dropped_extents;
-	return dropped_extents;
+	return dropped_extents + drop_inode_space;
 }
 
 /**
@@ -4166,9 +4224,18 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 		nr_extents = BTRFS_I(inode)->outstanding_extents -
 			BTRFS_I(inode)->reserved_extents;
 		BTRFS_I(inode)->reserved_extents += nr_extents;
+	}
 
-	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
+	/*
+	 * Add an item to reserve for updating the inode when we complete the
+	 * delalloc io.
+	 */
+	if (!BTRFS_I(inode)->delalloc_meta_reserved) {
+		nr_extents++;
+		BTRFS_I(inode)->delalloc_meta_reserved = 1;
 	}
+
+	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
 	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
 	spin_unlock(&BTRFS_I(inode)->lock);
 
@@ -5040,11 +5107,11 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 	struct btrfs_root *root = orig_root->fs_info->extent_root;
 	struct btrfs_free_cluster *last_ptr = NULL;
 	struct btrfs_block_group_cache *block_group = NULL;
+	struct btrfs_block_group_cache *used_block_group;
 	int empty_cluster = 2 * 1024 * 1024;
 	int allowed_chunk_alloc = 0;
 	int done_chunk_alloc = 0;
 	struct btrfs_space_info *space_info;
-	int last_ptr_loop = 0;
 	int loop = 0;
 	int index = 0;
 	int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
@@ -5106,6 +5173,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 ideal_cache:
 		block_group = btrfs_lookup_block_group(root->fs_info,
 						       search_start);
+		used_block_group = block_group;
 		/*
 		 * we don't want to use the block group if it doesn't match our
 		 * allocation bits, or if its not cached.
@@ -5143,6 +5211,7 @@ search:
 		u64 offset;
 		int cached;
 
+		used_block_group = block_group;
 		btrfs_get_block_group(block_group);
 		search_start = block_group->key.objectid;
 
@@ -5166,13 +5235,15 @@ search:
 		}
 
 have_block_group:
-		if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
+		cached = block_group_cache_done(block_group);
+		if (unlikely(!cached)) {
 			u64 free_percent;
 
+			found_uncached_bg = true;
 			ret = cache_block_group(block_group, trans,
 						orig_root, 1);
 			if (block_group->cached == BTRFS_CACHE_FINISHED)
-				goto have_block_group;
+				goto alloc;
 
 			free_percent = btrfs_block_group_used(&block_group->item);
 			free_percent *= 100;
@@ -5194,7 +5265,6 @@ have_block_group:
 							orig_root, 0);
 				BUG_ON(ret);
 			}
-			found_uncached_bg = true;
 
 			/*
 			 * If loop is set for cached only, try the next block
@@ -5204,94 +5274,80 @@ have_block_group:
 				goto loop;
 		}
 
-		cached = block_group_cache_done(block_group);
-		if (unlikely(!cached))
-			found_uncached_bg = true;
-
+alloc:
 		if (unlikely(block_group->ro))
 			goto loop;
 
 		spin_lock(&block_group->free_space_ctl->tree_lock);
 		if (cached &&
 		    block_group->free_space_ctl->free_space <
-		    num_bytes + empty_size) {
+		    num_bytes + empty_cluster + empty_size) {
 			spin_unlock(&block_group->free_space_ctl->tree_lock);
 			goto loop;
 		}
 		spin_unlock(&block_group->free_space_ctl->tree_lock);
 
 		/*
-		 * Ok we want to try and use the cluster allocator, so lets look
-		 * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
-		 * have tried the cluster allocator plenty of times at this
-		 * point and not have found anything, so we are likely way too
-		 * fragmented for the clustering stuff to find anything, so lets
-		 * just skip it and let the allocator find whatever block it can
-		 * find
+		 * Ok we want to try and use the cluster allocator, so
+		 * lets look there
 		 */
-		if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
+		if (last_ptr) {
 			/*
 			 * the refill lock keeps out other
 			 * people trying to start a new cluster
 			 */
 			spin_lock(&last_ptr->refill_lock);
-			if (last_ptr->block_group &&
-			    (last_ptr->block_group->ro ||
-			    !block_group_bits(last_ptr->block_group, data))) {
-				offset = 0;
+			used_block_group = last_ptr->block_group;
+			if (used_block_group != block_group &&
+			    (!used_block_group ||
+			     used_block_group->ro ||
+			     !block_group_bits(used_block_group, data))) {
+				used_block_group = block_group;
 				goto refill_cluster;
 			}
 
-			offset = btrfs_alloc_from_cluster(block_group, last_ptr,
-							  num_bytes, search_start);
+			if (used_block_group != block_group)
+				btrfs_get_block_group(used_block_group);
+
+			offset = btrfs_alloc_from_cluster(used_block_group,
+			  last_ptr, num_bytes, used_block_group->key.objectid);
 			if (offset) {
 				/* we have a block, we're done */
 				spin_unlock(&last_ptr->refill_lock);
 				goto checks;
 			}
 
-			spin_lock(&last_ptr->lock);
-			/*
-			 * whoops, this cluster doesn't actually point to
-			 * this block group.  Get a ref on the block
-			 * group it does point to and try again
-			 */
-			if (!last_ptr_loop && last_ptr->block_group &&
-			    last_ptr->block_group != block_group &&
-			    index <=
-				 get_block_group_index(last_ptr->block_group)) {
-
-				btrfs_put_block_group(block_group);
-				block_group = last_ptr->block_group;
-				btrfs_get_block_group(block_group);
-				spin_unlock(&last_ptr->lock);
-				spin_unlock(&last_ptr->refill_lock);
-
-				last_ptr_loop = 1;
-				search_start = block_group->key.objectid;
-				/*
-				 * we know this block group is properly
-				 * in the list because
-				 * btrfs_remove_block_group, drops the
-				 * cluster before it removes the block
-				 * group from the list
-				 */
-				goto have_block_group;
+			WARN_ON(last_ptr->block_group != used_block_group);
+			if (used_block_group != block_group) {
+				btrfs_put_block_group(used_block_group);
+				used_block_group = block_group;
 			}
-			spin_unlock(&last_ptr->lock);
 refill_cluster:
+			BUG_ON(used_block_group != block_group);
+			/* If we are on LOOP_NO_EMPTY_SIZE, we can't
+			 * set up a new cluster, so lets just skip it
+			 * and let the allocator find whatever block
+			 * it can find.  If we reach this point, we
+			 * will have tried the cluster allocator
+			 * plenty of times and not have found
+			 * anything, so we are likely way too
+			 * fragmented for the clustering stuff to find
+			 * anything. */
+			if (loop >= LOOP_NO_EMPTY_SIZE) {
+				spin_unlock(&last_ptr->refill_lock);
+				goto unclustered_alloc;
+			}
+
 			/*
 			 * this cluster didn't work out, free it and
 			 * start over
 			 */
 			btrfs_return_cluster_to_free_space(NULL, last_ptr);
 
-			last_ptr_loop = 0;
-
 			/* allocate a cluster in this block group */
 			ret = btrfs_find_space_cluster(trans, root,
 					       block_group, last_ptr,
-					       offset, num_bytes,
+					       search_start, num_bytes,
 					       empty_cluster + empty_size);
 			if (ret == 0) {
 				/*
@@ -5327,6 +5383,7 @@ refill_cluster:
 			goto loop;
 		}
 
+unclustered_alloc:
 		offset = btrfs_find_space_for_alloc(block_group, search_start,
 						    num_bytes, empty_size);
 		/*
@@ -5353,14 +5410,14 @@ checks:
 		search_start = stripe_align(root, offset);
 		/* move on to the next group */
 		if (search_start + num_bytes >= search_end) {
-			btrfs_add_free_space(block_group, offset, num_bytes);
+			btrfs_add_free_space(used_block_group, offset, num_bytes);
 			goto loop;
 		}
 
 		/* move on to the next group */
 		if (search_start + num_bytes >
-		    block_group->key.objectid + block_group->key.offset) {
-			btrfs_add_free_space(block_group, offset, num_bytes);
+		    used_block_group->key.objectid + used_block_group->key.offset) {
+			btrfs_add_free_space(used_block_group, offset, num_bytes);
 			goto loop;
 		}
 
@@ -5368,14 +5425,14 @@ checks:
 		ins->offset = num_bytes;
 
 		if (offset < search_start)
-			btrfs_add_free_space(block_group, offset,
+			btrfs_add_free_space(used_block_group, offset,
 					     search_start - offset);
 		BUG_ON(offset > search_start);
 
-		ret = btrfs_update_reserved_bytes(block_group, num_bytes,
+		ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
 						  alloc_type);
 		if (ret == -EAGAIN) {
-			btrfs_add_free_space(block_group, offset, num_bytes);
+			btrfs_add_free_space(used_block_group, offset, num_bytes);
 			goto loop;
 		}
 
@@ -5384,15 +5441,19 @@ checks:
 		ins->offset = num_bytes;
 
 		if (offset < search_start)
-			btrfs_add_free_space(block_group, offset,
+			btrfs_add_free_space(used_block_group, offset,
 					     search_start - offset);
 		BUG_ON(offset > search_start);
+		if (used_block_group != block_group)
+			btrfs_put_block_group(used_block_group);
 		btrfs_put_block_group(block_group);
 		break;
 loop:
 		failed_cluster_refill = false;
 		failed_alloc = false;
 		BUG_ON(index != get_block_group_index(block_group));
+		if (used_block_group != block_group)
+			btrfs_put_block_group(used_block_group);
 		btrfs_put_block_group(block_group);
 	}
 	up_read(&space_info->groups_sem);
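
The heart of the cache_block_group() rework is the new BTRFS_CACHE_FAST state and the wait loop at its top: a thread that finds a fast load in flight takes a reference, drops the spinlock, sleeps on the caching control's waitqueue, then retakes the lock and rechecks. The same shape in userspace, with a pthread condition variable standing in for the waitqueue (illustrative names, not the btrfs types):

#include <pthread.h>

enum cache_state { CACHE_NO, CACHE_FAST, CACHE_STARTED, CACHE_FINISHED };

struct group {
	pthread_mutex_t lock;
	pthread_cond_t wait;	/* plays the role of caching_ctl->wait */
	enum cache_state cached;
};

static void wait_for_fast_load(struct group *g)
{
	pthread_mutex_lock(&g->lock);
	/* loop, as in the patch: wakeups can be spurious, so the state
	 * must be rechecked with the lock held before moving on */
	while (g->cached == CACHE_FAST)
		pthread_cond_wait(&g->wait, &g->lock);	/* drops + retakes lock */
	pthread_mutex_unlock(&g->lock);
}

static void finish_fast_load(struct group *g, int loaded)
{
	pthread_mutex_lock(&g->lock);
	g->cached = loaded ? CACHE_FINISHED : CACHE_STARTED;
	pthread_mutex_unlock(&g->lock);
	pthread_cond_broadcast(&g->wait);	/* wake_up(&caching_ctl->wait) */
}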
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 1f87c4d0e7a0..49f3c9dc09f4 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -935,8 +935,10 @@ again:
 	node = tree_search(tree, start);
 	if (!node) {
 		prealloc = alloc_extent_state_atomic(prealloc);
-		if (!prealloc)
-			return -ENOMEM;
+		if (!prealloc) {
+			err = -ENOMEM;
+			goto out;
+		}
 		err = insert_state(tree, prealloc, start, end, &bits);
 		prealloc = NULL;
 		BUG_ON(err == -EEXIST);
@@ -992,8 +994,10 @@ hit_next:
 	 */
 	if (state->start < start) {
 		prealloc = alloc_extent_state_atomic(prealloc);
-		if (!prealloc)
-			return -ENOMEM;
+		if (!prealloc) {
+			err = -ENOMEM;
+			goto out;
+		}
 		err = split_state(tree, state, prealloc, start);
 		BUG_ON(err == -EEXIST);
 		prealloc = NULL;
@@ -1024,8 +1028,10 @@ hit_next:
 		this_end = last_start - 1;
 
 		prealloc = alloc_extent_state_atomic(prealloc);
-		if (!prealloc)
-			return -ENOMEM;
+		if (!prealloc) {
+			err = -ENOMEM;
+			goto out;
+		}
 
 		/*
 		 * Avoid to free 'prealloc' if it can be merged with
@@ -1051,8 +1057,10 @@ hit_next:
 	 */
 	if (state->start <= end && state->end > end) {
 		prealloc = alloc_extent_state_atomic(prealloc);
-		if (!prealloc)
-			return -ENOMEM;
+		if (!prealloc) {
+			err = -ENOMEM;
+			goto out;
+		}
 
 		err = split_state(tree, state, prealloc, end + 1);
 		BUG_ON(err == -EEXIST);
@@ -2285,16 +2293,22 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 			clean_io_failure(start, page);
 		}
 		if (!uptodate) {
-			u64 failed_mirror;
-			failed_mirror = (u64)bio->bi_bdev;
-			if (tree->ops && tree->ops->readpage_io_failed_hook)
-				ret = tree->ops->readpage_io_failed_hook(
-						bio, page, start, end,
-						failed_mirror, state);
-			else
-				ret = bio_readpage_error(bio, page, start, end,
-							 failed_mirror, NULL);
+			int failed_mirror;
+			failed_mirror = (int)(unsigned long)bio->bi_bdev;
+			/*
+			 * The generic bio_readpage_error handles errors the
+			 * following way: If possible, new read requests are
+			 * created and submitted and will end up in
+			 * end_bio_extent_readpage as well (if we're lucky, not
+			 * in the !uptodate case). In that case it returns 0 and
+			 * we just go on with the next page in our bio. If it
+			 * can't handle the error it will return -EIO and we
+			 * remain responsible for that page.
+			 */
+			ret = bio_readpage_error(bio, page, start, end,
+						 failed_mirror, NULL);
 			if (ret == 0) {
+error_handled:
 				uptodate =
 					test_bit(BIO_UPTODATE, &bio->bi_flags);
 				if (err)
@@ -2302,6 +2316,13 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 				uncache_state(&cached);
 				continue;
 			}
+			if (tree->ops && tree->ops->readpage_io_failed_hook) {
+				ret = tree->ops->readpage_io_failed_hook(
+						bio, page, start, end,
+						failed_mirror, state);
+				if (ret == 0)
+					goto error_handled;
+			}
 		}
 
 		if (uptodate) {
@@ -3366,6 +3387,9 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		return -ENOMEM;
 	path->leave_spinning = 1;
 
+	start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
+	len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
+
 	/*
 	 * lookup the last file extent. We're not using i_size here
 	 * because there might be preallocation past i_size
@@ -3413,7 +3437,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
 			 &cached_state, GFP_NOFS);
 
-	em = get_extent_skip_holes(inode, off, last_for_get_extent,
+	em = get_extent_skip_holes(inode, start, last_for_get_extent,
 				   get_extent);
 	if (!em)
 		goto out;
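
All four -ENOMEM changes in the extent-bit functions are the same fix: by the time the allocation can fail, the tree spinlock is held and a prealloc state may be outstanding, so a bare return leaks both; routing the error through the function's out label (which unlocks and frees, not shown in these hunks) cleans up once, in one place. A generic sketch of that goto-unwind convention:

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int convert_range(void)
{
	void *prealloc = NULL;
	int err = 0;

	pthread_mutex_lock(&lock);

	prealloc = malloc(64);
	if (!prealloc) {
		err = -ENOMEM;
		goto out;	/* a bare "return" here would leak the lock */
	}
	/* ... further steps, each jumping to out on failure ... */
out:
	pthread_mutex_unlock(&lock);
	free(prealloc);		/* free(NULL) is a no-op */
	return err;
}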
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index feb9be0e23bc..7604c3001322 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -70,7 +70,7 @@ struct extent_io_ops {
 				  unsigned long bio_flags);
 	int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
 	int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
-				       u64 start, u64 end, u64 failed_mirror,
+				       u64 start, u64 end, int failed_mirror,
 				       struct extent_state *state);
 	int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
 					u64 start, u64 end,
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 7a15fcfb3e1f..ec23d43d0c35 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -351,6 +351,11 @@ static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
 		}
 	}
 
+	for (i = 0; i < io_ctl->num_pages; i++) {
+		clear_page_dirty_for_io(io_ctl->pages[i]);
+		set_page_extent_mapped(io_ctl->pages[i]);
+	}
+
 	return 0;
 }
 
@@ -537,6 +542,13 @@ static int io_ctl_read_entry(struct io_ctl *io_ctl,
 			     struct btrfs_free_space *entry, u8 *type)
 {
 	struct btrfs_free_space_entry *e;
+	int ret;
+
+	if (!io_ctl->cur) {
+		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
+		if (ret)
+			return ret;
+	}
 
 	e = io_ctl->cur;
 	entry->offset = le64_to_cpu(e->offset);
@@ -550,10 +562,7 @@ static int io_ctl_read_entry(struct io_ctl *io_ctl,
 
 	io_ctl_unmap_page(io_ctl);
 
-	if (io_ctl->index >= io_ctl->num_pages)
-		return 0;
-
-	return io_ctl_check_crc(io_ctl, io_ctl->index);
+	return 0;
 }
 
 static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
@@ -561,9 +570,6 @@ static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
 {
 	int ret;
 
-	if (io_ctl->cur && io_ctl->cur != io_ctl->orig)
-		io_ctl_unmap_page(io_ctl);
-
 	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
 	if (ret)
 		return ret;
@@ -699,6 +705,8 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 		num_entries--;
 	}
 
+	io_ctl_unmap_page(&io_ctl);
+
 	/*
 	 * We add the bitmaps at the end of the entries in order that
 	 * the bitmap entries are added to the cache.
@@ -1462,6 +1470,7 @@ static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
 {
 	info->offset = offset_to_bitmap(ctl, offset);
 	info->bytes = 0;
+	INIT_LIST_HEAD(&info->list);
 	link_free_space(ctl, info);
 	ctl->total_bitmaps++;
 
@@ -1841,7 +1850,13 @@ again:
 		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
 					  1, 0);
 		if (!info) {
-			WARN_ON(1);
+			/* the tree logging code might be calling us before we
+			 * have fully loaded the free space rbtree for this
+			 * block group.  So it is possible the entry won't
+			 * be in the rbtree yet at all.  The caching code
+			 * will make sure not to put it in the rbtree if
+			 * the logging code has pinned it.
+			 */
 			goto out_lock;
 		}
 	}
@@ -2305,6 +2320,7 @@ again:
 
 		if (!found) {
 			start = i;
+			cluster->max_size = 0;
 			found = true;
 		}
 
@@ -2448,16 +2464,23 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct btrfs_free_space *entry;
-	struct rb_node *node;
 	int ret = -ENOSPC;
+	u64 bitmap_offset = offset_to_bitmap(ctl, offset);
 
 	if (ctl->total_bitmaps == 0)
 		return -ENOSPC;
 
 	/*
-	 * First check our cached list of bitmaps and see if there is an entry
-	 * here that will work.
+	 * The bitmap that covers offset won't be in the list unless offset
+	 * is just its start offset.
 	 */
+	entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
+	if (entry->offset != bitmap_offset) {
+		entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
+		if (entry && list_empty(&entry->list))
+			list_add(&entry->list, bitmaps);
+	}
+
 	list_for_each_entry(entry, bitmaps, list) {
 		if (entry->bytes < min_bytes)
 			continue;
@@ -2468,38 +2491,10 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
 	}
 
 	/*
-	 * If we do have entries on our list and we are here then we didn't find
-	 * anything, so go ahead and get the next entry after the last entry in
-	 * this list and start the search from there.
+	 * The bitmaps list has all the bitmaps that record free space
+	 * starting after offset, so no more search is required.
 	 */
-	if (!list_empty(bitmaps)) {
-		entry = list_entry(bitmaps->prev, struct btrfs_free_space,
-				   list);
-		node = rb_next(&entry->offset_index);
-		if (!node)
-			return -ENOSPC;
-		entry = rb_entry(node, struct btrfs_free_space, offset_index);
-		goto search;
-	}
-
-	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
-	if (!entry)
-		return -ENOSPC;
-
-search:
-	node = &entry->offset_index;
-	do {
-		entry = rb_entry(node, struct btrfs_free_space, offset_index);
-		node = rb_next(&entry->offset_index);
-		if (!entry->bitmap)
-			continue;
-		if (entry->bytes < min_bytes)
-			continue;
-		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
-					   bytes, min_bytes);
-	} while (ret && node);
-
-	return ret;
+	return -ENOSPC;
 }
 
 /*
@@ -2517,8 +2512,8 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 			     u64 offset, u64 bytes, u64 empty_size)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
-	struct list_head bitmaps;
 	struct btrfs_free_space *entry, *tmp;
+	LIST_HEAD(bitmaps);
 	u64 min_bytes;
 	int ret;
 
@@ -2557,7 +2552,6 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 		goto out;
 	}
 
-	INIT_LIST_HEAD(&bitmaps);
 	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
 				      bytes, min_bytes);
 	if (ret)
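
Two of the list changes travel together: add_new_bitmap() now initializes info->list, so the new list_empty()/list_add() in setup_cluster_bitmap() never inspects garbage pointers, and btrfs_find_space_cluster() switches from a separate INIT_LIST_HEAD() call to LIST_HEAD(), which initializes the head at its declaration. A self-contained sketch of the two forms, with the macros written out as in include/linux/list.h:

#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name)

static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

int main(void)
{
	/* the old pattern: declared now, initialized later; unsafe if
	 * anything looks at the list in between */
	struct list_head a;
	INIT_LIST_HEAD(&a);

	/* the new pattern: never observable in an uninitialized state */
	LIST_HEAD(b);

	printf("%d %d\n", a.next == &a, b.next == &b);	/* prints: 1 1 */
	return 0;
}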
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 53dcbdf446cd..f8962a957d65 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -398,6 +398,8 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
 	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
 	struct btrfs_path *path;
 	struct inode *inode;
+	struct btrfs_block_rsv *rsv;
+	u64 num_bytes;
 	u64 alloc_hint = 0;
 	int ret;
 	int prealloc;
@@ -421,11 +423,26 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
421 if (!path) 423 if (!path)
422 return -ENOMEM; 424 return -ENOMEM;
423 425
426 rsv = trans->block_rsv;
427 trans->block_rsv = &root->fs_info->trans_block_rsv;
428
429 num_bytes = trans->bytes_reserved;
430 /*
431 * 1 item for inode item insertion if needed
432 * 3 items for inode item update (in the worst case)
433 * 1 item for free space object
434 * 3 items for pre-allocation
435 */
436 trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 8);
437 ret = btrfs_block_rsv_add_noflush(root, trans->block_rsv,
438 trans->bytes_reserved);
439 if (ret)
440 goto out;
424again: 441again:
425 inode = lookup_free_ino_inode(root, path); 442 inode = lookup_free_ino_inode(root, path);
426 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { 443 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
427 ret = PTR_ERR(inode); 444 ret = PTR_ERR(inode);
428 goto out; 445 goto out_release;
429 } 446 }
430 447
431 if (IS_ERR(inode)) { 448 if (IS_ERR(inode)) {
@@ -434,7 +451,7 @@ again:
434 451
435 ret = create_free_ino_inode(root, trans, path); 452 ret = create_free_ino_inode(root, trans, path);
436 if (ret) 453 if (ret)
437 goto out; 454 goto out_release;
438 goto again; 455 goto again;
439 } 456 }
440 457
@@ -477,11 +494,14 @@ again:
477 } 494 }
478 btrfs_free_reserved_data_space(inode, prealloc); 495 btrfs_free_reserved_data_space(inode, prealloc);
479 496
497 ret = btrfs_write_out_ino_cache(root, trans, path);
480out_put: 498out_put:
481 iput(inode); 499 iput(inode);
500out_release:
501 btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
482out: 502out:
483 if (ret == 0) 503 trans->block_rsv = rsv;
484 ret = btrfs_write_out_ino_cache(root, trans, path); 504 trans->bytes_reserved = num_bytes;
485 505
486 btrfs_free_path(path); 506 btrfs_free_path(path);
487 return ret; 507 return ret;
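
The btrfs_save_ino_cache() changes follow a save/switch/restore discipline around the transaction's reservation: stash the caller's block_rsv and bytes_reserved, switch to trans_block_rsv sized for the worst case (the 8 items itemized in the comment), and restore both fields on every exit path. A hedged kernel-style sketch of the shape, condensed from the calls in the hunk with the surrounding work elided; it is not compilable outside the kernel tree:

static int with_private_reservation(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans)
{
	struct btrfs_block_rsv *saved_rsv = trans->block_rsv;
	u64 saved_bytes = trans->bytes_reserved;
	int ret;

	trans->block_rsv = &root->fs_info->trans_block_rsv;
	trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 8);

	ret = btrfs_block_rsv_add_noflush(root, trans->block_rsv,
					  trans->bytes_reserved);
	if (ret)
		goto out;

	/* ... the work that consumes the reservation ... */

	btrfs_block_rsv_release(root, trans->block_rsv,
				trans->bytes_reserved);
out:
	/* restore the caller's reservation state unconditionally */
	trans->block_rsv = saved_rsv;
	trans->bytes_reserved = saved_bytes;
	return ret;
}

The _noflush variant requests the space without triggering any flushing to satisfy it; if the bytes cannot be reserved immediately the function bails out early with -ENOSPC instead of stalling inside the commit path.
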
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 966ddcc4c63d..2c984f7d4c2a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -93,6 +93,8 @@ static noinline int cow_file_range(struct inode *inode,
93 struct page *locked_page, 93 struct page *locked_page,
94 u64 start, u64 end, int *page_started, 94 u64 start, u64 end, int *page_started,
95 unsigned long *nr_written, int unlock); 95 unsigned long *nr_written, int unlock);
96static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
97 struct btrfs_root *root, struct inode *inode);
96 98
97static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, 99static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
98 struct inode *inode, struct inode *dir, 100 struct inode *inode, struct inode *dir,
@@ -1741,7 +1743,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1741 trans = btrfs_join_transaction(root); 1743 trans = btrfs_join_transaction(root);
1742 BUG_ON(IS_ERR(trans)); 1744 BUG_ON(IS_ERR(trans));
1743 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 1745 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1744 ret = btrfs_update_inode(trans, root, inode); 1746 ret = btrfs_update_inode_fallback(trans, root, inode);
1745 BUG_ON(ret); 1747 BUG_ON(ret);
1746 } 1748 }
1747 goto out; 1749 goto out;
@@ -1791,7 +1793,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1791 1793
1792 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); 1794 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1793 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 1795 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1794 ret = btrfs_update_inode(trans, root, inode); 1796 ret = btrfs_update_inode_fallback(trans, root, inode);
1795 BUG_ON(ret); 1797 BUG_ON(ret);
1796 } 1798 }
1797 ret = 0; 1799 ret = 0;
@@ -2199,6 +2201,9 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
2199 if (ret) 2201 if (ret)
2200 goto out; 2202 goto out;
2201 } 2203 }
2204 /* release the path since we're done with it */
2205 btrfs_release_path(path);
2206
2202 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE; 2207 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
2203 2208
2204 if (root->orphan_block_rsv) 2209 if (root->orphan_block_rsv)
@@ -2426,7 +2431,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
2426/* 2431/*
2427 * copy everything in the in-memory inode into the btree. 2432 * copy everything in the in-memory inode into the btree.
2428 */ 2433 */
2429noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, 2434static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
2430 struct btrfs_root *root, struct inode *inode) 2435 struct btrfs_root *root, struct inode *inode)
2431{ 2436{
2432 struct btrfs_inode_item *inode_item; 2437 struct btrfs_inode_item *inode_item;
@@ -2434,21 +2439,6 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2434 struct extent_buffer *leaf; 2439 struct extent_buffer *leaf;
2435 int ret; 2440 int ret;
2436 2441
2437 /*
2438 * If the inode is a free space inode, we can deadlock during commit
2439 * if we put it into the delayed code.
2440 *
2441 * The data relocation inode should also be directly updated
2442 * without delay
2443 */
2444 if (!btrfs_is_free_space_inode(root, inode)
2445 && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
2446 ret = btrfs_delayed_update_inode(trans, root, inode);
2447 if (!ret)
2448 btrfs_set_inode_last_trans(trans, inode);
2449 return ret;
2450 }
2451
2452 path = btrfs_alloc_path(); 2442 path = btrfs_alloc_path();
2453 if (!path) 2443 if (!path)
2454 return -ENOMEM; 2444 return -ENOMEM;
@@ -2477,6 +2467,43 @@ failed:
2477} 2467}
2478 2468
2479/* 2469/*
2470 * copy everything in the in-memory inode into the btree.
2471 */
2472noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2473 struct btrfs_root *root, struct inode *inode)
2474{
2475 int ret;
2476
2477 /*
2478 * If the inode is a free space inode, we can deadlock during commit
2479 * if we put it into the delayed code.
2480 *
2481 * The data relocation inode should also be directly updated
2482 * without delay
2483 */
2484 if (!btrfs_is_free_space_inode(root, inode)
2485 && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
2486 ret = btrfs_delayed_update_inode(trans, root, inode);
2487 if (!ret)
2488 btrfs_set_inode_last_trans(trans, inode);
2489 return ret;
2490 }
2491
2492 return btrfs_update_inode_item(trans, root, inode);
2493}
2494
2495static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
2496 struct btrfs_root *root, struct inode *inode)
2497{
2498 int ret;
2499
2500 ret = btrfs_update_inode(trans, root, inode);
2501 if (ret == -ENOSPC)
2502 return btrfs_update_inode_item(trans, root, inode);
2503 return ret;
2504}
2505
2506/*
2480 * unlink helper that gets used here in inode.c and in the tree logging 2507 * unlink helper that gets used here in inode.c and in the tree logging
2481 * recovery code. It removes a link in a directory with a given name, and 2508
2482 * also drops the back refs in the inode to the directory 2509 * also drops the back refs in the inode to the directory
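
The refactor above splits the old function in three: btrfs_update_inode_item() does the direct btree update, btrfs_update_inode() prefers the delayed path when it is safe, and btrfs_update_inode_fallback() retries the direct path only when the delayed path fails with -ENOSPC, which is what the ordered-extent completion call sites earlier in this diff now rely on. The control flow reduces to a generic try-then-fallback wrapper; a standalone sketch with stand-in functions (the forced failure is for demonstration only):

#include <errno.h>
#include <stdio.h>

/* stand-ins for the delayed and direct update paths */
static int delayed_update(void) { return -ENOSPC; }	/* forced failure */
static int direct_update(void)  { return 0; }

static int update_with_fallback(void)
{
	int ret = delayed_update();
	if (ret == -ENOSPC)	/* only this error triggers the retry */
		return direct_update();
	return ret;
}

int main(void)
{
	printf("update_with_fallback() = %d\n", update_with_fallback());
	return 0;
}
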
@@ -3463,7 +3490,7 @@ void btrfs_evict_inode(struct inode *inode)
3463 * doing the truncate. 3490 * doing the truncate.
3464 */ 3491 */
3465 while (1) { 3492 while (1) {
3466 ret = btrfs_block_rsv_refill(root, rsv, min_size); 3493 ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size);
3467 3494
3468 /* 3495 /*
3469 * Try and steal from the global reserve since we will 3496 * Try and steal from the global reserve since we will
@@ -5632,7 +5659,7 @@ again:
5632 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) { 5659 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
5633 ret = btrfs_ordered_update_i_size(inode, 0, ordered); 5660 ret = btrfs_ordered_update_i_size(inode, 0, ordered);
5634 if (!ret) 5661 if (!ret)
5635 err = btrfs_update_inode(trans, root, inode); 5662 err = btrfs_update_inode_fallback(trans, root, inode);
5636 goto out; 5663 goto out;
5637 } 5664 }
5638 5665
@@ -5670,7 +5697,7 @@ again:
5670 add_pending_csums(trans, inode, ordered->file_offset, &ordered->list); 5697 add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
5671 ret = btrfs_ordered_update_i_size(inode, 0, ordered); 5698 ret = btrfs_ordered_update_i_size(inode, 0, ordered);
5672 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) 5699 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
5673 btrfs_update_inode(trans, root, inode); 5700 btrfs_update_inode_fallback(trans, root, inode);
5674 ret = 0; 5701 ret = 0;
5675out_unlock: 5702out_unlock:
5676 unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset, 5703 unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
@@ -6529,14 +6556,16 @@ end_trans:
6529 ret = btrfs_orphan_del(NULL, inode); 6556 ret = btrfs_orphan_del(NULL, inode);
6530 } 6557 }
6531 6558
6532 trans->block_rsv = &root->fs_info->trans_block_rsv; 6559 if (trans) {
6533 ret = btrfs_update_inode(trans, root, inode); 6560 trans->block_rsv = &root->fs_info->trans_block_rsv;
6534 if (ret && !err) 6561 ret = btrfs_update_inode(trans, root, inode);
6535 err = ret; 6562 if (ret && !err)
6563 err = ret;
6536 6564
6537 nr = trans->blocks_used; 6565 nr = trans->blocks_used;
6538 ret = btrfs_end_transaction_throttle(trans, root); 6566 ret = btrfs_end_transaction_throttle(trans, root);
6539 btrfs_btree_balance_dirty(root, nr); 6567 btrfs_btree_balance_dirty(root, nr);
6568 }
6540 6569
6541out: 6570out:
6542 btrfs_free_block_rsv(root, rsv); 6571 btrfs_free_block_rsv(root, rsv);
@@ -6605,6 +6634,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
6605 ei->orphan_meta_reserved = 0; 6634 ei->orphan_meta_reserved = 0;
6606 ei->dummy_inode = 0; 6635 ei->dummy_inode = 0;
6607 ei->in_defrag = 0; 6636 ei->in_defrag = 0;
6637 ei->delalloc_meta_reserved = 0;
6608 ei->force_compress = BTRFS_COMPRESS_NONE; 6638 ei->force_compress = BTRFS_COMPRESS_NONE;
6609 6639
6610 ei->delayed_node = NULL; 6640 ei->delayed_node = NULL;
@@ -6764,11 +6794,13 @@ static int btrfs_getattr(struct vfsmount *mnt,
6764 struct dentry *dentry, struct kstat *stat) 6794 struct dentry *dentry, struct kstat *stat)
6765{ 6795{
6766 struct inode *inode = dentry->d_inode; 6796 struct inode *inode = dentry->d_inode;
6797 u32 blocksize = inode->i_sb->s_blocksize;
6798
6767 generic_fillattr(inode, stat); 6799 generic_fillattr(inode, stat);
6768 stat->dev = BTRFS_I(inode)->root->anon_dev; 6800 stat->dev = BTRFS_I(inode)->root->anon_dev;
6769 stat->blksize = PAGE_CACHE_SIZE; 6801 stat->blksize = PAGE_CACHE_SIZE;
6770 stat->blocks = (inode_get_bytes(inode) + 6802 stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
6771 BTRFS_I(inode)->delalloc_bytes) >> 9; 6803 ALIGN(BTRFS_I(inode)->delalloc_bytes, blocksize)) >> 9;
6772 return 0; 6804 return 0;
6773} 6805}
6774 6806
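
The btrfs_getattr() hunk rounds each byte count up to the filesystem block size before converting to 512-byte sectors, so st_blocks reflects whole allocated blocks rather than raw byte totals. A worked example in plain C; ALIGN mirrors the kernel macro:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long long)(a) - 1))

int main(void)
{
	unsigned long long blocksize = 4096;
	unsigned long long inode_bytes = 5000;		/* bytes on disk */
	unsigned long long delalloc_bytes = 100;	/* dirty, unwritten */

	/* old: (5000 + 100) >> 9 = 9 sectors, undercounting allocation */
	unsigned long long old_blocks = (inode_bytes + delalloc_bytes) >> 9;

	/* new: (8192 + 4096) >> 9 = 24 sectors of whole blocks */
	unsigned long long new_blocks = (ALIGN(inode_bytes, blocksize) +
					 ALIGN(delalloc_bytes, blocksize)) >> 9;

	printf("old st_blocks = %llu, new st_blocks = %llu\n",
	       old_blocks, new_blocks);
	return 0;
}
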
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 4a34c472f126..72d461656f60 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1216,12 +1216,12 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
1216 *devstr = '\0'; 1216 *devstr = '\0';
1217 devstr = vol_args->name; 1217 devstr = vol_args->name;
1218 devid = simple_strtoull(devstr, &end, 10); 1218 devid = simple_strtoull(devstr, &end, 10);
1219 printk(KERN_INFO "resizing devid %llu\n", 1219 printk(KERN_INFO "btrfs: resizing devid %llu\n",
1220 (unsigned long long)devid); 1220 (unsigned long long)devid);
1221 } 1221 }
1222 device = btrfs_find_device(root, devid, NULL, NULL); 1222 device = btrfs_find_device(root, devid, NULL, NULL);
1223 if (!device) { 1223 if (!device) {
1224 printk(KERN_INFO "resizer unable to find device %llu\n", 1224 printk(KERN_INFO "btrfs: resizer unable to find device %llu\n",
1225 (unsigned long long)devid); 1225 (unsigned long long)devid);
1226 ret = -EINVAL; 1226 ret = -EINVAL;
1227 goto out_unlock; 1227 goto out_unlock;
@@ -1267,7 +1267,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
1267 do_div(new_size, root->sectorsize); 1267 do_div(new_size, root->sectorsize);
1268 new_size *= root->sectorsize; 1268 new_size *= root->sectorsize;
1269 1269
1270 printk(KERN_INFO "new size for %s is %llu\n", 1270 printk(KERN_INFO "btrfs: new size for %s is %llu\n",
1271 device->name, (unsigned long long)new_size); 1271 device->name, (unsigned long long)new_size);
1272 1272
1273 if (new_size > old_size) { 1273 if (new_size > old_size) {
@@ -1278,7 +1278,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
1278 } 1278 }
1279 ret = btrfs_grow_device(trans, device, new_size); 1279 ret = btrfs_grow_device(trans, device, new_size);
1280 btrfs_commit_transaction(trans, root); 1280 btrfs_commit_transaction(trans, root);
1281 } else { 1281 } else if (new_size < old_size) {
1282 ret = btrfs_shrink_device(device, new_size); 1282 ret = btrfs_shrink_device(device, new_size);
1283 } 1283 }
1284 1284
@@ -2930,11 +2930,13 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
2930 goto out; 2930 goto out;
2931 2931
2932 for (i = 0; i < ipath->fspath->elem_cnt; ++i) { 2932 for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
2933 rel_ptr = ipath->fspath->val[i] - (u64)ipath->fspath->val; 2933 rel_ptr = ipath->fspath->val[i] -
2934 (u64)(unsigned long)ipath->fspath->val;
2934 ipath->fspath->val[i] = rel_ptr; 2935 ipath->fspath->val[i] = rel_ptr;
2935 } 2936 }
2936 2937
2937 ret = copy_to_user((void *)ipa->fspath, (void *)ipath->fspath, size); 2938 ret = copy_to_user((void *)(unsigned long)ipa->fspath,
2939 (void *)(unsigned long)ipath->fspath, size);
2938 if (ret) { 2940 if (ret) {
2939 ret = -EFAULT; 2941 ret = -EFAULT;
2940 goto out; 2942 goto out;
@@ -3017,7 +3019,8 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
3017 if (ret < 0) 3019 if (ret < 0)
3018 goto out; 3020 goto out;
3019 3021
3020 ret = copy_to_user((void *)loi->inodes, (void *)inodes, size); 3022 ret = copy_to_user((void *)(unsigned long)loi->inodes,
3023 (void *)(unsigned long)inodes, size);
3021 if (ret) 3024 if (ret)
3022 ret = -EFAULT; 3025 ret = -EFAULT;
3023 3026
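
The casts added above and in the backref.c hunk at the top of this diff are the portable way to move pointers through u64 fields in an ioctl ABI. On a 32-bit build, casting a pointer straight to u64 (or a u64 straight to a pointer) draws a size-mismatch warning; going through unsigned long, which always matches the pointer width, is warning-free in both directions. A minimal demonstration:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

int main(void)
{
	char buf[] = "payload";
	u64 handle = (u64)(unsigned long)buf;		/* pointer -> u64 */
	char *back = (char *)(unsigned long)handle;	/* u64 -> pointer */
	printf("%s\n", back);
	return 0;
}

The resize hunk also tightens the size comparison: with else if (new_size < old_size), an unchanged size no longer triggers a pointless shrink.
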
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 24d654ce7a06..dff29d5e151a 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1174,6 +1174,8 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
1174 list_add_tail(&new_edge->list[UPPER], 1174 list_add_tail(&new_edge->list[UPPER],
1175 &new_node->lower); 1175 &new_node->lower);
1176 } 1176 }
1177 } else {
1178 list_add_tail(&new_node->lower, &cache->leaves);
1177 } 1179 }
1178 1180
1179 rb_node = tree_insert(&cache->rb_root, new_node->bytenr, 1181 rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index ed11d3866afd..c27bcb67f330 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -256,6 +256,11 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
256 btrfs_release_path(swarn->path); 256 btrfs_release_path(swarn->path);
257 257
258 ipath = init_ipath(4096, local_root, swarn->path); 258 ipath = init_ipath(4096, local_root, swarn->path);
259 if (IS_ERR(ipath)) {
260 ret = PTR_ERR(ipath);
261 ipath = NULL;
262 goto err;
263 }
259 ret = paths_from_inode(inum, ipath); 264 ret = paths_from_inode(inum, ipath);
260 265
261 if (ret < 0) 266 if (ret < 0)
@@ -272,7 +277,7 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
272 swarn->logical, swarn->dev->name, 277 swarn->logical, swarn->dev->name,
273 (unsigned long long)swarn->sector, root, inum, offset, 278 (unsigned long long)swarn->sector, root, inum, offset,
274 min(isize - offset, (u64)PAGE_SIZE), nlink, 279 min(isize - offset, (u64)PAGE_SIZE), nlink,
275 (char *)ipath->fspath->val[i]); 280 (char *)(unsigned long)ipath->fspath->val[i]);
276 281
277 free_ipath(ipath); 282 free_ipath(ipath);
278 return 0; 283 return 0;
@@ -944,50 +949,18 @@ static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
944static int scrub_submit(struct scrub_dev *sdev) 949static int scrub_submit(struct scrub_dev *sdev)
945{ 950{
946 struct scrub_bio *sbio; 951 struct scrub_bio *sbio;
947 struct bio *bio;
948 int i;
949 952
950 if (sdev->curr == -1) 953 if (sdev->curr == -1)
951 return 0; 954 return 0;
952 955
953 sbio = sdev->bios[sdev->curr]; 956 sbio = sdev->bios[sdev->curr];
954
955 bio = bio_alloc(GFP_NOFS, sbio->count);
956 if (!bio)
957 goto nomem;
958
959 bio->bi_private = sbio;
960 bio->bi_end_io = scrub_bio_end_io;
961 bio->bi_bdev = sdev->dev->bdev;
962 bio->bi_sector = sbio->physical >> 9;
963
964 for (i = 0; i < sbio->count; ++i) {
965 struct page *page;
966 int ret;
967
968 page = alloc_page(GFP_NOFS);
969 if (!page)
970 goto nomem;
971
972 ret = bio_add_page(bio, page, PAGE_SIZE, 0);
973 if (!ret) {
974 __free_page(page);
975 goto nomem;
976 }
977 }
978
979 sbio->err = 0; 957 sbio->err = 0;
980 sdev->curr = -1; 958 sdev->curr = -1;
981 atomic_inc(&sdev->in_flight); 959 atomic_inc(&sdev->in_flight);
982 960
983 submit_bio(READ, bio); 961 submit_bio(READ, sbio->bio);
984 962
985 return 0; 963 return 0;
986
987nomem:
988 scrub_free_bio(bio);
989
990 return -ENOMEM;
991} 964}
992 965
993static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len, 966static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
@@ -995,6 +968,8 @@ static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
995 u8 *csum, int force) 968 u8 *csum, int force)
996{ 969{
997 struct scrub_bio *sbio; 970 struct scrub_bio *sbio;
971 struct page *page;
972 int ret;
998 973
999again: 974again:
1000 /* 975 /*
@@ -1015,12 +990,22 @@ again:
1015 } 990 }
1016 sbio = sdev->bios[sdev->curr]; 991 sbio = sdev->bios[sdev->curr];
1017 if (sbio->count == 0) { 992 if (sbio->count == 0) {
993 struct bio *bio;
994
1018 sbio->physical = physical; 995 sbio->physical = physical;
1019 sbio->logical = logical; 996 sbio->logical = logical;
997 bio = bio_alloc(GFP_NOFS, SCRUB_PAGES_PER_BIO);
998 if (!bio)
999 return -ENOMEM;
1000
1001 bio->bi_private = sbio;
1002 bio->bi_end_io = scrub_bio_end_io;
1003 bio->bi_bdev = sdev->dev->bdev;
1004 bio->bi_sector = sbio->physical >> 9;
1005 sbio->err = 0;
1006 sbio->bio = bio;
1020 } else if (sbio->physical + sbio->count * PAGE_SIZE != physical || 1007 } else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||
1021 sbio->logical + sbio->count * PAGE_SIZE != logical) { 1008 sbio->logical + sbio->count * PAGE_SIZE != logical) {
1022 int ret;
1023
1024 ret = scrub_submit(sdev); 1009 ret = scrub_submit(sdev);
1025 if (ret) 1010 if (ret)
1026 return ret; 1011 return ret;
@@ -1030,6 +1015,20 @@ again:
1030 sbio->spag[sbio->count].generation = gen; 1015 sbio->spag[sbio->count].generation = gen;
1031 sbio->spag[sbio->count].have_csum = 0; 1016 sbio->spag[sbio->count].have_csum = 0;
1032 sbio->spag[sbio->count].mirror_num = mirror_num; 1017 sbio->spag[sbio->count].mirror_num = mirror_num;
1018
1019 page = alloc_page(GFP_NOFS);
1020 if (!page)
1021 return -ENOMEM;
1022
1023 ret = bio_add_page(sbio->bio, page, PAGE_SIZE, 0);
1024 if (!ret) {
1025 __free_page(page);
1026 ret = scrub_submit(sdev);
1027 if (ret)
1028 return ret;
1029 goto again;
1030 }
1031
1033 if (csum) { 1032 if (csum) {
1034 sbio->spag[sbio->count].have_csum = 1; 1033 sbio->spag[sbio->count].have_csum = 1;
1035 memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size); 1034 memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);
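
The scrub rework above moves bio allocation out of submit time and into the page-staging path: a bio is allocated when the first page of a batch arrives, pages are appended one at a time, and a failed bio_add_page() flushes the current bio and retries the same page. The batching logic in userspace terms, with stand-in types and an arbitrary capacity:

#include <stdio.h>

#define BATCH_CAP 4

struct batch { int items[BATCH_CAP]; int count; };

static void flush(struct batch *b)
{
	printf("submitting %d items\n", b->count);	/* ~ scrub_submit() */
	b->count = 0;
}

static void add_item(struct batch *b, int item)
{
again:
	if (b->count == BATCH_CAP) {	/* ~ bio_add_page() refusing a page */
		flush(b);
		goto again;		/* retry the same item after flushing */
	}
	b->items[b->count++] = item;
}

int main(void)
{
	struct batch b = { .count = 0 };
	for (int i = 0; i < 10; i++)
		add_item(&b, i);
	if (b.count)
		flush(&b);		/* final partial batch */
	return 0;
}

One behavioral consequence of the kernel change: an allocation failure now surfaces as -ENOMEM from scrub_page() before the batch is built up, instead of from scrub_submit() after all the per-page state was already recorded.
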
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 57080dffdfc6..e28ad4baf483 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -197,7 +197,7 @@ static match_table_t tokens = {
197 {Opt_subvolrootid, "subvolrootid=%d"}, 197 {Opt_subvolrootid, "subvolrootid=%d"},
198 {Opt_defrag, "autodefrag"}, 198 {Opt_defrag, "autodefrag"},
199 {Opt_inode_cache, "inode_cache"}, 199 {Opt_inode_cache, "inode_cache"},
200 {Opt_no_space_cache, "no_space_cache"}, 200 {Opt_no_space_cache, "nospace_cache"},
201 {Opt_recovery, "recovery"}, 201 {Opt_recovery, "recovery"},
202 {Opt_err, NULL}, 202 {Opt_err, NULL},
203}; 203};
@@ -448,6 +448,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
448 token = match_token(p, tokens, args); 448 token = match_token(p, tokens, args);
449 switch (token) { 449 switch (token) {
450 case Opt_subvol: 450 case Opt_subvol:
451 kfree(*subvol_name);
451 *subvol_name = match_strdup(&args[0]); 452 *subvol_name = match_strdup(&args[0]);
452 break; 453 break;
453 case Opt_subvolid: 454 case Opt_subvolid:
@@ -710,7 +711,7 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
710 if (btrfs_test_opt(root, SPACE_CACHE)) 711 if (btrfs_test_opt(root, SPACE_CACHE))
711 seq_puts(seq, ",space_cache"); 712 seq_puts(seq, ",space_cache");
712 else 713 else
713 seq_puts(seq, ",no_space_cache"); 714 seq_puts(seq, ",nospace_cache");
714 if (btrfs_test_opt(root, CLEAR_CACHE)) 715 if (btrfs_test_opt(root, CLEAR_CACHE))
715 seq_puts(seq, ",clear_cache"); 716 seq_puts(seq, ",clear_cache");
716 if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED)) 717 if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED))
@@ -824,13 +825,9 @@ static char *setup_root_args(char *args)
824static struct dentry *mount_subvol(const char *subvol_name, int flags, 825static struct dentry *mount_subvol(const char *subvol_name, int flags,
825 const char *device_name, char *data) 826 const char *device_name, char *data)
826{ 827{
827 struct super_block *s;
828 struct dentry *root; 828 struct dentry *root;
829 struct vfsmount *mnt; 829 struct vfsmount *mnt;
830 struct mnt_namespace *ns_private;
831 char *newargs; 830 char *newargs;
832 struct path path;
833 int error;
834 831
835 newargs = setup_root_args(data); 832 newargs = setup_root_args(data);
836 if (!newargs) 833 if (!newargs)
@@ -841,39 +838,17 @@ static struct dentry *mount_subvol(const char *subvol_name, int flags,
841 if (IS_ERR(mnt)) 838 if (IS_ERR(mnt))
842 return ERR_CAST(mnt); 839 return ERR_CAST(mnt);
843 840
844 ns_private = create_mnt_ns(mnt); 841 root = mount_subtree(mnt, subvol_name);
845 if (IS_ERR(ns_private)) {
846 mntput(mnt);
847 return ERR_CAST(ns_private);
848 }
849
850 /*
851 * This will trigger the automount of the subvol so we can just
852 * drop the mnt we have here and return the dentry that we
853 * found.
854 */
855 error = vfs_path_lookup(mnt->mnt_root, mnt, subvol_name,
856 LOOKUP_FOLLOW, &path);
857 put_mnt_ns(ns_private);
858 if (error)
859 return ERR_PTR(error);
860 842
861 if (!is_subvolume_inode(path.dentry->d_inode)) { 843 if (!IS_ERR(root) && !is_subvolume_inode(root->d_inode)) {
862 path_put(&path); 844 struct super_block *s = root->d_sb;
863 mntput(mnt); 845 dput(root);
864 error = -EINVAL; 846 root = ERR_PTR(-EINVAL);
847 deactivate_locked_super(s);
865 printk(KERN_ERR "btrfs: '%s' is not a valid subvolume\n", 848 printk(KERN_ERR "btrfs: '%s' is not a valid subvolume\n",
866 subvol_name); 849 subvol_name);
867 return ERR_PTR(-EINVAL);
868 } 850 }
869 851
870 /* Get a ref to the sb and the dentry we found and return it */
871 s = path.mnt->mnt_sb;
872 atomic_inc(&s->s_active);
873 root = dget(path.dentry);
874 path_put(&path);
875 down_write(&s->s_umount);
876
877 return root; 852 return root;
878} 853}
879 854
@@ -890,7 +865,6 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
890 struct super_block *s; 865 struct super_block *s;
891 struct dentry *root; 866 struct dentry *root;
892 struct btrfs_fs_devices *fs_devices = NULL; 867 struct btrfs_fs_devices *fs_devices = NULL;
893 struct btrfs_root *tree_root = NULL;
894 struct btrfs_fs_info *fs_info = NULL; 868 struct btrfs_fs_info *fs_info = NULL;
895 fmode_t mode = FMODE_READ; 869 fmode_t mode = FMODE_READ;
896 char *subvol_name = NULL; 870 char *subvol_name = NULL;
@@ -904,8 +878,10 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
904 error = btrfs_parse_early_options(data, mode, fs_type, 878 error = btrfs_parse_early_options(data, mode, fs_type,
905 &subvol_name, &subvol_objectid, 879 &subvol_name, &subvol_objectid,
906 &subvol_rootid, &fs_devices); 880 &subvol_rootid, &fs_devices);
907 if (error) 881 if (error) {
882 kfree(subvol_name);
908 return ERR_PTR(error); 883 return ERR_PTR(error);
884 }
909 885
910 if (subvol_name) { 886 if (subvol_name) {
911 root = mount_subvol(subvol_name, flags, device_name, data); 887 root = mount_subvol(subvol_name, flags, device_name, data);
@@ -917,15 +893,6 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
917 if (error) 893 if (error)
918 return ERR_PTR(error); 894 return ERR_PTR(error);
919 895
920 error = btrfs_open_devices(fs_devices, mode, fs_type);
921 if (error)
922 return ERR_PTR(error);
923
924 if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) {
925 error = -EACCES;
926 goto error_close_devices;
927 }
928
929 /* 896 /*
930 * Setup a dummy root and fs_info for test/set super. This is because 897 * Setup a dummy root and fs_info for test/set super. This is because
931 * we don't actually fill this stuff out until open_ctree, but we need 898 * we don't actually fill this stuff out until open_ctree, but we need
@@ -933,24 +900,36 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
933 * then open_ctree will properly initialize everything later. 900 * then open_ctree will properly initialize everything later.
934 */ 901 */
935 fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_NOFS); 902 fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_NOFS);
936 tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); 903 if (!fs_info)
937 if (!fs_info || !tree_root) { 904 return ERR_PTR(-ENOMEM);
905
906 fs_info->tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
907 if (!fs_info->tree_root) {
938 error = -ENOMEM; 908 error = -ENOMEM;
939 goto error_close_devices; 909 goto error_fs_info;
940 } 910 }
941 fs_info->tree_root = tree_root; 911 fs_info->tree_root->fs_info = fs_info;
942 fs_info->fs_devices = fs_devices; 912 fs_info->fs_devices = fs_devices;
943 tree_root->fs_info = fs_info;
944 913
945 fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS); 914 fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS);
946 fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS); 915 fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS);
947 if (!fs_info->super_copy || !fs_info->super_for_commit) { 916 if (!fs_info->super_copy || !fs_info->super_for_commit) {
948 error = -ENOMEM; 917 error = -ENOMEM;
918 goto error_fs_info;
919 }
920
921 error = btrfs_open_devices(fs_devices, mode, fs_type);
922 if (error)
923 goto error_fs_info;
924
925 if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) {
926 error = -EACCES;
949 goto error_close_devices; 927 goto error_close_devices;
950 } 928 }
951 929
952 bdev = fs_devices->latest_bdev; 930 bdev = fs_devices->latest_bdev;
953 s = sget(fs_type, btrfs_test_super, btrfs_set_super, tree_root); 931 s = sget(fs_type, btrfs_test_super, btrfs_set_super,
932 fs_info->tree_root);
954 if (IS_ERR(s)) { 933 if (IS_ERR(s)) {
955 error = PTR_ERR(s); 934 error = PTR_ERR(s);
956 goto error_close_devices; 935 goto error_close_devices;
@@ -959,12 +938,12 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
959 if (s->s_root) { 938 if (s->s_root) {
960 if ((flags ^ s->s_flags) & MS_RDONLY) { 939 if ((flags ^ s->s_flags) & MS_RDONLY) {
961 deactivate_locked_super(s); 940 deactivate_locked_super(s);
962 return ERR_PTR(-EBUSY); 941 error = -EBUSY;
942 goto error_close_devices;
963 } 943 }
964 944
965 btrfs_close_devices(fs_devices); 945 btrfs_close_devices(fs_devices);
966 free_fs_info(fs_info); 946 free_fs_info(fs_info);
967 kfree(tree_root);
968 } else { 947 } else {
969 char b[BDEVNAME_SIZE]; 948 char b[BDEVNAME_SIZE];
970 949
@@ -991,8 +970,8 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
991 970
992error_close_devices: 971error_close_devices:
993 btrfs_close_devices(fs_devices); 972 btrfs_close_devices(fs_devices);
973error_fs_info:
994 free_fs_info(fs_info); 974 free_fs_info(fs_info);
995 kfree(tree_root);
996 return ERR_PTR(error); 975 return ERR_PTR(error);
997} 976}
998 977
@@ -1078,7 +1057,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
1078 int i = 0, nr_devices; 1057 int i = 0, nr_devices;
1079 int ret; 1058 int ret;
1080 1059
1081 nr_devices = fs_info->fs_devices->rw_devices; 1060 nr_devices = fs_info->fs_devices->open_devices;
1082 BUG_ON(!nr_devices); 1061 BUG_ON(!nr_devices);
1083 1062
1084 devices_info = kmalloc(sizeof(*devices_info) * nr_devices, 1063 devices_info = kmalloc(sizeof(*devices_info) * nr_devices,
@@ -1100,8 +1079,8 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
1100 else 1079 else
1101 min_stripe_size = BTRFS_STRIPE_LEN; 1080 min_stripe_size = BTRFS_STRIPE_LEN;
1102 1081
1103 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 1082 list_for_each_entry(device, &fs_devices->devices, dev_list) {
1104 if (!device->in_fs_metadata) 1083 if (!device->in_fs_metadata || !device->bdev)
1105 continue; 1084 continue;
1106 1085
1107 avail_space = device->total_bytes - device->bytes_used; 1086 avail_space = device->total_bytes - device->bytes_used;
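
The btrfs_mount() reordering above follows the usual kernel goto-ladder discipline: acquire resources in a fixed order and give every failure a label that unwinds exactly what was acquired so far. Opening the devices used to happen first, which forced every later failure onto the close-devices path; now the plain allocations come first and btrfs_open_devices() is the last fallible step before sget(). A generic runnable sketch of the ladder, with a stand-in for the risky call:

#include <stdio.h>
#include <stdlib.h>

static int fake_open_devices(void) { return 0; }	/* stand-in */

int main(void)
{
	int err;
	char *a, *b;

	a = malloc(16);			/* step 1: plain allocation */
	if (!a) { err = -1; goto out; }

	b = malloc(16);			/* step 2 */
	if (!b) { err = -1; goto free_a; }

	if (fake_open_devices()) {	/* step 3: the risky call, last */
		err = -1;
		goto free_b;
	}

	printf("mounted\n");		/* success returns before the labels */
	free(b);
	free(a);
	return 0;

free_b:
	free(b);
free_a:
	free(a);
out:
	return err;
}

The same file also plugs two subvol_name leaks: a repeated subvol= option now frees the previous string before strduping the next one, and the early-options error path frees whatever was parsed before returning.
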
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 960835eaf4da..81376d94cd3c 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -785,6 +785,10 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
785 785
786 btrfs_save_ino_cache(root, trans); 786 btrfs_save_ino_cache(root, trans);
787 787
788 /* see comments in should_cow_block() */
789 root->force_cow = 0;
790 smp_wmb();
791
788 if (root->commit_root != root->node) { 792 if (root->commit_root != root->node) {
789 mutex_lock(&root->fs_commit_mutex); 793 mutex_lock(&root->fs_commit_mutex);
790 switch_commit_root(root); 794 switch_commit_root(root);
@@ -882,8 +886,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
882 btrfs_reloc_pre_snapshot(trans, pending, &to_reserve); 886 btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
883 887
884 if (to_reserve > 0) { 888 if (to_reserve > 0) {
885 ret = btrfs_block_rsv_add(root, &pending->block_rsv, 889 ret = btrfs_block_rsv_add_noflush(root, &pending->block_rsv,
886 to_reserve); 890 to_reserve);
887 if (ret) { 891 if (ret) {
888 pending->error = ret; 892 pending->error = ret;
889 goto fail; 893 goto fail;
@@ -947,6 +951,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
947 btrfs_tree_unlock(old); 951 btrfs_tree_unlock(old);
948 free_extent_buffer(old); 952 free_extent_buffer(old);
949 953
954 /* see comments in should_cow_block() */
955 root->force_cow = 1;
956 smp_wmb();
957
950 btrfs_set_root_node(new_root_item, tmp); 958 btrfs_set_root_node(new_root_item, tmp);
951 /* record when the snapshot was created in key.offset */ 959 /* record when the snapshot was created in key.offset */
952 key.offset = trans->transid; 960 key.offset = trans->transid;
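
The smp_wmb() calls above pair with a read-side smp_rmb() in should_cow_block(), as the comments note: the writer publishes root->force_cow and the barrier keeps that store ordered against surrounding stores, while the reader's barrier keeps its loads ordered before it acts on the flag. The general publish/consume shape, shown as a runnable userspace analogue with C11 fences; 'payload' and 'ready' are stand-ins, not btrfs fields:

#include <stdatomic.h>
#include <stdio.h>

static int payload;		/* stand-in for the state being published */
static atomic_int ready;	/* stand-in for the flag */

static void writer(void)
{
	payload = 42;					/* publish data first */
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
	atomic_store_explicit(&ready, 1, memory_order_relaxed);
}

static void reader(void)
{
	if (atomic_load_explicit(&ready, memory_order_relaxed)) {
		atomic_thread_fence(memory_order_acquire);  /* ~ smp_rmb() */
		printf("payload = %d\n", payload);	/* guaranteed 42 */
	}
}

int main(void)
{
	writer();
	reader();
	return 0;
}
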
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index f8e2943101a1..0a8c8f8304b1 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -999,7 +999,7 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
999 key.objectid = device->devid; 999 key.objectid = device->devid;
1000 key.offset = start; 1000 key.offset = start;
1001 key.type = BTRFS_DEV_EXTENT_KEY; 1001 key.type = BTRFS_DEV_EXTENT_KEY;
1002 1002again:
1003 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1003 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1004 if (ret > 0) { 1004 if (ret > 0) {
1005 ret = btrfs_previous_item(root, path, key.objectid, 1005 ret = btrfs_previous_item(root, path, key.objectid,
@@ -1012,6 +1012,9 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1012 struct btrfs_dev_extent); 1012 struct btrfs_dev_extent);
1013 BUG_ON(found_key.offset > start || found_key.offset + 1013 BUG_ON(found_key.offset > start || found_key.offset +
1014 btrfs_dev_extent_length(leaf, extent) < start); 1014 btrfs_dev_extent_length(leaf, extent) < start);
1015 key = found_key;
1016 btrfs_release_path(path);
1017 goto again;
1015 } else if (ret == 0) { 1018 } else if (ret == 0) {
1016 leaf = path->nodes[0]; 1019 leaf = path->nodes[0];
1017 extent = btrfs_item_ptr(leaf, path->slots[0], 1020 extent = btrfs_item_ptr(leaf, path->slots[0],
@@ -1608,7 +1611,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1608 if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding) 1611 if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1609 return -EINVAL; 1612 return -EINVAL;
1610 1613
1611 bdev = blkdev_get_by_path(device_path, FMODE_EXCL, 1614 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
1612 root->fs_info->bdev_holder); 1615 root->fs_info->bdev_holder);
1613 if (IS_ERR(bdev)) 1616 if (IS_ERR(bdev))
1614 return PTR_ERR(bdev); 1617 return PTR_ERR(bdev);
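
The again: loop added to btrfs_free_dev_extent() handles the case where the initial search lands past the extent that covers start: btrfs_previous_item() backs up to the covering item, its key is remembered, the path is released, and the search is rerun so it positions exactly on that key with a fresh path. A toy version of the re-anchor-and-retry pattern, with stand-in helpers in place of the btree calls:

#include <stdio.h>

static int items[] = { 90, 100, 110 };	/* sorted keys in a toy "tree" */

/* return 0 with *idx on an exact hit, 1 if positioned past 'key' */
static int search(int key, int *idx)
{
	for (int i = 0; i < 3; i++) {
		if (items[i] == key) { *idx = i; return 0; }
		if (items[i] > key)  { *idx = i; return 1; }
	}
	*idx = 3;
	return 1;
}

int main(void)
{
	int key = 95, idx;
again:
	if (search(key, &idx) > 0) {
		idx--;			/* ~ btrfs_previous_item() */
		key = items[idx];	/* re-anchor on the covering key */
		goto again;		/* second search hits it exactly */
	}
	printf("found key %d at slot %d\n", key, idx);
	return 0;
}

The blkdev_get_by_path() hunk is independent: a device being added is now opened FMODE_WRITE | FMODE_EXCL, since exclusive access alone is not enough for a device btrfs is about to write to.
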
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index ab5b1c49f352..78f2d4d4f37f 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -100,6 +100,12 @@ struct btrfs_device {
100 struct reada_zone *reada_curr_zone; 100 struct reada_zone *reada_curr_zone;
101 struct radix_tree_root reada_zones; 101 struct radix_tree_root reada_zones;
102 struct radix_tree_root reada_extents; 102 struct radix_tree_root reada_extents;
103
104 /* for sending down flush barriers */
105 struct bio *flush_bio;
106 struct completion flush_wait;
107 int nobarriers;
108
103}; 109};
104 110
105struct btrfs_fs_devices { 111struct btrfs_fs_devices {
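
The new btrfs_device fields exist to issue empty flush bios and wait for their completion, per the comment. A hedged kernel-style sketch of how fields like these are typically wired together; this is pseudocode against the 3.2-era block API, not the actual btrfs barrier implementation:

static void flush_end_io(struct bio *bio, int err)
{
	complete(bio->bi_private);		/* wake the waiter */
}

static int send_flush(struct btrfs_device *device)
{
	struct bio *bio;

	if (device->nobarriers)			/* device ignores flushes */
		return 0;

	bio = bio_alloc(GFP_NOFS, 0);		/* empty bio: no payload */
	if (!bio)
		return -ENOMEM;

	bio->bi_end_io = flush_end_io;
	bio->bi_bdev = device->bdev;
	init_completion(&device->flush_wait);
	bio->bi_private = &device->flush_wait;
	device->flush_bio = bio;

	submit_bio(WRITE_FLUSH, bio);		/* flush request, no data */
	wait_for_completion(&device->flush_wait);
	bio_put(bio);
	return 0;
}
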