path: root/fs/btrfs
author	Mark Brown <broonie@opensource.wolfsonmicro.com>	2011-12-12 11:19:20 -0500
committer	Mark Brown <broonie@opensource.wolfsonmicro.com>	2011-12-12 11:19:20 -0500
commit	68556ca1e03d6a35be3b315eba58df2f8176e3a0 (patch)
tree	36a390d29a0d03a59a90c0f223b0d98a80f0f6c3 /fs/btrfs
parent	0604ca48f1689ad06144b81f5c08f297b6edd831 (diff)
parent	8ab30691826fc05efa47c4ffba19b80496bb3a2c (diff)
Merge branch 'mfd/wm8994' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/misc into for-3.3
Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/backref.c            2
-rw-r--r--  fs/btrfs/btrfs_inode.h        4
-rw-r--r--  fs/btrfs/ctree.c             17
-rw-r--r--  fs/btrfs/ctree.h              5
-rw-r--r--  fs/btrfs/delayed-inode.c     58
-rw-r--r--  fs/btrfs/disk-io.c          189
-rw-r--r--  fs/btrfs/extent-tree.c      169
-rw-r--r--  fs/btrfs/extent_io.c          9
-rw-r--r--  fs/btrfs/extent_io.h          2
-rw-r--r--  fs/btrfs/free-space-cache.c  80
-rw-r--r--  fs/btrfs/inode-map.c         28
-rw-r--r--  fs/btrfs/inode.c             90
-rw-r--r--  fs/btrfs/ioctl.c             15
-rw-r--r--  fs/btrfs/relocation.c         2
-rw-r--r--  fs/btrfs/scrub.c             66
-rw-r--r--  fs/btrfs/super.c             87
-rw-r--r--  fs/btrfs/transaction.c       12
-rw-r--r--  fs/btrfs/volumes.c            5
-rw-r--r--  fs/btrfs/volumes.h            6
19 files changed, 558 insertions(+), 288 deletions(-)
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 8855aad3929c..22c64fff1bd5 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -683,7 +683,7 @@ static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
 		return PTR_ERR(fspath);
 
 	if (fspath > fspath_min) {
-		ipath->fspath->val[i] = (u64)fspath;
+		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
 		++ipath->fspath->elem_cnt;
 		ipath->fspath->bytes_left = fspath - fspath_min;
 	} else {
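
The backref.c change stores a kernel pointer in the fixed-width u64 slot of the ioctl fspath array. A minimal userspace sketch of why the two-step cast matters (names here are illustrative, not the kernel's): on 32-bit builds, casting a pointer straight to u64 warns about a cast to an integer of a different size, while going through unsigned long, which is pointer-sized on Linux, converts cleanly and round-trips.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

struct fspath_slot {
	u64 val;	/* fixed-width slot shared with 32-bit user space */
};

int main(void)
{
	char name[] = "fs/btrfs";
	struct fspath_slot slot;

	/* pointer -> unsigned long (same width) -> u64: no truncation warning */
	slot.val = (u64)(unsigned long)name;

	/* u64 -> unsigned long -> pointer recovers the original address */
	printf("%s\n", (char *)(unsigned long)slot.val);
	return 0;
}
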
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 5a5d325a3935..634608d2a6d0 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -147,14 +147,12 @@ struct btrfs_inode {
 	 * the btrfs file release call will add this inode to the
 	 * ordered operations list so that we make sure to flush out any
 	 * new data the application may have written before commit.
-	 *
-	 * yes, its silly to have a single bitflag, but we might grow more
-	 * of these.
 	 */
 	unsigned ordered_data_close:1;
 	unsigned orphan_meta_reserved:1;
 	unsigned dummy_inode:1;
 	unsigned in_defrag:1;
+	unsigned delalloc_meta_reserved:1;
 
 	/*
 	 * always compress this one file
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 0fe615e4ea38..dede441bdeee 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -514,10 +514,25 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
 			       struct btrfs_root *root,
 			       struct extent_buffer *buf)
 {
+	/* ensure we can see the force_cow */
+	smp_rmb();
+
+	/*
+	 * We do not need to cow a block if
+	 * 1) this block is not created or changed in this transaction;
+	 * 2) this block does not belong to TREE_RELOC tree;
+	 * 3) the root is not forced COW.
+	 *
+	 * What is forced COW:
+	 *    when we create snapshot during committing the transaction,
+	 *    after we've finished copying src root, we must COW the shared
+	 *    block to ensure the metadata consistency.
+	 */
 	if (btrfs_header_generation(buf) == trans->transid &&
 	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
 	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
-	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
+	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
+	    !root->force_cow)
 		return 0;
 	return 1;
 }
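
A standalone sketch of the decision should_cow_block() now encodes (simplified stand-in types, not kernel structs): a buffer may be modified in place only when it was created in the running transaction, has not been written out, is not a shared relocation block, and the root is not flagged for forced COW while a snapshot commit copies it.

#include <stdbool.h>

struct buf_state {
	unsigned long generation;	/* transaction that created the block */
	bool written;			/* already flushed to disk */
	bool reloc_flag;		/* shared with a relocation tree */
};

static bool should_cow_block(unsigned long transid, bool is_reloc_root,
			     bool force_cow, const struct buf_state *buf)
{
	if (buf->generation == transid &&
	    !buf->written &&
	    !(!is_reloc_root && buf->reloc_flag) &&
	    !force_cow)
		return false;	/* safe to edit the block in place */
	return true;		/* must copy the block first */
}
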
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index b9ba59ff9292..04a5dfcee5a1 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -848,7 +848,8 @@ struct btrfs_free_cluster {
 enum btrfs_caching_type {
 	BTRFS_CACHE_NO = 0,
 	BTRFS_CACHE_STARTED = 1,
-	BTRFS_CACHE_FINISHED = 2,
+	BTRFS_CACHE_FAST = 2,
+	BTRFS_CACHE_FINISHED = 3,
 };
 
 enum btrfs_disk_cache_state {
@@ -1271,6 +1272,8 @@ struct btrfs_root {
 	 * for stat. It may be used for more later
 	 */
 	dev_t anon_dev;
+
+	int force_cow;
 };
 
 struct btrfs_ioctl_defrag_range_args {
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 3a1b939c9ae2..5b163572e0ca 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -617,12 +617,14 @@ static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
 static int btrfs_delayed_inode_reserve_metadata(
 					struct btrfs_trans_handle *trans,
 					struct btrfs_root *root,
+					struct inode *inode,
 					struct btrfs_delayed_node *node)
 {
 	struct btrfs_block_rsv *src_rsv;
 	struct btrfs_block_rsv *dst_rsv;
 	u64 num_bytes;
 	int ret;
+	int release = false;
 
 	src_rsv = trans->block_rsv;
 	dst_rsv = &root->fs_info->delayed_block_rsv;
@@ -652,12 +654,65 @@ static int btrfs_delayed_inode_reserve_metadata(
 		if (!ret)
 			node->bytes_reserved = num_bytes;
 		return ret;
+	} else if (src_rsv == &root->fs_info->delalloc_block_rsv) {
+		spin_lock(&BTRFS_I(inode)->lock);
+		if (BTRFS_I(inode)->delalloc_meta_reserved) {
+			BTRFS_I(inode)->delalloc_meta_reserved = 0;
+			spin_unlock(&BTRFS_I(inode)->lock);
+			release = true;
+			goto migrate;
+		}
+		spin_unlock(&BTRFS_I(inode)->lock);
+
+		/* Ok we didn't have space pre-reserved.  This shouldn't happen
+		 * too often but it can happen if we do delalloc to an existing
+		 * inode which gets dirtied because of the time update, and then
+		 * isn't touched again until after the transaction commits and
+		 * then we try to write out the data.  First try to be nice and
+		 * reserve something strictly for us.  If not be a pain and try
+		 * to steal from the delalloc block rsv.
+		 */
+		ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
+		if (!ret)
+			goto out;
+
+		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
+		if (!ret)
+			goto out;
+
+		/*
+		 * Ok this is a problem, let's just steal from the global rsv
+		 * since this really shouldn't happen that often.
+		 */
+		WARN_ON(1);
+		ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
+					      dst_rsv, num_bytes);
+		goto out;
 	}
 
+migrate:
 	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
+
+out:
+	/*
+	 * Migrate only takes a reservation, it doesn't touch the size of the
+	 * block_rsv.  This is to simplify people who don't normally have things
+	 * migrated from their block rsv.  If they go to release their
+	 * reservation, that will decrease the size as well, so if migrate
+	 * reduced size we'd end up with a negative size.  But for the
+	 * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
+	 * but we could in fact do this reserve/migrate dance several times
+	 * between the time we did the original reservation and we'd clean it
+	 * up.  So to take care of this, release the space for the meta
+	 * reservation here.  I think it may be time for a documentation page on
+	 * how block rsvs work.
+	 */
 	if (!ret)
 		node->bytes_reserved = num_bytes;
 
+	if (release)
+		btrfs_block_rsv_release(root, src_rsv, num_bytes);
+
 	return ret;
 }
 
@@ -1708,7 +1763,8 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
 		goto release_node;
 	}
 
-	ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
+	ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
+						   delayed_node);
 	if (ret)
 		goto release_node;
 
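
The long comment above distinguishes migrating a reservation (which moves reserved bytes but leaves the source's size alone) from releasing one (which shrinks both). A simplified userspace model of that bookkeeping, with illustrative types rather than the kernel's block_rsv; after a migrate, the source still counts the moved bytes against its size, which is why the hunk releases the delalloc reservation explicitly once the bytes have moved.

#include <assert.h>
#include <stdint.h>

typedef uint64_t u64;

struct rsv {
	u64 size;	/* bytes this rsv is entitled to */
	u64 reserved;	/* bytes it actually holds right now */
};

/* migrate: move held bytes only; the source keeps its entitlement */
static int rsv_migrate(struct rsv *src, struct rsv *dst, u64 bytes)
{
	if (src->reserved < bytes)
		return -1;
	src->reserved -= bytes;
	dst->reserved += bytes;
	return 0;
}

/* release: drop both the held bytes and the entitlement */
static void rsv_release(struct rsv *rsv, u64 bytes)
{
	assert(rsv->size >= bytes);
	rsv->size -= bytes;
	if (rsv->reserved >= bytes)
		rsv->reserved -= bytes;
	else
		rsv->reserved = 0;
}
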
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 102c176fc29c..632f8f3cc9db 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -620,7 +620,7 @@ out:
 
 static int btree_io_failed_hook(struct bio *failed_bio,
 				struct page *page, u64 start, u64 end,
-				u64 mirror_num, struct extent_state *state)
+				int mirror_num, struct extent_state *state)
 {
 	struct extent_io_tree *tree;
 	unsigned long len;
@@ -1890,31 +1890,32 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	u64 features;
 	struct btrfs_key location;
 	struct buffer_head *bh;
-	struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root),
-						 GFP_NOFS);
-	struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
-					       GFP_NOFS);
+	struct btrfs_super_block *disk_super;
 	struct btrfs_root *tree_root = btrfs_sb(sb);
-	struct btrfs_fs_info *fs_info = NULL;
-	struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
-						GFP_NOFS);
-	struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
-					      GFP_NOFS);
+	struct btrfs_fs_info *fs_info = tree_root->fs_info;
+	struct btrfs_root *extent_root;
+	struct btrfs_root *csum_root;
+	struct btrfs_root *chunk_root;
+	struct btrfs_root *dev_root;
 	struct btrfs_root *log_tree_root;
-
 	int ret;
 	int err = -EINVAL;
 	int num_backups_tried = 0;
 	int backup_index = 0;
 
-	struct btrfs_super_block *disk_super;
+	extent_root = fs_info->extent_root =
+			kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
+	csum_root = fs_info->csum_root =
+			kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
+	chunk_root = fs_info->chunk_root =
+			kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
+	dev_root = fs_info->dev_root =
+			kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
 
-	if (!extent_root || !tree_root || !tree_root->fs_info ||
-	    !chunk_root || !dev_root || !csum_root) {
+	if (!extent_root || !csum_root || !chunk_root || !dev_root) {
 		err = -ENOMEM;
 		goto fail;
 	}
-	fs_info = tree_root->fs_info;
 
 	ret = init_srcu_struct(&fs_info->subvol_srcu);
 	if (ret) {
@@ -1954,12 +1955,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	mutex_init(&fs_info->reloc_mutex);
 
 	init_completion(&fs_info->kobj_unregister);
-	fs_info->tree_root = tree_root;
-	fs_info->extent_root = extent_root;
-	fs_info->csum_root = csum_root;
-	fs_info->chunk_root = chunk_root;
-	fs_info->dev_root = dev_root;
-	fs_info->fs_devices = fs_devices;
 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
 	INIT_LIST_HEAD(&fs_info->space_info);
 	btrfs_mapping_init(&fs_info->mapping_tree);
@@ -2465,21 +2460,20 @@ fail_sb_buffer:
 	btrfs_stop_workers(&fs_info->caching_workers);
 fail_alloc:
 fail_iput:
+	btrfs_mapping_tree_free(&fs_info->mapping_tree);
+
 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
 	iput(fs_info->btree_inode);
-
-	btrfs_close_devices(fs_info->fs_devices);
-	btrfs_mapping_tree_free(&fs_info->mapping_tree);
 fail_bdi:
 	bdi_destroy(&fs_info->bdi);
 fail_srcu:
 	cleanup_srcu_struct(&fs_info->subvol_srcu);
 fail:
+	btrfs_close_devices(fs_info->fs_devices);
 	free_fs_info(fs_info);
 	return ERR_PTR(err);
 
 recovery_tree_root:
-
 	if (!btrfs_test_opt(tree_root, RECOVERY))
 		goto fail_tree_roots;
 
@@ -2579,22 +2573,10 @@ static int write_dev_supers(struct btrfs_device *device,
 	int errors = 0;
 	u32 crc;
 	u64 bytenr;
-	int last_barrier = 0;
 
 	if (max_mirrors == 0)
 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
 
-	/* make sure only the last submit_bh does a barrier */
-	if (do_barriers) {
-		for (i = 0; i < max_mirrors; i++) {
-			bytenr = btrfs_sb_offset(i);
-			if (bytenr + BTRFS_SUPER_INFO_SIZE >=
-			    device->total_bytes)
-				break;
-			last_barrier = i;
-		}
-	}
-
 	for (i = 0; i < max_mirrors; i++) {
 		bytenr = btrfs_sb_offset(i);
 		if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
@@ -2640,17 +2622,136 @@ static int write_dev_supers(struct btrfs_device *device,
 			bh->b_end_io = btrfs_end_buffer_write_sync;
 		}
 
-		if (i == last_barrier && do_barriers)
-			ret = submit_bh(WRITE_FLUSH_FUA, bh);
-		else
-			ret = submit_bh(WRITE_SYNC, bh);
-
+		/*
+		 * we fua the first super.  The others we allow
+		 * to go down lazy.
+		 */
+		ret = submit_bh(WRITE_FUA, bh);
 		if (ret)
 			errors++;
 	}
 	return errors < i ? 0 : -1;
 }
 
+/*
+ * endio for the write_dev_flush, this will wake anyone waiting
+ * for the barrier when it is done
+ */
+static void btrfs_end_empty_barrier(struct bio *bio, int err)
+{
+	if (err) {
+		if (err == -EOPNOTSUPP)
+			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
+		clear_bit(BIO_UPTODATE, &bio->bi_flags);
+	}
+	if (bio->bi_private)
+		complete(bio->bi_private);
+	bio_put(bio);
+}
+
+/*
+ * trigger flushes for one of the devices.  If you pass wait == 0, the flushes
+ * are sent down.  With wait == 1, it waits for the previous flush.
+ *
+ * any device where the flush fails with eopnotsupp is flagged as not-barrier
+ * capable
+ */
+static int write_dev_flush(struct btrfs_device *device, int wait)
+{
+	struct bio *bio;
+	int ret = 0;
+
+	if (device->nobarriers)
+		return 0;
+
+	if (wait) {
+		bio = device->flush_bio;
+		if (!bio)
+			return 0;
+
+		wait_for_completion(&device->flush_wait);
+
+		if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
+			printk("btrfs: disabling barriers on dev %s\n",
+			       device->name);
+			device->nobarriers = 1;
+		}
+		if (!bio_flagged(bio, BIO_UPTODATE)) {
+			ret = -EIO;
+		}
+
+		/* drop the reference from the wait == 0 run */
+		bio_put(bio);
+		device->flush_bio = NULL;
+
+		return ret;
+	}
+
+	/*
+	 * one reference for us, and we leave it for the
+	 * caller
+	 */
+	device->flush_bio = NULL;
+	bio = bio_alloc(GFP_NOFS, 0);
+	if (!bio)
+		return -ENOMEM;
+
+	bio->bi_end_io = btrfs_end_empty_barrier;
+	bio->bi_bdev = device->bdev;
+	init_completion(&device->flush_wait);
+	bio->bi_private = &device->flush_wait;
+	device->flush_bio = bio;
+
+	bio_get(bio);
+	submit_bio(WRITE_FLUSH, bio);
+
+	return 0;
+}
+
+/*
+ * send an empty flush down to each device in parallel,
+ * then wait for them
+ */
+static int barrier_all_devices(struct btrfs_fs_info *info)
+{
+	struct list_head *head;
+	struct btrfs_device *dev;
+	int errors = 0;
+	int ret;
+
+	/* send down all the barriers */
+	head = &info->fs_devices->devices;
+	list_for_each_entry_rcu(dev, head, dev_list) {
+		if (!dev->bdev) {
+			errors++;
+			continue;
+		}
+		if (!dev->in_fs_metadata || !dev->writeable)
+			continue;
+
+		ret = write_dev_flush(dev, 0);
+		if (ret)
+			errors++;
+	}
+
+	/* wait for all the barriers */
+	list_for_each_entry_rcu(dev, head, dev_list) {
+		if (!dev->bdev) {
+			errors++;
+			continue;
+		}
+		if (!dev->in_fs_metadata || !dev->writeable)
+			continue;
+
+		ret = write_dev_flush(dev, 1);
+		if (ret)
+			errors++;
+	}
+	if (errors)
+		return -EIO;
+	return 0;
+}
+
 int write_all_supers(struct btrfs_root *root, int max_mirrors)
 {
 	struct list_head *head;
@@ -2672,6 +2773,10 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
 
 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
 	head = &root->fs_info->fs_devices->devices;
+
+	if (do_barriers)
+		barrier_all_devices(root->fs_info);
+
 	list_for_each_entry_rcu(dev, head, dev_list) {
 		if (!dev->bdev) {
 			total_errors++;
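
The new barrier_all_devices() splits each barrier into a submit pass and a wait pass, so every device's cache flush is in flight before the first wait begins. The same pattern, reduced to a self-contained sketch with stubbed-out flush primitives; flush_start()/flush_wait() are hypothetical stand-ins for write_dev_flush(dev, 0) and write_dev_flush(dev, 1).

#include <stdio.h>

struct dev { const char *name; };

/* stubs standing in for an async flush submit and its completion wait */
static int flush_start(struct dev *d) { printf("flush %s\n", d->name); return 0; }
static int flush_wait(struct dev *d)  { (void)d; return 0; }

static int barrier_all(struct dev *devs, int n)
{
	int i, errors = 0;

	for (i = 0; i < n; i++)		/* pass 1: submit every flush */
		if (flush_start(&devs[i]))
			errors++;

	for (i = 0; i < n; i++)		/* pass 2: wait for each one */
		if (flush_wait(&devs[i]))
			errors++;

	return errors ? -1 : 0;		/* -EIO in the kernel version */
}

int main(void)
{
	struct dev devs[] = { { "sda" }, { "sdb" } };
	return barrier_all(devs, 2);
}
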
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9879bd474632..930ae8949737 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -467,13 +467,59 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 			     struct btrfs_root *root,
 			     int load_cache_only)
 {
+	DEFINE_WAIT(wait);
 	struct btrfs_fs_info *fs_info = cache->fs_info;
 	struct btrfs_caching_control *caching_ctl;
 	int ret = 0;
 
-	smp_mb();
-	if (cache->cached != BTRFS_CACHE_NO)
+	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
+	BUG_ON(!caching_ctl);
+
+	INIT_LIST_HEAD(&caching_ctl->list);
+	mutex_init(&caching_ctl->mutex);
+	init_waitqueue_head(&caching_ctl->wait);
+	caching_ctl->block_group = cache;
+	caching_ctl->progress = cache->key.objectid;
+	atomic_set(&caching_ctl->count, 1);
+	caching_ctl->work.func = caching_thread;
+
+	spin_lock(&cache->lock);
+	/*
+	 * This should be a rare occasion, but this could happen I think in the
+	 * case where one thread starts to load the space cache info, and then
+	 * some other thread starts a transaction commit which tries to do an
+	 * allocation while the other thread is still loading the space cache
+	 * info.  The previous loop should have kept us from choosing this block
+	 * group, but if we've moved to the state where we will wait on caching
+	 * block groups we need to first check if we're doing a fast load here,
+	 * so we can wait for it to finish, otherwise we could end up allocating
+	 * from a block group whose cache gets evicted for one reason or
+	 * another.
+	 */
+	while (cache->cached == BTRFS_CACHE_FAST) {
+		struct btrfs_caching_control *ctl;
+
+		ctl = cache->caching_ctl;
+		atomic_inc(&ctl->count);
+		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
+		spin_unlock(&cache->lock);
+
+		schedule();
+
+		finish_wait(&ctl->wait, &wait);
+		put_caching_control(ctl);
+		spin_lock(&cache->lock);
+	}
+
+	if (cache->cached != BTRFS_CACHE_NO) {
+		spin_unlock(&cache->lock);
+		kfree(caching_ctl);
 		return 0;
+	}
+	WARN_ON(cache->caching_ctl);
+	cache->caching_ctl = caching_ctl;
+	cache->cached = BTRFS_CACHE_FAST;
+	spin_unlock(&cache->lock);
 
 	/*
 	 * We can't do the read from on-disk cache during a commit since we need
@@ -484,56 +530,51 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 	if (trans && (!trans->transaction->in_commit) &&
 	    (root && root != root->fs_info->tree_root) &&
 	    btrfs_test_opt(root, SPACE_CACHE)) {
-		spin_lock(&cache->lock);
-		if (cache->cached != BTRFS_CACHE_NO) {
-			spin_unlock(&cache->lock);
-			return 0;
-		}
-		cache->cached = BTRFS_CACHE_STARTED;
-		spin_unlock(&cache->lock);
-
 		ret = load_free_space_cache(fs_info, cache);
 
 		spin_lock(&cache->lock);
 		if (ret == 1) {
+			cache->caching_ctl = NULL;
 			cache->cached = BTRFS_CACHE_FINISHED;
 			cache->last_byte_to_unpin = (u64)-1;
 		} else {
-			cache->cached = BTRFS_CACHE_NO;
+			if (load_cache_only) {
+				cache->caching_ctl = NULL;
+				cache->cached = BTRFS_CACHE_NO;
+			} else {
+				cache->cached = BTRFS_CACHE_STARTED;
+			}
 		}
 		spin_unlock(&cache->lock);
+		wake_up(&caching_ctl->wait);
 		if (ret == 1) {
+			put_caching_control(caching_ctl);
 			free_excluded_extents(fs_info->extent_root, cache);
 			return 0;
 		}
+	} else {
+		/*
+		 * We are not going to do the fast caching, set cached to the
+		 * appropriate value and wakeup any waiters.
+		 */
+		spin_lock(&cache->lock);
+		if (load_cache_only) {
+			cache->caching_ctl = NULL;
+			cache->cached = BTRFS_CACHE_NO;
+		} else {
+			cache->cached = BTRFS_CACHE_STARTED;
+		}
+		spin_unlock(&cache->lock);
+		wake_up(&caching_ctl->wait);
 	}
 
-	if (load_cache_only)
-		return 0;
-
-	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
-	BUG_ON(!caching_ctl);
-
-	INIT_LIST_HEAD(&caching_ctl->list);
-	mutex_init(&caching_ctl->mutex);
-	init_waitqueue_head(&caching_ctl->wait);
-	caching_ctl->block_group = cache;
-	caching_ctl->progress = cache->key.objectid;
-	/* one for caching kthread, one for caching block group list */
-	atomic_set(&caching_ctl->count, 2);
-	caching_ctl->work.func = caching_thread;
-
-	spin_lock(&cache->lock);
-	if (cache->cached != BTRFS_CACHE_NO) {
-		spin_unlock(&cache->lock);
-		kfree(caching_ctl);
+	if (load_cache_only) {
+		put_caching_control(caching_ctl);
 		return 0;
 	}
-	cache->caching_ctl = caching_ctl;
-	cache->cached = BTRFS_CACHE_STARTED;
-	spin_unlock(&cache->lock);
 
 	down_write(&fs_info->extent_commit_sem);
+	atomic_inc(&caching_ctl->count);
 	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
 	up_write(&fs_info->extent_commit_sem);
 
@@ -3797,16 +3838,16 @@ void btrfs_free_block_rsv(struct btrfs_root *root,
 	kfree(rsv);
 }
 
-int btrfs_block_rsv_add(struct btrfs_root *root,
-			struct btrfs_block_rsv *block_rsv,
-			u64 num_bytes)
+static inline int __block_rsv_add(struct btrfs_root *root,
+				  struct btrfs_block_rsv *block_rsv,
+				  u64 num_bytes, int flush)
 {
 	int ret;
 
 	if (num_bytes == 0)
 		return 0;
 
-	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1);
+	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
 	if (!ret) {
 		block_rsv_add_bytes(block_rsv, num_bytes, 1);
 		return 0;
@@ -3815,22 +3856,18 @@ int btrfs_block_rsv_add(struct btrfs_root *root,
 	return ret;
 }
 
+int btrfs_block_rsv_add(struct btrfs_root *root,
+			struct btrfs_block_rsv *block_rsv,
+			u64 num_bytes)
+{
+	return __block_rsv_add(root, block_rsv, num_bytes, 1);
+}
+
 int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
 				struct btrfs_block_rsv *block_rsv,
 				u64 num_bytes)
 {
-	int ret;
-
-	if (num_bytes == 0)
-		return 0;
-
-	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 0);
-	if (!ret) {
-		block_rsv_add_bytes(block_rsv, num_bytes, 1);
-		return 0;
-	}
-
-	return ret;
+	return __block_rsv_add(root, block_rsv, num_bytes, 0);
 }
 
 int btrfs_block_rsv_check(struct btrfs_root *root,
@@ -4064,23 +4101,30 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
  */
 static unsigned drop_outstanding_extent(struct inode *inode)
 {
+	unsigned drop_inode_space = 0;
 	unsigned dropped_extents = 0;
 
 	BUG_ON(!BTRFS_I(inode)->outstanding_extents);
 	BTRFS_I(inode)->outstanding_extents--;
 
+	if (BTRFS_I(inode)->outstanding_extents == 0 &&
+	    BTRFS_I(inode)->delalloc_meta_reserved) {
+		drop_inode_space = 1;
+		BTRFS_I(inode)->delalloc_meta_reserved = 0;
+	}
+
 	/*
 	 * If we have more or the same amount of outstanding extents than we
 	 * have reserved then we need to leave the reserved extents count alone.
 	 */
 	if (BTRFS_I(inode)->outstanding_extents >=
 	    BTRFS_I(inode)->reserved_extents)
-		return 0;
+		return drop_inode_space;
 
 	dropped_extents = BTRFS_I(inode)->reserved_extents -
 		BTRFS_I(inode)->outstanding_extents;
 	BTRFS_I(inode)->reserved_extents -= dropped_extents;
-	return dropped_extents;
+	return dropped_extents + drop_inode_space;
 }
 
 /**
@@ -4166,9 +4210,18 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 		nr_extents = BTRFS_I(inode)->outstanding_extents -
 			BTRFS_I(inode)->reserved_extents;
 		BTRFS_I(inode)->reserved_extents += nr_extents;
+	}
 
-	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
+	/*
+	 * Add an item to reserve for updating the inode when we complete the
+	 * delalloc io.
+	 */
+	if (!BTRFS_I(inode)->delalloc_meta_reserved) {
+		nr_extents++;
+		BTRFS_I(inode)->delalloc_meta_reserved = 1;
 	}
+
+	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
 	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
 	spin_unlock(&BTRFS_I(inode)->lock);
 
@@ -5166,13 +5219,15 @@ search:
 		}
 
 have_block_group:
-		if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
+		cached = block_group_cache_done(block_group);
+		if (unlikely(!cached)) {
 			u64 free_percent;
 
+			found_uncached_bg = true;
 			ret = cache_block_group(block_group, trans,
 						orig_root, 1);
 			if (block_group->cached == BTRFS_CACHE_FINISHED)
-				goto have_block_group;
+				goto alloc;
 
 			free_percent = btrfs_block_group_used(&block_group->item);
 			free_percent *= 100;
@@ -5194,7 +5249,6 @@ have_block_group:
 							orig_root, 0);
 				BUG_ON(ret);
 			}
-			found_uncached_bg = true;
 
 			/*
 			 * If loop is set for cached only, try the next block
@@ -5204,10 +5258,7 @@ have_block_group:
 				goto loop;
 		}
 
-		cached = block_group_cache_done(block_group);
-		if (unlikely(!cached))
-			found_uncached_bg = true;
-
+alloc:
 		if (unlikely(block_group->ro))
 			goto loop;
 
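
The new BTRFS_CACHE_FAST state lets one thread attempt the quick space-cache load while others sleep until it settles into FINISHED, STARTED, or NO. The prepare_to_wait()/schedule() loop in cache_block_group() maps onto a classic condition-variable wait; a userspace sketch with pthreads and illustrative names:

#include <pthread.h>

enum cache_state { CACHE_NO, CACHE_FAST, CACHE_STARTED, CACHE_FINISHED };

struct block_group {
	pthread_mutex_t lock;
	pthread_cond_t wait;		/* plays the role of caching_ctl->wait */
	enum cache_state cached;
};

/* block until the fast loader has left the transient FAST state */
static void wait_for_fast_load(struct block_group *bg)
{
	pthread_mutex_lock(&bg->lock);
	while (bg->cached == CACHE_FAST)	/* recheck after every wakeup */
		pthread_cond_wait(&bg->wait, &bg->lock);
	pthread_mutex_unlock(&bg->lock);
}

/* the loader's side: publish the result and wake all waiters */
static void finish_fast_load(struct block_group *bg, enum cache_state result)
{
	pthread_mutex_lock(&bg->lock);
	bg->cached = result;
	pthread_cond_broadcast(&bg->wait);
	pthread_mutex_unlock(&bg->lock);
}
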
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 1f87c4d0e7a0..9472d3de5e52 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2285,8 +2285,8 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 			clean_io_failure(start, page);
 		}
 		if (!uptodate) {
-			u64 failed_mirror;
-			failed_mirror = (u64)bio->bi_bdev;
+			int failed_mirror;
+			failed_mirror = (int)(unsigned long)bio->bi_bdev;
 			if (tree->ops && tree->ops->readpage_io_failed_hook)
 				ret = tree->ops->readpage_io_failed_hook(
 						bio, page, start, end,
@@ -3366,6 +3366,9 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		return -ENOMEM;
 	path->leave_spinning = 1;
 
+	start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
+	len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
+
 	/*
 	 * lookup the last file extent.  We're not using i_size here
 	 * because there might be preallocation past i_size
@@ -3413,7 +3416,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
 			 &cached_state, GFP_NOFS);
 
-	em = get_extent_skip_holes(inode, off, last_for_get_extent,
+	em = get_extent_skip_holes(inode, start, last_for_get_extent,
 				   get_extent);
 	if (!em)
 		goto out;
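
The fiemap fix rounds both start and len up to the sector size before the lookups (and then uses the aligned start rather than the raw offset). ALIGN rounds up to the next multiple of a power of two; a minimal demonstration:

#include <stdint.h>
#include <stdio.h>

/* round x up to the next multiple of a, where a is a power of two */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t sectorsize = 4096;

	printf("%llu\n", (unsigned long long)ALIGN(1ULL, sectorsize));    /* 4096 */
	printf("%llu\n", (unsigned long long)ALIGN(4096ULL, sectorsize)); /* 4096 */
	printf("%llu\n", (unsigned long long)ALIGN(5000ULL, sectorsize)); /* 8192 */
	return 0;
}
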
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index feb9be0e23bc..7604c3001322 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -70,7 +70,7 @@ struct extent_io_ops {
 				      unsigned long bio_flags);
 	int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
 	int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
-				       u64 start, u64 end, u64 failed_mirror,
+				       u64 start, u64 end, int failed_mirror,
 				       struct extent_state *state);
 	int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
 					u64 start, u64 end,
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 7a15fcfb3e1f..6e5b7e463698 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -351,6 +351,11 @@ static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
 		}
 	}
 
+	for (i = 0; i < io_ctl->num_pages; i++) {
+		clear_page_dirty_for_io(io_ctl->pages[i]);
+		set_page_extent_mapped(io_ctl->pages[i]);
+	}
+
 	return 0;
 }
 
@@ -537,6 +542,13 @@ static int io_ctl_read_entry(struct io_ctl *io_ctl,
 			     struct btrfs_free_space *entry, u8 *type)
 {
 	struct btrfs_free_space_entry *e;
+	int ret;
+
+	if (!io_ctl->cur) {
+		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
+		if (ret)
+			return ret;
+	}
 
 	e = io_ctl->cur;
 	entry->offset = le64_to_cpu(e->offset);
@@ -550,10 +562,7 @@ static int io_ctl_read_entry(struct io_ctl *io_ctl,
 
 	io_ctl_unmap_page(io_ctl);
 
-	if (io_ctl->index >= io_ctl->num_pages)
-		return 0;
-
-	return io_ctl_check_crc(io_ctl, io_ctl->index);
+	return 0;
 }
 
 static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
@@ -561,9 +570,6 @@ static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
 {
 	int ret;
 
-	if (io_ctl->cur && io_ctl->cur != io_ctl->orig)
-		io_ctl_unmap_page(io_ctl);
-
 	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
 	if (ret)
 		return ret;
@@ -699,6 +705,8 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 		num_entries--;
 	}
 
+	io_ctl_unmap_page(&io_ctl);
+
 	/*
 	 * We add the bitmaps at the end of the entries in order that
 	 * the bitmap entries are added to the cache.
@@ -1841,7 +1849,13 @@ again:
 		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
 					  1, 0);
 		if (!info) {
-			WARN_ON(1);
+			/* the tree logging code might be calling us before we
+			 * have fully loaded the free space rbtree for this
+			 * block group.  So it is possible the entry won't
+			 * be in the rbtree yet at all.  The caching code
+			 * will make sure not to put it in the rbtree if
+			 * the logging code has pinned it.
+			 */
 			goto out_lock;
 		}
 	}
@@ -2448,16 +2462,23 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct btrfs_free_space *entry;
-	struct rb_node *node;
 	int ret = -ENOSPC;
+	u64 bitmap_offset = offset_to_bitmap(ctl, offset);
 
 	if (ctl->total_bitmaps == 0)
 		return -ENOSPC;
 
 	/*
-	 * First check our cached list of bitmaps and see if there is an entry
-	 * here that will work.
+	 * The bitmap that covers offset won't be in the list unless offset
+	 * is just its start offset.
 	 */
+	entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
+	if (entry->offset != bitmap_offset) {
+		entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
+		if (entry && list_empty(&entry->list))
+			list_add(&entry->list, bitmaps);
+	}
+
 	list_for_each_entry(entry, bitmaps, list) {
 		if (entry->bytes < min_bytes)
 			continue;
@@ -2468,38 +2489,10 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
 	}
 
 	/*
-	 * If we do have entries on our list and we are here then we didn't find
-	 * anything, so go ahead and get the next entry after the last entry in
-	 * this list and start the search from there.
+	 * The bitmaps list has all the bitmaps that record free space
+	 * starting after offset, so no more search is required.
 	 */
-	if (!list_empty(bitmaps)) {
-		entry = list_entry(bitmaps->prev, struct btrfs_free_space,
-				   list);
-		node = rb_next(&entry->offset_index);
-		if (!node)
-			return -ENOSPC;
-		entry = rb_entry(node, struct btrfs_free_space, offset_index);
-		goto search;
-	}
-
-	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
-	if (!entry)
-		return -ENOSPC;
-
-search:
-	node = &entry->offset_index;
-	do {
-		entry = rb_entry(node, struct btrfs_free_space, offset_index);
-		node = rb_next(&entry->offset_index);
-		if (!entry->bitmap)
-			continue;
-		if (entry->bytes < min_bytes)
-			continue;
-		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
-					   bytes, min_bytes);
-	} while (ret && node);
-
-	return ret;
+	return -ENOSPC;
 }
 
 /*
@@ -2517,8 +2510,8 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 			      u64 offset, u64 bytes, u64 empty_size)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
-	struct list_head bitmaps;
 	struct btrfs_free_space *entry, *tmp;
+	LIST_HEAD(bitmaps);
 	u64 min_bytes;
 	int ret;
 
@@ -2557,7 +2550,6 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 		goto out;
 	}
 
-	INIT_LIST_HEAD(&bitmaps);
 	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
 				      bytes, min_bytes);
 	if (ret)
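
The last two hunks fold the separate INIT_LIST_HEAD() call into the LIST_HEAD() declarator, which declares the list and points it at itself in one step. The definitions involved are short enough to quote in full (this matches the kernel's list.h):

struct list_head {
	struct list_head *next, *prev;
};

/* an empty list is a node whose next and prev point back at itself */
#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* declare and statically initialize an empty list in one statement */
#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)
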
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 53dcbdf446cd..f8962a957d65 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -398,6 +398,8 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
 	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
 	struct btrfs_path *path;
 	struct inode *inode;
+	struct btrfs_block_rsv *rsv;
+	u64 num_bytes;
 	u64 alloc_hint = 0;
 	int ret;
 	int prealloc;
@@ -421,11 +423,26 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
 	if (!path)
 		return -ENOMEM;
 
+	rsv = trans->block_rsv;
+	trans->block_rsv = &root->fs_info->trans_block_rsv;
+
+	num_bytes = trans->bytes_reserved;
+	/*
+	 * 1 item for inode item insertion if needed
+	 * 3 items for inode item update (in the worst case)
+	 * 1 item for free space object
+	 * 3 items for pre-allocation
+	 */
+	trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 8);
+	ret = btrfs_block_rsv_add_noflush(root, trans->block_rsv,
+					  trans->bytes_reserved);
+	if (ret)
+		goto out;
 again:
 	inode = lookup_free_ino_inode(root, path);
 	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
 		ret = PTR_ERR(inode);
-		goto out;
+		goto out_release;
 	}
 
 	if (IS_ERR(inode)) {
@@ -434,7 +451,7 @@ again:
 
 		ret = create_free_ino_inode(root, trans, path);
 		if (ret)
-			goto out;
+			goto out_release;
 		goto again;
 	}
 
@@ -477,11 +494,14 @@ again:
 	}
 	btrfs_free_reserved_data_space(inode, prealloc);
 
+	ret = btrfs_write_out_ino_cache(root, trans, path);
 out_put:
 	iput(inode);
+out_release:
+	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
 out:
-	if (ret == 0)
-		ret = btrfs_write_out_ino_cache(root, trans, path);
+	trans->block_rsv = rsv;
+	trans->bytes_reserved = num_bytes;
 
 	btrfs_free_path(path);
 	return ret;
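
btrfs_save_ino_cache() now wraps its work in a save/switch/restore of the transaction's reservation: stash trans->block_rsv and trans->bytes_reserved, point the transaction at a reservation sized for the cache write-out, and put both back on every exit path. The shape of that pattern, with illustrative types:

#include <stdint.h>

typedef uint64_t u64;

struct rsv { u64 size; };
struct trans { struct rsv *block_rsv; u64 bytes_reserved; };

static int do_cache_writeout(struct trans *t) { (void)t; return 0; }

static int save_cache(struct trans *t, struct rsv *private_rsv, u64 need)
{
	struct rsv *saved_rsv = t->block_rsv;	/* save */
	u64 saved_bytes = t->bytes_reserved;
	int ret;

	t->block_rsv = private_rsv;		/* switch */
	t->bytes_reserved = need;

	ret = do_cache_writeout(t);

	t->block_rsv = saved_rsv;		/* restore on every path */
	t->bytes_reserved = saved_bytes;
	return ret;
}
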
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 966ddcc4c63d..526dd51a1966 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -93,6 +93,8 @@ static noinline int cow_file_range(struct inode *inode,
 				   struct page *locked_page,
 				   u64 start, u64 end, int *page_started,
 				   unsigned long *nr_written, int unlock);
+static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
+				struct btrfs_root *root, struct inode *inode);
 
 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
 				     struct inode *inode, struct inode *dir,
@@ -1741,7 +1743,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
 		trans = btrfs_join_transaction(root);
 		BUG_ON(IS_ERR(trans));
 		trans->block_rsv = &root->fs_info->delalloc_block_rsv;
-		ret = btrfs_update_inode(trans, root, inode);
+		ret = btrfs_update_inode_fallback(trans, root, inode);
 		BUG_ON(ret);
 	}
 	goto out;
@@ -1791,7 +1793,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
 
 	ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
 	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
-		ret = btrfs_update_inode(trans, root, inode);
+		ret = btrfs_update_inode_fallback(trans, root, inode);
 		BUG_ON(ret);
 	}
 	ret = 0;
@@ -2199,6 +2201,9 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
 		if (ret)
 			goto out;
 	}
+	/* release the path since we're done with it */
+	btrfs_release_path(path);
+
 	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
 
 	if (root->orphan_block_rsv)
@@ -2426,7 +2431,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
 /*
  * copy everything in the in-memory inode into the btree.
  */
-noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
+static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
 				struct btrfs_root *root, struct inode *inode)
 {
 	struct btrfs_inode_item *inode_item;
@@ -2434,21 +2439,6 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
 	struct extent_buffer *leaf;
 	int ret;
 
-	/*
-	 * If the inode is a free space inode, we can deadlock during commit
-	 * if we put it into the delayed code.
-	 *
-	 * The data relocation inode should also be directly updated
-	 * without delay
-	 */
-	if (!btrfs_is_free_space_inode(root, inode)
-	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
-		ret = btrfs_delayed_update_inode(trans, root, inode);
-		if (!ret)
-			btrfs_set_inode_last_trans(trans, inode);
-		return ret;
-	}
-
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
@@ -2477,6 +2467,43 @@ failed:
 }
 
 /*
+ * copy everything in the in-memory inode into the btree.
+ */
+noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
+				struct btrfs_root *root, struct inode *inode)
+{
+	int ret;
+
+	/*
+	 * If the inode is a free space inode, we can deadlock during commit
+	 * if we put it into the delayed code.
+	 *
+	 * The data relocation inode should also be directly updated
+	 * without delay
+	 */
+	if (!btrfs_is_free_space_inode(root, inode)
+	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
+		ret = btrfs_delayed_update_inode(trans, root, inode);
+		if (!ret)
+			btrfs_set_inode_last_trans(trans, inode);
+		return ret;
+	}
+
+	return btrfs_update_inode_item(trans, root, inode);
+}
+
+static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
+				struct btrfs_root *root, struct inode *inode)
+{
+	int ret;
+
+	ret = btrfs_update_inode(trans, root, inode);
+	if (ret == -ENOSPC)
+		return btrfs_update_inode_item(trans, root, inode);
+	return ret;
+}
+
+/*
  * unlink helper that gets used here in inode.c and in the tree logging
  * recovery code.  It removes a link in a directory with a given name, and
  * also drops the back refs in the inode to the directory
@@ -5632,7 +5659,7 @@ again:
 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
 		ret = btrfs_ordered_update_i_size(inode, 0, ordered);
 		if (!ret)
-			err = btrfs_update_inode(trans, root, inode);
+			err = btrfs_update_inode_fallback(trans, root, inode);
 		goto out;
 	}
 
@@ -5670,7 +5697,7 @@ again:
 	add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
 	ret = btrfs_ordered_update_i_size(inode, 0, ordered);
 	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
-		btrfs_update_inode(trans, root, inode);
+		btrfs_update_inode_fallback(trans, root, inode);
 	ret = 0;
 out_unlock:
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
@@ -6529,14 +6556,16 @@ end_trans:
 		ret = btrfs_orphan_del(NULL, inode);
 	}
 
-	trans->block_rsv = &root->fs_info->trans_block_rsv;
-	ret = btrfs_update_inode(trans, root, inode);
-	if (ret && !err)
-		err = ret;
+	if (trans) {
+		trans->block_rsv = &root->fs_info->trans_block_rsv;
+		ret = btrfs_update_inode(trans, root, inode);
+		if (ret && !err)
+			err = ret;
 
-	nr = trans->blocks_used;
-	ret = btrfs_end_transaction_throttle(trans, root);
-	btrfs_btree_balance_dirty(root, nr);
+		nr = trans->blocks_used;
+		ret = btrfs_end_transaction_throttle(trans, root);
+		btrfs_btree_balance_dirty(root, nr);
+	}
 
 out:
 	btrfs_free_block_rsv(root, rsv);
@@ -6605,6 +6634,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
 	ei->orphan_meta_reserved = 0;
 	ei->dummy_inode = 0;
 	ei->in_defrag = 0;
+	ei->delalloc_meta_reserved = 0;
 	ei->force_compress = BTRFS_COMPRESS_NONE;
 
 	ei->delayed_node = NULL;
@@ -6764,11 +6794,13 @@ static int btrfs_getattr(struct vfsmount *mnt,
 			 struct dentry *dentry, struct kstat *stat)
 {
 	struct inode *inode = dentry->d_inode;
+	u32 blocksize = inode->i_sb->s_blocksize;
+
 	generic_fillattr(inode, stat);
 	stat->dev = BTRFS_I(inode)->root->anon_dev;
 	stat->blksize = PAGE_CACHE_SIZE;
-	stat->blocks = (inode_get_bytes(inode) +
-			BTRFS_I(inode)->delalloc_bytes) >> 9;
+	stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
+			ALIGN(BTRFS_I(inode)->delalloc_bytes, blocksize)) >> 9;
 	return 0;
 }
 
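
The getattr hunk rounds both the on-disk byte count and the outstanding delalloc bytes up to the filesystem block size before converting to the 512-byte units that stat reports, so a partially filled block is counted as a whole one. In isolation:

#include <stdint.h>
#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/* stat blocks are always 512-byte units, whatever the fs block size is */
static uint64_t stat_blocks(uint64_t disk_bytes, uint64_t delalloc_bytes,
			    uint64_t blocksize)
{
	return (ALIGN(disk_bytes, blocksize) +
		ALIGN(delalloc_bytes, blocksize)) >> 9;
}

int main(void)
{
	/* 1 byte on disk + 1 byte of delalloc, 4K blocks: 2 blocks = 16 units */
	printf("%llu\n", (unsigned long long)stat_blocks(1, 1, 4096));
	return 0;
}
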
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 4a34c472f126..a90e749ed6d2 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1216,12 +1216,12 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
 		*devstr = '\0';
 		devstr = vol_args->name;
 		devid = simple_strtoull(devstr, &end, 10);
-		printk(KERN_INFO "resizing devid %llu\n",
+		printk(KERN_INFO "btrfs: resizing devid %llu\n",
 		       (unsigned long long)devid);
 	}
 	device = btrfs_find_device(root, devid, NULL, NULL);
 	if (!device) {
-		printk(KERN_INFO "resizer unable to find device %llu\n",
+		printk(KERN_INFO "btrfs: resizer unable to find device %llu\n",
 		       (unsigned long long)devid);
 		ret = -EINVAL;
 		goto out_unlock;
@@ -1267,7 +1267,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
 	do_div(new_size, root->sectorsize);
 	new_size *= root->sectorsize;
 
-	printk(KERN_INFO "new size for %s is %llu\n",
+	printk(KERN_INFO "btrfs: new size for %s is %llu\n",
 		device->name, (unsigned long long)new_size);
 
 	if (new_size > old_size) {
@@ -2930,11 +2930,13 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
 		goto out;
 
 	for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
-		rel_ptr = ipath->fspath->val[i] - (u64)ipath->fspath->val;
+		rel_ptr = ipath->fspath->val[i] -
+			  (u64)(unsigned long)ipath->fspath->val;
 		ipath->fspath->val[i] = rel_ptr;
 	}
 
-	ret = copy_to_user((void *)ipa->fspath, (void *)ipath->fspath, size);
+	ret = copy_to_user((void *)(unsigned long)ipa->fspath,
+			   (void *)(unsigned long)ipath->fspath, size);
 	if (ret) {
 		ret = -EFAULT;
 		goto out;
@@ -3017,7 +3019,8 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
 	if (ret < 0)
 		goto out;
 
-	ret = copy_to_user((void *)loi->inodes, (void *)inodes, size);
+	ret = copy_to_user((void *)(unsigned long)loi->inodes,
+			   (void *)(unsigned long)inodes, size);
 	if (ret)
 		ret = -EFAULT;
 
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 24d654ce7a06..dff29d5e151a 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1174,6 +1174,8 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
 			list_add_tail(&new_edge->list[UPPER],
 				      &new_node->lower);
 		}
+	} else {
+		list_add_tail(&new_node->lower, &cache->leaves);
 	}
 
 	rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index ed11d3866afd..fab420db5121 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -272,7 +272,7 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
 		swarn->logical, swarn->dev->name,
 		(unsigned long long)swarn->sector, root, inum, offset,
 		min(isize - offset, (u64)PAGE_SIZE), nlink,
-		(char *)ipath->fspath->val[i]);
+		(char *)(unsigned long)ipath->fspath->val[i]);
 
 	free_ipath(ipath);
 	return 0;
@@ -944,50 +944,18 @@ static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
 static int scrub_submit(struct scrub_dev *sdev)
 {
 	struct scrub_bio *sbio;
-	struct bio *bio;
-	int i;
 
 	if (sdev->curr == -1)
 		return 0;
 
 	sbio = sdev->bios[sdev->curr];
-
-	bio = bio_alloc(GFP_NOFS, sbio->count);
-	if (!bio)
-		goto nomem;
-
-	bio->bi_private = sbio;
-	bio->bi_end_io = scrub_bio_end_io;
-	bio->bi_bdev = sdev->dev->bdev;
-	bio->bi_sector = sbio->physical >> 9;
-
-	for (i = 0; i < sbio->count; ++i) {
-		struct page *page;
-		int ret;
-
-		page = alloc_page(GFP_NOFS);
-		if (!page)
-			goto nomem;
-
-		ret = bio_add_page(bio, page, PAGE_SIZE, 0);
-		if (!ret) {
-			__free_page(page);
-			goto nomem;
-		}
-	}
-
 	sbio->err = 0;
 	sdev->curr = -1;
 	atomic_inc(&sdev->in_flight);
 
-	submit_bio(READ, bio);
+	submit_bio(READ, sbio->bio);
 
 	return 0;
-
-nomem:
-	scrub_free_bio(bio);
-
-	return -ENOMEM;
 }
 
 static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
@@ -995,6 +963,8 @@ static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
 		       u8 *csum, int force)
 {
 	struct scrub_bio *sbio;
+	struct page *page;
+	int ret;
 
 again:
 	/*
@@ -1015,12 +985,22 @@ again:
1015 } 985 }
1016 sbio = sdev->bios[sdev->curr]; 986 sbio = sdev->bios[sdev->curr];
1017 if (sbio->count == 0) { 987 if (sbio->count == 0) {
988 struct bio *bio;
989
1018 sbio->physical = physical; 990 sbio->physical = physical;
1019 sbio->logical = logical; 991 sbio->logical = logical;
992 bio = bio_alloc(GFP_NOFS, SCRUB_PAGES_PER_BIO);
993 if (!bio)
994 return -ENOMEM;
995
996 bio->bi_private = sbio;
997 bio->bi_end_io = scrub_bio_end_io;
998 bio->bi_bdev = sdev->dev->bdev;
999 bio->bi_sector = sbio->physical >> 9;
1000 sbio->err = 0;
1001 sbio->bio = bio;
1020 } else if (sbio->physical + sbio->count * PAGE_SIZE != physical || 1002 } else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||
1021 sbio->logical + sbio->count * PAGE_SIZE != logical) { 1003 sbio->logical + sbio->count * PAGE_SIZE != logical) {
1022 int ret;
1023
1024 ret = scrub_submit(sdev); 1004 ret = scrub_submit(sdev);
1025 if (ret) 1005 if (ret)
1026 return ret; 1006 return ret;
@@ -1030,6 +1010,20 @@ again:
1030 sbio->spag[sbio->count].generation = gen; 1010 sbio->spag[sbio->count].generation = gen;
1031 sbio->spag[sbio->count].have_csum = 0; 1011 sbio->spag[sbio->count].have_csum = 0;
1032 sbio->spag[sbio->count].mirror_num = mirror_num; 1012 sbio->spag[sbio->count].mirror_num = mirror_num;
1013
1014 page = alloc_page(GFP_NOFS);
1015 if (!page)
1016 return -ENOMEM;
1017
1018 ret = bio_add_page(sbio->bio, page, PAGE_SIZE, 0);
1019 if (!ret) {
1020 __free_page(page);
1021 ret = scrub_submit(sdev);
1022 if (ret)
1023 return ret;
1024 goto again;
1025 }
1026
1033 if (csum) { 1027 if (csum) {
1034 sbio->spag[sbio->count].have_csum = 1; 1028 sbio->spag[sbio->count].have_csum = 1;
1035 memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size); 1029 memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);
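Taken together, the scrub hunks move every allocation out of scrub_submit() and into scrub_page(): the bio is set up when a scrub_bio receives its first page, and each data page is allocated and attached with bio_add_page() as its spag entry is filled in. Submission itself can then no longer fail, and -ENOMEM surfaces from the call that queued the page. A compact user-space model of that shape (names and sizes are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define PAGES_PER_BATCH 4	/* stands in for SCRUB_PAGES_PER_BIO */

struct batch {
	void *pages[PAGES_PER_BATCH];
	int count;
	int ready;		/* "bio already allocated" in the real code */
};

/* flush allocates nothing, so it cannot fail */
static void batch_submit(struct batch *b)
{
	for (int i = 0; i < b->count; i++)
		free(b->pages[i]);
	b->count = 0;
	b->ready = 0;
}

/* every failure point lives here, where the caller queued the work */
static int batch_add(struct batch *b)
{
	if (!b->ready)
		b->ready = 1;	/* first page: set up the container */

	void *page = malloc(4096);
	if (!page)
		return -1;	/* report -ENOMEM at queue time */

	b->pages[b->count++] = page;
	if (b->count == PAGES_PER_BATCH)
		batch_submit(b);
	return 0;
}

int main(void)
{
	struct batch b = { 0 };

	for (int i = 0; i < 10; i++)
		if (batch_add(&b) < 0)
			return 1;
	batch_submit(&b);	/* drain the partial batch */
	puts("all pages queued and submitted");
	return 0;
}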
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 57080dffdfc6..17ee7fc5e64e 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -197,7 +197,7 @@ static match_table_t tokens = {
197 {Opt_subvolrootid, "subvolrootid=%d"}, 197 {Opt_subvolrootid, "subvolrootid=%d"},
198 {Opt_defrag, "autodefrag"}, 198 {Opt_defrag, "autodefrag"},
199 {Opt_inode_cache, "inode_cache"}, 199 {Opt_inode_cache, "inode_cache"},
200 {Opt_no_space_cache, "no_space_cache"}, 200 {Opt_no_space_cache, "nospace_cache"},
201 {Opt_recovery, "recovery"}, 201 {Opt_recovery, "recovery"},
202 {Opt_err, NULL}, 202 {Opt_err, NULL},
203}; 203};
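Only the string in the token table changes, not the Opt_no_space_cache token, so the accepted mount option becomes nospace_cache, presumably to match the other unprefixed negations such as nodatacow; the old no_space_cache spelling is simply no longer matched. A simplified user-space model of why the table string is what users see (the kernel's match_token() takes a substring_t argument list; this version is cut down for illustration):

#include <stdio.h>
#include <string.h>

enum { Opt_no_space_cache, Opt_err };

static const struct { int token; const char *pattern; } tokens[] = {
	{ Opt_no_space_cache, "nospace_cache" },	/* was "no_space_cache" */
	{ Opt_err, NULL },
};

static int match_token_simple(const char *opt)
{
	for (int i = 0; tokens[i].pattern; i++)
		if (strcmp(opt, tokens[i].pattern) == 0)
			return tokens[i].token;
	return Opt_err;
}

int main(void)
{
	printf("%d\n", match_token_simple("nospace_cache"));	/* 0: accepted */
	printf("%d\n", match_token_simple("no_space_cache"));	/* 1: rejected */
	return 0;
}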
@@ -448,6 +448,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
448 token = match_token(p, tokens, args); 448 token = match_token(p, tokens, args);
449 switch (token) { 449 switch (token) {
450 case Opt_subvol: 450 case Opt_subvol:
451 kfree(*subvol_name);
451 *subvol_name = match_strdup(&args[0]); 452 *subvol_name = match_strdup(&args[0]);
452 break; 453 break;
453 case Opt_subvolid: 454 case Opt_subvolid:
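The added kfree() matters when subvol= appears more than once in the option string: without it, every earlier match_strdup() result leaks when the next occurrence overwrites *subvol_name. Since kfree(NULL) is a no-op, the first pass through is unaffected. The same idiom in portable C, where free(NULL) is likewise a no-op:

#include <stdlib.h>
#include <string.h>

/* store a repeatable option's value, dropping any previous copy */
static void set_opt(char **slot, const char *arg)
{
	free(*slot);		/* no-op on the first occurrence */
	*slot = strdup(arg);	/* NULL on ENOMEM; caller must check */
}

int main(void)
{
	char *subvol = NULL;

	set_opt(&subvol, "a");	/* first subvol= */
	set_opt(&subvol, "b");	/* second subvol=: "a" is freed, not leaked */
	free(subvol);
	return 0;
}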
@@ -710,7 +711,7 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
710 if (btrfs_test_opt(root, SPACE_CACHE)) 711 if (btrfs_test_opt(root, SPACE_CACHE))
711 seq_puts(seq, ",space_cache"); 712 seq_puts(seq, ",space_cache");
712 else 713 else
713 seq_puts(seq, ",no_space_cache"); 714 seq_puts(seq, ",nospace_cache");
714 if (btrfs_test_opt(root, CLEAR_CACHE)) 715 if (btrfs_test_opt(root, CLEAR_CACHE))
715 seq_puts(seq, ",clear_cache"); 716 seq_puts(seq, ",clear_cache");
716 if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED)) 717 if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED))
@@ -824,13 +825,9 @@ static char *setup_root_args(char *args)
824static struct dentry *mount_subvol(const char *subvol_name, int flags, 825static struct dentry *mount_subvol(const char *subvol_name, int flags,
825 const char *device_name, char *data) 826 const char *device_name, char *data)
826{ 827{
827 struct super_block *s;
828 struct dentry *root; 828 struct dentry *root;
829 struct vfsmount *mnt; 829 struct vfsmount *mnt;
830 struct mnt_namespace *ns_private;
831 char *newargs; 830 char *newargs;
832 struct path path;
833 int error;
834 831
835 newargs = setup_root_args(data); 832 newargs = setup_root_args(data);
836 if (!newargs) 833 if (!newargs)
@@ -841,39 +838,17 @@ static struct dentry *mount_subvol(const char *subvol_name, int flags,
841 if (IS_ERR(mnt)) 838 if (IS_ERR(mnt))
842 return ERR_CAST(mnt); 839 return ERR_CAST(mnt);
843 840
844 ns_private = create_mnt_ns(mnt); 841 root = mount_subtree(mnt, subvol_name);
845 if (IS_ERR(ns_private)) {
846 mntput(mnt);
847 return ERR_CAST(ns_private);
848 }
849
850 /*
851 * This will trigger the automount of the subvol so we can just
852 * drop the mnt we have here and return the dentry that we
853 * found.
854 */
855 error = vfs_path_lookup(mnt->mnt_root, mnt, subvol_name,
856 LOOKUP_FOLLOW, &path);
857 put_mnt_ns(ns_private);
858 if (error)
859 return ERR_PTR(error);
860 842
861 if (!is_subvolume_inode(path.dentry->d_inode)) { 843 if (!IS_ERR(root) && !is_subvolume_inode(root->d_inode)) {
862 path_put(&path); 844 struct super_block *s = root->d_sb;
863 mntput(mnt); 845 dput(root);
864 error = -EINVAL; 846 root = ERR_PTR(-EINVAL);
847 deactivate_locked_super(s);
865 printk(KERN_ERR "btrfs: '%s' is not a valid subvolume\n", 848 printk(KERN_ERR "btrfs: '%s' is not a valid subvolume\n",
866 subvol_name); 849 subvol_name);
867 return ERR_PTR(-EINVAL);
868 } 850 }
869 851
870 /* Get a ref to the sb and the dentry we found and return it */
871 s = path.mnt->mnt_sb;
872 atomic_inc(&s->s_active);
873 root = dget(path.dentry);
874 path_put(&path);
875 down_write(&s->s_umount);
876
877 return root; 852 return root;
878} 853}
879 854
@@ -890,7 +865,6 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
890 struct super_block *s; 865 struct super_block *s;
891 struct dentry *root; 866 struct dentry *root;
892 struct btrfs_fs_devices *fs_devices = NULL; 867 struct btrfs_fs_devices *fs_devices = NULL;
893 struct btrfs_root *tree_root = NULL;
894 struct btrfs_fs_info *fs_info = NULL; 868 struct btrfs_fs_info *fs_info = NULL;
895 fmode_t mode = FMODE_READ; 869 fmode_t mode = FMODE_READ;
896 char *subvol_name = NULL; 870 char *subvol_name = NULL;
@@ -904,8 +878,10 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
904 error = btrfs_parse_early_options(data, mode, fs_type, 878 error = btrfs_parse_early_options(data, mode, fs_type,
905 &subvol_name, &subvol_objectid, 879 &subvol_name, &subvol_objectid,
906 &subvol_rootid, &fs_devices); 880 &subvol_rootid, &fs_devices);
907 if (error) 881 if (error) {
882 kfree(subvol_name);
908 return ERR_PTR(error); 883 return ERR_PTR(error);
884 }
909 885
910 if (subvol_name) { 886 if (subvol_name) {
911 root = mount_subvol(subvol_name, flags, device_name, data); 887 root = mount_subvol(subvol_name, flags, device_name, data);
@@ -917,15 +893,6 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
917 if (error) 893 if (error)
918 return ERR_PTR(error); 894 return ERR_PTR(error);
919 895
920 error = btrfs_open_devices(fs_devices, mode, fs_type);
921 if (error)
922 return ERR_PTR(error);
923
924 if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) {
925 error = -EACCES;
926 goto error_close_devices;
927 }
928
929 /* 896 /*
930 * Setup a dummy root and fs_info for test/set super. This is because 897 * Setup a dummy root and fs_info for test/set super. This is because
931 * we don't actually fill this stuff out until open_ctree, but we need 898 * we don't actually fill this stuff out until open_ctree, but we need
@@ -933,24 +900,36 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
933 * then open_ctree will properly initialize everything later. 900 * then open_ctree will properly initialize everything later.
934 */ 901 */
935 fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_NOFS); 902 fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_NOFS);
936 tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); 903 if (!fs_info)
937 if (!fs_info || !tree_root) { 904 return ERR_PTR(-ENOMEM);
905
906 fs_info->tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
907 if (!fs_info->tree_root) {
938 error = -ENOMEM; 908 error = -ENOMEM;
939 goto error_close_devices; 909 goto error_fs_info;
940 } 910 }
941 fs_info->tree_root = tree_root; 911 fs_info->tree_root->fs_info = fs_info;
942 fs_info->fs_devices = fs_devices; 912 fs_info->fs_devices = fs_devices;
943 tree_root->fs_info = fs_info;
944 913
945 fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS); 914 fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS);
946 fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS); 915 fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS);
947 if (!fs_info->super_copy || !fs_info->super_for_commit) { 916 if (!fs_info->super_copy || !fs_info->super_for_commit) {
948 error = -ENOMEM; 917 error = -ENOMEM;
918 goto error_fs_info;
919 }
920
921 error = btrfs_open_devices(fs_devices, mode, fs_type);
922 if (error)
923 goto error_fs_info;
924
925 if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) {
926 error = -EACCES;
949 goto error_close_devices; 927 goto error_close_devices;
950 } 928 }
951 929
952 bdev = fs_devices->latest_bdev; 930 bdev = fs_devices->latest_bdev;
953 s = sget(fs_type, btrfs_test_super, btrfs_set_super, tree_root); 931 s = sget(fs_type, btrfs_test_super, btrfs_set_super,
932 fs_info->tree_root);
954 if (IS_ERR(s)) { 933 if (IS_ERR(s)) {
955 error = PTR_ERR(s); 934 error = PTR_ERR(s);
956 goto error_close_devices; 935 goto error_close_devices;
@@ -959,12 +938,12 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
959 if (s->s_root) { 938 if (s->s_root) {
960 if ((flags ^ s->s_flags) & MS_RDONLY) { 939 if ((flags ^ s->s_flags) & MS_RDONLY) {
961 deactivate_locked_super(s); 940 deactivate_locked_super(s);
962 return ERR_PTR(-EBUSY); 941 error = -EBUSY;
942 goto error_close_devices;
963 } 943 }
964 944
965 btrfs_close_devices(fs_devices); 945 btrfs_close_devices(fs_devices);
966 free_fs_info(fs_info); 946 free_fs_info(fs_info);
967 kfree(tree_root);
968 } else { 947 } else {
969 char b[BDEVNAME_SIZE]; 948 char b[BDEVNAME_SIZE];
970 949
@@ -991,8 +970,8 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
991 970
992error_close_devices: 971error_close_devices:
993 btrfs_close_devices(fs_devices); 972 btrfs_close_devices(fs_devices);
973error_fs_info:
994 free_fs_info(fs_info); 974 free_fs_info(fs_info);
995 kfree(tree_root);
996 return ERR_PTR(error); 975 return ERR_PTR(error);
997} 976}
998 977
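The btrfs_mount() reshuffle is largely about unwind order. The fs_info, tree_root, and superblock copies are now allocated before the devices are opened, and the new error_fs_info label sits below error_close_devices, so each label undoes exactly the state that exists by the time a failure can jump to it; the EBUSY path through sget() now also closes the devices instead of returning directly. A skeletal model of the idiom (the helpers are hypothetical stand-ins, not btrfs functions):

#include <stdlib.h>

static int open_devices(void) { return 0; }	/* hypothetical */
static void close_devices(void) { }		/* hypothetical */
static int writable(void) { return 1; }		/* hypothetical */

static int mount_like(void)
{
	int error;
	void *fs_info = calloc(1, 128);

	if (!fs_info)
		return -1;			/* nothing else to undo */

	error = open_devices();
	if (error)
		goto error_fs_info;		/* devices never opened */

	if (!writable()) {
		error = -2;
		goto error_close_devices;	/* opened: must close */
	}

	/* ...sget() and superblock setup would follow here... */
	close_devices();
	free(fs_info);
	return 0;

error_close_devices:
	close_devices();
error_fs_info:
	free(fs_info);
	return error;
}

int main(void)
{
	return mount_like() ? 1 : 0;
}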
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 960835eaf4da..81376d94cd3c 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -785,6 +785,10 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
785 785
786 btrfs_save_ino_cache(root, trans); 786 btrfs_save_ino_cache(root, trans);
787 787
788 /* see comments in should_cow_block() */
789 root->force_cow = 0;
790 smp_wmb();
791
788 if (root->commit_root != root->node) { 792 if (root->commit_root != root->node) {
789 mutex_lock(&root->fs_commit_mutex); 793 mutex_lock(&root->fs_commit_mutex);
790 switch_commit_root(root); 794 switch_commit_root(root);
@@ -882,8 +886,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
882 btrfs_reloc_pre_snapshot(trans, pending, &to_reserve); 886 btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
883 887
884 if (to_reserve > 0) { 888 if (to_reserve > 0) {
885 ret = btrfs_block_rsv_add(root, &pending->block_rsv, 889 ret = btrfs_block_rsv_add_noflush(root, &pending->block_rsv,
886 to_reserve); 890 to_reserve);
887 if (ret) { 891 if (ret) {
888 pending->error = ret; 892 pending->error = ret;
889 goto fail; 893 goto fail;
@@ -947,6 +951,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
947 btrfs_tree_unlock(old); 951 btrfs_tree_unlock(old);
948 free_extent_buffer(old); 952 free_extent_buffer(old);
949 953
954 /* see comments in should_cow_block() */
955 root->force_cow = 1;
956 smp_wmb();
957
950 btrfs_set_root_node(new_root_item, tmp); 958 btrfs_set_root_node(new_root_item, tmp);
951 /* record when the snapshot was created in key.offset */ 959 /* record when the snapshot was created in key.offset */
952 key.offset = trans->transid; 960 key.offset = trans->transid;
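The two transaction.c hunks manage the same flag from both ends: create_pending_snapshot() raises root->force_cow once the snapshot has captured the old root, and commit_fs_roots() clears it after the commit roots are switched, each store followed by smp_wmb() to pair with the smp_rmb() that should_cow_block() issues before reading the flag. (The switch to btrfs_block_rsv_add_noflush() in the same function is a separate change: it reserves the snapshot's metadata space without kicking off flushing from inside the transaction.) A rough user-space analogue of the barrier pairing using C11 atomics in place of the kernel primitives (release/acquire is a little stronger than the wmb/rmb pair, but shows the shape):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int force_cow;

/* snapshot side: publish the flag after snapshot state is in place */
static void take_snapshot(void)
{
	/* ...capture the root for the snapshot... */
	atomic_store_explicit(&force_cow, 1, memory_order_release);
}

/* commit side: drop the flag once commit roots have been switched */
static void commit_roots(void)
{
	atomic_store_explicit(&force_cow, 0, memory_order_release);
}

/* reader side, like should_cow_block(): order the load before use */
static int must_cow(void)
{
	return atomic_load_explicit(&force_cow, memory_order_acquire);
}

int main(void)
{
	take_snapshot();
	printf("cow forced: %d\n", must_cow());	/* 1 */
	commit_roots();
	printf("cow forced: %d\n", must_cow());	/* 0 */
	return 0;
}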
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index f8e2943101a1..c37433d3cd82 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -999,7 +999,7 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
999 key.objectid = device->devid; 999 key.objectid = device->devid;
1000 key.offset = start; 1000 key.offset = start;
1001 key.type = BTRFS_DEV_EXTENT_KEY; 1001 key.type = BTRFS_DEV_EXTENT_KEY;
1002 1002again:
1003 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1003 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1004 if (ret > 0) { 1004 if (ret > 0) {
1005 ret = btrfs_previous_item(root, path, key.objectid, 1005 ret = btrfs_previous_item(root, path, key.objectid,
@@ -1012,6 +1012,9 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1012 struct btrfs_dev_extent); 1012 struct btrfs_dev_extent);
1013 BUG_ON(found_key.offset > start || found_key.offset + 1013 BUG_ON(found_key.offset > start || found_key.offset +
1014 btrfs_dev_extent_length(leaf, extent) < start); 1014 btrfs_dev_extent_length(leaf, extent) < start);
1015 key = found_key;
1016 btrfs_release_path(path);
1017 goto again;
1015 } else if (ret == 0) { 1018 } else if (ret == 0) {
1016 leaf = path->nodes[0]; 1019 leaf = path->nodes[0];
1017 extent = btrfs_item_ptr(leaf, path->slots[0], 1020 extent = btrfs_item_ptr(leaf, path->slots[0],
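The new again: loop covers the case where btrfs_search_slot() does not find the exact key (ret > 0) and btrfs_previous_item() steps back to a dev extent that covers start but begins before it: rather than working on that slot as-is, the code copies the found key, releases the path, and re-runs the search so the subsequent delete operates on an exact hit. The same shape over a sorted array, purely for illustration:

#include <stddef.h>

struct item { unsigned long start, len; };

/* find the item whose key equals start, stepping back and re-searching
 * when the first probe overshoots but a previous item covers start */
static long find_covering(const struct item *v, size_t n, unsigned long start)
{
	size_t i;

again:
	for (i = 0; i < n; i++)
		if (v[i].start >= start)
			break;

	if (i == n || v[i].start > start) {	/* no exact hit (ret > 0) */
		if (i == 0)
			return -1;
		i--;				/* btrfs_previous_item() */
		if (v[i].start + v[i].len < start)
			return -1;		/* does not cover start */
		start = v[i].start;		/* key = found_key */
		goto again;			/* re-search: exact hit now */
	}
	return (long)i;				/* ret == 0 */
}

int main(void)
{
	const struct item v[] = { { 0, 10 }, { 20, 5 } };

	return find_covering(v, 2, 22) == 1 ? 0 : 1;	/* covered by {20,5} */
}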
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index ab5b1c49f352..78f2d4d4f37f 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -100,6 +100,12 @@ struct btrfs_device {
100 struct reada_zone *reada_curr_zone; 100 struct reada_zone *reada_curr_zone;
101 struct radix_tree_root reada_zones; 101 struct radix_tree_root reada_zones;
102 struct radix_tree_root reada_extents; 102 struct radix_tree_root reada_extents;
103
104 /* for sending down flush barriers */
105 struct bio *flush_bio;
106 struct completion flush_wait;
107 int nobarriers;
108
103}; 109};
104 110
105struct btrfs_fs_devices { 111struct btrfs_fs_devices {
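The three new btrfs_device fields are plumbing for write barriers: a per-device flush_bio, a completion that the submitter sleeps on until the flush's end_io callback fires, and a nobarriers flag which, judging by the name, marks devices whose cache flushes should be skipped (for example, once one has reported the operation as unsupported). A user-space analogue of the submit-and-wait pairing, with a POSIX semaphore standing in for struct completion and a thread for the end_io callback (flow and names are illustrative):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

struct device {
	sem_t flush_wait;	/* plays struct completion flush_wait */
	int nobarriers;		/* skip flushes for this device */
};

/* models the bio end_io callback signalling the waiter */
static void *flush_end_io(void *arg)
{
	struct device *dev = arg;

	/* ...the device has drained its write cache here... */
	sem_post(&dev->flush_wait);	/* complete(&dev->flush_wait) */
	return NULL;
}

int main(void)
{
	struct device dev = { .nobarriers = 0 };
	pthread_t t;

	sem_init(&dev.flush_wait, 0, 0);
	if (!dev.nobarriers) {
		/* "submit the flush bio", then wait for its end_io */
		pthread_create(&t, NULL, flush_end_io, &dev);
		sem_wait(&dev.flush_wait);	/* wait_for_completion() */
		pthread_join(t, NULL);
	}
	puts("flush complete");
	return 0;
}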