Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r-- | fs/btrfs/extent-tree.c | 2255 |
1 file changed, 1353 insertions(+), 902 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index c6a4f459ad76..b9080d71991a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -35,10 +35,9 @@ | |||
35 | 35 | ||
36 | static int update_block_group(struct btrfs_trans_handle *trans, | 36 | static int update_block_group(struct btrfs_trans_handle *trans, |
37 | struct btrfs_root *root, | 37 | struct btrfs_root *root, |
38 | u64 bytenr, u64 num_bytes, int alloc, | 38 | u64 bytenr, u64 num_bytes, int alloc); |
39 | int mark_free); | 39 | static int update_reserved_bytes(struct btrfs_block_group_cache *cache, |
40 | static int update_reserved_extents(struct btrfs_block_group_cache *cache, | 40 | u64 num_bytes, int reserve, int sinfo); |
41 | u64 num_bytes, int reserve); | ||
42 | static int __btrfs_free_extent(struct btrfs_trans_handle *trans, | 41 | static int __btrfs_free_extent(struct btrfs_trans_handle *trans, |
43 | struct btrfs_root *root, | 42 | struct btrfs_root *root, |
44 | u64 bytenr, u64 num_bytes, u64 parent, | 43 | u64 bytenr, u64 num_bytes, u64 parent, |
@@ -61,12 +60,6 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, | |||
61 | static int do_chunk_alloc(struct btrfs_trans_handle *trans, | 60 | static int do_chunk_alloc(struct btrfs_trans_handle *trans, |
62 | struct btrfs_root *extent_root, u64 alloc_bytes, | 61 | struct btrfs_root *extent_root, u64 alloc_bytes, |
63 | u64 flags, int force); | 62 | u64 flags, int force); |
64 | static int pin_down_bytes(struct btrfs_trans_handle *trans, | ||
65 | struct btrfs_root *root, | ||
66 | struct btrfs_path *path, | ||
67 | u64 bytenr, u64 num_bytes, | ||
68 | int is_data, int reserved, | ||
69 | struct extent_buffer **must_clean); | ||
70 | static int find_next_key(struct btrfs_path *path, int level, | 63 | static int find_next_key(struct btrfs_path *path, int level, |
71 | struct btrfs_key *key); | 64 | struct btrfs_key *key); |
72 | static void dump_space_info(struct btrfs_space_info *info, u64 bytes, | 65 | static void dump_space_info(struct btrfs_space_info *info, u64 bytes, |
@@ -91,8 +84,12 @@ void btrfs_get_block_group(struct btrfs_block_group_cache *cache) | |||
91 | 84 | ||
92 | void btrfs_put_block_group(struct btrfs_block_group_cache *cache) | 85 | void btrfs_put_block_group(struct btrfs_block_group_cache *cache) |
93 | { | 86 | { |
94 | if (atomic_dec_and_test(&cache->count)) | 87 | if (atomic_dec_and_test(&cache->count)) { |
88 | WARN_ON(cache->pinned > 0); | ||
89 | WARN_ON(cache->reserved > 0); | ||
90 | WARN_ON(cache->reserved_pinned > 0); | ||
95 | kfree(cache); | 91 | kfree(cache); |
92 | } | ||
96 | } | 93 | } |
97 | 94 | ||
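The final put now sanity-checks that no pinned or reserved space is still charged to the block group, so accounting leaks surface at free time instead of lingering silently. A hedged usage sketch of the get/put pairing these warnings protect (assuming btrfs_lookup_block_group() returns the cache with a reference held, as it does elsewhere in this file):

	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_block_group(fs_info, bytenr);	/* takes a ref */
	if (cache) {
		/* pin/reserve must be balanced before the final put */
		btrfs_put_block_group(cache);	/* WARN_ONs fire on last ref */
	}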
98 | /* | 95 | /* |
@@ -319,7 +316,7 @@ static int caching_kthread(void *data) | |||
319 | 316 | ||
320 | exclude_super_stripes(extent_root, block_group); | 317 | exclude_super_stripes(extent_root, block_group); |
321 | spin_lock(&block_group->space_info->lock); | 318 | spin_lock(&block_group->space_info->lock); |
322 | block_group->space_info->bytes_super += block_group->bytes_super; | 319 | block_group->space_info->bytes_readonly += block_group->bytes_super; |
323 | spin_unlock(&block_group->space_info->lock); | 320 | spin_unlock(&block_group->space_info->lock); |
324 | 321 | ||
325 | last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); | 322 | last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); |
@@ -507,6 +504,9 @@ static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info, | |||
507 | struct list_head *head = &info->space_info; | 504 | struct list_head *head = &info->space_info; |
508 | struct btrfs_space_info *found; | 505 | struct btrfs_space_info *found; |
509 | 506 | ||
507 | flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM | | ||
508 | BTRFS_BLOCK_GROUP_METADATA; | ||
509 | |||
510 | rcu_read_lock(); | 510 | rcu_read_lock(); |
511 | list_for_each_entry_rcu(found, head, list) { | 511 | list_for_each_entry_rcu(found, head, list) { |
512 | if (found->flags == flags) { | 512 | if (found->flags == flags) { |
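Space infos are now keyed by block-group type alone: the profile bits (RAID levels, DUP) are masked off before comparison, so every profile of a given type resolves to the same space_info. A small illustration of the masking, using the same constants:

	/* RAID1 metadata and DUP metadata share one space_info,
	 * because only the type bits survive the mask. */
	static int same_space_info_key(void)
	{
		u64 mask = BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM |
			   BTRFS_BLOCK_GROUP_METADATA;
		u64 a = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_RAID1;
		u64 b = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DUP;

		return (a & mask) == (b & mask);	/* always 1 */
	}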
@@ -610,6 +610,113 @@ int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len) | |||
610 | } | 610 | } |
611 | 611 | ||
612 | /* | 612 | /* |
613 | * helper function to lookup reference count and flags of extent. | ||
614 | * | ||
615 | * the head node for delayed ref is used to store the sum of all the | ||
616 | * reference count modifications queued up in the rbtree. the head | ||
617 | * node may also store the extent flags to set. This way you can check | ||
618 | * to see what the reference count and extent flags would be if all of | ||
619 | * the delayed refs are not processed. | ||
620 | */ | ||
621 | int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, | ||
622 | struct btrfs_root *root, u64 bytenr, | ||
623 | u64 num_bytes, u64 *refs, u64 *flags) | ||
624 | { | ||
625 | struct btrfs_delayed_ref_head *head; | ||
626 | struct btrfs_delayed_ref_root *delayed_refs; | ||
627 | struct btrfs_path *path; | ||
628 | struct btrfs_extent_item *ei; | ||
629 | struct extent_buffer *leaf; | ||
630 | struct btrfs_key key; | ||
631 | u32 item_size; | ||
632 | u64 num_refs; | ||
633 | u64 extent_flags; | ||
634 | int ret; | ||
635 | |||
636 | path = btrfs_alloc_path(); | ||
637 | if (!path) | ||
638 | return -ENOMEM; | ||
639 | |||
640 | key.objectid = bytenr; | ||
641 | key.type = BTRFS_EXTENT_ITEM_KEY; | ||
642 | key.offset = num_bytes; | ||
643 | if (!trans) { | ||
644 | path->skip_locking = 1; | ||
645 | path->search_commit_root = 1; | ||
646 | } | ||
647 | again: | ||
648 | ret = btrfs_search_slot(trans, root->fs_info->extent_root, | ||
649 | &key, path, 0, 0); | ||
650 | if (ret < 0) | ||
651 | goto out_free; | ||
652 | |||
653 | if (ret == 0) { | ||
654 | leaf = path->nodes[0]; | ||
655 | item_size = btrfs_item_size_nr(leaf, path->slots[0]); | ||
656 | if (item_size >= sizeof(*ei)) { | ||
657 | ei = btrfs_item_ptr(leaf, path->slots[0], | ||
658 | struct btrfs_extent_item); | ||
659 | num_refs = btrfs_extent_refs(leaf, ei); | ||
660 | extent_flags = btrfs_extent_flags(leaf, ei); | ||
661 | } else { | ||
662 | #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 | ||
663 | struct btrfs_extent_item_v0 *ei0; | ||
664 | BUG_ON(item_size != sizeof(*ei0)); | ||
665 | ei0 = btrfs_item_ptr(leaf, path->slots[0], | ||
666 | struct btrfs_extent_item_v0); | ||
667 | num_refs = btrfs_extent_refs_v0(leaf, ei0); | ||
668 | /* FIXME: this isn't correct for data */ | ||
669 | extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF; | ||
670 | #else | ||
671 | BUG(); | ||
672 | #endif | ||
673 | } | ||
674 | BUG_ON(num_refs == 0); | ||
675 | } else { | ||
676 | num_refs = 0; | ||
677 | extent_flags = 0; | ||
678 | ret = 0; | ||
679 | } | ||
680 | |||
681 | if (!trans) | ||
682 | goto out; | ||
683 | |||
684 | delayed_refs = &trans->transaction->delayed_refs; | ||
685 | spin_lock(&delayed_refs->lock); | ||
686 | head = btrfs_find_delayed_ref_head(trans, bytenr); | ||
687 | if (head) { | ||
688 | if (!mutex_trylock(&head->mutex)) { | ||
689 | atomic_inc(&head->node.refs); | ||
690 | spin_unlock(&delayed_refs->lock); | ||
691 | |||
692 | btrfs_release_path(root->fs_info->extent_root, path); | ||
693 | |||
694 | mutex_lock(&head->mutex); | ||
695 | mutex_unlock(&head->mutex); | ||
696 | btrfs_put_delayed_ref(&head->node); | ||
697 | goto again; | ||
698 | } | ||
699 | if (head->extent_op && head->extent_op->update_flags) | ||
700 | extent_flags |= head->extent_op->flags_to_set; | ||
701 | else | ||
702 | BUG_ON(num_refs == 0); | ||
703 | |||
704 | num_refs += head->node.ref_mod; | ||
705 | mutex_unlock(&head->mutex); | ||
706 | } | ||
707 | spin_unlock(&delayed_refs->lock); | ||
708 | out: | ||
709 | WARN_ON(num_refs == 0); | ||
710 | if (refs) | ||
711 | *refs = num_refs; | ||
712 | if (flags) | ||
713 | *flags = extent_flags; | ||
714 | out_free: | ||
715 | btrfs_free_path(path); | ||
716 | return ret; | ||
717 | } | ||
718 | |||
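Because the delayed-ref head carries the sum of queued modifications, the helper reports the reference count and flags as they will be once all delayed refs run, without forcing them to run. A hedged usage sketch; block_is_shared() is illustrative, not part of the patch:

	static int block_is_shared(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, u64 bytenr)
	{
		u64 refs = 0;
		u64 flags = 0;
		int ret;

		/* a NULL trans searches the commit root without locking */
		ret = btrfs_lookup_extent_info(trans, root, bytenr,
					       root->leafsize, &refs, &flags);
		if (ret < 0)
			return ret;
		return refs > 1;
	}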
719 | /* | ||
613 | * Back reference rules. Back refs have three main goals: | 720 | * Back reference rules. Back refs have three main goals: |
614 | * | 721 | * |
615 | * 1) differentiate between all holders of references to an extent so that | 722 | * 1) differentiate between all holders of references to an extent so that |
@@ -1871,7 +1978,6 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans, | |||
1871 | return ret; | 1978 | return ret; |
1872 | } | 1979 | } |
1873 | 1980 | ||
1874 | |||
1875 | /* helper function to actually process a single delayed ref entry */ | 1981 | /* helper function to actually process a single delayed ref entry */ |
1876 | static int run_one_delayed_ref(struct btrfs_trans_handle *trans, | 1982 | static int run_one_delayed_ref(struct btrfs_trans_handle *trans, |
1877 | struct btrfs_root *root, | 1983 | struct btrfs_root *root, |
@@ -1891,32 +1997,14 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans, | |||
1891 | BUG_ON(extent_op); | 1997 | BUG_ON(extent_op); |
1892 | head = btrfs_delayed_node_to_head(node); | 1998 | head = btrfs_delayed_node_to_head(node); |
1893 | if (insert_reserved) { | 1999 | if (insert_reserved) { |
1894 | int mark_free = 0; | 2000 | btrfs_pin_extent(root, node->bytenr, |
1895 | struct extent_buffer *must_clean = NULL; | 2001 | node->num_bytes, 1); |
1896 | |||
1897 | ret = pin_down_bytes(trans, root, NULL, | ||
1898 | node->bytenr, node->num_bytes, | ||
1899 | head->is_data, 1, &must_clean); | ||
1900 | if (ret > 0) | ||
1901 | mark_free = 1; | ||
1902 | |||
1903 | if (must_clean) { | ||
1904 | clean_tree_block(NULL, root, must_clean); | ||
1905 | btrfs_tree_unlock(must_clean); | ||
1906 | free_extent_buffer(must_clean); | ||
1907 | } | ||
1908 | if (head->is_data) { | 2002 | if (head->is_data) { |
1909 | ret = btrfs_del_csums(trans, root, | 2003 | ret = btrfs_del_csums(trans, root, |
1910 | node->bytenr, | 2004 | node->bytenr, |
1911 | node->num_bytes); | 2005 | node->num_bytes); |
1912 | BUG_ON(ret); | 2006 | BUG_ON(ret); |
1913 | } | 2007 | } |
1914 | if (mark_free) { | ||
1915 | ret = btrfs_free_reserved_extent(root, | ||
1916 | node->bytenr, | ||
1917 | node->num_bytes); | ||
1918 | BUG_ON(ret); | ||
1919 | } | ||
1920 | } | 2008 | } |
1921 | mutex_unlock(&head->mutex); | 2009 | mutex_unlock(&head->mutex); |
1922 | return 0; | 2010 | return 0; |
@@ -2347,6 +2435,8 @@ int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, | |||
2347 | ret = 0; | 2435 | ret = 0; |
2348 | out: | 2436 | out: |
2349 | btrfs_free_path(path); | 2437 | btrfs_free_path(path); |
2438 | if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) | ||
2439 | WARN_ON(ret > 0); | ||
2350 | return ret; | 2440 | return ret; |
2351 | } | 2441 | } |
2352 | 2442 | ||
@@ -2660,12 +2750,21 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags, | |||
2660 | struct btrfs_space_info **space_info) | 2750 | struct btrfs_space_info **space_info) |
2661 | { | 2751 | { |
2662 | struct btrfs_space_info *found; | 2752 | struct btrfs_space_info *found; |
2753 | int i; | ||
2754 | int factor; | ||
2755 | |||
2756 | if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 | | ||
2757 | BTRFS_BLOCK_GROUP_RAID10)) | ||
2758 | factor = 2; | ||
2759 | else | ||
2760 | factor = 1; | ||
2663 | 2761 | ||
2664 | found = __find_space_info(info, flags); | 2762 | found = __find_space_info(info, flags); |
2665 | if (found) { | 2763 | if (found) { |
2666 | spin_lock(&found->lock); | 2764 | spin_lock(&found->lock); |
2667 | found->total_bytes += total_bytes; | 2765 | found->total_bytes += total_bytes; |
2668 | found->bytes_used += bytes_used; | 2766 | found->bytes_used += bytes_used; |
2767 | found->disk_used += bytes_used * factor; | ||
2669 | found->full = 0; | 2768 | found->full = 0; |
2670 | spin_unlock(&found->lock); | 2769 | spin_unlock(&found->lock); |
2671 | *space_info = found; | 2770 | *space_info = found; |
@@ -2675,18 +2774,20 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags, | |||
2675 | if (!found) | 2774 | if (!found) |
2676 | return -ENOMEM; | 2775 | return -ENOMEM; |
2677 | 2776 | ||
2678 | INIT_LIST_HEAD(&found->block_groups); | 2777 | for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) |
2778 | INIT_LIST_HEAD(&found->block_groups[i]); | ||
2679 | init_rwsem(&found->groups_sem); | 2779 | init_rwsem(&found->groups_sem); |
2680 | init_waitqueue_head(&found->flush_wait); | ||
2681 | init_waitqueue_head(&found->allocate_wait); | ||
2682 | spin_lock_init(&found->lock); | 2780 | spin_lock_init(&found->lock); |
2683 | found->flags = flags; | 2781 | found->flags = flags & (BTRFS_BLOCK_GROUP_DATA | |
2782 | BTRFS_BLOCK_GROUP_SYSTEM | | ||
2783 | BTRFS_BLOCK_GROUP_METADATA); | ||
2684 | found->total_bytes = total_bytes; | 2784 | found->total_bytes = total_bytes; |
2685 | found->bytes_used = bytes_used; | 2785 | found->bytes_used = bytes_used; |
2786 | found->disk_used = bytes_used * factor; | ||
2686 | found->bytes_pinned = 0; | 2787 | found->bytes_pinned = 0; |
2687 | found->bytes_reserved = 0; | 2788 | found->bytes_reserved = 0; |
2688 | found->bytes_readonly = 0; | 2789 | found->bytes_readonly = 0; |
2689 | found->bytes_delalloc = 0; | 2790 | found->bytes_may_use = 0; |
2690 | found->full = 0; | 2791 | found->full = 0; |
2691 | found->force_alloc = 0; | 2792 | found->force_alloc = 0; |
2692 | *space_info = found; | 2793 | *space_info = found; |
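DUP, RAID1, and RAID10 store every logical byte twice, so the new disk_used counter scales bytes_used by a factor of two for those profiles. A sketch of the scaling, reusing the profile bits from the hunk above:

	/* e.g. 1 GiB used in a RAID1 group consumes 2 GiB of raw disk */
	static u64 disk_bytes_for(u64 bytes_used, u64 flags)
	{
		int factor = 1;

		if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
			     BTRFS_BLOCK_GROUP_RAID10))
			factor = 2;	/* two copies of every byte */

		return bytes_used * factor;
	}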
@@ -2711,19 +2812,6 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) | |||
2711 | } | 2812 | } |
2712 | } | 2813 | } |
2713 | 2814 | ||
2714 | static void set_block_group_readonly(struct btrfs_block_group_cache *cache) | ||
2715 | { | ||
2716 | spin_lock(&cache->space_info->lock); | ||
2717 | spin_lock(&cache->lock); | ||
2718 | if (!cache->ro) { | ||
2719 | cache->space_info->bytes_readonly += cache->key.offset - | ||
2720 | btrfs_block_group_used(&cache->item); | ||
2721 | cache->ro = 1; | ||
2722 | } | ||
2723 | spin_unlock(&cache->lock); | ||
2724 | spin_unlock(&cache->space_info->lock); | ||
2725 | } | ||
2726 | |||
2727 | u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags) | 2815 | u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags) |
2728 | { | 2816 | { |
2729 | u64 num_devices = root->fs_info->fs_devices->rw_devices; | 2817 | u64 num_devices = root->fs_info->fs_devices->rw_devices; |
@@ -2752,491 +2840,50 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags) | |||
2752 | return flags; | 2840 | return flags; |
2753 | } | 2841 | } |
2754 | 2842 | ||
2755 | static u64 btrfs_get_alloc_profile(struct btrfs_root *root, u64 data) | 2843 | static u64 get_alloc_profile(struct btrfs_root *root, u64 flags) |
2756 | { | ||
2757 | struct btrfs_fs_info *info = root->fs_info; | ||
2758 | u64 alloc_profile; | ||
2759 | |||
2760 | if (data) { | ||
2761 | alloc_profile = info->avail_data_alloc_bits & | ||
2762 | info->data_alloc_profile; | ||
2763 | data = BTRFS_BLOCK_GROUP_DATA | alloc_profile; | ||
2764 | } else if (root == root->fs_info->chunk_root) { | ||
2765 | alloc_profile = info->avail_system_alloc_bits & | ||
2766 | info->system_alloc_profile; | ||
2767 | data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile; | ||
2768 | } else { | ||
2769 | alloc_profile = info->avail_metadata_alloc_bits & | ||
2770 | info->metadata_alloc_profile; | ||
2771 | data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile; | ||
2772 | } | ||
2773 | |||
2774 | return btrfs_reduce_alloc_profile(root, data); | ||
2775 | } | ||
2776 | |||
2777 | void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode) | ||
2778 | { | ||
2779 | u64 alloc_target; | ||
2780 | |||
2781 | alloc_target = btrfs_get_alloc_profile(root, 1); | ||
2782 | BTRFS_I(inode)->space_info = __find_space_info(root->fs_info, | ||
2783 | alloc_target); | ||
2784 | } | ||
2785 | |||
2786 | static u64 calculate_bytes_needed(struct btrfs_root *root, int num_items) | ||
2787 | { | ||
2788 | u64 num_bytes; | ||
2789 | int level; | ||
2790 | |||
2791 | level = BTRFS_MAX_LEVEL - 2; | ||
2792 | /* | ||
2793 | * NOTE: these calculations are absolutely the worst possible case. | ||
2794 | * This assumes that _every_ item we insert will require a new leaf, and | ||
2795 | * that the tree has grown to its maximum level size. | ||
2796 | */ | ||
2797 | |||
2798 | /* | ||
2799 | * for every item we insert we could insert both an extent item and a | ||
2800 | * extent ref item. Then for ever item we insert, we will need to cow | ||
2801 | * both the original leaf, plus the leaf to the left and right of it. | ||
2802 | * | ||
2803 | * Unless we are talking about the extent root, then we just want the | ||
2804 | * number of items * 2, since we just need the extent item plus its ref. | ||
2805 | */ | ||
2806 | if (root == root->fs_info->extent_root) | ||
2807 | num_bytes = num_items * 2; | ||
2808 | else | ||
2809 | num_bytes = (num_items + (2 * num_items)) * 3; | ||
2810 | |||
2811 | /* | ||
2812 | * num_bytes is total number of leaves we could need times the leaf | ||
2813 | * size, and then for every leaf we could end up cow'ing 2 nodes per | ||
2814 | * level, down to the leaf level. | ||
2815 | */ | ||
2816 | num_bytes = (num_bytes * root->leafsize) + | ||
2817 | (num_bytes * (level * 2)) * root->nodesize; | ||
2818 | |||
2819 | return num_bytes; | ||
2820 | } | ||
2821 | |||
2822 | /* | ||
2823 | * Unreserve metadata space for delalloc. If we have less reserved credits than | ||
2824 | * we have extents, this function does nothing. | ||
2825 | */ | ||
2826 | int btrfs_unreserve_metadata_for_delalloc(struct btrfs_root *root, | ||
2827 | struct inode *inode, int num_items) | ||
2828 | { | ||
2829 | struct btrfs_fs_info *info = root->fs_info; | ||
2830 | struct btrfs_space_info *meta_sinfo; | ||
2831 | u64 num_bytes; | ||
2832 | u64 alloc_target; | ||
2833 | bool bug = false; | ||
2834 | |||
2835 | /* get the space info for where the metadata will live */ | ||
2836 | alloc_target = btrfs_get_alloc_profile(root, 0); | ||
2837 | meta_sinfo = __find_space_info(info, alloc_target); | ||
2838 | |||
2839 | num_bytes = calculate_bytes_needed(root->fs_info->extent_root, | ||
2840 | num_items); | ||
2841 | |||
2842 | spin_lock(&meta_sinfo->lock); | ||
2843 | spin_lock(&BTRFS_I(inode)->accounting_lock); | ||
2844 | if (BTRFS_I(inode)->reserved_extents <= | ||
2845 | BTRFS_I(inode)->outstanding_extents) { | ||
2846 | spin_unlock(&BTRFS_I(inode)->accounting_lock); | ||
2847 | spin_unlock(&meta_sinfo->lock); | ||
2848 | return 0; | ||
2849 | } | ||
2850 | spin_unlock(&BTRFS_I(inode)->accounting_lock); | ||
2851 | |||
2852 | BTRFS_I(inode)->reserved_extents -= num_items; | ||
2853 | BUG_ON(BTRFS_I(inode)->reserved_extents < 0); | ||
2854 | |||
2855 | if (meta_sinfo->bytes_delalloc < num_bytes) { | ||
2856 | bug = true; | ||
2857 | meta_sinfo->bytes_delalloc = 0; | ||
2858 | } else { | ||
2859 | meta_sinfo->bytes_delalloc -= num_bytes; | ||
2860 | } | ||
2861 | spin_unlock(&meta_sinfo->lock); | ||
2862 | |||
2863 | BUG_ON(bug); | ||
2864 | |||
2865 | return 0; | ||
2866 | } | ||
2867 | |||
2868 | static void check_force_delalloc(struct btrfs_space_info *meta_sinfo) | ||
2869 | { | 2844 | { |
2870 | u64 thresh; | 2845 | if (flags & BTRFS_BLOCK_GROUP_DATA) |
2871 | 2846 | flags |= root->fs_info->avail_data_alloc_bits & | |
2872 | thresh = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved + | 2847 | root->fs_info->data_alloc_profile; |
2873 | meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly + | 2848 | else if (flags & BTRFS_BLOCK_GROUP_SYSTEM) |
2874 | meta_sinfo->bytes_super + meta_sinfo->bytes_root + | 2849 | flags |= root->fs_info->avail_system_alloc_bits & |
2875 | meta_sinfo->bytes_may_use; | 2850 | root->fs_info->system_alloc_profile; |
2876 | 2851 | else if (flags & BTRFS_BLOCK_GROUP_METADATA) | |
2877 | thresh = meta_sinfo->total_bytes - thresh; | 2852 | flags |= root->fs_info->avail_metadata_alloc_bits & |
2878 | thresh *= 80; | 2853 | root->fs_info->metadata_alloc_profile; |
2879 | do_div(thresh, 100); | 2854 | return btrfs_reduce_alloc_profile(root, flags); |
2880 | if (thresh <= meta_sinfo->bytes_delalloc) | ||
2881 | meta_sinfo->force_delalloc = 1; | ||
2882 | else | ||
2883 | meta_sinfo->force_delalloc = 0; | ||
2884 | } | 2855 | } |
2885 | 2856 | ||
2886 | struct async_flush { | 2857 | static u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data) |
2887 | struct btrfs_root *root; | ||
2888 | struct btrfs_space_info *info; | ||
2889 | struct btrfs_work work; | ||
2890 | }; | ||
2891 | |||
2892 | static noinline void flush_delalloc_async(struct btrfs_work *work) | ||
2893 | { | 2858 | { |
2894 | struct async_flush *async; | 2859 | u64 flags; |
2895 | struct btrfs_root *root; | ||
2896 | struct btrfs_space_info *info; | ||
2897 | |||
2898 | async = container_of(work, struct async_flush, work); | ||
2899 | root = async->root; | ||
2900 | info = async->info; | ||
2901 | |||
2902 | btrfs_start_delalloc_inodes(root, 0); | ||
2903 | wake_up(&info->flush_wait); | ||
2904 | btrfs_wait_ordered_extents(root, 0, 0); | ||
2905 | |||
2906 | spin_lock(&info->lock); | ||
2907 | info->flushing = 0; | ||
2908 | spin_unlock(&info->lock); | ||
2909 | wake_up(&info->flush_wait); | ||
2910 | |||
2911 | kfree(async); | ||
2912 | } | ||
2913 | |||
2914 | static void wait_on_flush(struct btrfs_space_info *info) | ||
2915 | { | ||
2916 | DEFINE_WAIT(wait); | ||
2917 | u64 used; | ||
2918 | |||
2919 | while (1) { | ||
2920 | prepare_to_wait(&info->flush_wait, &wait, | ||
2921 | TASK_UNINTERRUPTIBLE); | ||
2922 | spin_lock(&info->lock); | ||
2923 | if (!info->flushing) { | ||
2924 | spin_unlock(&info->lock); | ||
2925 | break; | ||
2926 | } | ||
2927 | |||
2928 | used = info->bytes_used + info->bytes_reserved + | ||
2929 | info->bytes_pinned + info->bytes_readonly + | ||
2930 | info->bytes_super + info->bytes_root + | ||
2931 | info->bytes_may_use + info->bytes_delalloc; | ||
2932 | if (used < info->total_bytes) { | ||
2933 | spin_unlock(&info->lock); | ||
2934 | break; | ||
2935 | } | ||
2936 | spin_unlock(&info->lock); | ||
2937 | schedule(); | ||
2938 | } | ||
2939 | finish_wait(&info->flush_wait, &wait); | ||
2940 | } | ||
2941 | |||
2942 | static void flush_delalloc(struct btrfs_root *root, | ||
2943 | struct btrfs_space_info *info) | ||
2944 | { | ||
2945 | struct async_flush *async; | ||
2946 | bool wait = false; | ||
2947 | |||
2948 | spin_lock(&info->lock); | ||
2949 | 2860 | ||
2950 | if (!info->flushing) | 2861 | if (data) |
2951 | info->flushing = 1; | 2862 | flags = BTRFS_BLOCK_GROUP_DATA; |
2863 | else if (root == root->fs_info->chunk_root) | ||
2864 | flags = BTRFS_BLOCK_GROUP_SYSTEM; | ||
2952 | else | 2865 | else |
2953 | wait = true; | 2866 | flags = BTRFS_BLOCK_GROUP_METADATA; |
2954 | |||
2955 | spin_unlock(&info->lock); | ||
2956 | |||
2957 | if (wait) { | ||
2958 | wait_on_flush(info); | ||
2959 | return; | ||
2960 | } | ||
2961 | |||
2962 | async = kzalloc(sizeof(*async), GFP_NOFS); | ||
2963 | if (!async) | ||
2964 | goto flush; | ||
2965 | |||
2966 | async->root = root; | ||
2967 | async->info = info; | ||
2968 | async->work.func = flush_delalloc_async; | ||
2969 | 2867 | ||
2970 | btrfs_queue_worker(&root->fs_info->enospc_workers, | 2868 | return get_alloc_profile(root, flags); |
2971 | &async->work); | ||
2972 | wait_on_flush(info); | ||
2973 | return; | ||
2974 | |||
2975 | flush: | ||
2976 | btrfs_start_delalloc_inodes(root, 0); | ||
2977 | btrfs_wait_ordered_extents(root, 0, 0); | ||
2978 | |||
2979 | spin_lock(&info->lock); | ||
2980 | info->flushing = 0; | ||
2981 | spin_unlock(&info->lock); | ||
2982 | wake_up(&info->flush_wait); | ||
2983 | } | 2869 | } |
2984 | 2870 | ||
2985 | static int maybe_allocate_chunk(struct btrfs_root *root, | 2871 | void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode) |
2986 | struct btrfs_space_info *info) | ||
2987 | { | ||
2988 | struct btrfs_super_block *disk_super = &root->fs_info->super_copy; | ||
2989 | struct btrfs_trans_handle *trans; | ||
2990 | bool wait = false; | ||
2991 | int ret = 0; | ||
2992 | u64 min_metadata; | ||
2993 | u64 free_space; | ||
2994 | |||
2995 | free_space = btrfs_super_total_bytes(disk_super); | ||
2996 | /* | ||
2997 | * we allow the metadata to grow to a max of either 10gb or 5% of the | ||
2998 | * space in the volume. | ||
2999 | */ | ||
3000 | min_metadata = min((u64)10 * 1024 * 1024 * 1024, | ||
3001 | div64_u64(free_space * 5, 100)); | ||
3002 | if (info->total_bytes >= min_metadata) { | ||
3003 | spin_unlock(&info->lock); | ||
3004 | return 0; | ||
3005 | } | ||
3006 | |||
3007 | if (info->full) { | ||
3008 | spin_unlock(&info->lock); | ||
3009 | return 0; | ||
3010 | } | ||
3011 | |||
3012 | if (!info->allocating_chunk) { | ||
3013 | info->force_alloc = 1; | ||
3014 | info->allocating_chunk = 1; | ||
3015 | } else { | ||
3016 | wait = true; | ||
3017 | } | ||
3018 | |||
3019 | spin_unlock(&info->lock); | ||
3020 | |||
3021 | if (wait) { | ||
3022 | wait_event(info->allocate_wait, | ||
3023 | !info->allocating_chunk); | ||
3024 | return 1; | ||
3025 | } | ||
3026 | |||
3027 | trans = btrfs_start_transaction(root, 1); | ||
3028 | if (!trans) { | ||
3029 | ret = -ENOMEM; | ||
3030 | goto out; | ||
3031 | } | ||
3032 | |||
3033 | ret = do_chunk_alloc(trans, root->fs_info->extent_root, | ||
3034 | 4096 + 2 * 1024 * 1024, | ||
3035 | info->flags, 0); | ||
3036 | btrfs_end_transaction(trans, root); | ||
3037 | if (ret) | ||
3038 | goto out; | ||
3039 | out: | ||
3040 | spin_lock(&info->lock); | ||
3041 | info->allocating_chunk = 0; | ||
3042 | spin_unlock(&info->lock); | ||
3043 | wake_up(&info->allocate_wait); | ||
3044 | |||
3045 | if (ret) | ||
3046 | return 0; | ||
3047 | return 1; | ||
3048 | } | ||
3049 | |||
3050 | /* | ||
3051 | * Reserve metadata space for delalloc. | ||
3052 | */ | ||
3053 | int btrfs_reserve_metadata_for_delalloc(struct btrfs_root *root, | ||
3054 | struct inode *inode, int num_items) | ||
3055 | { | ||
3056 | struct btrfs_fs_info *info = root->fs_info; | ||
3057 | struct btrfs_space_info *meta_sinfo; | ||
3058 | u64 num_bytes; | ||
3059 | u64 used; | ||
3060 | u64 alloc_target; | ||
3061 | int flushed = 0; | ||
3062 | int force_delalloc; | ||
3063 | |||
3064 | /* get the space info for where the metadata will live */ | ||
3065 | alloc_target = btrfs_get_alloc_profile(root, 0); | ||
3066 | meta_sinfo = __find_space_info(info, alloc_target); | ||
3067 | |||
3068 | num_bytes = calculate_bytes_needed(root->fs_info->extent_root, | ||
3069 | num_items); | ||
3070 | again: | ||
3071 | spin_lock(&meta_sinfo->lock); | ||
3072 | |||
3073 | force_delalloc = meta_sinfo->force_delalloc; | ||
3074 | |||
3075 | if (unlikely(!meta_sinfo->bytes_root)) | ||
3076 | meta_sinfo->bytes_root = calculate_bytes_needed(root, 6); | ||
3077 | |||
3078 | if (!flushed) | ||
3079 | meta_sinfo->bytes_delalloc += num_bytes; | ||
3080 | |||
3081 | used = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved + | ||
3082 | meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly + | ||
3083 | meta_sinfo->bytes_super + meta_sinfo->bytes_root + | ||
3084 | meta_sinfo->bytes_may_use + meta_sinfo->bytes_delalloc; | ||
3085 | |||
3086 | if (used > meta_sinfo->total_bytes) { | ||
3087 | flushed++; | ||
3088 | |||
3089 | if (flushed == 1) { | ||
3090 | if (maybe_allocate_chunk(root, meta_sinfo)) | ||
3091 | goto again; | ||
3092 | flushed++; | ||
3093 | } else { | ||
3094 | spin_unlock(&meta_sinfo->lock); | ||
3095 | } | ||
3096 | |||
3097 | if (flushed == 2) { | ||
3098 | filemap_flush(inode->i_mapping); | ||
3099 | goto again; | ||
3100 | } else if (flushed == 3) { | ||
3101 | flush_delalloc(root, meta_sinfo); | ||
3102 | goto again; | ||
3103 | } | ||
3104 | spin_lock(&meta_sinfo->lock); | ||
3105 | meta_sinfo->bytes_delalloc -= num_bytes; | ||
3106 | spin_unlock(&meta_sinfo->lock); | ||
3107 | printk(KERN_ERR "enospc, has %d, reserved %d\n", | ||
3108 | BTRFS_I(inode)->outstanding_extents, | ||
3109 | BTRFS_I(inode)->reserved_extents); | ||
3110 | dump_space_info(meta_sinfo, 0, 0); | ||
3111 | return -ENOSPC; | ||
3112 | } | ||
3113 | |||
3114 | BTRFS_I(inode)->reserved_extents += num_items; | ||
3115 | check_force_delalloc(meta_sinfo); | ||
3116 | spin_unlock(&meta_sinfo->lock); | ||
3117 | |||
3118 | if (!flushed && force_delalloc) | ||
3119 | filemap_flush(inode->i_mapping); | ||
3120 | |||
3121 | return 0; | ||
3122 | } | ||
3123 | |||
3124 | /* | ||
3125 | * unreserve num_items number of items worth of metadata space. This needs to | ||
3126 | * be paired with btrfs_reserve_metadata_space. | ||
3127 | * | ||
3128 | * NOTE: if you have the option, run this _AFTER_ you do a | ||
3129 | * btrfs_end_transaction, since btrfs_end_transaction will run delayed ref | ||
3130 | * oprations which will result in more used metadata, so we want to make sure we | ||
3131 | * can do that without issue. | ||
3132 | */ | ||
3133 | int btrfs_unreserve_metadata_space(struct btrfs_root *root, int num_items) | ||
3134 | { | ||
3135 | struct btrfs_fs_info *info = root->fs_info; | ||
3136 | struct btrfs_space_info *meta_sinfo; | ||
3137 | u64 num_bytes; | ||
3138 | u64 alloc_target; | ||
3139 | bool bug = false; | ||
3140 | |||
3141 | /* get the space info for where the metadata will live */ | ||
3142 | alloc_target = btrfs_get_alloc_profile(root, 0); | ||
3143 | meta_sinfo = __find_space_info(info, alloc_target); | ||
3144 | |||
3145 | num_bytes = calculate_bytes_needed(root, num_items); | ||
3146 | |||
3147 | spin_lock(&meta_sinfo->lock); | ||
3148 | if (meta_sinfo->bytes_may_use < num_bytes) { | ||
3149 | bug = true; | ||
3150 | meta_sinfo->bytes_may_use = 0; | ||
3151 | } else { | ||
3152 | meta_sinfo->bytes_may_use -= num_bytes; | ||
3153 | } | ||
3154 | spin_unlock(&meta_sinfo->lock); | ||
3155 | |||
3156 | BUG_ON(bug); | ||
3157 | |||
3158 | return 0; | ||
3159 | } | ||
3160 | |||
3161 | /* | ||
3162 | * Reserve some metadata space for use. We'll calculate the worste case number | ||
3163 | * of bytes that would be needed to modify num_items number of items. If we | ||
3164 | * have space, fantastic, if not, you get -ENOSPC. Please call | ||
3165 | * btrfs_unreserve_metadata_space when you are done for the _SAME_ number of | ||
3166 | * items you reserved, since whatever metadata you needed should have already | ||
3167 | * been allocated. | ||
3168 | * | ||
3169 | * This will commit the transaction to make more space if we don't have enough | ||
3170 | * metadata space. THe only time we don't do this is if we're reserving space | ||
3171 | * inside of a transaction, then we will just return -ENOSPC and it is the | ||
3172 | * callers responsibility to handle it properly. | ||
3173 | */ | ||
3174 | int btrfs_reserve_metadata_space(struct btrfs_root *root, int num_items) | ||
3175 | { | 2872 | { |
3176 | struct btrfs_fs_info *info = root->fs_info; | 2873 | BTRFS_I(inode)->space_info = __find_space_info(root->fs_info, |
3177 | struct btrfs_space_info *meta_sinfo; | 2874 | BTRFS_BLOCK_GROUP_DATA); |
3178 | u64 num_bytes; | ||
3179 | u64 used; | ||
3180 | u64 alloc_target; | ||
3181 | int retries = 0; | ||
3182 | |||
3183 | /* get the space info for where the metadata will live */ | ||
3184 | alloc_target = btrfs_get_alloc_profile(root, 0); | ||
3185 | meta_sinfo = __find_space_info(info, alloc_target); | ||
3186 | |||
3187 | num_bytes = calculate_bytes_needed(root, num_items); | ||
3188 | again: | ||
3189 | spin_lock(&meta_sinfo->lock); | ||
3190 | |||
3191 | if (unlikely(!meta_sinfo->bytes_root)) | ||
3192 | meta_sinfo->bytes_root = calculate_bytes_needed(root, 6); | ||
3193 | |||
3194 | if (!retries) | ||
3195 | meta_sinfo->bytes_may_use += num_bytes; | ||
3196 | |||
3197 | used = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved + | ||
3198 | meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly + | ||
3199 | meta_sinfo->bytes_super + meta_sinfo->bytes_root + | ||
3200 | meta_sinfo->bytes_may_use + meta_sinfo->bytes_delalloc; | ||
3201 | |||
3202 | if (used > meta_sinfo->total_bytes) { | ||
3203 | retries++; | ||
3204 | if (retries == 1) { | ||
3205 | if (maybe_allocate_chunk(root, meta_sinfo)) | ||
3206 | goto again; | ||
3207 | retries++; | ||
3208 | } else { | ||
3209 | spin_unlock(&meta_sinfo->lock); | ||
3210 | } | ||
3211 | |||
3212 | if (retries == 2) { | ||
3213 | flush_delalloc(root, meta_sinfo); | ||
3214 | goto again; | ||
3215 | } | ||
3216 | spin_lock(&meta_sinfo->lock); | ||
3217 | meta_sinfo->bytes_may_use -= num_bytes; | ||
3218 | spin_unlock(&meta_sinfo->lock); | ||
3219 | |||
3220 | dump_space_info(meta_sinfo, 0, 0); | ||
3221 | return -ENOSPC; | ||
3222 | } | ||
3223 | |||
3224 | check_force_delalloc(meta_sinfo); | ||
3225 | spin_unlock(&meta_sinfo->lock); | ||
3226 | |||
3227 | return 0; | ||
3228 | } | 2875 | } |
3229 | 2876 | ||
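The profile lookup is split in two: get_alloc_profile() works on type flags directly, and btrfs_get_alloc_profile() keeps the old data/system/metadata classification on top of it. A sketch of the data path, with the avail-bits example assumed rather than taken from the patch:

	/* Mirrors btrfs_get_alloc_profile(root, 1) above. */
	static u64 data_alloc_target(struct btrfs_root *root)
	{
		u64 flags = BTRFS_BLOCK_GROUP_DATA;

		/* avail_data_alloc_bits might contain e.g. RAID0|RAID1 */
		flags |= root->fs_info->avail_data_alloc_bits &
			 root->fs_info->data_alloc_profile;

		/* drops profiles the device count cannot satisfy */
		return btrfs_reduce_alloc_profile(root, flags);
	}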
3230 | /* | 2877 | /* |
3231 | * This will check the space that the inode allocates from to make sure we have | 2878 | * This will check the space that the inode allocates from to make sure we have |
3232 | * enough space for bytes. | 2879 | * enough space for bytes. |
3233 | */ | 2880 | */ |
3234 | int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode, | 2881 | int btrfs_check_data_free_space(struct inode *inode, u64 bytes) |
3235 | u64 bytes) | ||
3236 | { | 2882 | { |
3237 | struct btrfs_space_info *data_sinfo; | 2883 | struct btrfs_space_info *data_sinfo; |
2884 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
3238 | u64 used; | 2885 | u64 used; |
3239 | int ret = 0, committed = 0, flushed = 0; | 2886 | int ret = 0, committed = 0; |
3240 | 2887 | ||
3241 | /* make sure bytes are sectorsize aligned */ | 2888 | /* make sure bytes are sectorsize aligned */ |
3242 | bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); | 2889 | bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); |
@@ -3248,21 +2895,13 @@ int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode, | |||
3248 | again: | 2895 | again: |
3249 | /* make sure we have enough space to handle the data first */ | 2896 | /* make sure we have enough space to handle the data first */ |
3250 | spin_lock(&data_sinfo->lock); | 2897 | spin_lock(&data_sinfo->lock); |
3251 | used = data_sinfo->bytes_used + data_sinfo->bytes_delalloc + | 2898 | used = data_sinfo->bytes_used + data_sinfo->bytes_reserved + |
3252 | data_sinfo->bytes_reserved + data_sinfo->bytes_pinned + | 2899 | data_sinfo->bytes_pinned + data_sinfo->bytes_readonly + |
3253 | data_sinfo->bytes_readonly + data_sinfo->bytes_may_use + | 2900 | data_sinfo->bytes_may_use; |
3254 | data_sinfo->bytes_super; | ||
3255 | 2901 | ||
3256 | if (used + bytes > data_sinfo->total_bytes) { | 2902 | if (used + bytes > data_sinfo->total_bytes) { |
3257 | struct btrfs_trans_handle *trans; | 2903 | struct btrfs_trans_handle *trans; |
3258 | 2904 | ||
3259 | if (!flushed) { | ||
3260 | spin_unlock(&data_sinfo->lock); | ||
3261 | flush_delalloc(root, data_sinfo); | ||
3262 | flushed = 1; | ||
3263 | goto again; | ||
3264 | } | ||
3265 | |||
3266 | /* | 2905 | /* |
3267 | * if we don't have enough free bytes in this space then we need | 2906 | * if we don't have enough free bytes in this space then we need |
3268 | * to alloc a new chunk. | 2907 | * to alloc a new chunk. |
@@ -3274,15 +2913,15 @@ again: | |||
3274 | spin_unlock(&data_sinfo->lock); | 2913 | spin_unlock(&data_sinfo->lock); |
3275 | alloc: | 2914 | alloc: |
3276 | alloc_target = btrfs_get_alloc_profile(root, 1); | 2915 | alloc_target = btrfs_get_alloc_profile(root, 1); |
3277 | trans = btrfs_start_transaction(root, 1); | 2916 | trans = btrfs_join_transaction(root, 1); |
3278 | if (!trans) | 2917 | if (IS_ERR(trans)) |
3279 | return -ENOMEM; | 2918 | return PTR_ERR(trans); |
3280 | 2919 | ||
3281 | ret = do_chunk_alloc(trans, root->fs_info->extent_root, | 2920 | ret = do_chunk_alloc(trans, root->fs_info->extent_root, |
3282 | bytes + 2 * 1024 * 1024, | 2921 | bytes + 2 * 1024 * 1024, |
3283 | alloc_target, 0); | 2922 | alloc_target, 0); |
3284 | btrfs_end_transaction(trans, root); | 2923 | btrfs_end_transaction(trans, root); |
3285 | if (ret) | 2924 | if (ret < 0) |
3286 | return ret; | 2925 | return ret; |
3287 | 2926 | ||
3288 | if (!data_sinfo) { | 2927 | if (!data_sinfo) { |
@@ -3297,25 +2936,26 @@ alloc: | |||
3297 | if (!committed && !root->fs_info->open_ioctl_trans) { | 2936 | if (!committed && !root->fs_info->open_ioctl_trans) { |
3298 | committed = 1; | 2937 | committed = 1; |
3299 | trans = btrfs_join_transaction(root, 1); | 2938 | trans = btrfs_join_transaction(root, 1); |
3300 | if (!trans) | 2939 | if (IS_ERR(trans)) |
3301 | return -ENOMEM; | 2940 | return PTR_ERR(trans); |
3302 | ret = btrfs_commit_transaction(trans, root); | 2941 | ret = btrfs_commit_transaction(trans, root); |
3303 | if (ret) | 2942 | if (ret) |
3304 | return ret; | 2943 | return ret; |
3305 | goto again; | 2944 | goto again; |
3306 | } | 2945 | } |
3307 | 2946 | ||
3308 | printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes" | 2947 | #if 0 /* I hope we never need this code again, just in case */ |
3309 | ", %llu bytes_used, %llu bytes_reserved, " | 2948 | printk(KERN_ERR "no space left, need %llu, %llu bytes_used, " |
3310 | "%llu bytes_pinned, %llu bytes_readonly, %llu may use " | 2949 | "%llu bytes_reserved, " "%llu bytes_pinned, " |
3311 | "%llu total\n", (unsigned long long)bytes, | 2950 | "%llu bytes_readonly, %llu may use %llu total\n", |
3312 | (unsigned long long)data_sinfo->bytes_delalloc, | 2951 | (unsigned long long)bytes, |
3313 | (unsigned long long)data_sinfo->bytes_used, | 2952 | (unsigned long long)data_sinfo->bytes_used, |
3314 | (unsigned long long)data_sinfo->bytes_reserved, | 2953 | (unsigned long long)data_sinfo->bytes_reserved, |
3315 | (unsigned long long)data_sinfo->bytes_pinned, | 2954 | (unsigned long long)data_sinfo->bytes_pinned, |
3316 | (unsigned long long)data_sinfo->bytes_readonly, | 2955 | (unsigned long long)data_sinfo->bytes_readonly, |
3317 | (unsigned long long)data_sinfo->bytes_may_use, | 2956 | (unsigned long long)data_sinfo->bytes_may_use, |
3318 | (unsigned long long)data_sinfo->total_bytes); | 2957 | (unsigned long long)data_sinfo->total_bytes); |
2958 | #endif | ||
3319 | return -ENOSPC; | 2959 | return -ENOSPC; |
3320 | } | 2960 | } |
3321 | data_sinfo->bytes_may_use += bytes; | 2961 | data_sinfo->bytes_may_use += bytes; |
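The data reservation now retries in two stages: allocate a fresh data chunk if the space_info is not full, then commit the transaction once so pinned bytes return to the free pool before giving up with -ENOSPC. A hedged caller-side sketch; do_the_write() is a hypothetical stand-in:

	static int prepare_write(struct inode *inode, u64 bytes)
	{
		int ret;

		ret = btrfs_check_data_free_space(inode, bytes);
		if (ret)	/* -ENOSPC after chunk-alloc and commit retries */
			return ret;

		ret = do_the_write(inode, bytes);	/* hypothetical */
		if (ret)
			btrfs_free_reserved_data_space(inode, bytes);
		return ret;
	}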
@@ -3326,12 +2966,13 @@ alloc: | |||
3326 | } | 2966 | } |
3327 | 2967 | ||
3328 | /* | 2968 | /* |
3329 | * if there was an error for whatever reason after calling | 2969 | * called when we are clearing an delalloc extent from the |
3330 | * btrfs_check_data_free_space, call this so we can cleanup the counters. | 2970 | * inode's io_tree or there was an error for whatever reason |
2971 | * after calling btrfs_check_data_free_space | ||
3331 | */ | 2972 | */ |
3332 | void btrfs_free_reserved_data_space(struct btrfs_root *root, | 2973 | void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes) |
3333 | struct inode *inode, u64 bytes) | ||
3334 | { | 2974 | { |
2975 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
3335 | struct btrfs_space_info *data_sinfo; | 2976 | struct btrfs_space_info *data_sinfo; |
3336 | 2977 | ||
3337 | /* make sure bytes are sectorsize aligned */ | 2978 | /* make sure bytes are sectorsize aligned */ |
@@ -3344,48 +2985,6 @@ void btrfs_free_reserved_data_space(struct btrfs_root *root, | |||
3344 | spin_unlock(&data_sinfo->lock); | 2985 | spin_unlock(&data_sinfo->lock); |
3345 | } | 2986 | } |
3346 | 2987 | ||
3347 | /* called when we are adding a delalloc extent to the inode's io_tree */ | ||
3348 | void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode, | ||
3349 | u64 bytes) | ||
3350 | { | ||
3351 | struct btrfs_space_info *data_sinfo; | ||
3352 | |||
3353 | /* get the space info for where this inode will be storing its data */ | ||
3354 | data_sinfo = BTRFS_I(inode)->space_info; | ||
3355 | |||
3356 | /* make sure we have enough space to handle the data first */ | ||
3357 | spin_lock(&data_sinfo->lock); | ||
3358 | data_sinfo->bytes_delalloc += bytes; | ||
3359 | |||
3360 | /* | ||
3361 | * we are adding a delalloc extent without calling | ||
3362 | * btrfs_check_data_free_space first. This happens on a weird | ||
3363 | * writepage condition, but shouldn't hurt our accounting | ||
3364 | */ | ||
3365 | if (unlikely(bytes > BTRFS_I(inode)->reserved_bytes)) { | ||
3366 | data_sinfo->bytes_may_use -= BTRFS_I(inode)->reserved_bytes; | ||
3367 | BTRFS_I(inode)->reserved_bytes = 0; | ||
3368 | } else { | ||
3369 | data_sinfo->bytes_may_use -= bytes; | ||
3370 | BTRFS_I(inode)->reserved_bytes -= bytes; | ||
3371 | } | ||
3372 | |||
3373 | spin_unlock(&data_sinfo->lock); | ||
3374 | } | ||
3375 | |||
3376 | /* called when we are clearing an delalloc extent from the inode's io_tree */ | ||
3377 | void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode, | ||
3378 | u64 bytes) | ||
3379 | { | ||
3380 | struct btrfs_space_info *info; | ||
3381 | |||
3382 | info = BTRFS_I(inode)->space_info; | ||
3383 | |||
3384 | spin_lock(&info->lock); | ||
3385 | info->bytes_delalloc -= bytes; | ||
3386 | spin_unlock(&info->lock); | ||
3387 | } | ||
3388 | |||
3389 | static void force_metadata_allocation(struct btrfs_fs_info *info) | 2988 | static void force_metadata_allocation(struct btrfs_fs_info *info) |
3390 | { | 2989 | { |
3391 | struct list_head *head = &info->space_info; | 2990 | struct list_head *head = &info->space_info; |
@@ -3399,13 +2998,28 @@ static void force_metadata_allocation(struct btrfs_fs_info *info) | |||
3399 | rcu_read_unlock(); | 2998 | rcu_read_unlock(); |
3400 | } | 2999 | } |
3401 | 3000 | ||
3001 | static int should_alloc_chunk(struct btrfs_space_info *sinfo, | ||
3002 | u64 alloc_bytes) | ||
3003 | { | ||
3004 | u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly; | ||
3005 | |||
3006 | if (sinfo->bytes_used + sinfo->bytes_reserved + | ||
3007 | alloc_bytes + 256 * 1024 * 1024 < num_bytes) | ||
3008 | return 0; | ||
3009 | |||
3010 | if (sinfo->bytes_used + sinfo->bytes_reserved + | ||
3011 | alloc_bytes < div_factor(num_bytes, 8)) | ||
3012 | return 0; | ||
3013 | |||
3014 | return 1; | ||
3015 | } | ||
3016 | |||
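should_alloc_chunk() declines while more than 256 MiB of slack would remain and usage sits under roughly 80% of the writable space (div_factor(n, 8)). A worked example with round numbers, assuming div_factor(n, f) evaluates to n * f / 10 as defined elsewhere in btrfs:

	static int should_alloc_chunk_example(void)
	{
		u64 total = 10ULL << 30;	/* 10 GiB, none read-only */
		u64 used  = 7ULL << 30;		/* bytes_used + bytes_reserved */
		u64 alloc = 256ULL << 20;	/* requested chunk size */

		if (used + alloc + 256 * 1024 * 1024 < total)
			return 0;	/* taken here: ~7.5 GiB < 10 GiB */
		if (used + alloc < (total * 8) / 10)
			return 0;	/* ~80% threshold: 7.25 GiB < 8 GiB */
		return 1;		/* allocate only when nearly full */
	}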
3402 | static int do_chunk_alloc(struct btrfs_trans_handle *trans, | 3017 | static int do_chunk_alloc(struct btrfs_trans_handle *trans, |
3403 | struct btrfs_root *extent_root, u64 alloc_bytes, | 3018 | struct btrfs_root *extent_root, u64 alloc_bytes, |
3404 | u64 flags, int force) | 3019 | u64 flags, int force) |
3405 | { | 3020 | { |
3406 | struct btrfs_space_info *space_info; | 3021 | struct btrfs_space_info *space_info; |
3407 | struct btrfs_fs_info *fs_info = extent_root->fs_info; | 3022 | struct btrfs_fs_info *fs_info = extent_root->fs_info; |
3408 | u64 thresh; | ||
3409 | int ret = 0; | 3023 | int ret = 0; |
3410 | 3024 | ||
3411 | mutex_lock(&fs_info->chunk_mutex); | 3025 | mutex_lock(&fs_info->chunk_mutex); |
@@ -3428,11 +3042,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, | |||
3428 | goto out; | 3042 | goto out; |
3429 | } | 3043 | } |
3430 | 3044 | ||
3431 | thresh = space_info->total_bytes - space_info->bytes_readonly; | 3045 | if (!force && !should_alloc_chunk(space_info, alloc_bytes)) { |
3432 | thresh = div_factor(thresh, 8); | ||
3433 | if (!force && | ||
3434 | (space_info->bytes_used + space_info->bytes_pinned + | ||
3435 | space_info->bytes_reserved + alloc_bytes) < thresh) { | ||
3436 | spin_unlock(&space_info->lock); | 3046 | spin_unlock(&space_info->lock); |
3437 | goto out; | 3047 | goto out; |
3438 | } | 3048 | } |
@@ -3454,6 +3064,8 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, | |||
3454 | spin_lock(&space_info->lock); | 3064 | spin_lock(&space_info->lock); |
3455 | if (ret) | 3065 | if (ret) |
3456 | space_info->full = 1; | 3066 | space_info->full = 1; |
3067 | else | ||
3068 | ret = 1; | ||
3457 | space_info->force_alloc = 0; | 3069 | space_info->force_alloc = 0; |
3458 | spin_unlock(&space_info->lock); | 3070 | spin_unlock(&space_info->lock); |
3459 | out: | 3071 | out: |
@@ -3461,13 +3073,713 @@ out: | |||
3461 | return ret; | 3073 | return ret; |
3462 | } | 3074 | } |
3463 | 3075 | ||
3076 | static int maybe_allocate_chunk(struct btrfs_trans_handle *trans, | ||
3077 | struct btrfs_root *root, | ||
3078 | struct btrfs_space_info *sinfo, u64 num_bytes) | ||
3079 | { | ||
3080 | int ret; | ||
3081 | int end_trans = 0; | ||
3082 | |||
3083 | if (sinfo->full) | ||
3084 | return 0; | ||
3085 | |||
3086 | spin_lock(&sinfo->lock); | ||
3087 | ret = should_alloc_chunk(sinfo, num_bytes + 2 * 1024 * 1024); | ||
3088 | spin_unlock(&sinfo->lock); | ||
3089 | if (!ret) | ||
3090 | return 0; | ||
3091 | |||
3092 | if (!trans) { | ||
3093 | trans = btrfs_join_transaction(root, 1); | ||
3094 | BUG_ON(IS_ERR(trans)); | ||
3095 | end_trans = 1; | ||
3096 | } | ||
3097 | |||
3098 | ret = do_chunk_alloc(trans, root->fs_info->extent_root, | ||
3099 | num_bytes + 2 * 1024 * 1024, | ||
3100 | get_alloc_profile(root, sinfo->flags), 0); | ||
3101 | |||
3102 | if (end_trans) | ||
3103 | btrfs_end_transaction(trans, root); | ||
3104 | |||
3105 | return ret == 1 ? 1 : 0; | ||
3106 | } | ||
3107 | |||
3108 | /* | ||
3109 | * shrink metadata reservation for delalloc | ||
3110 | */ | ||
3111 | static int shrink_delalloc(struct btrfs_trans_handle *trans, | ||
3112 | struct btrfs_root *root, u64 to_reclaim) | ||
3113 | { | ||
3114 | struct btrfs_block_rsv *block_rsv; | ||
3115 | u64 reserved; | ||
3116 | u64 max_reclaim; | ||
3117 | u64 reclaimed = 0; | ||
3118 | int pause = 1; | ||
3119 | int ret; | ||
3120 | |||
3121 | block_rsv = &root->fs_info->delalloc_block_rsv; | ||
3122 | spin_lock(&block_rsv->lock); | ||
3123 | reserved = block_rsv->reserved; | ||
3124 | spin_unlock(&block_rsv->lock); | ||
3125 | |||
3126 | if (reserved == 0) | ||
3127 | return 0; | ||
3128 | |||
3129 | max_reclaim = min(reserved, to_reclaim); | ||
3130 | |||
3131 | while (1) { | ||
3132 | ret = btrfs_start_one_delalloc_inode(root, trans ? 1 : 0); | ||
3133 | if (!ret) { | ||
3134 | __set_current_state(TASK_INTERRUPTIBLE); | ||
3135 | schedule_timeout(pause); | ||
3136 | pause <<= 1; | ||
3137 | if (pause > HZ / 10) | ||
3138 | pause = HZ / 10; | ||
3139 | } else { | ||
3140 | pause = 1; | ||
3141 | } | ||
3142 | |||
3143 | spin_lock(&block_rsv->lock); | ||
3144 | if (reserved > block_rsv->reserved) | ||
3145 | reclaimed = reserved - block_rsv->reserved; | ||
3146 | reserved = block_rsv->reserved; | ||
3147 | spin_unlock(&block_rsv->lock); | ||
3148 | |||
3149 | if (reserved == 0 || reclaimed >= max_reclaim) | ||
3150 | break; | ||
3151 | |||
3152 | if (trans && trans->transaction->blocked) | ||
3153 | return -EAGAIN; | ||
3154 | } | ||
3155 | return reclaimed >= to_reclaim; | ||
3156 | } | ||
3157 | |||
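Between flush attempts shrink_delalloc() sleeps with an exponential backoff capped at HZ/10 jiffies, i.e. 100 ms. The backoff in isolation, as a minimal sketch:

	static void backoff_sleep(int *pause)
	{
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(*pause);
		*pause <<= 1;			/* 1, 2, 4, ... jiffies */
		if (*pause > HZ / 10)
			*pause = HZ / 10;	/* cap at 100 ms */
	}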
3158 | static int should_retry_reserve(struct btrfs_trans_handle *trans, | ||
3159 | struct btrfs_root *root, | ||
3160 | struct btrfs_block_rsv *block_rsv, | ||
3161 | u64 num_bytes, int *retries) | ||
3162 | { | ||
3163 | struct btrfs_space_info *space_info = block_rsv->space_info; | ||
3164 | int ret; | ||
3165 | |||
3166 | if ((*retries) > 2) | ||
3167 | return -ENOSPC; | ||
3168 | |||
3169 | ret = maybe_allocate_chunk(trans, root, space_info, num_bytes); | ||
3170 | if (ret) | ||
3171 | return 1; | ||
3172 | |||
3173 | if (trans && trans->transaction->in_commit) | ||
3174 | return -ENOSPC; | ||
3175 | |||
3176 | ret = shrink_delalloc(trans, root, num_bytes); | ||
3177 | if (ret) | ||
3178 | return ret; | ||
3179 | |||
3180 | spin_lock(&space_info->lock); | ||
3181 | if (space_info->bytes_pinned < num_bytes) | ||
3182 | ret = 1; | ||
3183 | spin_unlock(&space_info->lock); | ||
3184 | if (ret) | ||
3185 | return -ENOSPC; | ||
3186 | |||
3187 | (*retries)++; | ||
3188 | |||
3189 | if (trans) | ||
3190 | return -EAGAIN; | ||
3191 | |||
3192 | trans = btrfs_join_transaction(root, 1); | ||
3193 | BUG_ON(IS_ERR(trans)); | ||
3194 | ret = btrfs_commit_transaction(trans, root); | ||
3195 | BUG_ON(ret); | ||
3196 | |||
3197 | return 1; | ||
3198 | } | ||
3199 | |||
3200 | static int reserve_metadata_bytes(struct btrfs_block_rsv *block_rsv, | ||
3201 | u64 num_bytes) | ||
3202 | { | ||
3203 | struct btrfs_space_info *space_info = block_rsv->space_info; | ||
3204 | u64 unused; | ||
3205 | int ret = -ENOSPC; | ||
3206 | |||
3207 | spin_lock(&space_info->lock); | ||
3208 | unused = space_info->bytes_used + space_info->bytes_reserved + | ||
3209 | space_info->bytes_pinned + space_info->bytes_readonly; | ||
3210 | |||
3211 | if (unused < space_info->total_bytes) | ||
3212 | unused = space_info->total_bytes - unused; | ||
3213 | else | ||
3214 | unused = 0; | ||
3215 | |||
3216 | if (unused >= num_bytes) { | ||
3217 | if (block_rsv->priority >= 10) { | ||
3218 | space_info->bytes_reserved += num_bytes; | ||
3219 | ret = 0; | ||
3220 | } else { | ||
3221 | if ((unused + block_rsv->reserved) * | ||
3222 | block_rsv->priority >= | ||
3223 | (num_bytes + block_rsv->reserved) * 10) { | ||
3224 | space_info->bytes_reserved += num_bytes; | ||
3225 | ret = 0; | ||
3226 | } | ||
3227 | } | ||
3228 | } | ||
3229 | spin_unlock(&space_info->lock); | ||
3230 | |||
3231 | return ret; | ||
3232 | } | ||
3233 | |||
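reserve_metadata_bytes() grants a high-priority reservation whenever the bytes fit, but a low-priority one only when free space is at least 10/priority times the post-reservation size; with the default priority of 6 (set in btrfs_init_block_rsv() below) that is about 1.67x headroom. The gate restated as a sketch:

	static int rsv_would_grant(u64 unused, u64 reserved,
				   u64 num_bytes, int priority)
	{
		if (unused < num_bytes)
			return 0;
		if (priority >= 10)
			return 1;	/* high priority: any fit wins */
		/* priority 6: need unused >= ~1.67 * num_bytes */
		return (unused + reserved) * priority >=
		       (num_bytes + reserved) * 10;
	}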
3234 | static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans, | ||
3235 | struct btrfs_root *root) | ||
3236 | { | ||
3237 | struct btrfs_block_rsv *block_rsv; | ||
3238 | if (root->ref_cows) | ||
3239 | block_rsv = trans->block_rsv; | ||
3240 | else | ||
3241 | block_rsv = root->block_rsv; | ||
3242 | |||
3243 | if (!block_rsv) | ||
3244 | block_rsv = &root->fs_info->empty_block_rsv; | ||
3245 | |||
3246 | return block_rsv; | ||
3247 | } | ||
3248 | |||
3249 | static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, | ||
3250 | u64 num_bytes) | ||
3251 | { | ||
3252 | int ret = -ENOSPC; | ||
3253 | spin_lock(&block_rsv->lock); | ||
3254 | if (block_rsv->reserved >= num_bytes) { | ||
3255 | block_rsv->reserved -= num_bytes; | ||
3256 | if (block_rsv->reserved < block_rsv->size) | ||
3257 | block_rsv->full = 0; | ||
3258 | ret = 0; | ||
3259 | } | ||
3260 | spin_unlock(&block_rsv->lock); | ||
3261 | return ret; | ||
3262 | } | ||
3263 | |||
3264 | static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv, | ||
3265 | u64 num_bytes, int update_size) | ||
3266 | { | ||
3267 | spin_lock(&block_rsv->lock); | ||
3268 | block_rsv->reserved += num_bytes; | ||
3269 | if (update_size) | ||
3270 | block_rsv->size += num_bytes; | ||
3271 | else if (block_rsv->reserved >= block_rsv->size) | ||
3272 | block_rsv->full = 1; | ||
3273 | spin_unlock(&block_rsv->lock); | ||
3274 | } | ||
3275 | |||
3276 | void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv, | ||
3277 | struct btrfs_block_rsv *dest, u64 num_bytes) | ||
3278 | { | ||
3279 | struct btrfs_space_info *space_info = block_rsv->space_info; | ||
3280 | |||
3281 | spin_lock(&block_rsv->lock); | ||
3282 | if (num_bytes == (u64)-1) | ||
3283 | num_bytes = block_rsv->size; | ||
3284 | block_rsv->size -= num_bytes; | ||
3285 | if (block_rsv->reserved >= block_rsv->size) { | ||
3286 | num_bytes = block_rsv->reserved - block_rsv->size; | ||
3287 | block_rsv->reserved = block_rsv->size; | ||
3288 | block_rsv->full = 1; | ||
3289 | } else { | ||
3290 | num_bytes = 0; | ||
3291 | } | ||
3292 | spin_unlock(&block_rsv->lock); | ||
3293 | |||
3294 | if (num_bytes > 0) { | ||
3295 | if (dest) { | ||
3296 | block_rsv_add_bytes(dest, num_bytes, 0); | ||
3297 | } else { | ||
3298 | spin_lock(&space_info->lock); | ||
3299 | space_info->bytes_reserved -= num_bytes; | ||
3300 | spin_unlock(&space_info->lock); | ||
3301 | } | ||
3302 | } | ||
3303 | } | ||
3304 | |||
3305 | static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src, | ||
3306 | struct btrfs_block_rsv *dst, u64 num_bytes) | ||
3307 | { | ||
3308 | int ret; | ||
3309 | |||
3310 | ret = block_rsv_use_bytes(src, num_bytes); | ||
3311 | if (ret) | ||
3312 | return ret; | ||
3313 | |||
3314 | block_rsv_add_bytes(dst, num_bytes, 1); | ||
3315 | return 0; | ||
3316 | } | ||
3317 | |||
3318 | void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv) | ||
3319 | { | ||
3320 | memset(rsv, 0, sizeof(*rsv)); | ||
3321 | spin_lock_init(&rsv->lock); | ||
3322 | atomic_set(&rsv->usage, 1); | ||
3323 | rsv->priority = 6; | ||
3324 | INIT_LIST_HEAD(&rsv->list); | ||
3325 | } | ||
3326 | |||
3327 | struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root) | ||
3328 | { | ||
3329 | struct btrfs_block_rsv *block_rsv; | ||
3330 | struct btrfs_fs_info *fs_info = root->fs_info; | ||
3331 | u64 alloc_target; | ||
3332 | |||
3333 | block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS); | ||
3334 | if (!block_rsv) | ||
3335 | return NULL; | ||
3336 | |||
3337 | btrfs_init_block_rsv(block_rsv); | ||
3338 | |||
3339 | alloc_target = btrfs_get_alloc_profile(root, 0); | ||
3340 | block_rsv->space_info = __find_space_info(fs_info, | ||
3341 | BTRFS_BLOCK_GROUP_METADATA); | ||
3342 | |||
3343 | return block_rsv; | ||
3344 | } | ||
3345 | |||
3346 | void btrfs_free_block_rsv(struct btrfs_root *root, | ||
3347 | struct btrfs_block_rsv *rsv) | ||
3348 | { | ||
3349 | if (rsv && atomic_dec_and_test(&rsv->usage)) { | ||
3350 | btrfs_block_rsv_release(root, rsv, (u64)-1); | ||
3351 | if (!rsv->durable) | ||
3352 | kfree(rsv); | ||
3353 | } | ||
3354 | } | ||
3355 | |||
3356 | /* | ||
3357 | * make the block_rsv struct be able to capture freed space. | ||
3358 | * the captured space will re-add to the the block_rsv struct | ||
3359 | * after transaction commit | ||
3360 | */ | ||
3361 | void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info, | ||
3362 | struct btrfs_block_rsv *block_rsv) | ||
3363 | { | ||
3364 | block_rsv->durable = 1; | ||
3365 | mutex_lock(&fs_info->durable_block_rsv_mutex); | ||
3366 | list_add_tail(&block_rsv->list, &fs_info->durable_block_rsv_list); | ||
3367 | mutex_unlock(&fs_info->durable_block_rsv_mutex); | ||
3368 | } | ||
3369 | |||
3370 | int btrfs_block_rsv_add(struct btrfs_trans_handle *trans, | ||
3371 | struct btrfs_root *root, | ||
3372 | struct btrfs_block_rsv *block_rsv, | ||
3373 | u64 num_bytes, int *retries) | ||
3374 | { | ||
3375 | int ret; | ||
3376 | |||
3377 | if (num_bytes == 0) | ||
3378 | return 0; | ||
3379 | again: | ||
3380 | ret = reserve_metadata_bytes(block_rsv, num_bytes); | ||
3381 | if (!ret) { | ||
3382 | block_rsv_add_bytes(block_rsv, num_bytes, 1); | ||
3383 | return 0; | ||
3384 | } | ||
3385 | |||
3386 | ret = should_retry_reserve(trans, root, block_rsv, num_bytes, retries); | ||
3387 | if (ret > 0) | ||
3388 | goto again; | ||
3389 | |||
3390 | return ret; | ||
3391 | } | ||
3392 | |||
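Taken together these helpers give a reservation lifecycle: allocate an rsv, add bytes to it (tracking retries), consume the space, and free it. A hedged usage sketch:

	static int reserve_for_op(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root, u64 bytes)
	{
		struct btrfs_block_rsv *rsv = btrfs_alloc_block_rsv(root);
		int retries = 0;
		int ret;

		if (!rsv)
			return -ENOMEM;

		ret = btrfs_block_rsv_add(trans, root, rsv, bytes, &retries);
		if (!ret) {
			/* ... consume the reservation ... */
		}
		btrfs_free_block_rsv(root, rsv);	/* drops the last ref */
		return ret;
	}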
3393 | int btrfs_block_rsv_check(struct btrfs_trans_handle *trans, | ||
3394 | struct btrfs_root *root, | ||
3395 | struct btrfs_block_rsv *block_rsv, | ||
3396 | u64 min_reserved, int min_factor) | ||
3397 | { | ||
3398 | u64 num_bytes = 0; | ||
3399 | int commit_trans = 0; | ||
3400 | int ret = -ENOSPC; | ||
3401 | |||
3402 | if (!block_rsv) | ||
3403 | return 0; | ||
3404 | |||
3405 | spin_lock(&block_rsv->lock); | ||
3406 | if (min_factor > 0) | ||
3407 | num_bytes = div_factor(block_rsv->size, min_factor); | ||
3408 | if (min_reserved > num_bytes) | ||
3409 | num_bytes = min_reserved; | ||
3410 | |||
3411 | if (block_rsv->reserved >= num_bytes) { | ||
3412 | ret = 0; | ||
3413 | } else { | ||
3414 | num_bytes -= block_rsv->reserved; | ||
3415 | if (block_rsv->durable && | ||
3416 | block_rsv->freed[0] + block_rsv->freed[1] >= num_bytes) | ||
3417 | commit_trans = 1; | ||
3418 | } | ||
3419 | spin_unlock(&block_rsv->lock); | ||
3420 | if (!ret) | ||
3421 | return 0; | ||
3422 | |||
3423 | if (block_rsv->refill_used) { | ||
3424 | ret = reserve_metadata_bytes(block_rsv, num_bytes); | ||
3425 | if (!ret) { | ||
3426 | block_rsv_add_bytes(block_rsv, num_bytes, 0); | ||
3427 | return 0; | ||
3428 | } | ||
3429 | } | ||
3430 | |||
3431 | if (commit_trans) { | ||
3432 | if (trans) | ||
3433 | return -EAGAIN; | ||
3434 | |||
3435 | trans = btrfs_join_transaction(root, 1); | ||
3436 | BUG_ON(IS_ERR(trans)); | ||
3437 | ret = btrfs_commit_transaction(trans, root); | ||
3438 | return 0; | ||
3439 | } | ||
3440 | |||
3441 | WARN_ON(1); | ||
3442 | printk(KERN_INFO"block_rsv size %llu reserved %llu freed %llu %llu\n", | ||
3443 | block_rsv->size, block_rsv->reserved, | ||
3444 | block_rsv->freed[0], block_rsv->freed[1]); | ||
3445 | |||
3446 | return -ENOSPC; | ||
3447 | } | ||
3448 | |||
3449 | int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, | ||
3450 | struct btrfs_block_rsv *dst_rsv, | ||
3451 | u64 num_bytes) | ||
3452 | { | ||
3453 | return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); | ||
3454 | } | ||
3455 | |||
3456 | void btrfs_block_rsv_release(struct btrfs_root *root, | ||
3457 | struct btrfs_block_rsv *block_rsv, | ||
3458 | u64 num_bytes) | ||
3459 | { | ||
3460 | struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; | ||
3461 | if (global_rsv->full || global_rsv == block_rsv || | ||
3462 | block_rsv->space_info != global_rsv->space_info) | ||
3463 | global_rsv = NULL; | ||
3464 | block_rsv_release_bytes(block_rsv, global_rsv, num_bytes); | ||
3465 | } | ||
3466 | |||
3467 | /* | ||
3468 | * helper to calculate the size of the global block reservation. | ||
3469 | * the desired value is the sum of the space used by the extent tree, | ||
3470 | * the checksum tree and the root tree | ||
3471 | */ | ||
3472 | static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info) | ||
3473 | { | ||
3474 | struct btrfs_space_info *sinfo; | ||
3475 | u64 num_bytes; | ||
3476 | u64 meta_used; | ||
3477 | u64 data_used; | ||
3478 | int csum_size = btrfs_super_csum_size(&fs_info->super_copy); | ||
3479 | #if 0 | ||
3480 | /* | ||
3481 | * per tree used space accounting can be inaccurate, so we | ||
3482 | * can't rely on it. | ||
3483 | */ | ||
3484 | spin_lock(&fs_info->extent_root->accounting_lock); | ||
3485 | num_bytes = btrfs_root_used(&fs_info->extent_root->root_item); | ||
3486 | spin_unlock(&fs_info->extent_root->accounting_lock); | ||
3487 | |||
3488 | spin_lock(&fs_info->csum_root->accounting_lock); | ||
3489 | num_bytes += btrfs_root_used(&fs_info->csum_root->root_item); | ||
3490 | spin_unlock(&fs_info->csum_root->accounting_lock); | ||
3491 | |||
3492 | spin_lock(&fs_info->tree_root->accounting_lock); | ||
3493 | num_bytes += btrfs_root_used(&fs_info->tree_root->root_item); | ||
3494 | spin_unlock(&fs_info->tree_root->accounting_lock); | ||
3495 | #endif | ||
3496 | sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA); | ||
3497 | spin_lock(&sinfo->lock); | ||
3498 | data_used = sinfo->bytes_used; | ||
3499 | spin_unlock(&sinfo->lock); | ||
3500 | |||
3501 | sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); | ||
3502 | spin_lock(&sinfo->lock); | ||
3503 | meta_used = sinfo->bytes_used; | ||
3504 | spin_unlock(&sinfo->lock); | ||
3505 | |||
3506 | num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) * | ||
3507 | csum_size * 2; | ||
3508 | num_bytes += div64_u64(data_used + meta_used, 50); | ||
3509 | |||
3510 | if (num_bytes * 3 > meta_used) | ||
3511 | num_bytes = div64_u64(meta_used, 3); | ||
3512 | |||
3513 | return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10); | ||
3514 | } | ||
3515 | |||
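Plugging made-up usage figures into the heuristic above — 4K blocks and the 4-byte crc32c csum size — shows how the cap and alignment behave (ALIGN is reimplemented locally):

    #include <stdio.h>
    #include <stdint.h>

    /* round x up to a multiple of a */
    #define ALIGN_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

    int main(void)
    {
            uint64_t data_used = 100ULL << 30; /* 100GB of data, made up */
            uint64_t meta_used = 2ULL << 30;   /* 2GB of metadata, made up */
            int blocksize_bits = 12;           /* 4K blocks */
            int csum_size = 4;                 /* crc32c */
            uint64_t leafsize = 4096;

            /* room for two copies of every data block's checksum */
            uint64_t num_bytes = (data_used >> blocksize_bits) * csum_size * 2;
            /* plus 2% of everything in use */
            num_bytes += (data_used + meta_used) / 50;

            /* but never more than a third of the metadata in use */
            if (num_bytes * 3 > meta_used)
                    num_bytes = meta_used / 3;

            printf("global rsv: %llu bytes\n",
                   (unsigned long long)ALIGN_UP(num_bytes, leafsize << 10));
            return 0;
    }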
3516 | static void update_global_block_rsv(struct btrfs_fs_info *fs_info) | ||
3517 | { | ||
3518 | struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv; | ||
3519 | struct btrfs_space_info *sinfo = block_rsv->space_info; | ||
3520 | u64 num_bytes; | ||
3521 | |||
3522 | num_bytes = calc_global_metadata_size(fs_info); | ||
3523 | |||
3524 | spin_lock(&block_rsv->lock); | ||
3525 | spin_lock(&sinfo->lock); | ||
3526 | |||
3527 | block_rsv->size = num_bytes; | ||
3528 | |||
3529 | num_bytes = sinfo->bytes_used + sinfo->bytes_pinned + | ||
3530 | sinfo->bytes_reserved + sinfo->bytes_readonly; | ||
3531 | |||
3532 | if (sinfo->total_bytes > num_bytes) { | ||
3533 | num_bytes = sinfo->total_bytes - num_bytes; | ||
3534 | block_rsv->reserved += num_bytes; | ||
3535 | sinfo->bytes_reserved += num_bytes; | ||
3536 | } | ||
3537 | |||
3538 | if (block_rsv->reserved >= block_rsv->size) { | ||
3539 | num_bytes = block_rsv->reserved - block_rsv->size; | ||
3540 | sinfo->bytes_reserved -= num_bytes; | ||
3541 | block_rsv->reserved = block_rsv->size; | ||
3542 | block_rsv->full = 1; | ||
3543 | } | ||
3544 | #if 0 | ||
3545 | printk(KERN_INFO "global block rsv size %llu reserved %llu\n", | ||
3546 | block_rsv->size, block_rsv->reserved); | ||
3547 | #endif | ||
3548 | spin_unlock(&sinfo->lock); | ||
3549 | spin_unlock(&block_rsv->lock); | ||
3550 | } | ||
3551 | |||
3552 | static void init_global_block_rsv(struct btrfs_fs_info *fs_info) | ||
3553 | { | ||
3554 | struct btrfs_space_info *space_info; | ||
3555 | |||
3556 | space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); | ||
3557 | fs_info->chunk_block_rsv.space_info = space_info; | ||
3558 | fs_info->chunk_block_rsv.priority = 10; | ||
3559 | |||
3560 | space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); | ||
3561 | fs_info->global_block_rsv.space_info = space_info; | ||
3562 | fs_info->global_block_rsv.priority = 10; | ||
3563 | fs_info->global_block_rsv.refill_used = 1; | ||
3564 | fs_info->delalloc_block_rsv.space_info = space_info; | ||
3565 | fs_info->trans_block_rsv.space_info = space_info; | ||
3566 | fs_info->empty_block_rsv.space_info = space_info; | ||
3567 | fs_info->empty_block_rsv.priority = 10; | ||
3568 | |||
3569 | fs_info->extent_root->block_rsv = &fs_info->global_block_rsv; | ||
3570 | fs_info->csum_root->block_rsv = &fs_info->global_block_rsv; | ||
3571 | fs_info->dev_root->block_rsv = &fs_info->global_block_rsv; | ||
3572 | fs_info->tree_root->block_rsv = &fs_info->global_block_rsv; | ||
3573 | fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv; | ||
3574 | |||
3575 | btrfs_add_durable_block_rsv(fs_info, &fs_info->global_block_rsv); | ||
3576 | |||
3577 | btrfs_add_durable_block_rsv(fs_info, &fs_info->delalloc_block_rsv); | ||
3578 | |||
3579 | update_global_block_rsv(fs_info); | ||
3580 | } | ||
3581 | |||
3582 | static void release_global_block_rsv(struct btrfs_fs_info *fs_info) | ||
3583 | { | ||
3584 | block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1); | ||
3585 | WARN_ON(fs_info->delalloc_block_rsv.size > 0); | ||
3586 | WARN_ON(fs_info->delalloc_block_rsv.reserved > 0); | ||
3587 | WARN_ON(fs_info->trans_block_rsv.size > 0); | ||
3588 | WARN_ON(fs_info->trans_block_rsv.reserved > 0); | ||
3589 | WARN_ON(fs_info->chunk_block_rsv.size > 0); | ||
3590 | WARN_ON(fs_info->chunk_block_rsv.reserved > 0); | ||
3591 | } | ||
3592 | |||
3593 | static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items) | ||
3594 | { | ||
3595 | return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) * | ||
3596 | 3 * num_items; | ||
3597 | } | ||
3598 | |||
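BTRFS_MAX_LEVEL is 8, so with the then-common 4K leaves and nodes each reserved item is budgeted one full tree path, three times over (a worst-case cushion for CoW across several trees). Checking the arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    #define BTRFS_MAX_LEVEL 8

    int main(void)
    {
            uint64_t leafsize = 4096, nodesize = 4096; /* typical values */
            int num_items = 1;

            uint64_t per_item = (leafsize + nodesize * (BTRFS_MAX_LEVEL - 1)) * 3;
            printf("%llu bytes for %d item(s)\n",
                   (unsigned long long)(per_item * num_items), num_items);
            /* prints 98304, i.e. 96K per item */
            return 0;
    }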
3599 | int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans, | ||
3600 | struct btrfs_root *root, | ||
3601 | int num_items, int *retries) | ||
3602 | { | ||
3603 | u64 num_bytes; | ||
3604 | int ret; | ||
3605 | |||
3606 | if (num_items == 0 || root->fs_info->chunk_root == root) | ||
3607 | return 0; | ||
3608 | |||
3609 | num_bytes = calc_trans_metadata_size(root, num_items); | ||
3610 | ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv, | ||
3611 | num_bytes, retries); | ||
3612 | if (!ret) { | ||
3613 | trans->bytes_reserved += num_bytes; | ||
3614 | trans->block_rsv = &root->fs_info->trans_block_rsv; | ||
3615 | } | ||
3616 | return ret; | ||
3617 | } | ||
3618 | |||
3619 | void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans, | ||
3620 | struct btrfs_root *root) | ||
3621 | { | ||
3622 | if (!trans->bytes_reserved) | ||
3623 | return; | ||
3624 | |||
3625 | BUG_ON(trans->block_rsv != &root->fs_info->trans_block_rsv); | ||
3626 | btrfs_block_rsv_release(root, trans->block_rsv, | ||
3627 | trans->bytes_reserved); | ||
3628 | trans->bytes_reserved = 0; | ||
3629 | } | ||
3630 | |||
3631 | int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, | ||
3632 | struct inode *inode) | ||
3633 | { | ||
3634 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
3635 | struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root); | ||
3636 | struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv; | ||
3637 | |||
3638 | /* | ||
3639 | * one for deleting orphan item, one for updating inode and | ||
3640 | * two for calling btrfs_truncate_inode_items. | ||
3641 | * | ||
3642 | * btrfs_truncate_inode_items is a delete operation, it frees | ||
3643 | * more space than it uses in most cases. So two units of | ||
3644 | * metadata space should be enough for calling it many times. | ||
3645 | * If all of the metadata space is used, we can commit | ||
3646 | * the transaction and use the space it freed. | ||
3647 | */ | ||
3648 | u64 num_bytes = calc_trans_metadata_size(root, 4); | ||
3649 | return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); | ||
3650 | } | ||
3651 | |||
3652 | void btrfs_orphan_release_metadata(struct inode *inode) | ||
3653 | { | ||
3654 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
3655 | u64 num_bytes = calc_trans_metadata_size(root, 4); | ||
3656 | btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes); | ||
3657 | } | ||
3658 | |||
3659 | int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans, | ||
3660 | struct btrfs_pending_snapshot *pending) | ||
3661 | { | ||
3662 | struct btrfs_root *root = pending->root; | ||
3663 | struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root); | ||
3664 | struct btrfs_block_rsv *dst_rsv = &pending->block_rsv; | ||
3665 | /* | ||
3666 | * two for root back/forward refs, two for directory entries | ||
3667 | * and one for root of the snapshot. | ||
3668 | */ | ||
3669 | u64 num_bytes = calc_trans_metadata_size(root, 5); | ||
3670 | dst_rsv->space_info = src_rsv->space_info; | ||
3671 | return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); | ||
3672 | } | ||
3673 | |||
3674 | static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes) | ||
3675 | { | ||
3676 | return num_bytes >> 3; | ||
3677 | } | ||
3678 | |||
3679 | int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) | ||
3680 | { | ||
3681 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
3682 | struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv; | ||
3683 | u64 to_reserve; | ||
3684 | int nr_extents; | ||
3685 | int retries = 0; | ||
3686 | int ret; | ||
3687 | |||
3688 | if (btrfs_transaction_in_commit(root->fs_info)) | ||
3689 | schedule_timeout(1); | ||
3690 | |||
3691 | num_bytes = ALIGN(num_bytes, root->sectorsize); | ||
3692 | again: | ||
3693 | spin_lock(&BTRFS_I(inode)->accounting_lock); | ||
3694 | nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1; | ||
3695 | if (nr_extents > BTRFS_I(inode)->reserved_extents) { | ||
3696 | nr_extents -= BTRFS_I(inode)->reserved_extents; | ||
3697 | to_reserve = calc_trans_metadata_size(root, nr_extents); | ||
3698 | } else { | ||
3699 | nr_extents = 0; | ||
3700 | to_reserve = 0; | ||
3701 | } | ||
3702 | |||
3703 | to_reserve += calc_csum_metadata_size(inode, num_bytes); | ||
3704 | ret = reserve_metadata_bytes(block_rsv, to_reserve); | ||
3705 | if (ret) { | ||
3706 | spin_unlock(&BTRFS_I(inode)->accounting_lock); | ||
3707 | ret = should_retry_reserve(NULL, root, block_rsv, to_reserve, | ||
3708 | &retries); | ||
3709 | if (ret > 0) | ||
3710 | goto again; | ||
3711 | return ret; | ||
3712 | } | ||
3713 | |||
3714 | BTRFS_I(inode)->reserved_extents += nr_extents; | ||
3715 | atomic_inc(&BTRFS_I(inode)->outstanding_extents); | ||
3716 | spin_unlock(&BTRFS_I(inode)->accounting_lock); | ||
3717 | |||
3718 | block_rsv_add_bytes(block_rsv, to_reserve, 1); | ||
3719 | |||
3720 | if (block_rsv->size > 512 * 1024 * 1024) | ||
3721 | shrink_delalloc(NULL, root, to_reserve); | ||
3722 | |||
3723 | return 0; | ||
3724 | } | ||
3725 | |||
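Ignoring the locking, the retry loop and the atomic counters, the reservation math above reduces to: pay for the extents not yet covered, plus one eighth of the data bytes for checksums (calc_csum_metadata_size). A simplified model reusing the per-item figure from calc_trans_metadata_size:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t trans_meta_size(int num_items)
    {
            /* 4K leaves/nodes, BTRFS_MAX_LEVEL == 8 */
            return (uint64_t)(4096 + 4096 * 7) * 3 * num_items;
    }

    int main(void)
    {
            int outstanding = 5;          /* extents currently outstanding */
            int reserved_extents = 3;     /* extents already paid for */
            uint64_t num_bytes = 1 << 20; /* a 1MB, sector-aligned write */

            int nr = outstanding + 1;     /* this write may add one more */
            uint64_t to_reserve = 0;
            if (nr > reserved_extents)
                    to_reserve = trans_meta_size(nr - reserved_extents);

            to_reserve += num_bytes >> 3; /* csum heuristic: 1/8 of the data */

            printf("reserve %llu bytes\n", (unsigned long long)to_reserve);
            return 0;
    }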
3726 | void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) | ||
3727 | { | ||
3728 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
3729 | u64 to_free; | ||
3730 | int nr_extents; | ||
3731 | |||
3732 | num_bytes = ALIGN(num_bytes, root->sectorsize); | ||
3733 | atomic_dec(&BTRFS_I(inode)->outstanding_extents); | ||
3734 | |||
3735 | spin_lock(&BTRFS_I(inode)->accounting_lock); | ||
3736 | nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents); | ||
3737 | if (nr_extents < BTRFS_I(inode)->reserved_extents) { | ||
3738 | nr_extents = BTRFS_I(inode)->reserved_extents - nr_extents; | ||
3739 | BTRFS_I(inode)->reserved_extents -= nr_extents; | ||
3740 | } else { | ||
3741 | nr_extents = 0; | ||
3742 | } | ||
3743 | spin_unlock(&BTRFS_I(inode)->accounting_lock); | ||
3744 | |||
3745 | to_free = calc_csum_metadata_size(inode, num_bytes); | ||
3746 | if (nr_extents > 0) | ||
3747 | to_free += calc_trans_metadata_size(root, nr_extents); | ||
3748 | |||
3749 | btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv, | ||
3750 | to_free); | ||
3751 | } | ||
3752 | |||
3753 | int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes) | ||
3754 | { | ||
3755 | int ret; | ||
3756 | |||
3757 | ret = btrfs_check_data_free_space(inode, num_bytes); | ||
3758 | if (ret) | ||
3759 | return ret; | ||
3760 | |||
3761 | ret = btrfs_delalloc_reserve_metadata(inode, num_bytes); | ||
3762 | if (ret) { | ||
3763 | btrfs_free_reserved_data_space(inode, num_bytes); | ||
3764 | return ret; | ||
3765 | } | ||
3766 | |||
3767 | return 0; | ||
3768 | } | ||
3769 | |||
3770 | void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes) | ||
3771 | { | ||
3772 | btrfs_delalloc_release_metadata(inode, num_bytes); | ||
3773 | btrfs_free_reserved_data_space(inode, num_bytes); | ||
3774 | } | ||
3775 | |||
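The ordering matters here: data space is checked first, and if the metadata reservation then fails the data reservation must be unwound, which is exactly what the error path above does. A stubbed illustration — the three helpers below are stand-ins, not the btrfs functions:

    #include <stdio.h>
    #include <errno.h>

    /* stand-ins for the btrfs helpers, for illustration only */
    static int check_data_free_space(unsigned long long n) { (void)n; return 0; }
    static int reserve_metadata(unsigned long long n) { (void)n; return -ENOSPC; }
    static void free_reserved_data_space(unsigned long long n)
    {
            printf("rolled back %llu data bytes\n", n);
    }

    int main(void)
    {
            unsigned long long n = 1 << 20;
            int ret = check_data_free_space(n);
            if (ret)
                    return 1;
            ret = reserve_metadata(n);
            if (ret) {
                    /* metadata failed: the data reservation must not leak */
                    free_reserved_data_space(n);
                    return 1;
            }
            return 0;
    }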
3464 | static int update_block_group(struct btrfs_trans_handle *trans, | 3776 | static int update_block_group(struct btrfs_trans_handle *trans, |
3465 | struct btrfs_root *root, | 3777 | struct btrfs_root *root, |
3466 | u64 bytenr, u64 num_bytes, int alloc, | 3778 | u64 bytenr, u64 num_bytes, int alloc) |
3467 | int mark_free) | ||
3468 | { | 3779 | { |
3469 | struct btrfs_block_group_cache *cache; | 3780 | struct btrfs_block_group_cache *cache; |
3470 | struct btrfs_fs_info *info = root->fs_info; | 3781 | struct btrfs_fs_info *info = root->fs_info; |
3782 | int factor; | ||
3471 | u64 total = num_bytes; | 3783 | u64 total = num_bytes; |
3472 | u64 old_val; | 3784 | u64 old_val; |
3473 | u64 byte_in_group; | 3785 | u64 byte_in_group; |
@@ -3486,6 +3798,12 @@ static int update_block_group(struct btrfs_trans_handle *trans, | |||
3486 | cache = btrfs_lookup_block_group(info, bytenr); | 3798 | cache = btrfs_lookup_block_group(info, bytenr); |
3487 | if (!cache) | 3799 | if (!cache) |
3488 | return -1; | 3800 | return -1; |
3801 | if (cache->flags & (BTRFS_BLOCK_GROUP_DUP | | ||
3802 | BTRFS_BLOCK_GROUP_RAID1 | | ||
3803 | BTRFS_BLOCK_GROUP_RAID10)) | ||
3804 | factor = 2; | ||
3805 | else | ||
3806 | factor = 1; | ||
3489 | byte_in_group = bytenr - cache->key.objectid; | 3807 | byte_in_group = bytenr - cache->key.objectid; |
3490 | WARN_ON(byte_in_group > cache->key.offset); | 3808 | WARN_ON(byte_in_group > cache->key.offset); |
3491 | 3809 | ||
@@ -3498,31 +3816,24 @@ static int update_block_group(struct btrfs_trans_handle *trans, | |||
3498 | old_val += num_bytes; | 3816 | old_val += num_bytes; |
3499 | btrfs_set_block_group_used(&cache->item, old_val); | 3817 | btrfs_set_block_group_used(&cache->item, old_val); |
3500 | cache->reserved -= num_bytes; | 3818 | cache->reserved -= num_bytes; |
3501 | cache->space_info->bytes_used += num_bytes; | ||
3502 | cache->space_info->bytes_reserved -= num_bytes; | 3819 | cache->space_info->bytes_reserved -= num_bytes; |
3503 | if (cache->ro) | 3820 | cache->space_info->bytes_used += num_bytes; |
3504 | cache->space_info->bytes_readonly -= num_bytes; | 3821 | cache->space_info->disk_used += num_bytes * factor; |
3505 | spin_unlock(&cache->lock); | 3822 | spin_unlock(&cache->lock); |
3506 | spin_unlock(&cache->space_info->lock); | 3823 | spin_unlock(&cache->space_info->lock); |
3507 | } else { | 3824 | } else { |
3508 | old_val -= num_bytes; | 3825 | old_val -= num_bytes; |
3509 | cache->space_info->bytes_used -= num_bytes; | ||
3510 | if (cache->ro) | ||
3511 | cache->space_info->bytes_readonly += num_bytes; | ||
3512 | btrfs_set_block_group_used(&cache->item, old_val); | 3826 | btrfs_set_block_group_used(&cache->item, old_val); |
3827 | cache->pinned += num_bytes; | ||
3828 | cache->space_info->bytes_pinned += num_bytes; | ||
3829 | cache->space_info->bytes_used -= num_bytes; | ||
3830 | cache->space_info->disk_used -= num_bytes * factor; | ||
3513 | spin_unlock(&cache->lock); | 3831 | spin_unlock(&cache->lock); |
3514 | spin_unlock(&cache->space_info->lock); | 3832 | spin_unlock(&cache->space_info->lock); |
3515 | if (mark_free) { | ||
3516 | int ret; | ||
3517 | 3833 | ||
3518 | ret = btrfs_discard_extent(root, bytenr, | 3834 | set_extent_dirty(info->pinned_extents, |
3519 | num_bytes); | 3835 | bytenr, bytenr + num_bytes - 1, |
3520 | WARN_ON(ret); | 3836 | GFP_NOFS | __GFP_NOFAIL); |
3521 | |||
3522 | ret = btrfs_add_free_space(cache, bytenr, | ||
3523 | num_bytes); | ||
3524 | WARN_ON(ret); | ||
3525 | } | ||
3526 | } | 3837 | } |
3527 | btrfs_put_block_group(cache); | 3838 | btrfs_put_block_group(cache); |
3528 | total -= num_bytes; | 3839 | total -= num_bytes; |
@@ -3546,18 +3857,10 @@ static u64 first_logical_byte(struct btrfs_root *root, u64 search_start) | |||
3546 | return bytenr; | 3857 | return bytenr; |
3547 | } | 3858 | } |
3548 | 3859 | ||
3549 | /* | 3860 | static int pin_down_extent(struct btrfs_root *root, |
3550 | * this function must be called within transaction | 3861 | struct btrfs_block_group_cache *cache, |
3551 | */ | 3862 | u64 bytenr, u64 num_bytes, int reserved) |
3552 | int btrfs_pin_extent(struct btrfs_root *root, | ||
3553 | u64 bytenr, u64 num_bytes, int reserved) | ||
3554 | { | 3863 | { |
3555 | struct btrfs_fs_info *fs_info = root->fs_info; | ||
3556 | struct btrfs_block_group_cache *cache; | ||
3557 | |||
3558 | cache = btrfs_lookup_block_group(fs_info, bytenr); | ||
3559 | BUG_ON(!cache); | ||
3560 | |||
3561 | spin_lock(&cache->space_info->lock); | 3864 | spin_lock(&cache->space_info->lock); |
3562 | spin_lock(&cache->lock); | 3865 | spin_lock(&cache->lock); |
3563 | cache->pinned += num_bytes; | 3866 | cache->pinned += num_bytes; |
@@ -3569,28 +3872,68 @@ int btrfs_pin_extent(struct btrfs_root *root, | |||
3569 | spin_unlock(&cache->lock); | 3872 | spin_unlock(&cache->lock); |
3570 | spin_unlock(&cache->space_info->lock); | 3873 | spin_unlock(&cache->space_info->lock); |
3571 | 3874 | ||
3572 | btrfs_put_block_group(cache); | 3875 | set_extent_dirty(root->fs_info->pinned_extents, bytenr, |
3876 | bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL); | ||
3877 | return 0; | ||
3878 | } | ||
3879 | |||
3880 | /* | ||
3881 | * this function must be called within a transaction | ||
3882 | */ | ||
3883 | int btrfs_pin_extent(struct btrfs_root *root, | ||
3884 | u64 bytenr, u64 num_bytes, int reserved) | ||
3885 | { | ||
3886 | struct btrfs_block_group_cache *cache; | ||
3887 | |||
3888 | cache = btrfs_lookup_block_group(root->fs_info, bytenr); | ||
3889 | BUG_ON(!cache); | ||
3890 | |||
3891 | pin_down_extent(root, cache, bytenr, num_bytes, reserved); | ||
3573 | 3892 | ||
3574 | set_extent_dirty(fs_info->pinned_extents, | 3893 | btrfs_put_block_group(cache); |
3575 | bytenr, bytenr + num_bytes - 1, GFP_NOFS); | ||
3576 | return 0; | 3894 | return 0; |
3577 | } | 3895 | } |
3578 | 3896 | ||
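Reading the hunks together: pin_down_extent updates the accounting under both locks and then marks the range dirty in pinned_extents; when 'reserved' is set, the pinned bytes come out of the existing reservation so they are not counted twice. The middle of the helper falls between the hunks, so that last detail is inferred, not quoted. A toy version of the accounting move:

    #include <stdio.h>
    #include <stdint.h>

    struct counts { uint64_t pinned, reserved; };

    /* toy model; the kernel updates the block group and its
     * space_info under two spinlocks, elided here */
    static void pin(struct counts *c, uint64_t n, int reserved)
    {
            c->pinned += n;
            if (reserved)
                    c->reserved -= n; /* assumed: avoid double counting */
    }

    int main(void)
    {
            struct counts c = { .pinned = 0, .reserved = 4096 };
            pin(&c, 4096, 1);
            printf("pinned=%llu reserved=%llu\n",
                   (unsigned long long)c.pinned,
                   (unsigned long long)c.reserved);
            return 0;
    }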
3579 | static int update_reserved_extents(struct btrfs_block_group_cache *cache, | 3897 | /* |
3580 | u64 num_bytes, int reserve) | 3898 | * update the size of reserved extents. this function may return -EAGAIN |
3899 | * (when the group has gone read-only) if 'reserve' is true or 'sinfo' is false. | ||
3900 | */ | ||
3901 | static int update_reserved_bytes(struct btrfs_block_group_cache *cache, | ||
3902 | u64 num_bytes, int reserve, int sinfo) | ||
3581 | { | 3903 | { |
3582 | spin_lock(&cache->space_info->lock); | 3904 | int ret = 0; |
3583 | spin_lock(&cache->lock); | 3905 | if (sinfo) { |
3584 | if (reserve) { | 3906 | struct btrfs_space_info *space_info = cache->space_info; |
3585 | cache->reserved += num_bytes; | 3907 | spin_lock(&space_info->lock); |
3586 | cache->space_info->bytes_reserved += num_bytes; | 3908 | spin_lock(&cache->lock); |
3909 | if (reserve) { | ||
3910 | if (cache->ro) { | ||
3911 | ret = -EAGAIN; | ||
3912 | } else { | ||
3913 | cache->reserved += num_bytes; | ||
3914 | space_info->bytes_reserved += num_bytes; | ||
3915 | } | ||
3916 | } else { | ||
3917 | if (cache->ro) | ||
3918 | space_info->bytes_readonly += num_bytes; | ||
3919 | cache->reserved -= num_bytes; | ||
3920 | space_info->bytes_reserved -= num_bytes; | ||
3921 | } | ||
3922 | spin_unlock(&cache->lock); | ||
3923 | spin_unlock(&space_info->lock); | ||
3587 | } else { | 3924 | } else { |
3588 | cache->reserved -= num_bytes; | 3925 | spin_lock(&cache->lock); |
3589 | cache->space_info->bytes_reserved -= num_bytes; | 3926 | if (cache->ro) { |
3927 | ret = -EAGAIN; | ||
3928 | } else { | ||
3929 | if (reserve) | ||
3930 | cache->reserved += num_bytes; | ||
3931 | else | ||
3932 | cache->reserved -= num_bytes; | ||
3933 | } | ||
3934 | spin_unlock(&cache->lock); | ||
3590 | } | 3935 | } |
3591 | spin_unlock(&cache->lock); | 3936 | return ret; |
3592 | spin_unlock(&cache->space_info->lock); | ||
3593 | return 0; | ||
3594 | } | 3937 | } |
3595 | 3938 | ||
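Interleaved with the removal of update_reserved_extents, the new helper is hard to follow in one pass; gathering just the added lines gives, verbatim apart from layout:

    static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                     u64 num_bytes, int reserve, int sinfo)
    {
            int ret = 0;
            if (sinfo) {
                    struct btrfs_space_info *space_info = cache->space_info;
                    spin_lock(&space_info->lock);
                    spin_lock(&cache->lock);
                    if (reserve) {
                            if (cache->ro) {
                                    ret = -EAGAIN;
                            } else {
                                    cache->reserved += num_bytes;
                                    space_info->bytes_reserved += num_bytes;
                            }
                    } else {
                            if (cache->ro)
                                    space_info->bytes_readonly += num_bytes;
                            cache->reserved -= num_bytes;
                            space_info->bytes_reserved -= num_bytes;
                    }
                    spin_unlock(&cache->lock);
                    spin_unlock(&space_info->lock);
            } else {
                    spin_lock(&cache->lock);
                    if (cache->ro) {
                            ret = -EAGAIN;
                    } else {
                            if (reserve)
                                    cache->reserved += num_bytes;
                            else
                                    cache->reserved -= num_bytes;
                    }
                    spin_unlock(&cache->lock);
            }
            return ret;
    }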
3596 | int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, | 3939 | int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, |
@@ -3621,6 +3964,8 @@ int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, | |||
3621 | fs_info->pinned_extents = &fs_info->freed_extents[0]; | 3964 | fs_info->pinned_extents = &fs_info->freed_extents[0]; |
3622 | 3965 | ||
3623 | up_write(&fs_info->extent_commit_sem); | 3966 | up_write(&fs_info->extent_commit_sem); |
3967 | |||
3968 | update_global_block_rsv(fs_info); | ||
3624 | return 0; | 3969 | return 0; |
3625 | } | 3970 | } |
3626 | 3971 | ||
@@ -3647,14 +3992,21 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) | |||
3647 | btrfs_add_free_space(cache, start, len); | 3992 | btrfs_add_free_space(cache, start, len); |
3648 | } | 3993 | } |
3649 | 3994 | ||
3995 | start += len; | ||
3996 | |||
3650 | spin_lock(&cache->space_info->lock); | 3997 | spin_lock(&cache->space_info->lock); |
3651 | spin_lock(&cache->lock); | 3998 | spin_lock(&cache->lock); |
3652 | cache->pinned -= len; | 3999 | cache->pinned -= len; |
3653 | cache->space_info->bytes_pinned -= len; | 4000 | cache->space_info->bytes_pinned -= len; |
4001 | if (cache->ro) { | ||
4002 | cache->space_info->bytes_readonly += len; | ||
4003 | } else if (cache->reserved_pinned > 0) { | ||
4004 | len = min(len, cache->reserved_pinned); | ||
4005 | cache->reserved_pinned -= len; | ||
4006 | cache->space_info->bytes_reserved += len; | ||
4007 | } | ||
3654 | spin_unlock(&cache->lock); | 4008 | spin_unlock(&cache->lock); |
3655 | spin_unlock(&cache->space_info->lock); | 4009 | spin_unlock(&cache->space_info->lock); |
3656 | |||
3657 | start += len; | ||
3658 | } | 4010 | } |
3659 | 4011 | ||
3660 | if (cache) | 4012 | if (cache) |
@@ -3667,8 +4019,11 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, | |||
3667 | { | 4019 | { |
3668 | struct btrfs_fs_info *fs_info = root->fs_info; | 4020 | struct btrfs_fs_info *fs_info = root->fs_info; |
3669 | struct extent_io_tree *unpin; | 4021 | struct extent_io_tree *unpin; |
4022 | struct btrfs_block_rsv *block_rsv; | ||
4023 | struct btrfs_block_rsv *next_rsv; | ||
3670 | u64 start; | 4024 | u64 start; |
3671 | u64 end; | 4025 | u64 end; |
4026 | int idx; | ||
3672 | int ret; | 4027 | int ret; |
3673 | 4028 | ||
3674 | if (fs_info->pinned_extents == &fs_info->freed_extents[0]) | 4029 | if (fs_info->pinned_extents == &fs_info->freed_extents[0]) |
@@ -3689,59 +4044,30 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, | |||
3689 | cond_resched(); | 4044 | cond_resched(); |
3690 | } | 4045 | } |
3691 | 4046 | ||
3692 | return ret; | 4047 | mutex_lock(&fs_info->durable_block_rsv_mutex); |
3693 | } | 4048 | list_for_each_entry_safe(block_rsv, next_rsv, |
4049 | &fs_info->durable_block_rsv_list, list) { | ||
3694 | 4050 | ||
3695 | static int pin_down_bytes(struct btrfs_trans_handle *trans, | 4051 | idx = trans->transid & 0x1; |
3696 | struct btrfs_root *root, | 4052 | if (block_rsv->freed[idx] > 0) { |
3697 | struct btrfs_path *path, | 4053 | block_rsv_add_bytes(block_rsv, |
3698 | u64 bytenr, u64 num_bytes, | 4054 | block_rsv->freed[idx], 0); |
3699 | int is_data, int reserved, | 4055 | block_rsv->freed[idx] = 0; |
3700 | struct extent_buffer **must_clean) | 4056 | } |
3701 | { | 4057 | if (atomic_read(&block_rsv->usage) == 0) { |
3702 | int err = 0; | 4058 | btrfs_block_rsv_release(root, block_rsv, (u64)-1); |
3703 | struct extent_buffer *buf; | ||
3704 | |||
3705 | if (is_data) | ||
3706 | goto pinit; | ||
3707 | |||
3708 | /* | ||
3709 | * discard is sloooow, and so triggering discards on | ||
3710 | * individual btree blocks isn't a good plan. Just | ||
3711 | * pin everything in discard mode. | ||
3712 | */ | ||
3713 | if (btrfs_test_opt(root, DISCARD)) | ||
3714 | goto pinit; | ||
3715 | |||
3716 | buf = btrfs_find_tree_block(root, bytenr, num_bytes); | ||
3717 | if (!buf) | ||
3718 | goto pinit; | ||
3719 | 4059 | ||
3720 | /* we can reuse a block if it hasn't been written | 4060 | if (block_rsv->freed[0] == 0 && |
3721 | * and it is from this transaction. We can't | 4061 | block_rsv->freed[1] == 0) { |
3722 | * reuse anything from the tree log root because | 4062 | list_del_init(&block_rsv->list); |
3723 | * it has tiny sub-transactions. | 4063 | kfree(block_rsv); |
3724 | */ | 4064 | } |
3725 | if (btrfs_buffer_uptodate(buf, 0) && | 4065 | } else { |
3726 | btrfs_try_tree_lock(buf)) { | 4066 | btrfs_block_rsv_release(root, block_rsv, 0); |
3727 | u64 header_owner = btrfs_header_owner(buf); | ||
3728 | u64 header_transid = btrfs_header_generation(buf); | ||
3729 | if (header_owner != BTRFS_TREE_LOG_OBJECTID && | ||
3730 | header_transid == trans->transid && | ||
3731 | !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) { | ||
3732 | *must_clean = buf; | ||
3733 | return 1; | ||
3734 | } | 4067 | } |
3735 | btrfs_tree_unlock(buf); | ||
3736 | } | 4068 | } |
3737 | free_extent_buffer(buf); | 4069 | mutex_unlock(&fs_info->durable_block_rsv_mutex); |
3738 | pinit: | ||
3739 | if (path) | ||
3740 | btrfs_set_path_blocking(path); | ||
3741 | /* unlocks the pinned mutex */ | ||
3742 | btrfs_pin_extent(root, bytenr, num_bytes, reserved); | ||
3743 | 4070 | ||
3744 | BUG_ON(err < 0); | ||
3745 | return 0; | 4071 | return 0; |
3746 | } | 4072 | } |
3747 | 4073 | ||
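The freed[] pair on a durable rsv is indexed by transaction id parity: bytes freed while transaction N runs land in freed[N & 1] (see btrfs_free_tree_block below), and btrfs_finish_extent_commit folds that slot back into 'reserved' when N commits, leaving the other slot clear for the next transaction. A toy model, with the clipping against rsv->size omitted:

    #include <stdio.h>
    #include <stdint.h>

    struct rsv { uint64_t reserved, freed[2]; };

    static void free_bytes(struct rsv *r, uint64_t transid, uint64_t n)
    {
            r->freed[transid & 1] += n;   /* not reusable until commit */
    }

    static void commit(struct rsv *r, uint64_t transid)
    {
            int idx = transid & 1;
            r->reserved += r->freed[idx]; /* space is safe to reuse now */
            r->freed[idx] = 0;
    }

    int main(void)
    {
            struct rsv r = {0};
            free_bytes(&r, 7, 16384);
            commit(&r, 7);
            printf("reserved=%llu\n", (unsigned long long)r.reserved);
            return 0;
    }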
@@ -3902,9 +4228,6 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, | |||
3902 | BUG_ON(ret); | 4228 | BUG_ON(ret); |
3903 | } | 4229 | } |
3904 | } else { | 4230 | } else { |
3905 | int mark_free = 0; | ||
3906 | struct extent_buffer *must_clean = NULL; | ||
3907 | |||
3908 | if (found_extent) { | 4231 | if (found_extent) { |
3909 | BUG_ON(is_data && refs_to_drop != | 4232 | BUG_ON(is_data && refs_to_drop != |
3910 | extent_data_ref_count(root, path, iref)); | 4233 | extent_data_ref_count(root, path, iref)); |
@@ -3917,31 +4240,11 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, | |||
3917 | } | 4240 | } |
3918 | } | 4241 | } |
3919 | 4242 | ||
3920 | ret = pin_down_bytes(trans, root, path, bytenr, | ||
3921 | num_bytes, is_data, 0, &must_clean); | ||
3922 | if (ret > 0) | ||
3923 | mark_free = 1; | ||
3924 | BUG_ON(ret < 0); | ||
3925 | /* | ||
3926 | * it is going to be very rare for someone to be waiting | ||
3927 | * on the block we're freeing. del_items might need to | ||
3928 | * schedule, so rather than get fancy, just force it | ||
3929 | * to blocking here | ||
3930 | */ | ||
3931 | if (must_clean) | ||
3932 | btrfs_set_lock_blocking(must_clean); | ||
3933 | |||
3934 | ret = btrfs_del_items(trans, extent_root, path, path->slots[0], | 4243 | ret = btrfs_del_items(trans, extent_root, path, path->slots[0], |
3935 | num_to_del); | 4244 | num_to_del); |
3936 | BUG_ON(ret); | 4245 | BUG_ON(ret); |
3937 | btrfs_release_path(extent_root, path); | 4246 | btrfs_release_path(extent_root, path); |
3938 | 4247 | ||
3939 | if (must_clean) { | ||
3940 | clean_tree_block(NULL, root, must_clean); | ||
3941 | btrfs_tree_unlock(must_clean); | ||
3942 | free_extent_buffer(must_clean); | ||
3943 | } | ||
3944 | |||
3945 | if (is_data) { | 4248 | if (is_data) { |
3946 | ret = btrfs_del_csums(trans, root, bytenr, num_bytes); | 4249 | ret = btrfs_del_csums(trans, root, bytenr, num_bytes); |
3947 | BUG_ON(ret); | 4250 | BUG_ON(ret); |
@@ -3951,8 +4254,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, | |||
3951 | (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT); | 4254 | (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT); |
3952 | } | 4255 | } |
3953 | 4256 | ||
3954 | ret = update_block_group(trans, root, bytenr, num_bytes, 0, | 4257 | ret = update_block_group(trans, root, bytenr, num_bytes, 0); |
3955 | mark_free); | ||
3956 | BUG_ON(ret); | 4258 | BUG_ON(ret); |
3957 | } | 4259 | } |
3958 | btrfs_free_path(path); | 4260 | btrfs_free_path(path); |
@@ -3960,7 +4262,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, | |||
3960 | } | 4262 | } |
3961 | 4263 | ||
3962 | /* | 4264 | /* |
3963 | * when we free an extent, it is possible (and likely) that we free the last | 4265 | * when we free a block, it is possible (and likely) that we free the last |
3964 | * delayed ref for that extent as well. This searches the delayed ref tree for | 4266 | * delayed ref for that extent as well. This searches the delayed ref tree for |
3965 | * a given extent, and if there are no other delayed refs to be processed, it | 4267 | * a given extent, and if there are no other delayed refs to be processed, it |
3966 | * removes it from the tree. | 4268 | * removes it from the tree. |
@@ -3972,7 +4274,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans, | |||
3972 | struct btrfs_delayed_ref_root *delayed_refs; | 4274 | struct btrfs_delayed_ref_root *delayed_refs; |
3973 | struct btrfs_delayed_ref_node *ref; | 4275 | struct btrfs_delayed_ref_node *ref; |
3974 | struct rb_node *node; | 4276 | struct rb_node *node; |
3975 | int ret; | 4277 | int ret = 0; |
3976 | 4278 | ||
3977 | delayed_refs = &trans->transaction->delayed_refs; | 4279 | delayed_refs = &trans->transaction->delayed_refs; |
3978 | spin_lock(&delayed_refs->lock); | 4280 | spin_lock(&delayed_refs->lock); |
@@ -4024,17 +4326,99 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans, | |||
4024 | list_del_init(&head->cluster); | 4326 | list_del_init(&head->cluster); |
4025 | spin_unlock(&delayed_refs->lock); | 4327 | spin_unlock(&delayed_refs->lock); |
4026 | 4328 | ||
4027 | ret = run_one_delayed_ref(trans, root->fs_info->tree_root, | 4329 | BUG_ON(head->extent_op); |
4028 | &head->node, head->extent_op, | 4330 | if (head->must_insert_reserved) |
4029 | head->must_insert_reserved); | 4331 | ret = 1; |
4030 | BUG_ON(ret); | 4332 | |
4333 | mutex_unlock(&head->mutex); | ||
4031 | btrfs_put_delayed_ref(&head->node); | 4334 | btrfs_put_delayed_ref(&head->node); |
4032 | return 0; | 4335 | return ret; |
4033 | out: | 4336 | out: |
4034 | spin_unlock(&delayed_refs->lock); | 4337 | spin_unlock(&delayed_refs->lock); |
4035 | return 0; | 4338 | return 0; |
4036 | } | 4339 | } |
4037 | 4340 | ||
4341 | void btrfs_free_tree_block(struct btrfs_trans_handle *trans, | ||
4342 | struct btrfs_root *root, | ||
4343 | struct extent_buffer *buf, | ||
4344 | u64 parent, int last_ref) | ||
4345 | { | ||
4346 | struct btrfs_block_rsv *block_rsv; | ||
4347 | struct btrfs_block_group_cache *cache = NULL; | ||
4348 | int ret; | ||
4349 | |||
4350 | if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { | ||
4351 | ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len, | ||
4352 | parent, root->root_key.objectid, | ||
4353 | btrfs_header_level(buf), | ||
4354 | BTRFS_DROP_DELAYED_REF, NULL); | ||
4355 | BUG_ON(ret); | ||
4356 | } | ||
4357 | |||
4358 | if (!last_ref) | ||
4359 | return; | ||
4360 | |||
4361 | block_rsv = get_block_rsv(trans, root); | ||
4362 | cache = btrfs_lookup_block_group(root->fs_info, buf->start); | ||
4363 | BUG_ON(block_rsv->space_info != cache->space_info); | ||
4364 | |||
4365 | if (btrfs_header_generation(buf) == trans->transid) { | ||
4366 | if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { | ||
4367 | ret = check_ref_cleanup(trans, root, buf->start); | ||
4368 | if (!ret) | ||
4369 | goto pin; | ||
4370 | } | ||
4371 | |||
4372 | if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) { | ||
4373 | pin_down_extent(root, cache, buf->start, buf->len, 1); | ||
4374 | goto pin; | ||
4375 | } | ||
4376 | |||
4377 | WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)); | ||
4378 | |||
4379 | btrfs_add_free_space(cache, buf->start, buf->len); | ||
4380 | ret = update_reserved_bytes(cache, buf->len, 0, 0); | ||
4381 | if (ret == -EAGAIN) { | ||
4382 | /* block group became read-only */ | ||
4383 | update_reserved_bytes(cache, buf->len, 0, 1); | ||
4384 | goto out; | ||
4385 | } | ||
4386 | |||
4387 | ret = 1; | ||
4388 | spin_lock(&block_rsv->lock); | ||
4389 | if (block_rsv->reserved < block_rsv->size) { | ||
4390 | block_rsv->reserved += buf->len; | ||
4391 | ret = 0; | ||
4392 | } | ||
4393 | spin_unlock(&block_rsv->lock); | ||
4394 | |||
4395 | if (ret) { | ||
4396 | spin_lock(&cache->space_info->lock); | ||
4397 | cache->space_info->bytes_reserved -= buf->len; | ||
4398 | spin_unlock(&cache->space_info->lock); | ||
4399 | } | ||
4400 | goto out; | ||
4401 | } | ||
4402 | pin: | ||
4403 | if (block_rsv->durable && !cache->ro) { | ||
4404 | ret = 0; | ||
4405 | spin_lock(&cache->lock); | ||
4406 | if (!cache->ro) { | ||
4407 | cache->reserved_pinned += buf->len; | ||
4408 | ret = 1; | ||
4409 | } | ||
4410 | spin_unlock(&cache->lock); | ||
4411 | |||
4412 | if (ret) { | ||
4413 | spin_lock(&block_rsv->lock); | ||
4414 | block_rsv->freed[trans->transid & 0x1] += buf->len; | ||
4415 | spin_unlock(&block_rsv->lock); | ||
4416 | } | ||
4417 | } | ||
4418 | out: | ||
4419 | btrfs_put_block_group(cache); | ||
4420 | } | ||
4421 | |||
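The branch structure above boils down to three outcomes for a tree block: do nothing while other references remain, recycle it immediately when it was allocated in this transaction and never written, and otherwise pin it until commit. A compressed classification, glossing over check_ref_cleanup and the log tree:

    #include <stdio.h>

    enum fate { NOTHING, RECYCLE, PIN };

    static enum fate classify(int last_ref, int same_transid, int written)
    {
            if (!last_ref)
                    return NOTHING;  /* other references still exist */
            if (same_transid && !written)
                    return RECYCLE;  /* never hit disk: back to free space */
            return PIN;              /* must survive until the commit */
    }

    int main(void)
    {
            printf("fresh, unwritten block: %d\n", classify(1, 1, 0));
            printf("block from an older transaction: %d\n", classify(1, 0, 0));
            return 0;
    }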
4038 | int btrfs_free_extent(struct btrfs_trans_handle *trans, | 4422 | int btrfs_free_extent(struct btrfs_trans_handle *trans, |
4039 | struct btrfs_root *root, | 4423 | struct btrfs_root *root, |
4040 | u64 bytenr, u64 num_bytes, u64 parent, | 4424 | u64 bytenr, u64 num_bytes, u64 parent, |
@@ -4056,8 +4440,6 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, | |||
4056 | parent, root_objectid, (int)owner, | 4440 | parent, root_objectid, (int)owner, |
4057 | BTRFS_DROP_DELAYED_REF, NULL); | 4441 | BTRFS_DROP_DELAYED_REF, NULL); |
4058 | BUG_ON(ret); | 4442 | BUG_ON(ret); |
4059 | ret = check_ref_cleanup(trans, root, bytenr); | ||
4060 | BUG_ON(ret); | ||
4061 | } else { | 4443 | } else { |
4062 | ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes, | 4444 | ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes, |
4063 | parent, root_objectid, owner, | 4445 | parent, root_objectid, owner, |
@@ -4067,21 +4449,6 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, | |||
4067 | return ret; | 4449 | return ret; |
4068 | } | 4450 | } |
4069 | 4451 | ||
4070 | int btrfs_free_tree_block(struct btrfs_trans_handle *trans, | ||
4071 | struct btrfs_root *root, | ||
4072 | u64 bytenr, u32 blocksize, | ||
4073 | u64 parent, u64 root_objectid, int level) | ||
4074 | { | ||
4075 | u64 used; | ||
4076 | spin_lock(&root->node_lock); | ||
4077 | used = btrfs_root_used(&root->root_item) - blocksize; | ||
4078 | btrfs_set_root_used(&root->root_item, used); | ||
4079 | spin_unlock(&root->node_lock); | ||
4080 | |||
4081 | return btrfs_free_extent(trans, root, bytenr, blocksize, | ||
4082 | parent, root_objectid, level, 0); | ||
4083 | } | ||
4084 | |||
4085 | static u64 stripe_align(struct btrfs_root *root, u64 val) | 4452 | static u64 stripe_align(struct btrfs_root *root, u64 val) |
4086 | { | 4453 | { |
4087 | u64 mask = ((u64)root->stripesize - 1); | 4454 | u64 mask = ((u64)root->stripesize - 1); |
@@ -4134,6 +4501,22 @@ wait_block_group_cache_done(struct btrfs_block_group_cache *cache) | |||
4134 | return 0; | 4501 | return 0; |
4135 | } | 4502 | } |
4136 | 4503 | ||
4504 | static int get_block_group_index(struct btrfs_block_group_cache *cache) | ||
4505 | { | ||
4506 | int index; | ||
4507 | if (cache->flags & BTRFS_BLOCK_GROUP_RAID10) | ||
4508 | index = 0; | ||
4509 | else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1) | ||
4510 | index = 1; | ||
4511 | else if (cache->flags & BTRFS_BLOCK_GROUP_DUP) | ||
4512 | index = 2; | ||
4513 | else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0) | ||
4514 | index = 3; | ||
4515 | else | ||
4516 | index = 4; | ||
4517 | return index; | ||
4518 | } | ||
4519 | |||
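The returned index doubles as an allocation preference: find_free_extent (below) walks space_info->block_groups[0] first and only advances to the next list via the '++index' retry after a full pass fails, so RAID10 groups are tried before RAID1 and so on down to single. A demo with illustrative flag values, not the real BTRFS_BLOCK_GROUP_* constants:

    #include <stdio.h>

    /* illustrative bits only, not the kernel's values */
    #define F_RAID10 0x1
    #define F_RAID1  0x2
    #define F_DUP    0x4
    #define F_RAID0  0x8
    #define NR_RAID_TYPES 5

    static int group_index(unsigned flags)
    {
            if (flags & F_RAID10) return 0;
            if (flags & F_RAID1)  return 1;
            if (flags & F_DUP)    return 2;
            if (flags & F_RAID0)  return 3;
            return 4; /* single */
    }

    int main(void)
    {
            for (int index = 0; index < NR_RAID_TYPES; index++)
                    printf("search pass %d\n", index);
            printf("a RAID1 group sits on list %d\n", group_index(F_RAID1));
            return 0;
    }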
4137 | enum btrfs_loop_type { | 4520 | enum btrfs_loop_type { |
4138 | LOOP_FIND_IDEAL = 0, | 4521 | LOOP_FIND_IDEAL = 0, |
4139 | LOOP_CACHING_NOWAIT = 1, | 4522 | LOOP_CACHING_NOWAIT = 1, |
@@ -4155,7 +4538,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, | |||
4155 | u64 num_bytes, u64 empty_size, | 4538 | u64 num_bytes, u64 empty_size, |
4156 | u64 search_start, u64 search_end, | 4539 | u64 search_start, u64 search_end, |
4157 | u64 hint_byte, struct btrfs_key *ins, | 4540 | u64 hint_byte, struct btrfs_key *ins, |
4158 | u64 exclude_start, u64 exclude_nr, | ||
4159 | int data) | 4541 | int data) |
4160 | { | 4542 | { |
4161 | int ret = 0; | 4543 | int ret = 0; |
@@ -4168,6 +4550,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, | |||
4168 | struct btrfs_space_info *space_info; | 4550 | struct btrfs_space_info *space_info; |
4169 | int last_ptr_loop = 0; | 4551 | int last_ptr_loop = 0; |
4170 | int loop = 0; | 4552 | int loop = 0; |
4553 | int index = 0; | ||
4171 | bool found_uncached_bg = false; | 4554 | bool found_uncached_bg = false; |
4172 | bool failed_cluster_refill = false; | 4555 | bool failed_cluster_refill = false; |
4173 | bool failed_alloc = false; | 4556 | bool failed_alloc = false; |
@@ -4237,6 +4620,7 @@ ideal_cache: | |||
4237 | btrfs_put_block_group(block_group); | 4620 | btrfs_put_block_group(block_group); |
4238 | up_read(&space_info->groups_sem); | 4621 | up_read(&space_info->groups_sem); |
4239 | } else { | 4622 | } else { |
4623 | index = get_block_group_index(block_group); | ||
4240 | goto have_block_group; | 4624 | goto have_block_group; |
4241 | } | 4625 | } |
4242 | } else if (block_group) { | 4626 | } else if (block_group) { |
@@ -4245,7 +4629,8 @@ ideal_cache: | |||
4245 | } | 4629 | } |
4246 | search: | 4630 | search: |
4247 | down_read(&space_info->groups_sem); | 4631 | down_read(&space_info->groups_sem); |
4248 | list_for_each_entry(block_group, &space_info->block_groups, list) { | 4632 | list_for_each_entry(block_group, &space_info->block_groups[index], |
4633 | list) { | ||
4249 | u64 offset; | 4634 | u64 offset; |
4250 | int cached; | 4635 | int cached; |
4251 | 4636 | ||
@@ -4436,23 +4821,22 @@ checks: | |||
4436 | goto loop; | 4821 | goto loop; |
4437 | } | 4822 | } |
4438 | 4823 | ||
4439 | if (exclude_nr > 0 && | 4824 | ins->objectid = search_start; |
4440 | (search_start + num_bytes > exclude_start && | 4825 | ins->offset = num_bytes; |
4441 | search_start < exclude_start + exclude_nr)) { | 4826 | |
4442 | search_start = exclude_start + exclude_nr; | 4827 | if (offset < search_start) |
4828 | btrfs_add_free_space(block_group, offset, | ||
4829 | search_start - offset); | ||
4830 | BUG_ON(offset > search_start); | ||
4443 | 4831 | ||
4832 | ret = update_reserved_bytes(block_group, num_bytes, 1, | ||
4833 | (data & BTRFS_BLOCK_GROUP_DATA)); | ||
4834 | if (ret == -EAGAIN) { | ||
4444 | btrfs_add_free_space(block_group, offset, num_bytes); | 4835 | btrfs_add_free_space(block_group, offset, num_bytes); |
4445 | /* | ||
4446 | * if search_start is still in this block group | ||
4447 | * then we just re-search this block group | ||
4448 | */ | ||
4449 | if (search_start >= block_group->key.objectid && | ||
4450 | search_start < (block_group->key.objectid + | ||
4451 | block_group->key.offset)) | ||
4452 | goto have_block_group; | ||
4453 | goto loop; | 4836 | goto loop; |
4454 | } | 4837 | } |
4455 | 4838 | ||
4839 | /* we are all good, lets return */ | ||
4456 | ins->objectid = search_start; | 4840 | ins->objectid = search_start; |
4457 | ins->offset = num_bytes; | 4841 | ins->offset = num_bytes; |
4458 | 4842 | ||
@@ -4460,18 +4844,18 @@ checks: | |||
4460 | btrfs_add_free_space(block_group, offset, | 4844 | btrfs_add_free_space(block_group, offset, |
4461 | search_start - offset); | 4845 | search_start - offset); |
4462 | BUG_ON(offset > search_start); | 4846 | BUG_ON(offset > search_start); |
4463 | |||
4464 | update_reserved_extents(block_group, num_bytes, 1); | ||
4465 | |||
4466 | /* we are all good, lets return */ | ||
4467 | break; | 4847 | break; |
4468 | loop: | 4848 | loop: |
4469 | failed_cluster_refill = false; | 4849 | failed_cluster_refill = false; |
4470 | failed_alloc = false; | 4850 | failed_alloc = false; |
4851 | BUG_ON(index != get_block_group_index(block_group)); | ||
4471 | btrfs_put_block_group(block_group); | 4852 | btrfs_put_block_group(block_group); |
4472 | } | 4853 | } |
4473 | up_read(&space_info->groups_sem); | 4854 | up_read(&space_info->groups_sem); |
4474 | 4855 | ||
4856 | if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES) | ||
4857 | goto search; | ||
4858 | |||
4475 | /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait for | 4859 | /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait for |
4476 | * for them to make caching progress. Also | 4860 | * for them to make caching progress. Also |
4477 | * determine the best possible bg to cache | 4861 | * determine the best possible bg to cache |
@@ -4485,6 +4869,7 @@ loop: | |||
4485 | if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE && | 4869 | if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE && |
4486 | (found_uncached_bg || empty_size || empty_cluster || | 4870 | (found_uncached_bg || empty_size || empty_cluster || |
4487 | allowed_chunk_alloc)) { | 4871 | allowed_chunk_alloc)) { |
4872 | index = 0; | ||
4488 | if (loop == LOOP_FIND_IDEAL && found_uncached_bg) { | 4873 | if (loop == LOOP_FIND_IDEAL && found_uncached_bg) { |
4489 | found_uncached_bg = false; | 4874 | found_uncached_bg = false; |
4490 | loop++; | 4875 | loop++; |
@@ -4567,31 +4952,30 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes, | |||
4567 | int dump_block_groups) | 4952 | int dump_block_groups) |
4568 | { | 4953 | { |
4569 | struct btrfs_block_group_cache *cache; | 4954 | struct btrfs_block_group_cache *cache; |
4955 | int index = 0; | ||
4570 | 4956 | ||
4571 | spin_lock(&info->lock); | 4957 | spin_lock(&info->lock); |
4572 | printk(KERN_INFO "space_info has %llu free, is %sfull\n", | 4958 | printk(KERN_INFO "space_info has %llu free, is %sfull\n", |
4573 | (unsigned long long)(info->total_bytes - info->bytes_used - | 4959 | (unsigned long long)(info->total_bytes - info->bytes_used - |
4574 | info->bytes_pinned - info->bytes_reserved - | 4960 | info->bytes_pinned - info->bytes_reserved - |
4575 | info->bytes_super), | 4961 | info->bytes_readonly), |
4576 | (info->full) ? "" : "not "); | 4962 | (info->full) ? "" : "not "); |
4577 | printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu," | 4963 | printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, " |
4578 | " may_use=%llu, used=%llu, root=%llu, super=%llu, reserved=%llu" | 4964 | "reserved=%llu, may_use=%llu, readonly=%llu\n", |
4579 | "\n", | ||
4580 | (unsigned long long)info->total_bytes, | 4965 | (unsigned long long)info->total_bytes, |
4966 | (unsigned long long)info->bytes_used, | ||
4581 | (unsigned long long)info->bytes_pinned, | 4967 | (unsigned long long)info->bytes_pinned, |
4582 | (unsigned long long)info->bytes_delalloc, | 4968 | (unsigned long long)info->bytes_reserved, |
4583 | (unsigned long long)info->bytes_may_use, | 4969 | (unsigned long long)info->bytes_may_use, |
4584 | (unsigned long long)info->bytes_used, | 4970 | (unsigned long long)info->bytes_readonly); |
4585 | (unsigned long long)info->bytes_root, | ||
4586 | (unsigned long long)info->bytes_super, | ||
4587 | (unsigned long long)info->bytes_reserved); | ||
4588 | spin_unlock(&info->lock); | 4971 | spin_unlock(&info->lock); |
4589 | 4972 | ||
4590 | if (!dump_block_groups) | 4973 | if (!dump_block_groups) |
4591 | return; | 4974 | return; |
4592 | 4975 | ||
4593 | down_read(&info->groups_sem); | 4976 | down_read(&info->groups_sem); |
4594 | list_for_each_entry(cache, &info->block_groups, list) { | 4977 | again: |
4978 | list_for_each_entry(cache, &info->block_groups[index], list) { | ||
4595 | spin_lock(&cache->lock); | 4979 | spin_lock(&cache->lock); |
4596 | printk(KERN_INFO "block group %llu has %llu bytes, %llu used " | 4980 | printk(KERN_INFO "block group %llu has %llu bytes, %llu used " |
4597 | "%llu pinned %llu reserved\n", | 4981 | "%llu pinned %llu reserved\n", |
@@ -4603,6 +4987,8 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes, | |||
4603 | btrfs_dump_free_space(cache, bytes); | 4987 | btrfs_dump_free_space(cache, bytes); |
4604 | spin_unlock(&cache->lock); | 4988 | spin_unlock(&cache->lock); |
4605 | } | 4989 | } |
4990 | if (++index < BTRFS_NR_RAID_TYPES) | ||
4991 | goto again; | ||
4606 | up_read(&info->groups_sem); | 4992 | up_read(&info->groups_sem); |
4607 | } | 4993 | } |
4608 | 4994 | ||
@@ -4628,9 +5014,8 @@ again: | |||
4628 | 5014 | ||
4629 | WARN_ON(num_bytes < root->sectorsize); | 5015 | WARN_ON(num_bytes < root->sectorsize); |
4630 | ret = find_free_extent(trans, root, num_bytes, empty_size, | 5016 | ret = find_free_extent(trans, root, num_bytes, empty_size, |
4631 | search_start, search_end, hint_byte, ins, | 5017 | search_start, search_end, hint_byte, |
4632 | trans->alloc_exclude_start, | 5018 | ins, data); |
4633 | trans->alloc_exclude_nr, data); | ||
4634 | 5019 | ||
4635 | if (ret == -ENOSPC && num_bytes > min_alloc_size) { | 5020 | if (ret == -ENOSPC && num_bytes > min_alloc_size) { |
4636 | num_bytes = num_bytes >> 1; | 5021 | num_bytes = num_bytes >> 1; |
@@ -4668,7 +5053,7 @@ int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len) | |||
4668 | ret = btrfs_discard_extent(root, start, len); | 5053 | ret = btrfs_discard_extent(root, start, len); |
4669 | 5054 | ||
4670 | btrfs_add_free_space(cache, start, len); | 5055 | btrfs_add_free_space(cache, start, len); |
4671 | update_reserved_extents(cache, len, 0); | 5056 | update_reserved_bytes(cache, len, 0, 1); |
4672 | btrfs_put_block_group(cache); | 5057 | btrfs_put_block_group(cache); |
4673 | 5058 | ||
4674 | return ret; | 5059 | return ret; |
@@ -4731,8 +5116,7 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, | |||
4731 | btrfs_mark_buffer_dirty(path->nodes[0]); | 5116 | btrfs_mark_buffer_dirty(path->nodes[0]); |
4732 | btrfs_free_path(path); | 5117 | btrfs_free_path(path); |
4733 | 5118 | ||
4734 | ret = update_block_group(trans, root, ins->objectid, ins->offset, | 5119 | ret = update_block_group(trans, root, ins->objectid, ins->offset, 1); |
4735 | 1, 0); | ||
4736 | if (ret) { | 5120 | if (ret) { |
4737 | printk(KERN_ERR "btrfs update block group failed for %llu " | 5121 | printk(KERN_ERR "btrfs update block group failed for %llu " |
4738 | "%llu\n", (unsigned long long)ins->objectid, | 5122 | "%llu\n", (unsigned long long)ins->objectid, |
@@ -4792,8 +5176,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, | |||
4792 | btrfs_mark_buffer_dirty(leaf); | 5176 | btrfs_mark_buffer_dirty(leaf); |
4793 | btrfs_free_path(path); | 5177 | btrfs_free_path(path); |
4794 | 5178 | ||
4795 | ret = update_block_group(trans, root, ins->objectid, ins->offset, | 5179 | ret = update_block_group(trans, root, ins->objectid, ins->offset, 1); |
4796 | 1, 0); | ||
4797 | if (ret) { | 5180 | if (ret) { |
4798 | printk(KERN_ERR "btrfs update block group failed for %llu " | 5181 | printk(KERN_ERR "btrfs update block group failed for %llu " |
4799 | "%llu\n", (unsigned long long)ins->objectid, | 5182 | "%llu\n", (unsigned long long)ins->objectid, |
@@ -4869,73 +5252,14 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, | |||
4869 | put_caching_control(caching_ctl); | 5252 | put_caching_control(caching_ctl); |
4870 | } | 5253 | } |
4871 | 5254 | ||
4872 | update_reserved_extents(block_group, ins->offset, 1); | 5255 | ret = update_reserved_bytes(block_group, ins->offset, 1, 1); |
5256 | BUG_ON(ret); | ||
4873 | btrfs_put_block_group(block_group); | 5257 | btrfs_put_block_group(block_group); |
4874 | ret = alloc_reserved_file_extent(trans, root, 0, root_objectid, | 5258 | ret = alloc_reserved_file_extent(trans, root, 0, root_objectid, |
4875 | 0, owner, offset, ins, 1); | 5259 | 0, owner, offset, ins, 1); |
4876 | return ret; | 5260 | return ret; |
4877 | } | 5261 | } |
4878 | 5262 | ||
4879 | /* | ||
4880 | * finds a free extent and does all the dirty work required for allocation | ||
4881 | * returns the key for the extent through ins, and a tree buffer for | ||
4882 | * the first block of the extent through buf. | ||
4883 | * | ||
4884 | * returns 0 if everything worked, non-zero otherwise. | ||
4885 | */ | ||
4886 | static int alloc_tree_block(struct btrfs_trans_handle *trans, | ||
4887 | struct btrfs_root *root, | ||
4888 | u64 num_bytes, u64 parent, u64 root_objectid, | ||
4889 | struct btrfs_disk_key *key, int level, | ||
4890 | u64 empty_size, u64 hint_byte, u64 search_end, | ||
4891 | struct btrfs_key *ins) | ||
4892 | { | ||
4893 | int ret; | ||
4894 | u64 flags = 0; | ||
4895 | |||
4896 | ret = btrfs_reserve_extent(trans, root, num_bytes, num_bytes, | ||
4897 | empty_size, hint_byte, search_end, | ||
4898 | ins, 0); | ||
4899 | if (ret) | ||
4900 | return ret; | ||
4901 | |||
4902 | if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { | ||
4903 | if (parent == 0) | ||
4904 | parent = ins->objectid; | ||
4905 | flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF; | ||
4906 | } else | ||
4907 | BUG_ON(parent > 0); | ||
4908 | |||
4909 | if (root_objectid != BTRFS_TREE_LOG_OBJECTID) { | ||
4910 | struct btrfs_delayed_extent_op *extent_op; | ||
4911 | extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS); | ||
4912 | BUG_ON(!extent_op); | ||
4913 | if (key) | ||
4914 | memcpy(&extent_op->key, key, sizeof(extent_op->key)); | ||
4915 | else | ||
4916 | memset(&extent_op->key, 0, sizeof(extent_op->key)); | ||
4917 | extent_op->flags_to_set = flags; | ||
4918 | extent_op->update_key = 1; | ||
4919 | extent_op->update_flags = 1; | ||
4920 | extent_op->is_data = 0; | ||
4921 | |||
4922 | ret = btrfs_add_delayed_tree_ref(trans, ins->objectid, | ||
4923 | ins->offset, parent, root_objectid, | ||
4924 | level, BTRFS_ADD_DELAYED_EXTENT, | ||
4925 | extent_op); | ||
4926 | BUG_ON(ret); | ||
4927 | } | ||
4928 | |||
4929 | if (root_objectid == root->root_key.objectid) { | ||
4930 | u64 used; | ||
4931 | spin_lock(&root->node_lock); | ||
4932 | used = btrfs_root_used(&root->root_item) + num_bytes; | ||
4933 | btrfs_set_root_used(&root->root_item, used); | ||
4934 | spin_unlock(&root->node_lock); | ||
4935 | } | ||
4936 | return ret; | ||
4937 | } | ||
4938 | |||
4939 | struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans, | 5263 | struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans, |
4940 | struct btrfs_root *root, | 5264 | struct btrfs_root *root, |
4941 | u64 bytenr, u32 blocksize, | 5265 | u64 bytenr, u32 blocksize, |
@@ -4974,8 +5298,45 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans, | |||
4974 | return buf; | 5298 | return buf; |
4975 | } | 5299 | } |
4976 | 5300 | ||
5301 | static struct btrfs_block_rsv * | ||
5302 | use_block_rsv(struct btrfs_trans_handle *trans, | ||
5303 | struct btrfs_root *root, u32 blocksize) | ||
5304 | { | ||
5305 | struct btrfs_block_rsv *block_rsv; | ||
5306 | int ret; | ||
5307 | |||
5308 | block_rsv = get_block_rsv(trans, root); | ||
5309 | |||
5310 | if (block_rsv->size == 0) { | ||
5311 | ret = reserve_metadata_bytes(block_rsv, blocksize); | ||
5312 | if (ret) | ||
5313 | return ERR_PTR(ret); | ||
5314 | return block_rsv; | ||
5315 | } | ||
5316 | |||
5317 | ret = block_rsv_use_bytes(block_rsv, blocksize); | ||
5318 | if (!ret) | ||
5319 | return block_rsv; | ||
5320 | |||
5321 | WARN_ON(1); | ||
5322 | printk(KERN_INFO "block_rsv size %llu reserved %llu freed %llu %llu\n", | ||
5323 | block_rsv->size, block_rsv->reserved, | ||
5324 | block_rsv->freed[0], block_rsv->freed[1]); | ||
5325 | |||
5326 | return ERR_PTR(-ENOSPC); | ||
5327 | } | ||
5328 | |||
5329 | static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize) | ||
5330 | { | ||
5331 | block_rsv_add_bytes(block_rsv, blocksize, 0); | ||
5332 | block_rsv_release_bytes(block_rsv, NULL, 0); | ||
5333 | } | ||
5334 | |||
4977 | /* | 5335 | /* |
4978 | * helper function to allocate a block for a given tree | 5336 | * finds a free extent and does all the dirty work required for allocation |
5337 | * returns the key for the extent through ins, and a tree buffer for | ||
5338 | * the first block of the extent through buf. | ||
5339 | * | ||
4979 | * returns the tree buffer or NULL. | 5340 | * returns the tree buffer or NULL. |
4980 | */ | 5341 | */ |
4981 | struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, | 5342 | struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, |
@@ -4985,18 +5346,53 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, | |||
4985 | u64 hint, u64 empty_size) | 5346 | u64 hint, u64 empty_size) |
4986 | { | 5347 | { |
4987 | struct btrfs_key ins; | 5348 | struct btrfs_key ins; |
4988 | int ret; | 5349 | struct btrfs_block_rsv *block_rsv; |
4989 | struct extent_buffer *buf; | 5350 | struct extent_buffer *buf; |
5351 | u64 flags = 0; | ||
5352 | int ret; | ||
5353 | |||
4990 | 5354 | ||
4991 | ret = alloc_tree_block(trans, root, blocksize, parent, root_objectid, | 5355 | block_rsv = use_block_rsv(trans, root, blocksize); |
4992 | key, level, empty_size, hint, (u64)-1, &ins); | 5356 | if (IS_ERR(block_rsv)) |
5357 | return ERR_CAST(block_rsv); | ||
5358 | |||
5359 | ret = btrfs_reserve_extent(trans, root, blocksize, blocksize, | ||
5360 | empty_size, hint, (u64)-1, &ins, 0); | ||
4993 | if (ret) { | 5361 | if (ret) { |
4994 | BUG_ON(ret > 0); | 5362 | unuse_block_rsv(block_rsv, blocksize); |
4995 | return ERR_PTR(ret); | 5363 | return ERR_PTR(ret); |
4996 | } | 5364 | } |
4997 | 5365 | ||
4998 | buf = btrfs_init_new_buffer(trans, root, ins.objectid, | 5366 | buf = btrfs_init_new_buffer(trans, root, ins.objectid, |
4999 | blocksize, level); | 5367 | blocksize, level); |
5368 | BUG_ON(IS_ERR(buf)); | ||
5369 | |||
5370 | if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { | ||
5371 | if (parent == 0) | ||
5372 | parent = ins.objectid; | ||
5373 | flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF; | ||
5374 | } else | ||
5375 | BUG_ON(parent > 0); | ||
5376 | |||
5377 | if (root_objectid != BTRFS_TREE_LOG_OBJECTID) { | ||
5378 | struct btrfs_delayed_extent_op *extent_op; | ||
5379 | extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS); | ||
5380 | BUG_ON(!extent_op); | ||
5381 | if (key) | ||
5382 | memcpy(&extent_op->key, key, sizeof(extent_op->key)); | ||
5383 | else | ||
5384 | memset(&extent_op->key, 0, sizeof(extent_op->key)); | ||
5385 | extent_op->flags_to_set = flags; | ||
5386 | extent_op->update_key = 1; | ||
5387 | extent_op->update_flags = 1; | ||
5388 | extent_op->is_data = 0; | ||
5389 | |||
5390 | ret = btrfs_add_delayed_tree_ref(trans, ins.objectid, | ||
5391 | ins.offset, parent, root_objectid, | ||
5392 | level, BTRFS_ADD_DELAYED_EXTENT, | ||
5393 | extent_op); | ||
5394 | BUG_ON(ret); | ||
5395 | } | ||
5000 | return buf; | 5396 | return buf; |
5001 | } | 5397 | } |
5002 | 5398 | ||
@@ -5321,7 +5717,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans, | |||
5321 | struct btrfs_path *path, | 5717 | struct btrfs_path *path, |
5322 | struct walk_control *wc) | 5718 | struct walk_control *wc) |
5323 | { | 5719 | { |
5324 | int ret = 0; | 5720 | int ret; |
5325 | int level = wc->level; | 5721 | int level = wc->level; |
5326 | struct extent_buffer *eb = path->nodes[level]; | 5722 | struct extent_buffer *eb = path->nodes[level]; |
5327 | u64 parent = 0; | 5723 | u64 parent = 0; |
@@ -5399,13 +5795,11 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans, | |||
5399 | btrfs_header_owner(path->nodes[level + 1])); | 5795 | btrfs_header_owner(path->nodes[level + 1])); |
5400 | } | 5796 | } |
5401 | 5797 | ||
5402 | ret = btrfs_free_extent(trans, root, eb->start, eb->len, parent, | 5798 | btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1); |
5403 | root->root_key.objectid, level, 0); | ||
5404 | BUG_ON(ret); | ||
5405 | out: | 5799 | out: |
5406 | wc->refs[level] = 0; | 5800 | wc->refs[level] = 0; |
5407 | wc->flags[level] = 0; | 5801 | wc->flags[level] = 0; |
5408 | return ret; | 5802 | return 0; |
5409 | } | 5803 | } |
5410 | 5804 | ||
5411 | static noinline int walk_down_tree(struct btrfs_trans_handle *trans, | 5805 | static noinline int walk_down_tree(struct btrfs_trans_handle *trans, |
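In walk_up_proc(), btrfs_free_tree_block() now absorbs the free-plus-error-handling work that the btrfs_free_extent()/BUG_ON() pair used to do, taking wc->refs[level] == 1 as a last-reference hint; with no status left to propagate, the function returns 0 unconditionally. A rough model of the decision that hint is assumed to drive — the branch conditions below are illustrative, not the kernel implementation:

#include <stdio.h>

enum disposition { DROP_REF_ONLY, REUSE_NOW, PIN_UNTIL_COMMIT };

/* assumed shape of the last_ref decision in btrfs_free_tree_block() */
static enum disposition free_tree_block_model(int last_ref, int dirty_in_trans)
{
	if (!last_ref)
		return DROP_REF_ONLY;	/* other roots still use the block */
	if (dirty_in_trans)
		return REUSE_NOW;	/* never part of a committed tree */
	return PIN_UNTIL_COMMIT;	/* old copy must survive until commit */
}

int main(void)
{
	printf("shared=%d fresh=%d committed=%d\n",
	       free_tree_block_model(0, 0),
	       free_tree_block_model(1, 1),
	       free_tree_block_model(1, 0));
	return 0;
}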
@@ -5483,7 +5877,8 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans, | |||
5483 | * also make sure backrefs for the shared block and all lower level | 5877 | * also make sure backrefs for the shared block and all lower level |
5484 | * blocks are properly updated. | 5878 | * blocks are properly updated. |
5485 | */ | 5879 | */ |
5486 | int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref) | 5880 | int btrfs_drop_snapshot(struct btrfs_root *root, |
5881 | struct btrfs_block_rsv *block_rsv, int update_ref) | ||
5487 | { | 5882 | { |
5488 | struct btrfs_path *path; | 5883 | struct btrfs_path *path; |
5489 | struct btrfs_trans_handle *trans; | 5884 | struct btrfs_trans_handle *trans; |
@@ -5501,7 +5896,9 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref) | |||
5501 | wc = kzalloc(sizeof(*wc), GFP_NOFS); | 5896 | wc = kzalloc(sizeof(*wc), GFP_NOFS); |
5502 | BUG_ON(!wc); | 5897 | BUG_ON(!wc); |
5503 | 5898 | ||
5504 | trans = btrfs_start_transaction(tree_root, 1); | 5899 | trans = btrfs_start_transaction(tree_root, 0); |
5900 | if (block_rsv) | ||
5901 | trans->block_rsv = block_rsv; | ||
5505 | 5902 | ||
5506 | if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { | 5903 | if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { |
5507 | level = btrfs_header_level(root->node); | 5904 | level = btrfs_header_level(root->node); |
@@ -5589,22 +5986,16 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref) | |||
5589 | } | 5986 | } |
5590 | 5987 | ||
5591 | BUG_ON(wc->level == 0); | 5988 | BUG_ON(wc->level == 0); |
5592 | if (trans->transaction->in_commit || | 5989 | if (btrfs_should_end_transaction(trans, tree_root)) { |
5593 | trans->transaction->delayed_refs.flushing) { | ||
5594 | ret = btrfs_update_root(trans, tree_root, | 5990 | ret = btrfs_update_root(trans, tree_root, |
5595 | &root->root_key, | 5991 | &root->root_key, |
5596 | root_item); | 5992 | root_item); |
5597 | BUG_ON(ret); | 5993 | BUG_ON(ret); |
5598 | 5994 | ||
5599 | btrfs_end_transaction(trans, tree_root); | 5995 | btrfs_end_transaction_throttle(trans, tree_root); |
5600 | trans = btrfs_start_transaction(tree_root, 1); | 5996 | trans = btrfs_start_transaction(tree_root, 0); |
5601 | } else { | 5997 | if (block_rsv) |
5602 | unsigned long update; | 5998 | trans->block_rsv = block_rsv; |
5603 | update = trans->delayed_ref_updates; | ||
5604 | trans->delayed_ref_updates = 0; | ||
5605 | if (update) | ||
5606 | btrfs_run_delayed_refs(trans, tree_root, | ||
5607 | update); | ||
5608 | } | 5999 | } |
5609 | } | 6000 | } |
5610 | btrfs_release_path(root, path); | 6001 | btrfs_release_path(root, path); |
@@ -5632,7 +6023,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref) | |||
5632 | kfree(root); | 6023 | kfree(root); |
5633 | } | 6024 | } |
5634 | out: | 6025 | out: |
5635 | btrfs_end_transaction(trans, tree_root); | 6026 | btrfs_end_transaction_throttle(trans, tree_root); |
5636 | kfree(wc); | 6027 | kfree(wc); |
5637 | btrfs_free_path(path); | 6028 | btrfs_free_path(path); |
5638 | return err; | 6029 | return err; |
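btrfs_drop_snapshot() now takes the caller's block_rsv, starts transactions with num_items == 0, and lets btrfs_should_end_transaction() decide when to cycle via the throttled end path, replacing the manual in_commit/flushing test and the hand-rolled delayed-ref flush. The control flow reduces to the standard long-deletion pattern; a compilable sketch with stub helpers (names illustrative, standing in for the kernel calls):

/*
 * Model of the "do bounded work, persist progress, cycle the
 * transaction" loop above. Only the control flow mirrors the diff.
 */
#include <stdbool.h>
#include <stdio.h>

static int steps_left = 5;

static bool walk_one_level(void)     { return --steps_left <= 0; } /* done? */
static void persist_progress(void)   { /* btrfs_update_root() analogue */ }
static bool should_end_trans(void)   { return steps_left % 2 == 0; }
static void end_trans_throttle(void) { printf("transaction cycled\n"); }
static void start_trans(void)        { /* would re-attach block_rsv here */ }

int main(void)
{
	start_trans();
	while (!walk_one_level()) {
		if (should_end_trans()) {
			persist_progress();	/* save drop_progress first */
			end_trans_throttle();
			start_trans();
		}
	}
	end_trans_throttle();		/* final teardown, as in the diff */
	return 0;
}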
@@ -7228,48 +7619,80 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags) | |||
7228 | return flags; | 7619 | return flags; |
7229 | } | 7620 | } |
7230 | 7621 | ||
7231 | static int __alloc_chunk_for_shrink(struct btrfs_root *root, | 7622 | static int set_block_group_ro(struct btrfs_block_group_cache *cache) |
7232 | struct btrfs_block_group_cache *shrink_block_group, | ||
7233 | int force) | ||
7234 | { | 7623 | { |
7235 | struct btrfs_trans_handle *trans; | 7624 | struct btrfs_space_info *sinfo = cache->space_info; |
7236 | u64 new_alloc_flags; | 7625 | u64 num_bytes; |
7237 | u64 calc; | 7626 | int ret = -ENOSPC; |
7238 | 7627 | ||
7239 | spin_lock(&shrink_block_group->lock); | 7628 | if (cache->ro) |
7240 | if (btrfs_block_group_used(&shrink_block_group->item) + | 7629 | return 0; |
7241 | shrink_block_group->reserved > 0) { | ||
7242 | spin_unlock(&shrink_block_group->lock); | ||
7243 | 7630 | ||
7244 | trans = btrfs_start_transaction(root, 1); | 7631 | spin_lock(&sinfo->lock); |
7245 | spin_lock(&shrink_block_group->lock); | 7632 | spin_lock(&cache->lock); |
7633 | num_bytes = cache->key.offset - cache->reserved - cache->pinned - | ||
7634 | cache->bytes_super - btrfs_block_group_used(&cache->item); | ||
7635 | |||
7636 | if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned + | ||
7637 | sinfo->bytes_may_use + sinfo->bytes_readonly + | ||
7638 | cache->reserved_pinned + num_bytes < sinfo->total_bytes) { | ||
7639 | sinfo->bytes_readonly += num_bytes; | ||
7640 | sinfo->bytes_reserved += cache->reserved_pinned; | ||
7641 | cache->reserved_pinned = 0; | ||
7642 | cache->ro = 1; | ||
7643 | ret = 0; | ||
7644 | } | ||
7645 | spin_unlock(&cache->lock); | ||
7646 | spin_unlock(&sinfo->lock); | ||
7647 | return ret; | ||
7648 | } | ||
7246 | 7649 | ||
7247 | new_alloc_flags = update_block_group_flags(root, | 7650 | int btrfs_set_block_group_ro(struct btrfs_root *root, |
7248 | shrink_block_group->flags); | 7651 | struct btrfs_block_group_cache *cache) |
7249 | if (new_alloc_flags != shrink_block_group->flags) { | ||
7250 | calc = | ||
7251 | btrfs_block_group_used(&shrink_block_group->item); | ||
7252 | } else { | ||
7253 | calc = shrink_block_group->key.offset; | ||
7254 | } | ||
7255 | spin_unlock(&shrink_block_group->lock); | ||
7256 | 7652 | ||
7257 | do_chunk_alloc(trans, root->fs_info->extent_root, | 7653 | { |
7258 | calc + 2 * 1024 * 1024, new_alloc_flags, force); | 7654 | struct btrfs_trans_handle *trans; |
7655 | u64 alloc_flags; | ||
7656 | int ret; | ||
7259 | 7657 | ||
7260 | btrfs_end_transaction(trans, root); | 7658 | BUG_ON(cache->ro); |
7261 | } else | 7659 | |
7262 | spin_unlock(&shrink_block_group->lock); | 7660 | trans = btrfs_join_transaction(root, 1); |
7263 | return 0; | 7661 | BUG_ON(IS_ERR(trans)); |
7264 | } | ||
7265 | 7662 | ||
7663 | alloc_flags = update_block_group_flags(root, cache->flags); | ||
7664 | if (alloc_flags != cache->flags) | ||
7665 | do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); | ||
7266 | 7666 | ||
7267 | int btrfs_prepare_block_group_relocation(struct btrfs_root *root, | 7667 | ret = set_block_group_ro(cache); |
7268 | struct btrfs_block_group_cache *group) | 7668 | if (!ret) |
7669 | goto out; | ||
7670 | alloc_flags = get_alloc_profile(root, cache->space_info->flags); | ||
7671 | ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); | ||
7672 | if (ret < 0) | ||
7673 | goto out; | ||
7674 | ret = set_block_group_ro(cache); | ||
7675 | out: | ||
7676 | btrfs_end_transaction(trans, root); | ||
7677 | return ret; | ||
7678 | } | ||
7269 | 7679 | ||
7680 | int btrfs_set_block_group_rw(struct btrfs_root *root, | ||
7681 | struct btrfs_block_group_cache *cache) | ||
7270 | { | 7682 | { |
7271 | __alloc_chunk_for_shrink(root, group, 1); | 7683 | struct btrfs_space_info *sinfo = cache->space_info; |
7272 | set_block_group_readonly(group); | 7684 | u64 num_bytes; |
7685 | |||
7686 | BUG_ON(!cache->ro); | ||
7687 | |||
7688 | spin_lock(&sinfo->lock); | ||
7689 | spin_lock(&cache->lock); | ||
7690 | num_bytes = cache->key.offset - cache->reserved - cache->pinned - | ||
7691 | cache->bytes_super - btrfs_block_group_used(&cache->item); | ||
7692 | sinfo->bytes_readonly -= num_bytes; | ||
7693 | cache->ro = 0; | ||
7694 | spin_unlock(&cache->lock); | ||
7695 | spin_unlock(&sinfo->lock); | ||
7273 | return 0; | 7696 | return 0; |
7274 | } | 7697 | } |
7275 | 7698 | ||
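set_block_group_ro() above admits a group to read-only status only when everything the space_info has already promised, plus the bytes this group could still hand out (num_bytes), fits under total_bytes; btrfs_set_block_group_rw() undoes exactly that accounting. The inequality can be checked with plain numbers — field names follow the diff, values are invented for illustration:

/*
 * Worked model of the read-only admission check in set_block_group_ro().
 */
#include <stdio.h>

int main(void)
{
	/* per-group state (cache->...) */
	unsigned long long key_offset = 1024, reserved = 100, pinned = 50,
			   bytes_super = 4, used = 300, reserved_pinned = 20;
	/* space_info totals (sinfo->...) */
	unsigned long long s_used = 2000, s_reserved = 200, s_pinned = 80,
			   s_may_use = 40, s_readonly = 10, total = 4096;

	/* bytes this group could still allocate */
	unsigned long long num_bytes =
		key_offset - reserved - pinned - bytes_super - used;

	if (s_used + s_reserved + s_pinned + s_may_use + s_readonly +
	    reserved_pinned + num_bytes < total)
		printf("ok: mark RO, %llu bytes become readonly\n", num_bytes);
	else
		printf("-ENOSPC: marking RO would overcommit\n");
	return 0;
}

Note that the caller btrfs_set_block_group_ro() retries after forcing a chunk allocation, so a first -ENOSPC from this check is not final.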
@@ -7436,17 +7859,33 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) | |||
7436 | */ | 7859 | */ |
7437 | synchronize_rcu(); | 7860 | synchronize_rcu(); |
7438 | 7861 | ||
7862 | release_global_block_rsv(info); | ||
7863 | |||
7439 | while(!list_empty(&info->space_info)) { | 7864 | while(!list_empty(&info->space_info)) { |
7440 | space_info = list_entry(info->space_info.next, | 7865 | space_info = list_entry(info->space_info.next, |
7441 | struct btrfs_space_info, | 7866 | struct btrfs_space_info, |
7442 | list); | 7867 | list); |
7443 | 7868 | if (space_info->bytes_pinned > 0 || | |
7869 | space_info->bytes_reserved > 0) { | ||
7870 | WARN_ON(1); | ||
7871 | dump_space_info(space_info, 0, 0); | ||
7872 | } | ||
7444 | list_del(&space_info->list); | 7873 | list_del(&space_info->list); |
7445 | kfree(space_info); | 7874 | kfree(space_info); |
7446 | } | 7875 | } |
7447 | return 0; | 7876 | return 0; |
7448 | } | 7877 | } |
7449 | 7878 | ||
7879 | static void __link_block_group(struct btrfs_space_info *space_info, | ||
7880 | struct btrfs_block_group_cache *cache) | ||
7881 | { | ||
7882 | int index = get_block_group_index(cache); | ||
7883 | |||
7884 | down_write(&space_info->groups_sem); | ||
7885 | list_add_tail(&cache->list, &space_info->block_groups[index]); | ||
7886 | up_write(&space_info->groups_sem); | ||
7887 | } | ||
7888 | |||
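__link_block_group() exists because space_info->block_groups is now an array of per-RAID-profile lists keyed by get_block_group_index(), rather than a single list. The mapping below is an assumption inferred from this diff — the [3]/[4] sweep in btrfs_read_block_groups further down treats those slots as the un-mirrored profiles — while the flag values are the standard on-disk btrfs block group flags:

#include <stdio.h>

#define BTRFS_BLOCK_GROUP_RAID0   (1ULL << 3)
#define BTRFS_BLOCK_GROUP_RAID1   (1ULL << 4)
#define BTRFS_BLOCK_GROUP_DUP     (1ULL << 5)
#define BTRFS_BLOCK_GROUP_RAID10  (1ULL << 6)

/* assumed shape of get_block_group_index(): mirrored profiles first */
static int block_group_index_model(unsigned long long flags)
{
	if (flags & BTRFS_BLOCK_GROUP_RAID10)
		return 0;
	if (flags & BTRFS_BLOCK_GROUP_RAID1)
		return 1;
	if (flags & BTRFS_BLOCK_GROUP_DUP)
		return 2;
	if (flags & BTRFS_BLOCK_GROUP_RAID0)
		return 3;
	return 4;			/* plain single-copy groups */
}

int main(void)
{
	printf("raid1 -> %d, raid0 -> %d, single -> %d\n",
	       block_group_index_model(BTRFS_BLOCK_GROUP_RAID1),
	       block_group_index_model(BTRFS_BLOCK_GROUP_RAID0),
	       block_group_index_model(0));
	return 0;
}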
7450 | int btrfs_read_block_groups(struct btrfs_root *root) | 7889 | int btrfs_read_block_groups(struct btrfs_root *root) |
7451 | { | 7890 | { |
7452 | struct btrfs_path *path; | 7891 | struct btrfs_path *path; |
@@ -7468,10 +7907,8 @@ int btrfs_read_block_groups(struct btrfs_root *root) | |||
7468 | 7907 | ||
7469 | while (1) { | 7908 | while (1) { |
7470 | ret = find_first_block_group(root, path, &key); | 7909 | ret = find_first_block_group(root, path, &key); |
7471 | if (ret > 0) { | 7910 | if (ret > 0) |
7472 | ret = 0; | 7911 | break; |
7473 | goto error; | ||
7474 | } | ||
7475 | if (ret != 0) | 7912 | if (ret != 0) |
7476 | goto error; | 7913 | goto error; |
7477 | 7914 | ||
@@ -7480,7 +7917,7 @@ int btrfs_read_block_groups(struct btrfs_root *root) | |||
7480 | cache = kzalloc(sizeof(*cache), GFP_NOFS); | 7917 | cache = kzalloc(sizeof(*cache), GFP_NOFS); |
7481 | if (!cache) { | 7918 | if (!cache) { |
7482 | ret = -ENOMEM; | 7919 | ret = -ENOMEM; |
7483 | break; | 7920 | goto error; |
7484 | } | 7921 | } |
7485 | 7922 | ||
7486 | atomic_set(&cache->count, 1); | 7923 | atomic_set(&cache->count, 1); |
@@ -7537,20 +7974,36 @@ int btrfs_read_block_groups(struct btrfs_root *root) | |||
7537 | BUG_ON(ret); | 7974 | BUG_ON(ret); |
7538 | cache->space_info = space_info; | 7975 | cache->space_info = space_info; |
7539 | spin_lock(&cache->space_info->lock); | 7976 | spin_lock(&cache->space_info->lock); |
7540 | cache->space_info->bytes_super += cache->bytes_super; | 7977 | cache->space_info->bytes_readonly += cache->bytes_super; |
7541 | spin_unlock(&cache->space_info->lock); | 7978 | spin_unlock(&cache->space_info->lock); |
7542 | 7979 | ||
7543 | down_write(&space_info->groups_sem); | 7980 | __link_block_group(space_info, cache); |
7544 | list_add_tail(&cache->list, &space_info->block_groups); | ||
7545 | up_write(&space_info->groups_sem); | ||
7546 | 7981 | ||
7547 | ret = btrfs_add_block_group_cache(root->fs_info, cache); | 7982 | ret = btrfs_add_block_group_cache(root->fs_info, cache); |
7548 | BUG_ON(ret); | 7983 | BUG_ON(ret); |
7549 | 7984 | ||
7550 | set_avail_alloc_bits(root->fs_info, cache->flags); | 7985 | set_avail_alloc_bits(root->fs_info, cache->flags); |
7551 | if (btrfs_chunk_readonly(root, cache->key.objectid)) | 7986 | if (btrfs_chunk_readonly(root, cache->key.objectid)) |
7552 | set_block_group_readonly(cache); | 7987 | set_block_group_ro(cache); |
7553 | } | 7988 | } |
7989 | |||
7990 | list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) { | ||
7991 | if (!(get_alloc_profile(root, space_info->flags) & | ||
7992 | (BTRFS_BLOCK_GROUP_RAID10 | | ||
7993 | BTRFS_BLOCK_GROUP_RAID1 | | ||
7994 | BTRFS_BLOCK_GROUP_DUP))) | ||
7995 | continue; | ||
7996 | /* | ||
7997 | * avoid allocating from un-mirrored block group if there are | ||
7998 | * mirrored block groups. | ||
7999 | */ | ||
8000 | list_for_each_entry(cache, &space_info->block_groups[3], list) | ||
8001 | set_block_group_ro(cache); | ||
8002 | list_for_each_entry(cache, &space_info->block_groups[4], list) | ||
8003 | set_block_group_ro(cache); | ||
8004 | } | ||
8005 | |||
8006 | init_global_block_rsv(info); | ||
7554 | ret = 0; | 8007 | ret = 0; |
7555 | error: | 8008 | error: |
7556 | btrfs_free_path(path); | 8009 | btrfs_free_path(path); |
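The new pass at the end of btrfs_read_block_groups() implements the comment in the hunk: when a space_info's allocation profile includes a mirrored level (RAID10, RAID1 or DUP), every group on the RAID0 and single lists is forced read-only so future allocations land on redundant storage. Using the index layout assumed in the sketch above, the effect looks like this (illustrative only):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* one label per profile list, same order as the assumed indexes */
	const char *profile[] = { "raid10", "raid1", "dup", "raid0", "single" };
	bool have_mirrored = true;	/* profile & (RAID10|RAID1|DUP) != 0 */

	for (int i = 0; i < 5; i++) {
		/* lists [3] and [4] are the un-mirrored ones */
		bool ro = have_mirrored && i >= 3;
		printf("%-6s: %s\n", profile[i], ro ? "read-only" : "writable");
	}
	return 0;
}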
@@ -7611,12 +8064,10 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, | |||
7611 | BUG_ON(ret); | 8064 | BUG_ON(ret); |
7612 | 8065 | ||
7613 | spin_lock(&cache->space_info->lock); | 8066 | spin_lock(&cache->space_info->lock); |
7614 | cache->space_info->bytes_super += cache->bytes_super; | 8067 | cache->space_info->bytes_readonly += cache->bytes_super; |
7615 | spin_unlock(&cache->space_info->lock); | 8068 | spin_unlock(&cache->space_info->lock); |
7616 | 8069 | ||
7617 | down_write(&cache->space_info->groups_sem); | 8070 | __link_block_group(cache->space_info, cache); |
7618 | list_add_tail(&cache->list, &cache->space_info->block_groups); | ||
7619 | up_write(&cache->space_info->groups_sem); | ||
7620 | 8071 | ||
7621 | ret = btrfs_add_block_group_cache(root->fs_info, cache); | 8072 | ret = btrfs_add_block_group_cache(root->fs_info, cache); |
7622 | BUG_ON(ret); | 8073 | BUG_ON(ret); |