author     Josef Bacik <josef@toxicpanda.com>  2019-06-20 15:37:45 -0400
committer  David Sterba <dsterba@suse.com>     2019-09-09 08:59:04 -0400
commit     2e405ad842546a1a37aaa586d5140d071cb1f802 (patch)
tree       42299ec8e870e4953d7b5b6a498d61ed652f3d33
parent     aac0023c2106952538414254960c51dcf0dc39e9 (diff)
btrfs: migrate the block group lookup code
Move these bits first as they are the easiest to move.  Export two of
the helpers so they can be moved all at once.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
[ minor style updates ]
Signed-off-by: David Sterba <dsterba@suse.com>
-rw-r--r--  fs/btrfs/Makefile       |  2
-rw-r--r--  fs/btrfs/block-group.c  | 95
-rw-r--r--  fs/btrfs/block-group.h  |  7
-rw-r--r--  fs/btrfs/ctree.h        |  3
-rw-r--r--  fs/btrfs/extent-tree.c  | 96
5 files changed, 105 insertions(+), 98 deletions(-)
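
The exported helpers give the rest of the code an iterator over the block group
rbtree, which is how the updated callers below (btrfs_put_block_group_cache()
and btrfs_trim_fs()) use them. As a minimal sketch of that pattern, assuming
the btrfs definitions added by this patch, a hypothetical caller could look
like the following; walk_block_groups() itself is illustrative and not part of
the change:

#include "ctree.h"
#include "block-group.h"

/*
 * Illustrative only: walk every block group starting at 'start' using the
 * helpers exported by this patch. Each lookup returns a referenced group and
 * btrfs_next_block_group() drops the reference on the group it was handed,
 * so a caller that stops early must btrfs_put_block_group() the one it holds.
 */
static void walk_block_groups(struct btrfs_fs_info *fs_info, u64 start)
{
	struct btrfs_block_group_cache *cache;

	for (cache = btrfs_lookup_first_block_group(fs_info, start);
	     cache;
	     cache = btrfs_next_block_group(cache)) {
		/* inspect cache->key.objectid and cache->key.offset here */
	}
}
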
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 76a843198bcb..82200dbca5ac 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -11,7 +11,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
 	   compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
 	   reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \
 	   uuid-tree.o props.o free-space-tree.o tree-checker.o space-info.o \
-	   block-rsv.o delalloc-space.o
+	   block-rsv.o delalloc-space.o block-group.o
 
 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
 btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
new file mode 100644
index 000000000000..ebe7b1c5c1e3
--- /dev/null
+++ b/fs/btrfs/block-group.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "ctree.h"
+#include "block-group.h"
+
+/*
+ * This will return the block group at or after bytenr if contains is 0, else
+ * it will return the block group that contains the bytenr
+ */
+static struct btrfs_block_group_cache *block_group_cache_tree_search(
+		struct btrfs_fs_info *info, u64 bytenr, int contains)
+{
+	struct btrfs_block_group_cache *cache, *ret = NULL;
+	struct rb_node *n;
+	u64 end, start;
+
+	spin_lock(&info->block_group_cache_lock);
+	n = info->block_group_cache_tree.rb_node;
+
+	while (n) {
+		cache = rb_entry(n, struct btrfs_block_group_cache,
+				 cache_node);
+		end = cache->key.objectid + cache->key.offset - 1;
+		start = cache->key.objectid;
+
+		if (bytenr < start) {
+			if (!contains && (!ret || start < ret->key.objectid))
+				ret = cache;
+			n = n->rb_left;
+		} else if (bytenr > start) {
+			if (contains && bytenr <= end) {
+				ret = cache;
+				break;
+			}
+			n = n->rb_right;
+		} else {
+			ret = cache;
+			break;
+		}
+	}
+	if (ret) {
+		btrfs_get_block_group(ret);
+		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
+			info->first_logical_byte = ret->key.objectid;
+	}
+	spin_unlock(&info->block_group_cache_lock);
+
+	return ret;
+}
+
+/*
+ * Return the block group that starts at or after bytenr
+ */
+struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
+		struct btrfs_fs_info *info, u64 bytenr)
+{
+	return block_group_cache_tree_search(info, bytenr, 0);
+}
+
+/*
+ * Return the block group that contains the given bytenr
+ */
+struct btrfs_block_group_cache *btrfs_lookup_block_group(
+		struct btrfs_fs_info *info, u64 bytenr)
+{
+	return block_group_cache_tree_search(info, bytenr, 1);
+}
+
+struct btrfs_block_group_cache *btrfs_next_block_group(
+		struct btrfs_block_group_cache *cache)
+{
+	struct btrfs_fs_info *fs_info = cache->fs_info;
+	struct rb_node *node;
+
+	spin_lock(&fs_info->block_group_cache_lock);
+
+	/* If our block group was removed, we need a full search. */
+	if (RB_EMPTY_NODE(&cache->cache_node)) {
+		const u64 next_bytenr = cache->key.objectid + cache->key.offset;
+
+		spin_unlock(&fs_info->block_group_cache_lock);
+		btrfs_put_block_group(cache);
+		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr); return cache;
+	}
+	node = rb_next(&cache->cache_node);
+	btrfs_put_block_group(cache);
+	if (node) {
+		cache = rb_entry(node, struct btrfs_block_group_cache,
+				 cache_node);
+		btrfs_get_block_group(cache);
+	} else
+		cache = NULL;
+	spin_unlock(&fs_info->block_group_cache_lock);
+	return cache;
+}
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index 054745007519..87bac0d5ad69 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -151,4 +151,11 @@ static inline int btrfs_should_fragment_free_space(
 }
 #endif
 
+struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
+		struct btrfs_fs_info *info, u64 bytenr);
+struct btrfs_block_group_cache *btrfs_lookup_block_group(
+		struct btrfs_fs_info *info, u64 bytenr);
+struct btrfs_block_group_cache *btrfs_next_block_group(
+		struct btrfs_block_group_cache *cache);
+
 #endif /* BTRFS_BLOCK_GROUP_H */
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index e95fdd1d9dd2..49ac72c3d0cd 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2496,9 +2496,6 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
 int btrfs_exclude_logged_extents(struct extent_buffer *eb);
 int btrfs_cross_ref_exist(struct btrfs_root *root,
 			  u64 objectid, u64 offset, u64 bytenr);
-struct btrfs_block_group_cache *btrfs_lookup_block_group(
-						 struct btrfs_fs_info *info,
-						 u64 bytenr);
 void btrfs_get_block_group(struct btrfs_block_group_cache *cache);
 void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f28697131f22..a454945227ca 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -133,52 +133,6 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
 	return 0;
 }
 
-/*
- * This will return the block group at or after bytenr if contains is 0, else
- * it will return the block group that contains the bytenr
- */
-static struct btrfs_block_group_cache *
-block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
-			      int contains)
-{
-	struct btrfs_block_group_cache *cache, *ret = NULL;
-	struct rb_node *n;
-	u64 end, start;
-
-	spin_lock(&info->block_group_cache_lock);
-	n = info->block_group_cache_tree.rb_node;
-
-	while (n) {
-		cache = rb_entry(n, struct btrfs_block_group_cache,
-				 cache_node);
-		end = cache->key.objectid + cache->key.offset - 1;
-		start = cache->key.objectid;
-
-		if (bytenr < start) {
-			if (!contains && (!ret || start < ret->key.objectid))
-				ret = cache;
-			n = n->rb_left;
-		} else if (bytenr > start) {
-			if (contains && bytenr <= end) {
-				ret = cache;
-				break;
-			}
-			n = n->rb_right;
-		} else {
-			ret = cache;
-			break;
-		}
-	}
-	if (ret) {
-		btrfs_get_block_group(ret);
-		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
-			info->first_logical_byte = ret->key.objectid;
-	}
-	spin_unlock(&info->block_group_cache_lock);
-
-	return ret;
-}
-
 static int add_excluded_extent(struct btrfs_fs_info *fs_info,
 			       u64 start, u64 num_bytes)
 {
@@ -673,24 +627,6 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 	return ret;
 }
 
-/*
- * return the block group that starts at or after bytenr
- */
-static struct btrfs_block_group_cache *
-btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
-{
-	return block_group_cache_tree_search(info, bytenr, 0);
-}
-
-/*
- * return the block group that contains the given bytenr
- */
-struct btrfs_block_group_cache *btrfs_lookup_block_group(
-						 struct btrfs_fs_info *info,
-						 u64 bytenr)
-{
-	return block_group_cache_tree_search(info, bytenr, 1);
-}
-
 
 static u64 generic_ref_to_space_flags(struct btrfs_ref *ref)
 {
@@ -3146,34 +3082,6 @@ fail:
 
 }
 
-static struct btrfs_block_group_cache *next_block_group(
-		struct btrfs_block_group_cache *cache)
-{
-	struct btrfs_fs_info *fs_info = cache->fs_info;
-	struct rb_node *node;
-
-	spin_lock(&fs_info->block_group_cache_lock);
-
-	/* If our block group was removed, we need a full search. */
-	if (RB_EMPTY_NODE(&cache->cache_node)) {
-		const u64 next_bytenr = cache->key.objectid + cache->key.offset;
-
-		spin_unlock(&fs_info->block_group_cache_lock);
-		btrfs_put_block_group(cache);
-		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr); return cache;
-	}
-	node = rb_next(&cache->cache_node);
-	btrfs_put_block_group(cache);
-	if (node) {
-		cache = rb_entry(node, struct btrfs_block_group_cache,
-				 cache_node);
-		btrfs_get_block_group(cache);
-	} else
-		cache = NULL;
-	spin_unlock(&fs_info->block_group_cache_lock);
-	return cache;
-}
-
 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
 			    struct btrfs_trans_handle *trans,
 			    struct btrfs_path *path)
@@ -7651,7 +7559,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
 		if (block_group->iref)
 			break;
 		spin_unlock(&block_group->lock);
-		block_group = next_block_group(block_group);
+		block_group = btrfs_next_block_group(block_group);
 	}
 	if (!block_group) {
 		if (last == 0)
@@ -8872,7 +8780,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
 		return -EINVAL;
 
 	cache = btrfs_lookup_first_block_group(fs_info, range->start);
-	for (; cache; cache = next_block_group(cache)) {
+	for (; cache; cache = btrfs_next_block_group(cache)) {
 		if (cache->key.objectid >= range_end) {
 			btrfs_put_block_group(cache);
 			break;