author     Josef Bacik <jbacik@fb.com>   2014-03-19 13:35:14 -0400
committer  Chris Mason <clm@fb.com>      2014-03-21 18:28:09 -0400
commit     4485386853454f184235c8a973b29fa7fa522eb1
tree       6f0569ba8c5f10bc047f9e7326f18eae56edfe78
parent     bfa7e1f8be4bd7118e485a42cc8889530d415d05
Btrfs: take into account total references when doing backref lookup
I added an optimization for large files where we would stop searching for backrefs once we had looked at the number of references we currently had for this extent. This works great most of the time, but for snapshots that point to this extent and have changes in the original root this assumption falls on its face. So keep track of any delayed ref mods made, add in the actual ref count as reported by the extent item, and use that to limit how far down an inode we'll search for extents. Thanks,

Reported-by: Hugo Mills <hugo@carfax.org.uk>
Tested-by: Hugo Mills <hugo@carfax.org.uk>
Signed-off-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Chris Mason <clm@fb.com>
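To make the bookkeeping concrete, here is a minimal userspace sketch of the counting scheme the patch describes; it is not the kernel code, and the names compute_total_refs and delayed_mod are invented for illustration. The total starts from the reference count stored in the extent item, each pending delayed ref contributes its signed ref_mod, and the resulting total_refs bounds the backref walk instead of the per-ref count used before.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: a stand-in for a pending delayed-ref modification. */
struct delayed_mod {
	int64_t ref_mod;	/* signed: pending adds are positive, drops are negative */
};

/* Seed the total from the on-disk extent item, then fold in pending delayed-ref mods. */
static uint64_t compute_total_refs(uint64_t extent_item_refs,
				   const struct delayed_mod *mods, int nr)
{
	uint64_t total_refs = extent_item_refs;
	int i;

	for (i = 0; i < nr; i++)
		total_refs += mods[i].ref_mod;
	return total_refs;
}

int main(void)
{
	/* extent item reports 3 refs; one pending add and one pending drop */
	struct delayed_mod mods[] = { { +1 }, { -1 } };
	uint64_t total_refs = compute_total_refs(3, mods, 2);
	uint64_t count = 0;

	/* the parent walk is bounded by total_refs rather than the per-ref count */
	while (count < total_refs)
		count++;	/* ...collect one more parent reference... */

	printf("would search up to %llu references\n",
	       (unsigned long long)total_refs);
	return 0;
}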
-rw-r--r--   fs/btrfs/backref.c   29
1 file changed, 18 insertions(+), 11 deletions(-)
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 860f4f22b9b0..aad7201ad11b 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -220,7 +220,8 @@ static int __add_prelim_ref(struct list_head *head, u64 root_id,
 
 static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
 			   struct ulist *parents, struct __prelim_ref *ref,
-			   int level, u64 time_seq, const u64 *extent_item_pos)
+			   int level, u64 time_seq, const u64 *extent_item_pos,
+			   u64 total_refs)
 {
 	int ret = 0;
 	int slot;
@@ -249,7 +250,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
 	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
 		ret = btrfs_next_old_leaf(root, path, time_seq);
 
-	while (!ret && count < ref->count) {
+	while (!ret && count < total_refs) {
 		eb = path->nodes[0];
 		slot = path->slots[0];
 
@@ -306,7 +307,7 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
 				  struct btrfs_path *path, u64 time_seq,
 				  struct __prelim_ref *ref,
 				  struct ulist *parents,
-				  const u64 *extent_item_pos)
+				  const u64 *extent_item_pos, u64 total_refs)
 {
 	struct btrfs_root *root;
 	struct btrfs_key root_key;
@@ -361,7 +362,7 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
 	}
 
 	ret = add_all_parents(root, path, parents, ref, level, time_seq,
-			      extent_item_pos);
+			      extent_item_pos, total_refs);
 out:
 	path->lowest_level = 0;
 	btrfs_release_path(path);
@@ -374,7 +375,7 @@ out:
 static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
 				   struct btrfs_path *path, u64 time_seq,
 				   struct list_head *head,
-				   const u64 *extent_item_pos)
+				   const u64 *extent_item_pos, u64 total_refs)
 {
 	int err;
 	int ret = 0;
@@ -400,7 +401,8 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
 		if (ref->count == 0)
 			continue;
 		err = __resolve_indirect_ref(fs_info, path, time_seq, ref,
-					     parents, extent_item_pos);
+					     parents, extent_item_pos,
+					     total_refs);
 		/*
 		 * we can only tolerate ENOENT,otherwise,we should catch error
 		 * and return directly.
@@ -557,7 +559,7 @@ static void __merge_refs(struct list_head *head, int mode)
  * smaller or equal that seq to the list
  */
 static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
-			      struct list_head *prefs)
+			      struct list_head *prefs, u64 *total_refs)
 {
 	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
 	struct rb_node *n = &head->node.rb_node;
@@ -593,6 +595,7 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
 		default:
 			BUG_ON(1);
 		}
+		*total_refs += (node->ref_mod * sgn);
 		switch (node->type) {
 		case BTRFS_TREE_BLOCK_REF_KEY: {
 			struct btrfs_delayed_tree_ref *ref;
@@ -653,7 +656,8 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
  */
 static int __add_inline_refs(struct btrfs_fs_info *fs_info,
 			     struct btrfs_path *path, u64 bytenr,
-			     int *info_level, struct list_head *prefs)
+			     int *info_level, struct list_head *prefs,
+			     u64 *total_refs)
 {
 	int ret = 0;
 	int slot;
@@ -677,6 +681,7 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
 
 	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
 	flags = btrfs_extent_flags(leaf, ei);
+	*total_refs += btrfs_extent_refs(leaf, ei);
 	btrfs_item_key_to_cpu(leaf, &found_key, slot);
 
 	ptr = (unsigned long)(ei + 1);
@@ -859,6 +864,7 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
 	struct list_head prefs;
 	struct __prelim_ref *ref;
 	struct extent_inode_elem *eie = NULL;
+	u64 total_refs = 0;
 
 	INIT_LIST_HEAD(&prefs);
 	INIT_LIST_HEAD(&prefs_delayed);
@@ -917,7 +923,7 @@ again:
 		}
 		spin_unlock(&delayed_refs->lock);
 		ret = __add_delayed_refs(head, time_seq,
-					 &prefs_delayed);
+					 &prefs_delayed, &total_refs);
 		mutex_unlock(&head->mutex);
 		if (ret)
 			goto out;
@@ -938,7 +944,8 @@ again:
 	    (key.type == BTRFS_EXTENT_ITEM_KEY ||
 	     key.type == BTRFS_METADATA_ITEM_KEY)) {
 		ret = __add_inline_refs(fs_info, path, bytenr,
-					&info_level, &prefs);
+					&info_level, &prefs,
+					&total_refs);
 		if (ret)
 			goto out;
 		ret = __add_keyed_refs(fs_info, path, bytenr,
@@ -958,7 +965,7 @@ again:
 	__merge_refs(&prefs, 1);
 
 	ret = __resolve_indirect_refs(fs_info, path, time_seq, &prefs,
-				      extent_item_pos);
+				      extent_item_pos, total_refs);
 	if (ret)
 		goto out;
 