author		Josef Bacik <jbacik@fusionio.com>	2013-07-30 16:30:30 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-10-13 19:08:34 -0400
commit		34aa872c2cea9518bba66ab8d88bc0f90dbeb2ba (patch)
tree		167f579f81ae46d978365c93e0f61a9d35f46f88
parent		98e00cd81b76f8b50b881bf3ac532fa1ab42931c (diff)
Btrfs: change how we queue blocks for backref checking
commit b6c60c8018c4e9beb2f83fc82c09f9d033766571 upstream.
Previously we only added blocks to the list to have their backrefs checked if
the level of the block was right above the one we were searching for. This was
to make sure we don't add the entire path up to the root to the lists, so that
we process things one at a time. It assumed that if any blocks in the path to
the root were not going to be checked (shared, in other words) then they would
be at the level right above the current block or higher. That isn't quite
right, though, since we can have blocks higher up the path that are shared
because they are attached to a reloc root. Such a block would never be queued
to be checked, and later on we would hit BUG_ON(!upper->checked). So instead
keep track of whether or not we've queued a block to be checked during the
current search, and if we haven't, go ahead and queue it. This patch fixes the
panic I was seeing where we hit BUG_ON(!upper->checked). Thanks,
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--	fs/btrfs/relocation.c	14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 4febca4fc2de..b3896d5f233a 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -691,6 +691,7 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
 	int cowonly;
 	int ret;
 	int err = 0;
+	bool need_check = true;
 
 	path1 = btrfs_alloc_path();
 	path2 = btrfs_alloc_path();
@@ -914,6 +915,7 @@ again:
 			       cur->bytenr);
 
 		lower = cur;
+		need_check = true;
 		for (; level < BTRFS_MAX_LEVEL; level++) {
 			if (!path2->nodes[level]) {
 				BUG_ON(btrfs_root_bytenr(&root->root_item) !=
@@ -957,14 +959,12 @@ again:
 
 			/*
 			 * add the block to pending list if we
-			 * need check its backrefs. only block
-			 * at 'cur->level + 1' is added to the
-			 * tail of pending list. this guarantees
-			 * we check backrefs from lower level
-			 * blocks to upper level blocks.
+			 * need check its backrefs, we only do this once
+			 * while walking up a tree as we will catch
+			 * anything else later on.
 			 */
-			if (!upper->checked &&
-			    level == cur->level + 1) {
+			if (!upper->checked && need_check) {
+				need_check = false;
 				list_add_tail(&edge->list[UPPER],
 					      &list);
 			} else
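
To make the control flow of the fix concrete, here is a minimal, self-contained
C sketch of the pattern the patch introduces. Everything below (struct node,
walk_up, the pending array) is a simplified stand-in invented for illustration,
not the real btrfs types: the point is only the need_check flag, re-armed at the
start of each upward walk and cleared once the first unchecked block has been
queued, so exactly one block per walk lands on the pending list.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_LEVEL 8

struct node {
	int level;
	bool checked;		/* backrefs already verified? */
	struct node *parent;	/* next block up toward the root */
};

/* hypothetical pending list: blocks queued for backref checking */
static struct node *pending[MAX_LEVEL];
static size_t npending;

static void walk_up(struct node *cur)
{
	bool need_check = true;	/* reset for every new upward walk */

	for (struct node *upper = cur->parent; upper; upper = upper->parent) {
		/*
		 * Old behaviour queued only when
		 * upper->level == cur->level + 1, which misses shared
		 * blocks higher up the path (e.g. ones attached to a
		 * reloc root). New behaviour: queue the first unchecked
		 * block we meet, wherever it sits in the path.
		 */
		if (!upper->checked && need_check) {
			need_check = false;
			if (npending < MAX_LEVEL)
				pending[npending++] = upper;
		}
	}
}

int main(void)
{
	/* tiny path: leaf -> mid (checked) -> top (unchecked, shared) */
	struct node top  = { .level = 2, .checked = false, .parent = NULL };
	struct node mid  = { .level = 1, .checked = true,  .parent = &top };
	struct node leaf = { .level = 0, .checked = false, .parent = &mid };

	walk_up(&leaf);
	printf("queued %zu block(s); first at level %d\n",
	       npending, npending ? pending[0]->level : -1);
	return 0;
}

In this example the old level-based test would have queued nothing: mid sits at
cur->level + 1 but is already checked, and top fails the level test despite
being unchecked. That is exactly the situation that later tripped
BUG_ON(!upper->checked); with the need_check flag, top is queued on the first
walk that reaches it.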