aboutsummaryrefslogtreecommitdiffstats
path: root/fs/btrfs/extent_io.c
diff options
context:
space:
mode:
authorFilipe Manana <fdmanana@suse.com>2014-10-13 07:28:39 -0400
committerChris Mason <clm@fb.com>2014-11-20 20:14:29 -0500
commitc8fd3de79f44f5d41bc3a801214faf667b95df9d (patch)
tree493fbd0a05415f8aa2416564b19cea6c3544564a /fs/btrfs/extent_io.c
parente38e2ed701ff5f3d889c8dda5fe863e165e60d61 (diff)
Btrfs: avoid returning -ENOMEM in convert_extent_bit() too early
We try to allocate an extent state before acquiring the tree's spinlock just in case we end up needing to split an existing extent state into two. If that allocation failed, we would return -ENOMEM. However, our only caller (transaction/log commit code) passes in an extent state that was cached from a call to find_first_extent_bit() and that has a very high chance to match exactly the input range (always true for a transaction commit and very often, but not always, true for a log commit) - in this case we end up not needing at all that initial extent state used for an eventual split. Therefore just don't return -ENOMEM if we can't allocate the temporary extent state, since we might not need it at all, and if we end up needing one, we'll do it later anyway. Signed-off-by: Filipe Manana <fdmanana@suse.com> Signed-off-by: Chris Mason <clm@fb.com>
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--	fs/btrfs/extent_io.c	11
1 file changed, 10 insertions, 1 deletion
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 0d931b143c00..654ed3de0054 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1066,13 +1066,21 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1066 int err = 0; 1066 int err = 0;
1067 u64 last_start; 1067 u64 last_start;
1068 u64 last_end; 1068 u64 last_end;
1069 bool first_iteration = true;
1069 1070
1070 btrfs_debug_check_extent_io_range(tree, start, end); 1071 btrfs_debug_check_extent_io_range(tree, start, end);
1071 1072
1072again: 1073again:
1073 if (!prealloc && (mask & __GFP_WAIT)) { 1074 if (!prealloc && (mask & __GFP_WAIT)) {
1075 /*
1076 * Best effort, don't worry if extent state allocation fails
1077 * here for the first iteration. We might have a cached state
1078 * that matches exactly the target range, in which case no
1079 * extent state allocations are needed. We'll only know this
1080 * after locking the tree.
1081 */
1074 prealloc = alloc_extent_state(mask); 1082 prealloc = alloc_extent_state(mask);
1075 if (!prealloc) 1083 if (!prealloc && !first_iteration)
1076 return -ENOMEM; 1084 return -ENOMEM;
1077 } 1085 }
1078 1086
@@ -1242,6 +1250,7 @@ search_again:
1242 spin_unlock(&tree->lock); 1250 spin_unlock(&tree->lock);
1243 if (mask & __GFP_WAIT) 1251 if (mask & __GFP_WAIT)
1244 cond_resched(); 1252 cond_resched();
1253 first_iteration = false;
1245 goto again; 1254 goto again;
1246} 1255}
1247 1256