author	Josef Bacik <jbacik@fusionio.com>	2012-09-27 17:07:30 -0400
committer	Chris Mason <chris.mason@fusionio.com>	2012-10-09 09:15:41 -0400
commit	e6138876ad8327250d77291b3262fee356267211 (patch)
tree	ffc3fe0a05e0fd7e55b92e3ef8bad42d3c73d68c /fs/btrfs/extent-tree.c
parent	ce1953325662fa597197ce728e4195582fc21c8d (diff)
Btrfs: cache extent state when writing out dirty metadata pages
Every time we write out dirty pages we search for an offset in the tree, convert the bits in the state, and then when we wait we search for the offset again and clear the bits. So for every dirty range in the io tree we are doing 4 rb searches, which is suboptimal. With this patch we are only doing 2 searches for every cycle (modulo weird things happening). Thanks,

Signed-off-by: Josef Bacik <jbacik@fusionio.com>
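For reference, a minimal sketch of the caller-side pattern this interface enables. It is illustrative only and not code from this patch: the helper function, the tree argument, the loop body, and the reference drop at the end are assumptions; only the six-argument find_first_extent_bit() call mirrors the new interface shown in the hunks below.

/*
 * Illustrative sketch (not part of this patch): thread one cached
 * extent_state through a scan so the follow-up work on each range found
 * can skip its own rb-tree search.  The helper name, the "tree" argument
 * and the loop body are assumed; only the find_first_extent_bit() call
 * reflects the interface this commit introduces.
 */
static void scan_dirty_ranges(struct extent_io_tree *tree)
{
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	/* find_first_extent_bit() returns 0 when a matching range exists */
	while (!find_first_extent_bit(tree, start, &start, &end,
				      EXTENT_DIRTY, &cached_state)) {
		/*
		 * cached_state now refers to the extent_state covering
		 * [start, end]; converting or clearing bits on that range
		 * can reuse it instead of searching from the tree root.
		 */

		/* ... convert/clear bits on [start, end] here ... */

		/* assumed: drop the reference the cache holds for us */
		free_extent_state(cached_state);
		cached_state = NULL;
		start = end + 1;
	}
}

The two hunks in this file simply pass NULL for the new argument, i.e. these call sites opt out of the caching.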
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--	fs/btrfs/extent-tree.c	5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 3270b1087850..ca4aad96f814 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -312,7 +312,8 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
 	while (start < end) {
 		ret = find_first_extent_bit(info->pinned_extents, start,
 					    &extent_start, &extent_end,
-					    EXTENT_DIRTY | EXTENT_UPTODATE);
+					    EXTENT_DIRTY | EXTENT_UPTODATE,
+					    NULL);
 		if (ret)
 			break;
 
@@ -5045,7 +5046,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
 
 	while (1) {
 		ret = find_first_extent_bit(unpin, 0, &start, &end,
-					    EXTENT_DIRTY);
+					    EXTENT_DIRTY, NULL);
 		if (ret)
 			break;
 