aboutsummaryrefslogtreecommitdiffstats
path: root/fs/btrfs/disk-io.c
diff options
context:
space:
mode:
authorJosef Bacik <jbacik@fusionio.com>2012-09-27 17:07:30 -0400
committerChris Mason <chris.mason@fusionio.com>2012-10-09 09:15:41 -0400
commite6138876ad8327250d77291b3262fee356267211 (patch)
treeffc3fe0a05e0fd7e55b92e3ef8bad42d3c73d68c /fs/btrfs/disk-io.c
parentce1953325662fa597197ce728e4195582fc21c8d (diff)
Btrfs: cache extent state when writing out dirty metadata pages
Every time we write out dirty pages we search for an offset in the tree, convert the bits in the state, and then when we wait we search for the offset again and clear the bits. So for every dirty range in the io tree we are doing 4 rb searches, which is suboptimal. With this patch we are only doing 2 searches for every cycle (modulo weird things happening). Thanks, Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Diffstat (limited to 'fs/btrfs/disk-io.c')
-rw-r--r--fs/btrfs/disk-io.c4
1 file changed, 2 insertions, 2 deletions
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index aa02eab8c40b..c69995556f61 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3572,7 +3572,7 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
3572 3572
3573 while (1) { 3573 while (1) {
3574 ret = find_first_extent_bit(dirty_pages, start, &start, &end, 3574 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
3575 mark); 3575 mark, NULL);
3576 if (ret) 3576 if (ret)
3577 break; 3577 break;
3578 3578
@@ -3627,7 +3627,7 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
3627again: 3627again:
3628 while (1) { 3628 while (1) {
3629 ret = find_first_extent_bit(unpin, 0, &start, &end, 3629 ret = find_first_extent_bit(unpin, 0, &start, &end,
3630 EXTENT_DIRTY); 3630 EXTENT_DIRTY, NULL);
3631 if (ret) 3631 if (ret)
3632 break; 3632 break;
3633 3633