aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChris Mason <chris.mason@oracle.com>2009-08-05 12:57:59 -0400
committerChris Mason <chris.mason@oracle.com>2009-09-11 13:31:03 -0400
commit40431d6c1288793a682fc6f5e5b5c9d5cac34608 (patch)
treea840fb38459476ff0aecda8369f965c344a25562
parent9042846bc7ae69cc3288d85af6bad16208d93a95 (diff)
Btrfs: optimize set extent bit
The Btrfs set_extent_bit call currently searches the rbtree every time it needs to find more extent_state objects to fill the requested operation. This adds a simple test with rb_next to see if the next object in the tree was adjacent to the one we just found. If so, we skip the search and just use the next object.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
-rw-r--r--fs/btrfs/extent_io.c15
-rw-r--r--fs/btrfs/file.c2
2 files changed, 13 insertions, 4 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 68260180f58..7e5c5a0749e 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -694,8 +694,8 @@ again:
694 BUG_ON(err == -EEXIST); 694 BUG_ON(err == -EEXIST);
695 goto out; 695 goto out;
696 } 696 }
697
698 state = rb_entry(node, struct extent_state, rb_node); 697 state = rb_entry(node, struct extent_state, rb_node);
698hit_next:
699 last_start = state->start; 699 last_start = state->start;
700 last_end = state->end; 700 last_end = state->end;
701 701
@@ -706,6 +706,7 @@ again:
706 * Just lock what we found and keep going 706 * Just lock what we found and keep going
707 */ 707 */
708 if (state->start == start && state->end <= end) { 708 if (state->start == start && state->end <= end) {
709 struct rb_node *next_node;
709 set = state->state & bits; 710 set = state->state & bits;
710 if (set && exclusive) { 711 if (set && exclusive) {
711 *failed_start = state->start; 712 *failed_start = state->start;
@@ -716,7 +717,17 @@ again:
716 merge_state(tree, state); 717 merge_state(tree, state);
717 if (last_end == (u64)-1) 718 if (last_end == (u64)-1)
718 goto out; 719 goto out;
720
719 start = last_end + 1; 721 start = last_end + 1;
722 if (start < end && prealloc && !need_resched()) {
723 next_node = rb_next(node);
724 if (next_node) {
725 state = rb_entry(next_node, struct extent_state,
726 rb_node);
727 if (state->start == start)
728 goto hit_next;
729 }
730 }
720 goto search_again; 731 goto search_again;
721 } 732 }
722 733
@@ -852,7 +863,7 @@ int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
852 gfp_t mask) 863 gfp_t mask)
853{ 864{
854 return set_extent_bit(tree, start, end, 865 return set_extent_bit(tree, start, end,
855 EXTENT_DELALLOC | EXTENT_DIRTY, 866 EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
856 0, NULL, mask); 867 0, NULL, mask);
857} 868}
858 869
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 7c3cd248d8d..a760d97279a 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -136,8 +136,6 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
136 btrfs_set_trans_block_group(trans, inode); 136 btrfs_set_trans_block_group(trans, inode);
137 hint_byte = 0; 137 hint_byte = 0;
138 138
139 set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);
140
141 /* check for reserved extents on each page, we don't want 139 /* check for reserved extents on each page, we don't want
142 * to reset the delalloc bit on things that already have 140 * to reset the delalloc bit on things that already have
143 * extents reserved. 141 * extents reserved.