author      Chris Mason <chris.mason@oracle.com>      2009-09-02 15:22:30 -0400
committer   Chris Mason <chris.mason@oracle.com>      2009-09-11 13:31:07 -0400
commit      9655d2982b53fdb38a9e0f2f11315b99b92d66e2 (patch)
tree        e1271f2f2a3c2c356e0692b36a2d4742b5d651d8 /fs/btrfs/ordered-data.c
parent      d5550c6315fe0647b7ac21a6a736bf4a42620eac (diff)
Btrfs: use a cached state for extent state operations during delalloc
This changes the btrfs code that finds delalloc ranges in the extent state
tree so that it uses the new state caching code from set/test bit. It reduces
one of the biggest causes of rbtree searches in the writeback path.
test_range_bit is also modified to take the cached state as a starting
point while searching.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
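To make the optimization concrete, here is a minimal stand-alone sketch of the idea, not btrfs code: the names extent_state, extent_io_tree, tree_search and test_range_bit are borrowed for illustration, and a sorted array stands in for the kernel's rbtree. It shows the point the commit message describes: when the caller already holds a state that covers the start of the range, test_range_bit can start there instead of searching the tree from the root.

/*
 * Illustration only -- a range-bit test that can start from a
 * caller-supplied cached state instead of always searching the tree.
 */
#include <stdio.h>

struct extent_state {
	unsigned long long start;
	unsigned long long end;		/* inclusive, as in btrfs */
	int bits;
};

/* toy "tree": states sorted by start, non-overlapping */
struct extent_io_tree {
	struct extent_state *states;
	int nr;
};

/* full search: first state whose range ends at or after 'offset' */
static struct extent_state *tree_search(struct extent_io_tree *tree,
					unsigned long long offset)
{
	for (int i = 0; i < tree->nr; i++)
		if (tree->states[i].end >= offset)
			return &tree->states[i];
	return NULL;
}

/*
 * Return 1 if every byte in [start, end] lies in states carrying 'bits'.
 * If 'cached' covers 'start', begin there and skip the full search --
 * that is the shortcut this commit adds to the kernel helper.
 */
static int test_range_bit(struct extent_io_tree *tree,
			  unsigned long long start, unsigned long long end,
			  int bits, struct extent_state *cached)
{
	struct extent_state *state;

	if (cached && cached->start <= start && start <= cached->end)
		state = cached;			/* cache hit: no search */
	else
		state = tree_search(tree, start);

	while (state && start <= end) {
		if (state->start > start)
			return 0;		/* gap before next state */
		if ((state->bits & bits) != bits)
			return 0;		/* range present, bit missing */
		start = state->end + 1;
		int idx = (int)(state - tree->states);
		state = (idx + 1 < tree->nr) ? &tree->states[idx + 1] : NULL;
	}
	return start > end;
}

int main(void)
{
	struct extent_state states[] = {
		{ 0, 4095, 0x1 }, { 4096, 8191, 0x1 },
	};
	struct extent_io_tree tree = { states, 2 };

	/* first query walks the tree; second reuses a cached state */
	printf("%d\n", test_range_bit(&tree, 0, 8191, 0x1, NULL));
	printf("%d\n", test_range_bit(&tree, 0, 4095, 0x1, &states[0]));
	return 0;
}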
Diffstat (limited to 'fs/btrfs/ordered-data.c')
-rw-r--r--   fs/btrfs/ordered-data.c   8
1 file changed, 4 insertions, 4 deletions
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index d6f0806c682f..7f751e462f0b 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -262,7 +262,7 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
 
 	ret = test_range_bit(io_tree, entry->file_offset,
 			     entry->file_offset + entry->len - 1,
-			     EXTENT_ORDERED, 0);
+			     EXTENT_ORDERED, 0, NULL);
 	if (ret == 0)
 		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
 out:
@@ -522,7 +522,7 @@ again:
 		end--;
 	}
 	if (test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
-			   EXTENT_ORDERED | EXTENT_DELALLOC, 0)) {
+			   EXTENT_ORDERED | EXTENT_DELALLOC, 0, NULL)) {
 		schedule_timeout(1);
 		goto again;
 	}
@@ -613,7 +613,7 @@ int btrfs_ordered_update_i_size(struct inode *inode,
 	 */
 	if (test_range_bit(io_tree, disk_i_size,
 			   ordered->file_offset + ordered->len - 1,
-			   EXTENT_DELALLOC, 0)) {
+			   EXTENT_DELALLOC, 0, NULL)) {
 		goto out;
 	}
 	/*
@@ -664,7 +664,7 @@ int btrfs_ordered_update_i_size(struct inode *inode,
 	 */
 	if (i_size_test > entry_end(ordered) &&
 	    !test_range_bit(io_tree, entry_end(ordered), i_size_test - 1,
-			    EXTENT_DELALLOC, 0)) {
+			    EXTENT_DELALLOC, 0, NULL)) {
 		new_i_size = min_t(u64, i_size_test, i_size_read(inode));
 	}
 	BTRFS_I(inode)->disk_i_size = new_i_size;
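Note that all four converted call sites in ordered-data.c pass NULL for the new cached_state argument: these paths have no cached extent state on hand, so they still fall back to the full tree search and behave exactly as before. The win comes from callers that repeatedly query the same range and can hand a previously found state back in. In terms of the toy sketch above (still illustrative, not kernel code), such a caller looks roughly like:

	/* hypothetical caller: look the state up once, reuse it later */
	struct extent_state *cached = tree_search(&tree, 0);
	/* later checks on the same range start from 'cached', no full search */
	int ok = test_range_bit(&tree, 0, 8191, 0x1, cached);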