author		Josef Bacik <josef@redhat.com>	2012-04-23 14:41:09 -0400
committer	Josef Bacik <josef@redhat.com>	2012-05-30 10:23:28 -0400
commit		551ebb2d34304ee2abfe6b00d39ec65d5e4e8266 (patch)
tree		19e9e5717e2dc031ba9b87bc314552dda886fce4 /fs/btrfs/ordered-data.c
parent		d7dbe9e7f64e72ec6548658857c5d92327a73633 (diff)
Btrfs: remove useless waiting and extra filemap work
In btrfs_wait_ordered_range we have been calling filemap_fdatawrite() twice,
because compression does strange things, and then waiting. Then we look up
ordered extents and if we find any we will always call schedule_timeout(1)
once and then loop back around and do it all again. We will even check to see
if there are delalloc pages on this range and loop again. So this patch gets
rid of the multiple fdatawrite() calls and just does
filemap_write_and_wait(). In the case of compression we will still find the
ordered extents and start those individually if we need to, so that is ok,
but in the normal buffered case we avoid all this weird overhead.
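
For reference, the write-out portion of btrfs_wait_ordered_range() before and
after this change looks like the following, condensed from the diff below (the
surrounding function is unchanged):

	/* before: two write-out passes plus a separate wait; the second
	 * filemap_fdatawrite_range() was there because compressed writeback
	 * could return from writepage without setting page writeback */
	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
	filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	/* after: one combined write-and-wait call; ordered extents created by
	 * compression are still found and started individually further down */
	filemap_write_and_wait_range(inode->i_mapping, start, orig_end);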
Then in the case of the schedule_timeout(1), we don't need it. All callers
either 1) don't care, they just want to make sure what they just wrote makes
it to disk, or 2) are doing the lock() -> lookup ordered -> unlock -> flush
thing (sketched after the sign-off below), in which case they will lock and
check for ordered extents _anyway_, so get back to them as quickly as
possible. The delalloc check is simply not needed; it only catches the case
where we write to the file again after doing the filemap_write_and_wait(),
and if the caller truly cares about that it will take care of everything
itself. Thanks,
Signed-off-by: Josef Bacik <josef@redhat.com>
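
The lock -> lookup ordered -> unlock -> flush pattern mentioned above looks
roughly like this in a typical caller. This is only an illustrative sketch
using the common btrfs ordered-extent helpers (lock_extent(),
btrfs_lookup_ordered_extent(), btrfs_start_ordered_extent()); it is not code
from this patch, and the exact helpers and arguments vary per call site:

	/* schematic only: lock the range, then check for ordered extents */
	lock_extent(&BTRFS_I(inode)->io_tree, start, end);
	ordered = btrfs_lookup_ordered_extent(inode, start);
	if (ordered) {
		/* drop the lock, flush/wait for the ordered extent we raced
		 * with, then loop back and re-check under the lock */
		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}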
Diffstat (limited to 'fs/btrfs/ordered-data.c')
-rw-r--r--	fs/btrfs/ordered-data.c	17
1 file changed, 2 insertions(+), 15 deletions(-)
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index bbf6d0d9aebe..9807750c6255 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -621,19 +621,11 @@ void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
 		if (orig_end > INT_LIMIT(loff_t))
 			orig_end = INT_LIMIT(loff_t);
 	}
-again:
+
 	/* start IO across the range first to instantiate any delalloc
 	 * extents
 	 */
-	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
-
-	/* The compression code will leave pages locked but return from
-	 * writepage without setting the page writeback.  Starting again
-	 * with WB_SYNC_ALL will end up waiting for the IO to actually start.
-	 */
-	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
-
-	filemap_fdatawait_range(inode->i_mapping, start, orig_end);
+	filemap_write_and_wait_range(inode->i_mapping, start, orig_end);
 
 	end = orig_end;
 	found = 0;
@@ -657,11 +649,6 @@ again:
 			break;
 		end--;
 	}
-	if (found || test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
-			   EXTENT_DELALLOC, 0, NULL)) {
-		schedule_timeout(1);
-		goto again;
-	}
 }
 
 	/*