author     Chris Mason <chris.mason@oracle.com>    2010-02-03 13:18:45 -0500
committer  Chris Mason <chris.mason@oracle.com>    2010-03-15 11:00:12 -0400
commit     4125bf761cd0786e1163e024c7c809ce2cc625bc (patch)
tree       3e1b73b0db29994e8fb56c727c38c1167922f189
parent     0be2e98173f8badd5ccc7c2e994891746ba1caf4 (diff)
Btrfs: finish read pages in the order they are submitted
The endio is done in reverse order of the bio vectors. That means that for a sequential read, the page submitted first finishes last within a bio. Since we checksum every page (which makes its cache lines hot), this introduces delay for the pages submitted at the beginning, and a chance of squeezing out cache that will be used soon. I don't observe an obvious performance difference with the patch below in my simple test, but it seems more natural to finish reads in the order they are submitted.

Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
-rw-r--r--   fs/btrfs/extent_io.c   7
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 7073cbb1b2d4..355a973719a0 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1750,7 +1750,8 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
 static void end_bio_extent_readpage(struct bio *bio, int err)
 {
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
+	struct bio_vec *bvec = bio->bi_io_vec;
 	struct extent_io_tree *tree;
 	u64 start;
 	u64 end;
@@ -1773,7 +1774,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 		else
 			whole_page = 0;
 
-		if (--bvec >= bio->bi_io_vec)
+		if (++bvec <= bvec_end)
 			prefetchw(&bvec->bv_page->flags);
 
 		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
@@ -1818,7 +1819,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 			}
 			check_page_locked(tree, page);
 		}
-	} while (bvec >= bio->bi_io_vec);
+	} while (bvec <= bvec_end);
 
 	bio_put(bio);
 }
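
For illustration only, a minimal standalone sketch of the iteration change this patch makes. This is not kernel code: the struct and field names below merely mimic bio_vec and page, and printf stands in for the per-page completion work. It shows the new pattern of starting at the first bio_vec and advancing with ++bvec up to bvec_end, instead of starting at the last entry and walking backwards with --bvec, so pages finish in the order they were submitted.

/*
 * Standalone sketch (not kernel code) of the forward walk over the
 * bio_vec array introduced by this patch.
 */
#include <stdio.h>

struct fake_page { int index; };
struct fake_bio_vec { struct fake_page *bv_page; };

static void complete_in_submission_order(struct fake_bio_vec *io_vec, int vcnt)
{
	struct fake_bio_vec *bvec_end = io_vec + vcnt - 1;
	struct fake_bio_vec *bvec = io_vec;

	/* Old pattern: bvec = io_vec + vcnt - 1; ... } while (bvec >= io_vec); */
	do {
		struct fake_page *page = bvec->bv_page;

		/* touch the next entry early, standing in for prefetchw() */
		if (++bvec <= bvec_end)
			(void)bvec->bv_page;

		printf("finishing page %d\n", page->index);
	} while (bvec <= bvec_end);
}

int main(void)
{
	struct fake_page pages[3] = { {0}, {1}, {2} };
	struct fake_bio_vec vec[3] = { {&pages[0]}, {&pages[1]}, {&pages[2]} };

	complete_in_submission_order(vec, 3); /* prints pages 0, 1, 2 */
	return 0;
}

Built as a plain userspace program this completes the pages in index order 0, 1, 2; with the old reverse walk the same loop body would complete them 2, 1, 0, which is what the commit message describes as less natural for a sequential read.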