about summary refs log tree commit diff stats
path: root/fs/buffer.c
diff options
context:
space:
mode:
authorJan Kara <jack@suse.cz>2017-09-06 19:21:24 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2017-09-06 20:27:26 -0400
commitc10f778ddfc161f5c58a8d6de4ad92235ea2eeba (patch)
tree703158abaa22046480a5abc7591a67ec7c18f8ed /fs/buffer.c
parentb947cee4b96306037e166ff1ea5156c0ecdd7d91 (diff)
fs: fix performance regression in clean_bdev_aliases()
Commit e64855c6cfaa ("fs: Add helper to clean bdev aliases under a bh and use it") added a wrapper for clean_bdev_aliases() that invalidates bdev aliases underlying a single buffer head. However this has caused a performance regression for bonnie++ benchmark on ext4 filesystem when delayed allocation is turned off (ext3 mode) - average of 3 runs:

Hmean SeqOut Char  164787.55 (  0.00%)  107189.06 (-34.95%)
Hmean SeqOut Block 219883.89 (  0.00%)  168870.32 (-23.20%)

The reason for this regression is that clean_bdev_aliases() is slower when called for a single block because pagevec_lookup() it uses will end up iterating through the radix tree until it finds a page (which may take a while) but we are only interested whether there's a page at a particular index. Fix the problem by using pagevec_lookup_range() instead which avoids the needless iteration.

Fixes: e64855c6cfaa ("fs: Add helper to clean bdev aliases under a bh and use it")
Link: http://lkml.kernel.org/r/20170726114704.7626-5-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Cc: Jens Axboe <axboe@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/buffer.c')
-rw-r--r--fs/buffer.c14
1 files changed, 8 insertions, 6 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 5b20893708e2..7e531bb356bd 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1627,19 +1627,18 @@ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
1627 struct pagevec pvec; 1627 struct pagevec pvec;
1628 pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits); 1628 pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
1629 pgoff_t end; 1629 pgoff_t end;
1630 int i; 1630 int i, count;
1631 struct buffer_head *bh; 1631 struct buffer_head *bh;
1632 struct buffer_head *head; 1632 struct buffer_head *head;
1633 1633
1634 end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits); 1634 end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
1635 pagevec_init(&pvec, 0); 1635 pagevec_init(&pvec, 0);
1636 while (index <= end && pagevec_lookup(&pvec, bd_mapping, &index, 1636 while (pagevec_lookup_range(&pvec, bd_mapping, &index, end,
1637 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) { 1637 PAGEVEC_SIZE)) {
1638 for (i = 0; i < pagevec_count(&pvec); i++) { 1638 count = pagevec_count(&pvec);
1639 for (i = 0; i < count; i++) {
1639 struct page *page = pvec.pages[i]; 1640 struct page *page = pvec.pages[i];
1640 1641
1641 if (page->index > end)
1642 break;
1643 if (!page_has_buffers(page)) 1642 if (!page_has_buffers(page))
1644 continue; 1643 continue;
1645 /* 1644 /*
@@ -1669,6 +1668,9 @@ unlock_page:
1669 } 1668 }
1670 pagevec_release(&pvec); 1669 pagevec_release(&pvec);
1671 cond_resched(); 1670 cond_resched();
1671 /* End of range already reached? */
1672 if (index > end || !index)
1673 break;
1672 } 1674 }
1673} 1675}
1674EXPORT_SYMBOL(clean_bdev_aliases); 1676EXPORT_SYMBOL(clean_bdev_aliases);