author	Dave Chinner <david@fromorbit.com>	2012-04-23 01:58:43 -0400
committer	Ben Myers <bpm@sgi.com>	2012-05-14 17:20:35 -0400
commit	6ffc4db5de61d36e969a26bc94509c59246c81f8 (patch)
tree	44f5850d4892f017d15b3ac0a0c9bd1f35cacc8d /fs/xfs
parent	4c2d542f2e786537db33b613d5199dc6d69a96da (diff)
xfs: page type check in writeback only checks last buffer
xfs_is_delayed_page() checks whether a page has buffers matching the IO type passed in. It does so by walking the buffer heads on the page and checking if the state flags match the IO type. However, the "acceptable" variable that is calculated is overwritten every time a new buffer is checked. Hence if the first buffer on the page is of the right type, this state is lost if the second buffer is not of the correct type. This means that xfs_aops_discard_page() may not discard delalloc regions when it is supposed to, and xfs_convert_page() may not cluster IO as efficiently as possible.

This problem only occurs on filesystems with a block size smaller than the page size.

Also, rename xfs_is_delayed_page() to xfs_check_page_type() to better describe what it is doing - it is no longer delalloc specific.

The problem was first noticed by Peter Watkins.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
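The following is a minimal standalone sketch of the bug described above - not the kernel code. The io_type enum values and the two helper functions are invented stand-ins for the real buffer_head state flags and IO_* constants; they only show why overwriting the "acceptable" flag on each buffer loses earlier matches, while accumulating it with += does not.

/*
 * Simplified illustration of the writeback page type check.
 * Each element of bufs[] stands for the state of one buffer head
 * on the page.
 */
#include <stdio.h>

enum io_type { IO_DELALLOC, IO_UNWRITTEN, IO_OVERWRITE };

/* Buggy form: '=' keeps only the verdict for the last buffer walked. */
static int page_matches_buggy(const enum io_type *bufs, int nbufs,
			      enum io_type type)
{
	int acceptable = 0;
	int i;

	for (i = 0; i < nbufs; i++)
		acceptable = (bufs[i] == type);	/* overwrites earlier matches */
	return acceptable;
}

/* Fixed form: '+=' counts every buffer of the requested type. */
static int page_matches_fixed(const enum io_type *bufs, int nbufs,
			      enum io_type type)
{
	int acceptable = 0;
	int i;

	for (i = 0; i < nbufs; i++)
		acceptable += (bufs[i] == type);
	return acceptable != 0;
}

int main(void)
{
	/* e.g. a 4k page with 1k blocks: first buffer delalloc, rest overwrite */
	enum io_type bufs[] = { IO_DELALLOC, IO_OVERWRITE,
				IO_OVERWRITE, IO_OVERWRITE };

	printf("buggy: %d\n", page_matches_buggy(bufs, 4, IO_DELALLOC)); /* 0 - delalloc missed */
	printf("fixed: %d\n", page_matches_fixed(bufs, 4, IO_DELALLOC)); /* 1 - delalloc detected */
	return 0;
}

On a filesystem with blocks smaller than the page size a single page carries several buffer heads, so only the accumulating form reliably reports a page whose first buffer is delalloc but whose later buffers are not.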
Diffstat (limited to 'fs/xfs')
-rw-r--r--	fs/xfs/xfs_aops.c	12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 79a01395c4c..139e495b0cd 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -623,7 +623,7 @@ xfs_map_at_offset(
  * or delayed allocate extent.
  */
 STATIC int
-xfs_is_delayed_page(
+xfs_check_page_type(
 	struct page		*page,
 	unsigned int		type)
 {
@@ -637,11 +637,11 @@ xfs_is_delayed_page(
 	bh = head = page_buffers(page);
 	do {
 		if (buffer_unwritten(bh))
-			acceptable = (type == IO_UNWRITTEN);
+			acceptable += (type == IO_UNWRITTEN);
 		else if (buffer_delay(bh))
-			acceptable = (type == IO_DELALLOC);
+			acceptable += (type == IO_DELALLOC);
 		else if (buffer_dirty(bh) && buffer_mapped(bh))
-			acceptable = (type == IO_OVERWRITE);
+			acceptable += (type == IO_OVERWRITE);
 		else
 			break;
 	} while ((bh = bh->b_this_page) != head);
@@ -684,7 +684,7 @@ xfs_convert_page(
 		goto fail_unlock_page;
 	if (page->mapping != inode->i_mapping)
 		goto fail_unlock_page;
-	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
+	if (!xfs_check_page_type(page, (*ioendp)->io_type))
 		goto fail_unlock_page;
 
 	/*
@@ -834,7 +834,7 @@ xfs_aops_discard_page(
 	struct buffer_head	*bh, *head;
 	loff_t			offset = page_offset(page);
 
-	if (!xfs_is_delayed_page(page, IO_DELALLOC))
+	if (!xfs_check_page_type(page, IO_DELALLOC))
 		goto out_invalidate;
 
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))