author    Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>  2009-01-05 21:50:43 -0500
committer Theodore Ts'o <tytso@mit.edu>                       2009-01-05 21:50:43 -0500
commit    791b7f08954869d7b8ff438f3dac3cfb39778297
tree      9e5e9414e6b102940dea0f6c3103fee7fcc0df1d /fs/ext4/inode.c
parent    2a21e37e48b94388f2cc8c0392f104f5443d4bb8
ext4: Fix the delalloc writepages to allocate blocks at the right offset.
When iterating through the pages which have mapped buffer_heads, we
failed to update the b_state value. This results in allocating blocks
at logical offset 0.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Cc: stable@kernel.org
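The b_state in question lives in mpd->lbh, the buffer_head that the delalloc writepages path uses to accumulate one contiguous extent of dirty blocks before allocating. A minimal sketch of the accumulator's seeding logic, paraphrased from the ext4 code of this era rather than quoted verbatim (merge and flush handling elided):

	/*
	 * Sketch: mpd->lbh.b_size == 0 means "no extent started yet".
	 * The first buffer_head added seeds the logical start and the
	 * b_state flags that mpage_da_map_blocks() inspects later.
	 */
	static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
					   sector_t logical, struct buffer_head *bh)
	{
		struct buffer_head *lbh = &mpd->lbh;

		if (lbh->b_size == 0) {
			/* first block in the extent */
			lbh->b_blocknr = logical;
			lbh->b_size = bh->b_size;
			lbh->b_state = bh->b_state & BH_FLAGS;
			return;
		}
		/* ... otherwise merge bh into the extent, or flush it ... */
	}

If pages holding only mapped dirty buffer_heads pass through without touching this state, the extent's logical start and flags never reflect them, which is how blocks ended up allocated at logical offset 0.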
Diffstat (limited to 'fs/ext4/inode.c')
-rw-r--r--  fs/ext4/inode.c  56
1 file changed, 39 insertions(+), 17 deletions(-)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index ac97348f85b5..c77a7ac753f3 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1653,35 +1653,39 @@ struct mpage_da_data {
  */
 static int mpage_da_submit_io(struct mpage_da_data *mpd)
 {
-	struct address_space *mapping = mpd->inode->i_mapping;
-	int ret = 0, err, nr_pages, i;
-	unsigned long index, end;
-	struct pagevec pvec;
 	long pages_skipped;
+	struct pagevec pvec;
+	unsigned long index, end;
+	int ret = 0, err, nr_pages, i;
+	struct inode *inode = mpd->inode;
+	struct address_space *mapping = inode->i_mapping;
 
 	BUG_ON(mpd->next_page <= mpd->first_page);
-	pagevec_init(&pvec, 0);
+	/*
+	 * We need to start from the first_page to the next_page - 1
+	 * to make sure we also write the mapped dirty buffer_heads.
+	 * If we look at mpd->lbh.b_blocknr we would only be looking
+	 * at the currently mapped buffer_heads.
+	 */
 	index = mpd->first_page;
 	end = mpd->next_page - 1;
 
+	pagevec_init(&pvec, 0);
 	while (index <= end) {
-		/*
-		 * We can use PAGECACHE_TAG_DIRTY lookup here because
-		 * even though we have cleared the dirty flag on the page
-		 * We still keep the page in the radix tree with tag
-		 * PAGECACHE_TAG_DIRTY. See clear_page_dirty_for_io.
-		 * The PAGECACHE_TAG_DIRTY is cleared in set_page_writeback
-		 * which is called via the below writepage callback.
-		 */
-		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-					PAGECACHE_TAG_DIRTY,
-					min(end - index,
-					(pgoff_t)PAGEVEC_SIZE-1) + 1);
+		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
 		if (nr_pages == 0)
 			break;
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
 
+			index = page->index;
+			if (index > end)
+				break;
+			index++;
+
+			BUG_ON(!PageLocked(page));
+			BUG_ON(PageWriteback(page));
+
 			pages_skipped = mpd->wbc->pages_skipped;
 			err = mapping->a_ops->writepage(page, mpd->wbc);
 			if (!err && (pages_skipped == mpd->wbc->pages_skipped))
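The hunk above swaps the tagged dirty-page lookup for a plain pagevec_lookup() across the whole [first_page, next_page - 1] range, so pages whose PAGECACHE_TAG_DIRTY tag was already cleared (exactly the ones carrying mapped dirty buffer_heads) are still revisited. Note the bookkeeping this forces: pagevec_lookup() takes the start index by value and imposes no upper bound, so the loop now advances index from each page->index itself and breaks once a page lands past end. A sketch of the resulting walk with the intent spelled out (illustrative only; locking and the usual pagevec_release() of each batch are elided):

	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;				/* range exhausted */
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			index = page->index;		/* resume point for the next batch */
			if (index > end)		/* lookup ran past the range */
				break;
			index++;
			/* ... write the page out ... */
		}
	}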
@@ -2095,11 +2099,29 @@ static int __mpage_da_writepage(struct page *page,
 		bh = head;
 		do {
 			BUG_ON(buffer_locked(bh));
+			/*
+			 * We need to try to allocate
+			 * unmapped blocks in the same page.
+			 * Otherwise we won't make progress
+			 * with the page in ext4_da_writepage
+			 */
 			if (buffer_dirty(bh) &&
 				(!buffer_mapped(bh) || buffer_delay(bh))) {
 				mpage_add_bh_to_extent(mpd, logical, bh);
 				if (mpd->io_done)
 					return MPAGE_DA_EXTENT_TAIL;
+			} else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
+				/*
+				 * mapped dirty buffer. We need to update
+				 * the b_state because we look at
+				 * b_state in mpage_da_map_blocks. We don't
+				 * update b_size because if we find an
+				 * unmapped buffer_head later we need to
+				 * use the b_state flag of that buffer_head.
+				 */
+				if (mpd->lbh.b_size == 0)
+					mpd->lbh.b_state =
+						bh->b_state & BH_FLAGS;
 			}
 			logical++;
 		} while ((bh = bh->b_this_page) != head);
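This second hunk is the fix proper. Because the submit path now revisits every page from first_page onward, the per-buffer scan can meet mapped dirty buffer_heads before any unmapped ones; recording their flags while the extent is still empty (b_size == 0) lets mpage_da_map_blocks() see BH_Mapped and skip allocation. Previously lbh.b_state was never updated on this path, so the mapped check failed and the allocator ran with the never-seeded lbh.b_blocknr, i.e. logical block 0. A hedged paraphrase of the consuming check (not a verbatim quote; the allocation path is elided):

	static int mpage_da_map_blocks(struct mpage_da_data *mpd)
	{
		struct buffer_head *lbh = &mpd->lbh;

		/*
		 * Only unmapped or delayed-allocated extents need new
		 * blocks.  With the fix, a run of mapped dirty buffers
		 * seeds BH_Mapped here and we bail out instead of
		 * allocating at a stale lbh->b_blocknr.
		 */
		if (buffer_mapped(lbh) && !buffer_delay(lbh))
			return 0;
		/* ... allocate blocks starting at lbh->b_blocknr ... */
		return 0;
	}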