-rw-r--r--	fs/ext4/inode.c	56
1 files changed, 39 insertions, 17 deletions
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index ac97348f85b5..c77a7ac753f3 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1653,35 +1653,39 @@ struct mpage_da_data {
  */
 static int mpage_da_submit_io(struct mpage_da_data *mpd)
 {
-	struct address_space *mapping = mpd->inode->i_mapping;
-	int ret = 0, err, nr_pages, i;
-	unsigned long index, end;
-	struct pagevec pvec;
 	long pages_skipped;
+	struct pagevec pvec;
+	unsigned long index, end;
+	int ret = 0, err, nr_pages, i;
+	struct inode *inode = mpd->inode;
+	struct address_space *mapping = inode->i_mapping;
 
 	BUG_ON(mpd->next_page <= mpd->first_page);
-	pagevec_init(&pvec, 0);
+	/*
+	 * We need to start from the first_page to the next_page - 1
+	 * to make sure we also write the mapped dirty buffer_heads.
+	 * If we look at mpd->lbh.b_blocknr we would only be looking
+	 * at the currently mapped buffer_heads.
+	 */
 	index = mpd->first_page;
 	end = mpd->next_page - 1;
 
+	pagevec_init(&pvec, 0);
 	while (index <= end) {
-		/*
-		 * We can use PAGECACHE_TAG_DIRTY lookup here because
-		 * even though we have cleared the dirty flag on the page
-		 * We still keep the page in the radix tree with tag
-		 * PAGECACHE_TAG_DIRTY. See clear_page_dirty_for_io.
-		 * The PAGECACHE_TAG_DIRTY is cleared in set_page_writeback
-		 * which is called via the below writepage callback.
-		 */
-		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-					      PAGECACHE_TAG_DIRTY,
-					      min(end - index,
-					      (pgoff_t)PAGEVEC_SIZE-1) + 1);
+		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
 		if (nr_pages == 0)
 			break;
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
 
+			index = page->index;
+			if (index > end)
+				break;
+			index++;
+
+			BUG_ON(!PageLocked(page));
+			BUG_ON(PageWriteback(page));
+
 			pages_skipped = mpd->wbc->pages_skipped;
 			err = mapping->a_ops->writepage(page, mpd->wbc);
 			if (!err && (pages_skipped == mpd->wbc->pages_skipped))
@@ -2095,11 +2099,29 @@ static int __mpage_da_writepage(struct page *page,
 		bh = head;
 		do {
 			BUG_ON(buffer_locked(bh));
+			/*
+			 * We need to try to allocate
+			 * unmapped blocks in the same page.
+			 * Otherwise we won't make progress
+			 * with the page in ext4_da_writepage
+			 */
 			if (buffer_dirty(bh) &&
 				(!buffer_mapped(bh) || buffer_delay(bh))) {
 				mpage_add_bh_to_extent(mpd, logical, bh);
 				if (mpd->io_done)
 					return MPAGE_DA_EXTENT_TAIL;
+			} else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
+				/*
+				 * mapped dirty buffer. We need to update
+				 * the b_state because we look at
+				 * b_state in mpage_da_map_blocks. We don't
+				 * update b_size because if we find an
+				 * unmapped buffer_head later we need to
+				 * use the b_state flag of that buffer_head.
+				 */
+				if (mpd->lbh.b_size == 0)
+					mpd->lbh.b_state =
+						bh->b_state & BH_FLAGS;
 			}
 			logical++;
 		} while ((bh = bh->b_this_page) != head);