| author | Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> | 2008-08-19 21:08:18 -0400 |
|---|---|---|
| committer | Theodore Ts'o <tytso@mit.edu> | 2008-08-19 21:08:18 -0400 |
| commit | c4a0c46ec92c194c873232b88debce4e1a448483 | |
| tree | ebde1cc191b1af97bcd9189067b23205fa6cefdc /fs | |
| parent | af5bc92dded4d98dfeabc8b5b9812571345b263d | |
ext4: invalidate pages if delalloc block allocation fails.
We are a bit aggressive in invalidating all the pages. But
that is OK, because we really don't know why the block allocation
failed, and it is better to come off the writeback path
so that the user can look for more information.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
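For context on the arithmetic in the new helper: ext4_da_block_invalidatepages() converts the failed extent's logical block range into a page-cache index range before invalidating. Below is a minimal userspace sketch of that conversion, assuming 4 KiB pages (PAGE_CACHE_SHIFT = 12) and 1 KiB blocks; the blocks_to_page_range() wrapper and the main() driver are illustrative, not part of the patch:

```c
#include <stdio.h>

#define PAGE_CACHE_SHIFT 12 /* assume 4 KiB pages for this example */

/* Same computation as in ext4_da_block_invalidatepages():
 * map a range of logical file blocks onto page-cache indices. */
static void blocks_to_page_range(unsigned long long logical, long blk_cnt,
                                 unsigned int blkbits,
                                 unsigned long *index, unsigned long *end)
{
        *index = logical >> (PAGE_CACHE_SHIFT - blkbits);
        *end = (logical + blk_cnt - 1) >> (PAGE_CACHE_SHIFT - blkbits);
}

int main(void)
{
        unsigned long index, end;

        /* 1 KiB blocks (blkbits = 10) -> 4 blocks per page, so a
         * failed extent covering blocks 9..16 spans pages 2..4. */
        blocks_to_page_range(9, 8, 10, &index, &end);
        printf("invalidate pages %lu..%lu\n", index, end);
        return 0;
}
```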
Diffstat (limited to 'fs')

-rw-r--r--  fs/ext4/inode.c | 85

1 file changed, 73 insertions(+), 12 deletions(-)
```diff
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 89c92c0f8297..b6fa0c4087e9 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1783,6 +1783,39 @@ static inline void __unmap_underlying_blocks(struct inode *inode,
                 unmap_underlying_metadata(bdev, bh->b_blocknr + i);
 }
 
+static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
+                                        sector_t logical, long blk_cnt)
+{
+        int nr_pages, i;
+        pgoff_t index, end;
+        struct pagevec pvec;
+        struct inode *inode = mpd->inode;
+        struct address_space *mapping = inode->i_mapping;
+
+        index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+        end = (logical + blk_cnt - 1) >>
+                                (PAGE_CACHE_SHIFT - inode->i_blkbits);
+        while (index <= end) {
+                nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
+                if (nr_pages == 0)
+                        break;
+                for (i = 0; i < nr_pages; i++) {
+                        struct page *page = pvec.pages[i];
+                        index = page->index;
+                        if (index > end)
+                                break;
+                        index++;
+
+                        BUG_ON(!PageLocked(page));
+                        BUG_ON(PageWriteback(page));
+                        block_invalidatepage(page, 0);
+                        ClearPageUptodate(page);
+                        unlock_page(page);
+                }
+        }
+        return;
+}
+
 /*
  * mpage_da_map_blocks - go through given space
  *
@@ -1792,7 +1825,7 @@ static inline void __unmap_underlying_blocks(struct inode *inode,
  * The function skips space we know is already mapped to disk blocks.
  *
  */
-static void mpage_da_map_blocks(struct mpage_da_data *mpd)
+static int mpage_da_map_blocks(struct mpage_da_data *mpd)
 {
         int err = 0;
         struct buffer_head *lbh = &mpd->lbh;
@@ -1803,7 +1836,7 @@ static void mpage_da_map_blocks(struct mpage_da_data *mpd)
          * We consider only non-mapped and non-allocated blocks
          */
         if (buffer_mapped(lbh) && !buffer_delay(lbh))
-                return;
+                return 0;
 
         new.b_state = lbh->b_state;
         new.b_blocknr = 0;
@@ -1814,10 +1847,38 @@ static void mpage_da_map_blocks(struct mpage_da_data *mpd)
          * to write simply return
          */
         if (!new.b_size)
-                return;
+                return 0;
         err = mpd->get_block(mpd->inode, next, &new, 1);
-        if (err)
-                return;
+        if (err) {
+
+                /* If get block returns with error
+                 * we simply return. Later writepage
+                 * will redirty the page and writepages
+                 * will find the dirty page again
+                 */
+                if (err == -EAGAIN)
+                        return 0;
+                /*
+                 * get block failure will cause us
+                 * to loop in writepages. Because
+                 * a_ops->writepage won't be able to
+                 * make progress. The page will be redirtied
+                 * by writepage and writepages will again
+                 * try to write the same.
+                 */
+                printk(KERN_EMERG "%s block allocation failed for inode %lu "
+                                  "at logical offset %llu with max blocks "
+                                  "%zd with error %d\n",
+                                  __func__, mpd->inode->i_ino,
+                                  (unsigned long long)next,
+                                  lbh->b_size >> mpd->inode->i_blkbits, err);
+                printk(KERN_EMERG "This should not happen!! "
+                                  "Data will be lost\n");
+                /* invalidate all the pages */
+                ext4_da_block_invalidatepages(mpd, next,
+                                lbh->b_size >> mpd->inode->i_blkbits);
+                return err;
+        }
         BUG_ON(new.b_size == 0);
 
         if (buffer_new(&new))
@@ -1830,7 +1891,7 @@ static void mpage_da_map_blocks(struct mpage_da_data *mpd)
         if (buffer_delay(lbh) || buffer_unwritten(lbh))
                 mpage_put_bnr_to_bhs(mpd, next, &new);
 
-        return;
+        return 0;
 }
 
 #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
@@ -1899,8 +1960,8 @@ flush_it:
                  * We couldn't merge the block to our extent, so we
                  * need to flush current extent and start new one
                  */
-                mpage_da_map_blocks(mpd);
-                mpage_da_submit_io(mpd);
+                if (mpage_da_map_blocks(mpd) == 0)
+                        mpage_da_submit_io(mpd);
                 mpd->io_done = 1;
                 return;
         }
@@ -1942,8 +2003,8 @@ static int __mpage_da_writepage(struct page *page,
          * and start IO on them using writepage()
          */
         if (mpd->next_page != mpd->first_page) {
-                mpage_da_map_blocks(mpd);
-                mpage_da_submit_io(mpd);
+                if (mpage_da_map_blocks(mpd) == 0)
+                        mpage_da_submit_io(mpd);
                 /*
                  * skip rest of the page in the page_vec
                  */
@@ -2046,8 +2107,8 @@ static int mpage_da_writepages(struct address_space *mapping,
          * Handle last extent of pages
          */
         if (!mpd.io_done && mpd.next_page != mpd.first_page) {
-                mpage_da_map_blocks(&mpd);
-                mpage_da_submit_io(&mpd);
+                if (mpage_da_map_blocks(&mpd) == 0)
+                        mpage_da_submit_io(&mpd);
         }
 
         wbc->nr_to_write = to_write - mpd.pages_written;
```
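The invalidation walk itself follows the standard pagevec pattern: look pages up in fixed-size batches starting at index, resume from the index after the last page seen, and stop once past end or when a lookup comes back empty. Below is a rough userspace analogue of that control flow; the array-backed cache, the lookup_batch() helper, and the BATCH size of 14 (PAGEVEC_SIZE in kernels of this era) are assumptions for illustration:

```c
#include <stdio.h>
#include <stddef.h>

#define BATCH 14 /* stand-in for PAGEVEC_SIZE (14 in kernels of this era) */

/* Sorted indices standing in for pages present in the cache; note the
 * hole at 5..7, which a naive "index += BATCH" loop would mishandle. */
static const unsigned long cache[] = { 2, 3, 4, 8, 9, 10, 11, 12 };

/* Fetch up to BATCH cached indices >= start, like pagevec_lookup(). */
static int lookup_batch(unsigned long start, unsigned long *out)
{
        int n = 0;
        for (size_t i = 0; i < sizeof(cache) / sizeof(cache[0]) && n < BATCH; i++)
                if (cache[i] >= start)
                        out[n++] = cache[i];
        return n;
}

int main(void)
{
        unsigned long index = 2, end = 10, batch[BATCH];

        while (index <= end) {
                int nr = lookup_batch(index, batch);
                if (nr == 0)
                        break;                  /* nothing left in range */
                for (int i = 0; i < nr; i++) {
                        index = batch[i];
                        if (index > end)
                                break;          /* page is past the range: done */
                        printf("invalidate page %lu\n", index);
                        index++;                /* next lookup resumes after this page */
                }
        }
        return 0;
}
```

The rest of the patch is the contract change that makes this reachable: mpage_da_map_blocks() now returns 0 when writeback can proceed and a negative error after invalidating the pages, and all three call sites only call mpage_da_submit_io() on a zero return.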