author     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>   2008-09-08 23:05:34 -0400
committer  Theodore Ts'o <tytso@mit.edu>                        2008-09-08 23:05:34 -0400
commit     df22291ff0fde0d350cf15dac3e5cc33ac528875 (patch)
tree       723d781b430e7ab796b67eec4e956b6a2e47bf93 /fs/ext4/inode.c
parent     166348dd37a4baacfb6fe495954b56f56b116f0c (diff)
ext4: Retry block allocation if we have free blocks left
When we truncate files, the metadata blocks that are released are not
reusable until the truncate transaction commits. That means a delayed
allocation get_block request can return ENOSPC even though free blocks
are left. Force a journal commit and retry the block allocation if we
get ENOSPC while free blocks are still available.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs/ext4/inode.c')
-rw-r--r--  fs/ext4/inode.c | 81
1 file changed, 57 insertions(+), 24 deletions(-)
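Before the patch itself, here is a minimal user-space sketch of the retry pattern the commit message describes: if an allocation fails with ENOSPC while the filesystem still reports free blocks, force a journal commit (so blocks freed by a pending truncate become reusable) and try once more. The helpers try_allocate(), free_blocks_left() and force_journal_commit() are hypothetical stand-ins for ext4_da_get_block_write(), ext4_count_free_blocks() and jbd2_journal_force_commit_nested(); this is not kernel code and not part of the patch.

/* Hypothetical stand-ins for the ext4/jbd2 calls used by the patch. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int attempts;

static int try_allocate(void)
{
	/* Pretend the first attempt fails because blocks freed by a
	 * truncate are still pinned by an uncommitted transaction. */
	return (attempts++ == 0) ? -ENOSPC : 0;
}

static bool free_blocks_left(void)
{
	return true;	/* the filesystem is not actually full */
}

static void force_journal_commit(void)
{
	printf("forcing journal commit to release pinned blocks\n");
}

int main(void)
{
	int err = try_allocate();

	if (err == -ENOSPC && free_blocks_left()) {
		/* ENOSPC with free blocks left: commit and retry. */
		force_journal_commit();
		err = try_allocate();
	}

	printf("allocation %s\n", err ? "failed" : "succeeded");
	return err ? 1 : 0;
}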
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index b567e71f5be9..f97b3478eb89 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1634,6 +1634,7 @@ struct mpage_da_data {
 	struct writeback_control *wbc;
 	int io_done;
 	long pages_written;
+	int retval;
 };
 
 /*
@@ -1820,6 +1821,24 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
 	return;
 }
 
+static void ext4_print_free_blocks(struct inode *inode)
+{
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	printk(KERN_EMERG "Total free blocks count %lld\n",
+			ext4_count_free_blocks(inode->i_sb));
+	printk(KERN_EMERG "Free/Dirty block details\n");
+	printk(KERN_EMERG "free_blocks=%lld\n",
+			percpu_counter_sum(&sbi->s_freeblocks_counter));
+	printk(KERN_EMERG "dirty_blocks=%lld\n",
+			percpu_counter_sum(&sbi->s_dirtyblocks_counter));
+	printk(KERN_EMERG "Block reservation details\n");
+	printk(KERN_EMERG "i_reserved_data_blocks=%lu\n",
+			EXT4_I(inode)->i_reserved_data_blocks);
+	printk(KERN_EMERG "i_reserved_meta_blocks=%lu\n",
+			EXT4_I(inode)->i_reserved_meta_blocks);
+	return;
+}
+
 /*
  * mpage_da_map_blocks - go through given space
  *
@@ -1834,7 +1853,7 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
 	int err = 0;
 	struct buffer_head new;
 	struct buffer_head *lbh = &mpd->lbh;
-	sector_t next = lbh->b_blocknr;
+	sector_t next;
 
 	/*
 	 * We consider only non-mapped and non-allocated blocks
@@ -1844,6 +1863,7 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
 	new.b_state = lbh->b_state;
 	new.b_blocknr = 0;
 	new.b_size = lbh->b_size;
+	next = lbh->b_blocknr;
 	/*
 	 * If we didn't accumulate anything
 	 * to write simply return
@@ -1860,6 +1880,13 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
 	 */
 	if (err == -EAGAIN)
 		return 0;
+
+	if (err == -ENOSPC &&
+			ext4_count_free_blocks(mpd->inode->i_sb)) {
+		mpd->retval = err;
+		return 0;
+	}
+
 	/*
 	 * get block failure will cause us
 	 * to loop in writepages. Because
@@ -1877,8 +1904,7 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
 		printk(KERN_EMERG "This should not happen.!! "
 					"Data will be lost\n");
 		if (err == -ENOSPC) {
-			printk(KERN_CRIT "Total free blocks count %lld\n",
-				ext4_count_free_blocks(mpd->inode->i_sb));
+			ext4_print_free_blocks(mpd->inode);
 		}
 		/* invlaidate all the pages */
 		ext4_da_block_invalidatepages(mpd, next,
@@ -2085,39 +2111,36 @@ static int __mpage_da_writepage(struct page *page,
  */
 static int mpage_da_writepages(struct address_space *mapping,
 			       struct writeback_control *wbc,
-			       get_block_t get_block)
+			       struct mpage_da_data *mpd)
 {
-	struct mpage_da_data mpd;
 	long to_write;
 	int ret;
 
-	if (!get_block)
+	if (!mpd->get_block)
 		return generic_writepages(mapping, wbc);
 
-	mpd.wbc = wbc;
-	mpd.inode = mapping->host;
-	mpd.lbh.b_size = 0;
-	mpd.lbh.b_state = 0;
-	mpd.lbh.b_blocknr = 0;
-	mpd.first_page = 0;
-	mpd.next_page = 0;
-	mpd.get_block = get_block;
-	mpd.io_done = 0;
-	mpd.pages_written = 0;
+	mpd->lbh.b_size = 0;
+	mpd->lbh.b_state = 0;
+	mpd->lbh.b_blocknr = 0;
+	mpd->first_page = 0;
+	mpd->next_page = 0;
+	mpd->io_done = 0;
+	mpd->pages_written = 0;
+	mpd->retval = 0;
 
 	to_write = wbc->nr_to_write;
 
-	ret = write_cache_pages(mapping, wbc, __mpage_da_writepage, &mpd);
+	ret = write_cache_pages(mapping, wbc, __mpage_da_writepage, mpd);
 
 	/*
 	 * Handle last extent of pages
 	 */
-	if (!mpd.io_done && mpd.next_page != mpd.first_page) {
-		if (mpage_da_map_blocks(&mpd) == 0)
-			mpage_da_submit_io(&mpd);
+	if (!mpd->io_done && mpd->next_page != mpd->first_page) {
+		if (mpage_da_map_blocks(mpd) == 0)
+			mpage_da_submit_io(mpd);
 	}
 
-	wbc->nr_to_write = to_write - mpd.pages_written;
+	wbc->nr_to_write = to_write - mpd->pages_written;
 	return ret;
 }
 
@@ -2357,6 +2380,7 @@ static int ext4_da_writepages(struct address_space *mapping,
 {
 	handle_t *handle = NULL;
 	loff_t range_start = 0;
+	struct mpage_da_data mpd;
 	struct inode *inode = mapping->host;
 	int needed_blocks, ret = 0, nr_to_writebump = 0;
 	long to_write, pages_skipped = 0;
@@ -2390,6 +2414,9 @@ static int ext4_da_writepages(struct address_space *mapping,
 	range_start = wbc->range_start;
 	pages_skipped = wbc->pages_skipped;
 
+	mpd.wbc = wbc;
+	mpd.inode = mapping->host;
+
 restart_loop:
 	to_write = wbc->nr_to_write;
 	while (!ret && to_write > 0) {
@@ -2413,11 +2440,17 @@ restart_loop:
 			dump_stack();
 			goto out_writepages;
 		}
-
 		to_write -= wbc->nr_to_write;
-		ret = mpage_da_writepages(mapping, wbc,
-					ext4_da_get_block_write);
+
+		mpd.get_block = ext4_da_get_block_write;
+		ret = mpage_da_writepages(mapping, wbc, &mpd);
+
 		ext4_journal_stop(handle);
+
+		if (mpd.retval == -ENOSPC)
+			jbd2_journal_force_commit_nested(sbi->s_journal);
+
+		/* reset the retry count */
 		if (ret == MPAGE_DA_EXTENT_TAIL) {
 			/*
 			 * got one extent now try with