about summary refs log tree commit diff stats
path: root/fs/ext4/inode.c
diff options
context:
space:
mode:
authorJan Kara <jack@suse.cz>2013-01-28 12:55:08 -0500
committerTheodore Ts'o <tytso@mit.edu>2013-01-28 12:55:08 -0500
commitf8bec37037aceb126d695c021cf4dc93b7238d47 (patch)
treed0489b1d92fa602da3ef15014370ec568e925c15 /fs/ext4/inode.c
parent002bd7fa3ac7441bdb36df67b2c64bc8c1be5360 (diff)
ext4: dirty page has always buffers attached
ext4_writepage(), write_cache_pages_da(), and mpage_da_submit_io() don't have to deal with the case when a page doesn't have buffers. We attach buffers to a page in ->write_begin() and ->page_mkwrite(), which covers all places where a page can become dirty. Signed-off-by: Jan Kara <jack@suse.cz> Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs/ext4/inode.c')
-rw-r--r--fs/ext4/inode.c147
1 file changed, 38 insertions, 109 deletions
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index cbbf58327a29..8a89cbbf0f1a 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -132,8 +132,6 @@ static inline int ext4_begin_ordered_truncate(struct inode *inode,
132} 132}
133 133
134static void ext4_invalidatepage(struct page *page, unsigned long offset); 134static void ext4_invalidatepage(struct page *page, unsigned long offset);
135static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
136 struct buffer_head *bh_result, int create);
137static int __ext4_journalled_writepage(struct page *page, unsigned int len); 135static int __ext4_journalled_writepage(struct page *page, unsigned int len);
138static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh); 136static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
139static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle, 137static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
@@ -1374,7 +1372,7 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
1374 if (nr_pages == 0) 1372 if (nr_pages == 0)
1375 break; 1373 break;
1376 for (i = 0; i < nr_pages; i++) { 1374 for (i = 0; i < nr_pages; i++) {
1377 int commit_write = 0, skip_page = 0; 1375 int skip_page = 0;
1378 struct page *page = pvec.pages[i]; 1376 struct page *page = pvec.pages[i];
1379 1377
1380 index = page->index; 1378 index = page->index;
@@ -1396,27 +1394,9 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
1396 BUG_ON(!PageLocked(page)); 1394 BUG_ON(!PageLocked(page));
1397 BUG_ON(PageWriteback(page)); 1395 BUG_ON(PageWriteback(page));
1398 1396
1399 /*
1400 * If the page does not have buffers (for
1401 * whatever reason), try to create them using
1402 * __block_write_begin. If this fails,
1403 * skip the page and move on.
1404 */
1405 if (!page_has_buffers(page)) {
1406 if (__block_write_begin(page, 0, len,
1407 noalloc_get_block_write)) {
1408 skip_page:
1409 unlock_page(page);
1410 continue;
1411 }
1412 commit_write = 1;
1413 }
1414
1415 bh = page_bufs = page_buffers(page); 1397 bh = page_bufs = page_buffers(page);
1416 block_start = 0; 1398 block_start = 0;
1417 do { 1399 do {
1418 if (!bh)
1419 goto skip_page;
1420 if (map && (cur_logical >= map->m_lblk) && 1400 if (map && (cur_logical >= map->m_lblk) &&
1421 (cur_logical <= (map->m_lblk + 1401 (cur_logical <= (map->m_lblk +
1422 (map->m_len - 1)))) { 1402 (map->m_len - 1)))) {
@@ -1444,12 +1424,10 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
1444 pblock++; 1424 pblock++;
1445 } while (bh != page_bufs); 1425 } while (bh != page_bufs);
1446 1426
1447 if (skip_page) 1427 if (skip_page) {
1448 goto skip_page; 1428 unlock_page(page);
1449 1429 continue;
1450 if (commit_write) 1430 }
1451 /* mark the buffer_heads as dirty & uptodate */
1452 block_commit_write(page, 0, len);
1453 1431
1454 clear_page_dirty_for_io(page); 1432 clear_page_dirty_for_io(page);
1455 err = ext4_bio_write_page(&io_submit, page, len, 1433 err = ext4_bio_write_page(&io_submit, page, len,
@@ -1869,27 +1847,6 @@ int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1869 return 0; 1847 return 0;
1870} 1848}
1871 1849
1872/*
1873 * This function is used as a standard get_block_t calback function when there
1874 * is no desire to allocate any blocks. It is used as a callback function for
1875 * block_write_begin(). These functions should only try to map a single block
1876 * at a time.
1877 *
1878 * Since this function doesn't do block allocations even if the caller
1879 * requests it by passing in create=1, it is critically important that
1880 * any caller checks to make sure that any buffer heads are returned
1881 * by this function are either all already mapped or marked for
1882 * delayed allocation before calling ext4_bio_write_page(). Otherwise,
1883 * b_blocknr could be left unitialized, and the page write functions will
1884 * be taken by surprise.
1885 */
1886static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
1887 struct buffer_head *bh_result, int create)
1888{
1889 BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
1890 return _ext4_get_block(inode, iblock, bh_result, 0);
1891}
1892
1893static int bget_one(handle_t *handle, struct buffer_head *bh) 1850static int bget_one(handle_t *handle, struct buffer_head *bh)
1894{ 1851{
1895 get_bh(bh); 1852 get_bh(bh);
@@ -2014,7 +1971,7 @@ out:
2014static int ext4_writepage(struct page *page, 1971static int ext4_writepage(struct page *page,
2015 struct writeback_control *wbc) 1972 struct writeback_control *wbc)
2016{ 1973{
2017 int ret = 0, commit_write = 0; 1974 int ret = 0;
2018 loff_t size; 1975 loff_t size;
2019 unsigned int len; 1976 unsigned int len;
2020 struct buffer_head *page_bufs = NULL; 1977 struct buffer_head *page_bufs = NULL;
@@ -2028,21 +1985,6 @@ static int ext4_writepage(struct page *page,
2028 else 1985 else
2029 len = PAGE_CACHE_SIZE; 1986 len = PAGE_CACHE_SIZE;
2030 1987
2031 /*
2032 * If the page does not have buffers (for whatever reason),
2033 * try to create them using __block_write_begin. If this
2034 * fails, redirty the page and move on.
2035 */
2036 if (!page_has_buffers(page)) {
2037 if (__block_write_begin(page, 0, len,
2038 noalloc_get_block_write)) {
2039 redirty_page:
2040 redirty_page_for_writepage(wbc, page);
2041 unlock_page(page);
2042 return 0;
2043 }
2044 commit_write = 1;
2045 }
2046 page_bufs = page_buffers(page); 1988 page_bufs = page_buffers(page);
2047 if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL, 1989 if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2048 ext4_bh_delay_or_unwritten)) { 1990 ext4_bh_delay_or_unwritten)) {
@@ -2056,11 +1998,10 @@ static int ext4_writepage(struct page *page,
2056 */ 1998 */
2057 WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == 1999 WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
2058 PF_MEMALLOC); 2000 PF_MEMALLOC);
2059 goto redirty_page; 2001 redirty_page_for_writepage(wbc, page);
2002 unlock_page(page);
2003 return 0;
2060 } 2004 }
2061 if (commit_write)
2062 /* now mark the buffer_heads as dirty and uptodate */
2063 block_commit_write(page, 0, len);
2064 2005
2065 if (PageChecked(page) && ext4_should_journal_data(inode)) 2006 if (PageChecked(page) && ext4_should_journal_data(inode))
2066 /* 2007 /*
@@ -2203,51 +2144,39 @@ static int write_cache_pages_da(handle_t *handle,
2203 logical = (sector_t) page->index << 2144 logical = (sector_t) page->index <<
2204 (PAGE_CACHE_SHIFT - inode->i_blkbits); 2145 (PAGE_CACHE_SHIFT - inode->i_blkbits);
2205 2146
2206 if (!page_has_buffers(page)) { 2147 /* Add all dirty buffers to mpd */
2207 mpage_add_bh_to_extent(mpd, logical, 2148 head = page_buffers(page);
2208 PAGE_CACHE_SIZE, 2149 bh = head;
2209 (1 << BH_Dirty) | (1 << BH_Uptodate)); 2150 do {
2210 if (mpd->io_done) 2151 BUG_ON(buffer_locked(bh));
2211 goto ret_extent_tail;
2212 } else {
2213 /* 2152 /*
2214 * Page with regular buffer heads, 2153 * We need to try to allocate unmapped blocks
2215 * just add all dirty ones 2154 * in the same page. Otherwise we won't make
2155 * progress with the page in ext4_writepage
2216 */ 2156 */
2217 head = page_buffers(page); 2157 if (ext4_bh_delay_or_unwritten(NULL, bh)) {
2218 bh = head; 2158 mpage_add_bh_to_extent(mpd, logical,
2219 do { 2159 bh->b_size,
2220 BUG_ON(buffer_locked(bh)); 2160 bh->b_state);
2161 if (mpd->io_done)
2162 goto ret_extent_tail;
2163 } else if (buffer_dirty(bh) &&
2164 buffer_mapped(bh)) {
2221 /* 2165 /*
2222 * We need to try to allocate 2166 * mapped dirty buffer. We need to
2223 * unmapped blocks in the same page. 2167 * update the b_state because we look
2224 * Otherwise we won't make progress 2168 * at b_state in mpage_da_map_blocks.
2225 * with the page in ext4_writepage 2169 * We don't update b_size because if we
2170 * find an unmapped buffer_head later
2171 * we need to use the b_state flag of
2172 * that buffer_head.
2226 */ 2173 */
2227 if (ext4_bh_delay_or_unwritten(NULL, bh)) { 2174 if (mpd->b_size == 0)
2228 mpage_add_bh_to_extent(mpd, logical, 2175 mpd->b_state =
2229 bh->b_size, 2176 bh->b_state & BH_FLAGS;
2230 bh->b_state); 2177 }
2231 if (mpd->io_done) 2178 logical++;
2232 goto ret_extent_tail; 2179 } while ((bh = bh->b_this_page) != head);
2233 } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
2234 /*
2235 * mapped dirty buffer. We need
2236 * to update the b_state
2237 * because we look at b_state
2238 * in mpage_da_map_blocks. We
2239 * don't update b_size because
2240 * if we find an unmapped
2241 * buffer_head later we need to
2242 * use the b_state flag of that
2243 * buffer_head.
2244 */
2245 if (mpd->b_size == 0)
2246 mpd->b_state = bh->b_state & BH_FLAGS;
2247 }
2248 logical++;
2249 } while ((bh = bh->b_this_page) != head);
2250 }
2251 2180
2252 if (nr_to_write > 0) { 2181 if (nr_to_write > 0) {
2253 nr_to_write--; 2182 nr_to_write--;