author    Theodore Ts'o <tytso@mit.edu>  2010-10-27 21:30:09 -0400
committer Theodore Ts'o <tytso@mit.edu>  2010-10-27 21:30:09 -0400
commit    5a87b7a5da250c9be6d757758425dfeaf8ed3179
tree      efd1ca776e184101cb20ba4606bc35103385bcb5 /fs/ext4/inode.c
parent    16828088f9e518158edecb6cde7e6fa38e4c889b
ext4: call mpage_da_submit_io() from mpage_da_map_blocks()
Eventually we need to completely reorganize the ext4 writepage callpath, but for now we simplify things a little by calling mpage_da_submit_io() from mpage_da_map_blocks(), since every place that calls mpage_da_map_blocks() follows it with a call to mpage_da_submit_io(). We're also a wee bit better with respect to error handling, but there are still a number of cases where it's not clear what the right thing to do is when an ext4 function deep in the writeback codepath fails.

Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
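To make the shape of the change easier to follow before reading the diff, here is a rough, self-contained C sketch of the call-site pattern being collapsed. The names used here (struct extent_state, map_blocks(), submit_io(), map_and_submit()) are simplified stand-ins for struct mpage_da_data, mpage_da_map_blocks(), mpage_da_submit_io() and the new mpage_da_map_and_submit(); this is an illustration, not the kernel code itself.

/* Illustrative sketch only -- simplified stand-ins for the ext4 helpers. */
#include <stdbool.h>
#include <stdio.h>

struct extent_state {			/* stand-in for struct mpage_da_data */
	unsigned long b_size;		/* bytes accumulated for this extent */
	bool io_done;			/* set once I/O has been started */
};

static int map_blocks(struct extent_state *mpd)
{
	(void)mpd;			/* pretend block allocation succeeded */
	return 0;
}

static void submit_io(struct extent_state *mpd)
{
	printf("submitting %lu bytes for I/O\n", mpd->b_size);
}

/* Before the patch: every caller had to pair the two steps and set io_done. */
static void flush_extent_before(struct extent_state *mpd)
{
	if (map_blocks(mpd) == 0)
		submit_io(mpd);
	mpd->io_done = true;
}

/* After the patch: one helper maps (if needed), submits, and marks io_done. */
static void map_and_submit(struct extent_state *mpd)
{
	if (mpd->b_size != 0)
		(void)map_blocks(mpd);	/* most errors still fall through to submission */
	submit_io(mpd);
	mpd->io_done = true;
}

static void flush_extent_after(struct extent_state *mpd)
{
	map_and_submit(mpd);		/* the old three-line pattern becomes one call */
}

int main(void)
{
	struct extent_state mpd = { .b_size = 4096, .io_done = false };

	flush_extent_before(&mpd);
	flush_extent_after(&mpd);
	return 0;
}

The hunks below apply the same consolidation to the three real call sites in fs/ext4/inode.c.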
Diffstat (limited to 'fs/ext4/inode.c')
-rw-r--r--	fs/ext4/inode.c	66
1 file changed, 33 insertions(+), 33 deletions(-)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 670ab15e4f9a..55961ff4efc2 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -60,6 +60,7 @@ static inline int ext4_begin_ordered_truncate(struct inode *inode,
 }
 
 static void ext4_invalidatepage(struct page *page, unsigned long offset);
+static int ext4_writepage(struct page *page, struct writeback_control *wbc);
 
 /*
  * Test whether an inode is a fast symlink.
@@ -2033,7 +2034,7 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd)
 		BUG_ON(PageWriteback(page));
 
 		pages_skipped = mpd->wbc->pages_skipped;
-		err = mapping->a_ops->writepage(page, mpd->wbc);
+		err = ext4_writepage(page, mpd->wbc);
 		if (!err && (pages_skipped == mpd->wbc->pages_skipped))
 			/*
 			 * have successfully written the page
@@ -2189,14 +2190,15 @@ static void ext4_print_free_blocks(struct inode *inode)
 }
 
 /*
- * mpage_da_map_blocks - go through given space
+ * mpage_da_map_and_submit - go through given space, map them
+ *			     if necessary, and then submit them for I/O
  *
  * @mpd - bh describing space
  *
  * The function skips space we know is already mapped to disk blocks.
  *
  */
-static int mpage_da_map_blocks(struct mpage_da_data *mpd)
+static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
 {
 	int err, blks, get_blocks_flags;
 	struct ext4_map_blocks map;
@@ -2206,18 +2208,14 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
 	handle_t *handle = NULL;
 
 	/*
-	 * We consider only non-mapped and non-allocated blocks
+	 * If the blocks are mapped already, or we couldn't accumulate
+	 * any blocks, then proceed immediately to the submission stage.
 	 */
-	if ((mpd->b_state & (1 << BH_Mapped)) &&
-	    !(mpd->b_state & (1 << BH_Delay)) &&
-	    !(mpd->b_state & (1 << BH_Unwritten)))
-		return 0;
-
-	/*
-	 * If we didn't accumulate anything to write simply return
-	 */
-	if (!mpd->b_size)
-		return 0;
+	if ((mpd->b_size == 0) ||
+	    ((mpd->b_state & (1 << BH_Mapped)) &&
+	     !(mpd->b_state & (1 << BH_Delay)) &&
+	     !(mpd->b_state & (1 << BH_Unwritten))))
+		goto submit_io;
 
 	handle = ext4_journal_current_handle();
 	BUG_ON(!handle);
@@ -2254,17 +2252,18 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
 
 		err = blks;
 		/*
-		 * If get block returns with error we simply
-		 * return. Later writepage will redirty the page and
-		 * writepages will find the dirty page again
+		 * If get block returns EAGAIN or ENOSPC and there
+		 * appears to be free blocks we will call
+		 * ext4_writepage() for all of the pages which will
+		 * just redirty the pages.
 		 */
 		if (err == -EAGAIN)
-			return 0;
+			goto submit_io;
 
 		if (err == -ENOSPC &&
 		    ext4_count_free_blocks(sb)) {
 			mpd->retval = err;
-			return 0;
+			goto submit_io;
 		}
 
 		/*
@@ -2289,7 +2288,7 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
 		/* invalidate all the pages */
 		ext4_da_block_invalidatepages(mpd, next,
 				mpd->b_size >> mpd->inode->i_blkbits);
-		return err;
+		return;
 	}
 	BUG_ON(blks == 0);
 
@@ -2312,7 +2311,8 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
 	if (ext4_should_order_data(mpd->inode)) {
 		err = ext4_jbd2_file_inode(handle, mpd->inode);
 		if (err)
-			return err;
+			/* This only happens if the journal is aborted */
+			return;
 	}
 
 	/*
@@ -2323,10 +2323,16 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
 	disksize = i_size_read(mpd->inode);
 	if (disksize > EXT4_I(mpd->inode)->i_disksize) {
 		ext4_update_i_disksize(mpd->inode, disksize);
-		return ext4_mark_inode_dirty(handle, mpd->inode);
+		err = ext4_mark_inode_dirty(handle, mpd->inode);
+		if (err)
+			ext4_error(mpd->inode->i_sb,
+				   "Failed to mark inode %lu dirty",
+				   mpd->inode->i_ino);
 	}
 
-	return 0;
+submit_io:
+	mpage_da_submit_io(mpd);
+	mpd->io_done = 1;
 }
 
 #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
@@ -2403,9 +2409,7 @@ flush_it:
 	 * We couldn't merge the block to our extent, so we
 	 * need to flush current extent and start new one
 	 */
-	if (mpage_da_map_blocks(mpd) == 0)
-		mpage_da_submit_io(mpd);
-	mpd->io_done = 1;
+	mpage_da_map_and_submit(mpd);
 	return;
 }
 
@@ -2437,15 +2441,13 @@ static int __mpage_da_writepage(struct page *page,
 	if (mpd->next_page != page->index) {
 		/*
 		 * Nope, we can't. So, we map non-allocated blocks
-		 * and start IO on them using writepage()
+		 * and start IO on them
 		 */
 		if (mpd->next_page != mpd->first_page) {
-			if (mpage_da_map_blocks(mpd) == 0)
-				mpage_da_submit_io(mpd);
+			mpage_da_map_and_submit(mpd);
 			/*
 			 * skip rest of the page in the page_vec
 			 */
-			mpd->io_done = 1;
 			redirty_page_for_writepage(wbc, page);
 			unlock_page(page);
 			return MPAGE_DA_EXTENT_TAIL;
@@ -3071,9 +3073,7 @@ retry:
 		 * them for I/O.
 		 */
 		if (!mpd.io_done && mpd.next_page != mpd.first_page) {
-			if (mpage_da_map_blocks(&mpd) == 0)
-				mpage_da_submit_io(&mpd);
-			mpd.io_done = 1;
+			mpage_da_map_and_submit(&mpd);
 			ret = MPAGE_DA_EXTENT_TAIL;
 		}
 		trace_ext4_da_write_pages(inode, &mpd);