author		Theodore Ts'o <tytso@mit.edu>	2010-05-16 20:00:00 -0400
committer	Theodore Ts'o <tytso@mit.edu>	2010-05-16 20:00:00 -0400
commit		2ed886852adfcb070bf350e66a0da0d98b2f3ab5 (patch)
tree		bcec0a1004f413b70087e2c43097892f87f21cc3 /fs/ext4/inode.c
parent		e35fd6609b2fee54484d520deccb8f18bf7d38f3 (diff)
ext4: Convert callers of ext4_get_blocks() to use ext4_map_blocks()
This saves a huge amount of stack space by avoiding the allocation of
unnecessary struct buffer_head's on the stack.

In addition, to make the code easier to understand, collapse and
refactor ext4_get_block(), ext4_get_block_write(), and
noalloc_get_block_write() into a single function.

Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs/ext4/inode.c')
-rw-r--r--	fs/ext4/inode.c	327
1 files changed, 127 insertions, 200 deletions
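
To make the stack saving concrete before reading the diff: the old ext4_get_blocks() interface forced every caller to supply a struct buffer_head, a large structure of which only a few fields were actually used, while ext4_map_blocks() takes a small struct ext4_map_blocks instead. Below is a minimal sketch of the two calling conventions; the two wrapper functions are hypothetical illustrations, but the signatures, fields, and flags are the ones visible in the diff that follows.

/*
 * Hypothetical before/after sketch (not part of the patch).
 * Old convention: a dummy struct buffer_head lives on the stack
 * purely to carry the request in and the mapping out.
 */
static int map_one_block_old(handle_t *handle, struct inode *inode,
			     sector_t block)
{
	struct buffer_head dummy;	/* large; mostly unused fields */

	dummy.b_state = 0;
	dummy.b_size = inode->i_sb->s_blocksize;
	return ext4_get_blocks(handle, inode, block, 1, &dummy,
			       EXT4_GET_BLOCKS_CREATE);
}

/*
 * New convention: struct ext4_map_blocks carries only the logical
 * extent in (m_lblk, m_len) and returns the physical extent and
 * state bits in (m_pblk, m_flags).
 */
static int map_one_block_new(handle_t *handle, struct inode *inode,
			     sector_t block)
{
	struct ext4_map_blocks map;	/* four fields, a few dozen bytes */

	map.m_lblk = block;		/* first logical block to map */
	map.m_len = 1;			/* number of blocks requested */
	return ext4_map_blocks(handle, inode, &map, EXT4_GET_BLOCKS_CREATE);
	/* on success (> 0), map.m_pblk/map.m_flags describe the mapping */
}

Both interfaces return the number of blocks mapped (0 for a hole, negative on error), which is why the converted callers in the diff below translate a positive return into ret = 0 after copying the mapping out.
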
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index ff2f5fd681b5..0b1d7c89f93f 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1336,133 +1336,112 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
 	return retval;
 }
 
-int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
-		    unsigned int max_blocks, struct buffer_head *bh,
-		    int flags)
-{
-	struct ext4_map_blocks map;
-	int ret;
-
-	map.m_lblk = block;
-	map.m_len = max_blocks;
-
-	ret = ext4_map_blocks(handle, inode, &map, flags);
-	if (ret < 0)
-		return ret;
-
-	bh->b_blocknr = map.m_pblk;
-	bh->b_size = inode->i_sb->s_blocksize * map.m_len;
-	bh->b_bdev = inode->i_sb->s_bdev;
-	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
-	return ret;
-}
-
 /* Maximum number of blocks we map for direct IO at once. */
 #define DIO_MAX_BLOCKS 4096
 
-int ext4_get_block(struct inode *inode, sector_t iblock,
-		   struct buffer_head *bh_result, int create)
+static int _ext4_get_block(struct inode *inode, sector_t iblock,
+			   struct buffer_head *bh, int flags)
 {
 	handle_t *handle = ext4_journal_current_handle();
+	struct ext4_map_blocks map;
 	int ret = 0, started = 0;
-	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
 	int dio_credits;
 
-	if (create && !handle) {
+	map.m_lblk = iblock;
+	map.m_len = bh->b_size >> inode->i_blkbits;
+
+	if (flags && !handle) {
 		/* Direct IO write... */
-		if (max_blocks > DIO_MAX_BLOCKS)
-			max_blocks = DIO_MAX_BLOCKS;
-		dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
+		if (map.m_len > DIO_MAX_BLOCKS)
+			map.m_len = DIO_MAX_BLOCKS;
+		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
 		handle = ext4_journal_start(inode, dio_credits);
 		if (IS_ERR(handle)) {
 			ret = PTR_ERR(handle);
-			goto out;
+			return ret;
 		}
 		started = 1;
 	}
 
-	ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
-			      create ? EXT4_GET_BLOCKS_CREATE : 0);
+	ret = ext4_map_blocks(handle, inode, &map, flags);
 	if (ret > 0) {
-		bh_result->b_size = (ret << inode->i_blkbits);
+		map_bh(bh, inode->i_sb, map.m_pblk);
+		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
+		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
 		ret = 0;
 	}
 	if (started)
 		ext4_journal_stop(handle);
-out:
 	return ret;
 }
 
+int ext4_get_block(struct inode *inode, sector_t iblock,
+		   struct buffer_head *bh, int create)
+{
+	return _ext4_get_block(inode, iblock, bh,
+			       create ? EXT4_GET_BLOCKS_CREATE : 0);
+}
+
 /*
  * `handle' can be NULL if create is zero
  */
 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
 				ext4_lblk_t block, int create, int *errp)
 {
-	struct buffer_head dummy;
+	struct ext4_map_blocks map;
+	struct buffer_head *bh;
 	int fatal = 0, err;
-	int flags = 0;
 
 	J_ASSERT(handle != NULL || create == 0);
 
-	dummy.b_state = 0;
-	dummy.b_blocknr = -1000;
-	buffer_trace_init(&dummy.b_history);
-	if (create)
-		flags |= EXT4_GET_BLOCKS_CREATE;
-	err = ext4_get_blocks(handle, inode, block, 1, &dummy, flags);
-	/*
-	 * ext4_get_blocks() returns number of blocks mapped. 0 in
-	 * case of a HOLE.
-	 */
-	if (err > 0) {
-		if (err > 1)
-			WARN_ON(1);
-		err = 0;
-	}
-	*errp = err;
-	if (!err && buffer_mapped(&dummy)) {
-		struct buffer_head *bh;
-		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
-		if (!bh) {
-			*errp = -EIO;
-			goto err;
-		}
-		if (buffer_new(&dummy)) {
-			J_ASSERT(create != 0);
-			J_ASSERT(handle != NULL);
+	map.m_lblk = block;
+	map.m_len = 1;
+	err = ext4_map_blocks(handle, inode, &map,
+			      create ? EXT4_GET_BLOCKS_CREATE : 0);
+
+	if (err < 0)
+		*errp = err;
+	if (err <= 0)
+		return NULL;
+	*errp = 0;
+
+	bh = sb_getblk(inode->i_sb, map.m_pblk);
+	if (!bh) {
+		*errp = -EIO;
+		return NULL;
+	}
+	if (map.m_flags & EXT4_MAP_NEW) {
+		J_ASSERT(create != 0);
+		J_ASSERT(handle != NULL);
 
-			/*
-			 * Now that we do not always journal data, we should
-			 * keep in mind whether this should always journal the
-			 * new buffer as metadata. For now, regular file
-			 * writes use ext4_get_block instead, so it's not a
-			 * problem.
-			 */
-			lock_buffer(bh);
-			BUFFER_TRACE(bh, "call get_create_access");
-			fatal = ext4_journal_get_create_access(handle, bh);
-			if (!fatal && !buffer_uptodate(bh)) {
-				memset(bh->b_data, 0, inode->i_sb->s_blocksize);
-				set_buffer_uptodate(bh);
-			}
-			unlock_buffer(bh);
-			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-			err = ext4_handle_dirty_metadata(handle, inode, bh);
-			if (!fatal)
-				fatal = err;
-		} else {
-			BUFFER_TRACE(bh, "not a new buffer");
-		}
-		if (fatal) {
-			*errp = fatal;
-			brelse(bh);
-			bh = NULL;
-		}
-		return bh;
+		/*
+		 * Now that we do not always journal data, we should
+		 * keep in mind whether this should always journal the
+		 * new buffer as metadata. For now, regular file
+		 * writes use ext4_get_block instead, so it's not a
+		 * problem.
+		 */
+		lock_buffer(bh);
+		BUFFER_TRACE(bh, "call get_create_access");
+		fatal = ext4_journal_get_create_access(handle, bh);
+		if (!fatal && !buffer_uptodate(bh)) {
+			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
+			set_buffer_uptodate(bh);
+		}
+		unlock_buffer(bh);
+		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+		err = ext4_handle_dirty_metadata(handle, inode, bh);
+		if (!fatal)
+			fatal = err;
+	} else {
+		BUFFER_TRACE(bh, "not a new buffer");
 	}
-err:
-	return NULL;
+	if (fatal) {
+		*errp = fatal;
+		brelse(bh);
+		bh = NULL;
+	}
+	return bh;
 }
 
 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
@@ -2050,28 +2029,23 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd)
 /*
  * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers
  *
- * @mpd->inode - inode to walk through
- * @exbh->b_blocknr - first block on a disk
- * @exbh->b_size - amount of space in bytes
- * @logical - first logical block to start assignment with
- *
  * the function goes through all passed space and put actual disk
  * block numbers into buffer heads, dropping BH_Delay and BH_Unwritten
  */
-static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical,
-				 struct buffer_head *exbh)
+static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd,
+				 struct ext4_map_blocks *map)
 {
 	struct inode *inode = mpd->inode;
 	struct address_space *mapping = inode->i_mapping;
-	int blocks = exbh->b_size >> inode->i_blkbits;
-	sector_t pblock = exbh->b_blocknr, cur_logical;
+	int blocks = map->m_len;
+	sector_t pblock = map->m_pblk, cur_logical;
 	struct buffer_head *head, *bh;
 	pgoff_t index, end;
 	struct pagevec pvec;
 	int nr_pages, i;
 
-	index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
-	end = (logical + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	index = map->m_lblk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	end = (map->m_lblk + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
 	cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
 
 	pagevec_init(&pvec, 0);
@@ -2098,17 +2072,16 @@ static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical,
 
 		/* skip blocks out of the range */
 		do {
-			if (cur_logical >= logical)
+			if (cur_logical >= map->m_lblk)
 				break;
 			cur_logical++;
 		} while ((bh = bh->b_this_page) != head);
 
 		do {
-			if (cur_logical >= logical + blocks)
+			if (cur_logical >= map->m_lblk + blocks)
 				break;
 
-			if (buffer_delay(bh) ||
-			    buffer_unwritten(bh)) {
+			if (buffer_delay(bh) || buffer_unwritten(bh)) {
 
 				BUG_ON(bh->b_bdev != inode->i_sb->s_bdev);
 
@@ -2127,7 +2100,7 @@ static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical,
 			} else if (buffer_mapped(bh))
 				BUG_ON(bh->b_blocknr != pblock);
 
-			if (buffer_uninit(exbh))
+			if (map->m_flags & EXT4_MAP_UNINIT)
 				set_buffer_uninit(bh);
 			cur_logical++;
 			pblock++;
@@ -2138,21 +2111,6 @@ static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical,
 }
 
 
-/*
- * __unmap_underlying_blocks - just a helper function to unmap
- * set of blocks described by @bh
- */
-static inline void __unmap_underlying_blocks(struct inode *inode,
-					     struct buffer_head *bh)
-{
-	struct block_device *bdev = inode->i_sb->s_bdev;
-	int blocks, i;
-
-	blocks = bh->b_size >> inode->i_blkbits;
-	for (i = 0; i < blocks; i++)
-		unmap_underlying_metadata(bdev, bh->b_blocknr + i);
-}
-
 static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
 			sector_t logical, long blk_cnt)
 {
@@ -2214,7 +2172,7 @@ static void ext4_print_free_blocks(struct inode *inode)
 static int mpage_da_map_blocks(struct mpage_da_data *mpd)
 {
 	int err, blks, get_blocks_flags;
-	struct buffer_head new;
+	struct ext4_map_blocks map;
 	sector_t next = mpd->b_blocknr;
 	unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
 	loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
@@ -2255,15 +2213,15 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
 	 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
 	 * variables are updated after the blocks have been allocated.
 	 */
-	new.b_state = 0;
+	map.m_lblk = next;
+	map.m_len = max_blocks;
 	get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
 	if (ext4_should_dioread_nolock(mpd->inode))
 		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
 	if (mpd->b_state & (1 << BH_Delay))
 		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
 
-	blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks,
-			       &new, get_blocks_flags);
+	blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags);
 	if (blks < 0) {
 		err = blks;
 		/*
@@ -2305,10 +2263,13 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
 	}
 	BUG_ON(blks == 0);
 
-	new.b_size = (blks << mpd->inode->i_blkbits);
+	if (map.m_flags & EXT4_MAP_NEW) {
+		struct block_device *bdev = mpd->inode->i_sb->s_bdev;
+		int i;
 
-	if (buffer_new(&new))
-		__unmap_underlying_blocks(mpd->inode, &new);
+		for (i = 0; i < map.m_len; i++)
+			unmap_underlying_metadata(bdev, map.m_pblk + i);
+	}
 
 	/*
 	 * If blocks are delayed marked, we need to
@@ -2316,7 +2277,7 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
 	 */
 	if ((mpd->b_state & (1 << BH_Delay)) ||
 	    (mpd->b_state & (1 << BH_Unwritten)))
-		mpage_put_bnr_to_bhs(mpd, next, &new);
+		mpage_put_bnr_to_bhs(mpd, &map);
 
 	if (ext4_should_order_data(mpd->inode)) {
 		err = ext4_jbd2_file_inode(handle, mpd->inode);
@@ -2534,8 +2495,9 @@ static int __mpage_da_writepage(struct page *page,
  * initialized properly.
  */
 static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
-				  struct buffer_head *bh_result, int create)
+				  struct buffer_head *bh, int create)
 {
+	struct ext4_map_blocks map;
 	int ret = 0;
 	sector_t invalid_block = ~((sector_t) 0xffff);
 
@@ -2543,16 +2505,22 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
 		invalid_block = ~0;
 
 	BUG_ON(create == 0);
-	BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
+	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
+
+	map.m_lblk = iblock;
+	map.m_len = 1;
 
 	/*
 	 * first, we need to know whether the block is allocated already
 	 * preallocated blocks are unmapped but should treated
 	 * the same as allocated blocks.
 	 */
-	ret = ext4_get_blocks(NULL, inode, iblock, 1, bh_result, 0);
-	if ((ret == 0) && !buffer_delay(bh_result)) {
-		/* the block isn't (pre)allocated yet, let's reserve space */
+	ret = ext4_map_blocks(NULL, inode, &map, 0);
+	if (ret < 0)
+		return ret;
+	if (ret == 0) {
+		if (buffer_delay(bh))
+			return 0; /* Not sure this could or should happen */
 		/*
 		 * XXX: __block_prepare_write() unmaps passed block,
 		 * is it OK?
@@ -2562,26 +2530,26 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
 			/* not enough space to reserve */
 			return ret;
 
-		map_bh(bh_result, inode->i_sb, invalid_block);
-		set_buffer_new(bh_result);
-		set_buffer_delay(bh_result);
-	} else if (ret > 0) {
-		bh_result->b_size = (ret << inode->i_blkbits);
-		if (buffer_unwritten(bh_result)) {
-			/* A delayed write to unwritten bh should
-			 * be marked new and mapped. Mapped ensures
-			 * that we don't do get_block multiple times
-			 * when we write to the same offset and new
-			 * ensures that we do proper zero out for
-			 * partial write.
-			 */
-			set_buffer_new(bh_result);
-			set_buffer_mapped(bh_result);
-		}
-		ret = 0;
+		map_bh(bh, inode->i_sb, invalid_block);
+		set_buffer_new(bh);
+		set_buffer_delay(bh);
+		return 0;
 	}
 
-	return ret;
+	map_bh(bh, inode->i_sb, map.m_pblk);
+	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
+
+	if (buffer_unwritten(bh)) {
+		/* A delayed write to unwritten bh should be marked
+		 * new and mapped. Mapped ensures that we don't do
+		 * get_block multiple times when we write to the same
+		 * offset and new ensures that we do proper zero out
+		 * for partial write.
+		 */
+		set_buffer_new(bh);
+		set_buffer_mapped(bh);
+	}
+	return 0;
 }
 
 /*
@@ -2603,21 +2571,8 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
 static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
 				   struct buffer_head *bh_result, int create)
 {
-	int ret = 0;
-	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
-
 	BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
-
-	/*
-	 * we don't want to do block allocation in writepage
-	 * so call get_block_wrap with create = 0
-	 */
-	ret = ext4_get_blocks(NULL, inode, iblock, max_blocks, bh_result, 0);
-	if (ret > 0) {
-		bh_result->b_size = (ret << inode->i_blkbits);
-		ret = 0;
-	}
-	return ret;
+	return _ext4_get_block(inode, iblock, bh_result, 0);
 }
 
 static int bget_one(handle_t *handle, struct buffer_head *bh)
@@ -3644,46 +3599,18 @@ out:
 	return ret;
 }
 
+/*
+ * ext4_get_block used when preparing for a DIO write or buffer write.
+ * We allocate an uinitialized extent if blocks haven't been allocated.
+ * The extent will be converted to initialized after the IO is complete.
+ */
 static int ext4_get_block_write(struct inode *inode, sector_t iblock,
 				struct buffer_head *bh_result, int create)
 {
-	handle_t *handle = ext4_journal_current_handle();
-	int ret = 0;
-	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
-	int dio_credits;
-	int started = 0;
-
 	ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
 		   inode->i_ino, create);
-	/*
-	 * ext4_get_block in prepare for a DIO write or buffer write.
-	 * We allocate an uinitialized extent if blocks haven't been allocated.
-	 * The extent will be converted to initialized after IO complete.
-	 */
-	create = EXT4_GET_BLOCKS_IO_CREATE_EXT;
-
-	if (!handle) {
-		if (max_blocks > DIO_MAX_BLOCKS)
-			max_blocks = DIO_MAX_BLOCKS;
-		dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
-		handle = ext4_journal_start(inode, dio_credits);
-		if (IS_ERR(handle)) {
-			ret = PTR_ERR(handle);
-			goto out;
-		}
-		started = 1;
-	}
-
-	ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
-			      create);
-	if (ret > 0) {
-		bh_result->b_size = (ret << inode->i_blkbits);
-		ret = 0;
-	}
-	if (started)
-		ext4_journal_stop(handle);
-out:
-	return ret;
+	return _ext4_get_block(inode, iblock, bh_result,
+			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
 }
 
 static void dump_completed_IO(struct inode * inode)