path: root/fs/ext4
author		Allison Henderson <achender@linux.vnet.ibm.com>	2011-05-25 07:41:46 -0400
committer	Theodore Ts'o <tytso@mit.edu>	2011-05-25 07:41:46 -0400
commit		e861304b8ed83fe43e36d46794d72641c82d4636 (patch)
tree		14a9c42ee5b6a531bc3202063bcd9c413e30c17e /fs/ext4
parent		d583fb87a3ff0ca50befd2f73f7a67fade1c8c56 (diff)
ext4: add "punch hole" flag to ext4_map_blocks()
This patch adds a new flag to ext4_map_blocks() that specifies that the given range of blocks should be punched out. Extents are first converted to uninitialized extents before they are punched out. Because punching a hole may require that an extent be split, the split may need more blocks than are available; to deal with this, the use of reserved blocks is enabled so that the split can proceed. The routine then returns the number of blocks successfully punched out.

[ext4 punch hole patch series 4/5 v7]

Signed-off-by: Allison Henderson <achender@us.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Reviewed-by: Mingming Cao <cmm@us.ibm.com>
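For orientation, here is a hedged caller-side sketch of how the new flag might be used. The helper name example_punch_range is hypothetical (the real punch-hole caller is added later in this series); it only illustrates that, with the flag set, ext4_ext_map_blocks() reports the number of blocks punched out, so a caller walks the hole range in extent-sized steps:

/*
 * Hypothetical caller sketch, not part of this patch: punch out the
 * logical block range [first, first + num) by repeatedly asking
 * ext4_ext_map_blocks() to punch whatever fits within one extent.
 */
static int example_punch_range(handle_t *handle, struct inode *inode,
			       ext4_lblk_t first, ext4_lblk_t num)
{
	struct ext4_map_blocks map;
	int ret;

	while (num > 0) {
		map.m_lblk = first;
		map.m_len = num;

		/*
		 * With the punch-out flag, a non-negative return value is
		 * the number of blocks actually punched out.
		 */
		ret = ext4_ext_map_blocks(handle, inode, &map,
					  EXT4_GET_BLOCKS_PUNCH_OUT_EXT);
		if (ret < 0)
			return ret;
		if (ret == 0)
			ret = 1;	/* nothing mapped here; step past it */

		first += ret;
		num -= ret;
	}
	return 0;
}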
Diffstat (limited to 'fs/ext4')
-rw-r--r--	fs/ext4/extents.c	| 98
1 file changed, 87 insertions(+), 11 deletions(-)
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index c8c687b5d9a8..1d456b2ac377 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3298,15 +3298,19 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 	ext4_fsblk_t newblock = 0;
 	int err = 0, depth, ret;
 	unsigned int allocated = 0;
+	unsigned int punched_out = 0;
+	unsigned int result = 0;
 	struct ext4_allocation_request ar;
 	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
+	struct ext4_map_blocks punch_map;
 
 	ext_debug("blocks %u/%u requested for inode %lu\n",
 		  map->m_lblk, map->m_len, inode->i_ino);
 	trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
 
 	/* check in cache */
-	if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
+	if (ext4_ext_in_cache(inode, map->m_lblk, &newex) &&
+		((flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) == 0)) {
 		if (!newex.ee_start_lo && !newex.ee_start_hi) {
 			if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
 				/*
@@ -3371,16 +3375,84 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 		ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
 			  ee_block, ee_len, newblock);
 
-			/* Do not put uninitialized extent in the cache */
-			if (!ext4_ext_is_uninitialized(ex)) {
-				ext4_ext_put_in_cache(inode, ee_block,
-							ee_len, ee_start);
-				goto out;
+			if ((flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) == 0) {
+				/*
+				 * Do not put uninitialized extent
+				 * in the cache
+				 */
+				if (!ext4_ext_is_uninitialized(ex)) {
+					ext4_ext_put_in_cache(inode, ee_block,
+						ee_len, ee_start);
+					goto out;
+				}
+				ret = ext4_ext_handle_uninitialized_extents(
+					handle, inode, map, path, flags,
+					allocated, newblock);
+				return ret;
 			}
-			ret = ext4_ext_handle_uninitialized_extents(handle,
-					inode, map, path, flags, allocated,
-					newblock);
-			return ret;
+
+			/*
+			 * Punch out the map length, but only to the
+			 * end of the extent
+			 */
+			punched_out = allocated < map->m_len ?
+				allocated : map->m_len;
+
+			/*
+			 * Sense extents need to be converted to
+			 * uninitialized, they must fit in an
+			 * uninitialized extent
+			 */
+			if (punched_out > EXT_UNINIT_MAX_LEN)
+				punched_out = EXT_UNINIT_MAX_LEN;
+
+			punch_map.m_lblk = map->m_lblk;
+			punch_map.m_pblk = newblock;
+			punch_map.m_len = punched_out;
+			punch_map.m_flags = 0;
+
+			/* Check to see if the extent needs to be split */
+			if (punch_map.m_len != ee_len ||
+				punch_map.m_lblk != ee_block) {
+
+				ret = ext4_split_extent(handle, inode,
+					path, &punch_map, 0,
+					EXT4_GET_BLOCKS_PUNCH_OUT_EXT |
+					EXT4_GET_BLOCKS_PRE_IO);
+
+				if (ret < 0) {
+					err = ret;
+					goto out2;
+				}
+				/*
+				 * find extent for the block at
+				 * the start of the hole
+				 */
+				ext4_ext_drop_refs(path);
+				kfree(path);
+
+				path = ext4_ext_find_extent(inode,
+					map->m_lblk, NULL);
+				if (IS_ERR(path)) {
+					err = PTR_ERR(path);
+					path = NULL;
+					goto out2;
+				}
+
+				depth = ext_depth(inode);
+				ex = path[depth].p_ext;
+				ee_len = ext4_ext_get_actual_len(ex);
+				ee_block = le32_to_cpu(ex->ee_block);
+				ee_start = ext4_ext_pblock(ex);
+
+			}
+
+			ext4_ext_mark_uninitialized(ex);
+
+			err = ext4_ext_remove_space(inode, map->m_lblk,
+				map->m_lblk + punched_out);
+
+			goto out2;
 		}
 	}
 
@@ -3525,7 +3597,11 @@ out2:
 	}
 	trace_ext4_ext_map_blocks_exit(inode, map->m_lblk,
 		newblock, map->m_len, err ? err : allocated);
-	return err ? err : allocated;
+
+	result = (flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) ?
+			punched_out : allocated;
+
+	return err ? err : result;
 }
 
 void ext4_ext_truncate(struct inode *inode)
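As the hunks above show, a single call with EXT4_GET_BLOCKS_PUNCH_OUT_EXT punches at most min(allocated, map->m_len) blocks, further capped at EXT_UNINIT_MAX_LEN because the extent is converted to an uninitialized extent before its space is removed. A minimal standalone illustration of that clamping (userspace only; the helper name and sample values are illustrative, not from the patch):

#include <stdio.h>

/*
 * In the ext4 headers, EXT_UNINIT_MAX_LEN is (1 << 15) - 1 = 32767:
 * the top bit of the on-disk extent length marks it uninitialized.
 */
#define EXT_UNINIT_MAX_LEN	((1U << 15) - 1)

/*
 * Illustrative helper: how many blocks one punch-out call reports,
 * given how many blocks of the request the found extent covers
 * ("allocated") and how many were requested ("requested").
 */
static unsigned int punched_in_one_call(unsigned int allocated,
					unsigned int requested)
{
	unsigned int punched_out = allocated < requested ? allocated : requested;

	if (punched_out > EXT_UNINIT_MAX_LEN)
		punched_out = EXT_UNINIT_MAX_LEN;
	return punched_out;
}

int main(void)
{
	/*
	 * 40000 blocks requested within a 50000-block extent: capped at
	 * 32767, so the caller has to come back for the rest.
	 */
	printf("%u\n", punched_in_one_call(50000, 40000));	/* 32767 */
	/* A 10-block extent limits the punch to 10 blocks. */
	printf("%u\n", punched_in_one_call(10, 40000));		/* 10 */
	return 0;
}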