author    Theodore Ts'o <tytso@mit.edu>  2010-10-27 21:23:12 -0400
committer Theodore Ts'o <tytso@mit.edu>  2010-10-27 21:23:12 -0400
commit    58590b06d79f7ce5ab64ff3b6d537180fa50dc84 (patch)
tree      4e3c785e4ec6709385f62b741389de34a9ade279 /fs/ext4/extents.c
parent    899611ee7d373e5eeda08e9a8632684e1ebbbf00 (diff)
ext4: fix EOFBLOCKS_FL handling
It turns out we have several problems with how EOFBLOCKS_FL is handled.
First of all, there was a fencepost error where we were not clearing the
EOFBLOCKS_FL flag when we filled in the last uninitialized block, but
rather when we allocated the next block _after_ the uninitialized block.
Secondly, we were not testing to see if we needed to clear the
EOFBLOCKS_FL flag when writing to the file using O_DIRECT, or when
converting an uninitialized block (which is the most common case).

Google-Bug-Id: 2928259

Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
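For context, a minimal userspace sketch of the scenario this fix covers (not part of the commit; the mount point, file name, and sizes are arbitrary assumptions): fallocate() with FALLOC_FL_KEEP_SIZE preallocates uninitialized blocks beyond i_size, which is when ext4 sets EOFBLOCKS_FL, and a later write that fills the last preallocated block is exactly the case that must now clear the flag.

/*
 * Illustrative sketch only: preallocate past EOF (sets EOFBLOCKS_FL on
 * ext4), then write the last preallocated block.  After this patch,
 * filling that final uninitialized block clears EOFBLOCKS_FL, whether
 * the write is buffered, O_DIRECT, or an end-io extent conversion.
 * The path "/mnt/ext4/testfile" and the 1 MiB size are made up.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/ext4/testfile", O_CREAT | O_RDWR, 0644);
	if (fd < 0)
		return 1;

	/* Preallocate 1 MiB beyond EOF without changing i_size */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) < 0)
		return 1;

	/* Fill the final 4 KiB block of the preallocated range */
	char buf[4096];
	memset(buf, 0xaa, sizeof(buf));
	if (pwrite(fd, buf, sizeof(buf), (1 << 20) - 4096) < 0)
		return 1;

	close(fd);
	return 0;
}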
Diffstat (limited to 'fs/ext4/extents.c')
-rw-r--r--  fs/ext4/extents.c  98
1 file changed, 69 insertions(+), 29 deletions(-)
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 06328d3e5717..820278410220 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3180,6 +3180,57 @@ static void unmap_underlying_metadata_blocks(struct block_device *bdev,
 		unmap_underlying_metadata(bdev, block + i);
 }
 
+/*
+ * Handle EOFBLOCKS_FL flag, clearing it if necessary
+ */
+static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
+			      struct ext4_map_blocks *map,
+			      struct ext4_ext_path *path,
+			      unsigned int len)
+{
+	int i, depth;
+	struct ext4_extent_header *eh;
+	struct ext4_extent *ex, *last_ex;
+
+	if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
+		return 0;
+
+	depth = ext_depth(inode);
+	eh = path[depth].p_hdr;
+	ex = path[depth].p_ext;
+
+	if (unlikely(!eh->eh_entries)) {
+		EXT4_ERROR_INODE(inode, "eh->eh_entries == 0 and "
+				 "EOFBLOCKS_FL set");
+		return -EIO;
+	}
+	last_ex = EXT_LAST_EXTENT(eh);
+	/*
+	 * We should clear the EOFBLOCKS_FL flag if we are writing the
+	 * last block in the last extent in the file.  We test this by
+	 * first checking to see if the caller to
+	 * ext4_ext_get_blocks() was interested in the last block (or
+	 * a block beyond the last block) in the current extent.  If
+	 * this turns out to be false, we can bail out from this
+	 * function immediately.
+	 */
+	if (map->m_lblk + len < le32_to_cpu(last_ex->ee_block) +
+	    ext4_ext_get_actual_len(last_ex))
+		return 0;
+	/*
+	 * If the caller does appear to be planning to write at or
+	 * beyond the end of the current extent, we then test to see
+	 * if the current extent is the last extent in the file, by
+	 * checking to make sure it was reached via the rightmost node
+	 * at each level of the tree.
+	 */
+	for (i = depth-1; i >= 0; i--)
+		if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
+			return 0;
+	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
+	return ext4_mark_inode_dirty(handle, inode);
+}
+
 static int
 ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
 					struct ext4_map_blocks *map,
@@ -3217,8 +3268,12 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
 	if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
 		ret = ext4_convert_unwritten_extents_endio(handle, inode,
 							path);
-		if (ret >= 0)
+		if (ret >= 0) {
 			ext4_update_inode_fsync_trans(handle, inode, 1);
+			err = check_eofblocks_fl(handle, inode, map, path,
+						 map->m_len);
+		} else
+			err = ret;
 		goto out2;
 	}
 	/* buffered IO case */
@@ -3244,8 +3299,13 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
 
 	/* buffered write, writepage time, convert*/
 	ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
-	if (ret >= 0)
+	if (ret >= 0) {
 		ext4_update_inode_fsync_trans(handle, inode, 1);
+		err = check_eofblocks_fl(handle, inode, map, path, map->m_len);
+		if (err < 0)
+			goto out2;
+	}
+
 out:
 	if (ret <= 0) {
 		err = ret;
@@ -3292,6 +3352,7 @@ out2:
 	}
 	return err ? err : allocated;
 }
+
 /*
  * Block allocation/map/preallocation routine for extents based files
  *
@@ -3315,9 +3376,9 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 {
 	struct ext4_ext_path *path = NULL;
 	struct ext4_extent_header *eh;
-	struct ext4_extent newex, *ex, *last_ex;
+	struct ext4_extent newex, *ex;
 	ext4_fsblk_t newblock;
-	int i, err = 0, depth, ret, cache_type;
+	int err = 0, depth, ret, cache_type;
 	unsigned int allocated = 0;
 	struct ext4_allocation_request ar;
 	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
@@ -3497,31 +3558,10 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 		map->m_flags |= EXT4_MAP_UNINIT;
 	}
 
-	if (unlikely(ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))) {
-		if (unlikely(!eh->eh_entries)) {
-			EXT4_ERROR_INODE(inode,
-					 "eh->eh_entries == 0 and "
-					 "EOFBLOCKS_FL set");
-			err = -EIO;
-			goto out2;
-		}
-		last_ex = EXT_LAST_EXTENT(eh);
-		/*
-		 * If the current leaf block was reached by looking at
-		 * the last index block all the way down the tree, and
-		 * we are extending the inode beyond the last extent
-		 * in the current leaf block, then clear the
-		 * EOFBLOCKS_FL flag.
-		 */
-		for (i = depth-1; i >= 0; i--) {
-			if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
-				break;
-		}
-		if ((i < 0) &&
-		    (map->m_lblk + ar.len > le32_to_cpu(last_ex->ee_block) +
-		     ext4_ext_get_actual_len(last_ex)))
-			ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
-	}
+	err = check_eofblocks_fl(handle, inode, map, path, ar.len);
+	if (err)
+		goto out2;
+
 	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
 	if (err) {
 		/* free data blocks we just allocated */