Diffstat (limited to 'fs/ext4/extents.c')
-rw-r--r--   fs/ext4/extents.c   77
1 file changed, 59 insertions, 18 deletions
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 3a7928f825e4..7d7b74e94687 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -296,29 +296,44 @@ static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
  * to allocate @blocks
  * Worse case is one block per extent
  */
-int ext4_ext_calc_metadata_amount(struct inode *inode, int blocks)
+int ext4_ext_calc_metadata_amount(struct inode *inode, sector_t lblock)
 {
-	int lcap, icap, rcap, leafs, idxs, num;
-	int newextents = blocks;
-
-	rcap = ext4_ext_space_root_idx(inode, 0);
-	lcap = ext4_ext_space_block(inode, 0);
-	icap = ext4_ext_space_block_idx(inode, 0);
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	int idxs, num = 0;
 
-	/* number of new leaf blocks needed */
-	num = leafs = (newextents + lcap - 1) / lcap;
+	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
+		/ sizeof(struct ext4_extent_idx));
 
 	/*
-	 * Worse case, we need separate index block(s)
-	 * to link all new leaf blocks
+	 * If the new delayed allocation block is contiguous with the
+	 * previous da block, it can share index blocks with the
+	 * previous block, so we only need to allocate a new index
+	 * block every idxs leaf blocks. At ldxs**2 blocks, we need
+	 * an additional index block, and at ldxs**3 blocks, yet
+	 * another index blocks.
 	 */
-	idxs = (leafs + icap - 1) / icap;
-	do {
-		num += idxs;
-		idxs = (idxs + icap - 1) / icap;
-	} while (idxs > rcap);
+	if (ei->i_da_metadata_calc_len &&
+	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
+		if ((ei->i_da_metadata_calc_len % idxs) == 0)
+			num++;
+		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
+			num++;
+		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
+			num++;
+			ei->i_da_metadata_calc_len = 0;
+		} else
+			ei->i_da_metadata_calc_len++;
+		ei->i_da_metadata_calc_last_lblock++;
+		return num;
+	}
 
-	return num;
+	/*
+	 * In the worst case we need a new set of index blocks at
+	 * every level of the inode's extent tree.
+	 */
+	ei->i_da_metadata_calc_len = 1;
+	ei->i_da_metadata_calc_last_lblock = lblock;
+	return ext_depth(inode) + 1;
 }
 
 static int
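To make the new accounting concrete, here is a small user-space sketch (not kernel code) of the same calculation. It assumes a 4096-byte block size, so one index block holds (4096 - 12) / 12 = 340 entries (12-byte ext4_extent_header, 12-byte ext4_extent_idx), and it uses a hypothetical struct da_state in place of the i_da_metadata_calc_* fields of ext4_inode_info; the extent tree depth is passed in instead of being read with ext_depth().

    #include <stdio.h>

    /* Hypothetical stand-in for the i_da_metadata_calc_* fields of ext4_inode_info. */
    struct da_state {
            unsigned int calc_len;          /* blocks in the current contiguous da run */
            unsigned long last_lblock;      /* last logical block of that run */
    };

    /* Metadata blocks to reserve for one new delayed-allocation block at lblock. */
    static int metadata_for_block(struct da_state *st, unsigned long lblock,
                                  unsigned int blocksize, int depth)
    {
            int idxs = (blocksize - 12) / 12;       /* index entries per block */
            int num = 0;

            if (st->calc_len && st->last_lblock + 1 == lblock) {
                    /* Contiguous with the previous da block: a new metadata block
                     * is only needed every idxs blocks, another every idxs^2, etc. */
                    if (st->calc_len % idxs == 0)
                            num++;
                    if (st->calc_len % (idxs * idxs) == 0)
                            num++;
                    if (st->calc_len % (idxs * idxs * idxs) == 0) {
                            num++;
                            st->calc_len = 0;
                    } else
                            st->calc_len++;
                    st->last_lblock++;
                    return num;
            }

            /* Discontiguous: assume a fresh index block at every tree level. */
            st->calc_len = 1;
            st->last_lblock = lblock;
            return depth + 1;
    }

    int main(void)
    {
            struct da_state st = { 0, 0 };
            unsigned long b, total = 0;

            for (b = 0; b < 1000; b++)              /* 1000 contiguous blocks */
                    total += metadata_for_block(&st, b, 4096, 1);
            /* Prints 4: depth + 1 = 2 for the first block, plus one more each
             * at the runs crossing blocks 340 and 680. */
            printf("reserved metadata blocks: %lu\n", total);
            return 0;
    }

Compared with the removed code, which always charged one extent per block plus a full chain of index blocks up front, the per-run state lets contiguous delayed-allocation blocks share their index-block reservation.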
@@ -3023,6 +3038,14 @@ out:
 	return err;
 }
 
+static void unmap_underlying_metadata_blocks(struct block_device *bdev,
+			sector_t block, int count)
+{
+	int i;
+	for (i = 0; i < count; i++)
+		unmap_underlying_metadata(bdev, block + i);
+}
+
 static int
 ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
 			ext4_lblk_t iblock, unsigned int max_blocks,
@@ -3098,6 +3121,18 @@ out:
 	} else
 		allocated = ret;
 	set_buffer_new(bh_result);
+	/*
+	 * if we allocated more blocks than requested
+	 * we need to make sure we unmap the extra block
+	 * allocated. The actual needed block will get
+	 * unmapped later when we find the buffer_head marked
+	 * new.
+	 */
+	if (allocated > max_blocks) {
+		unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
+					newblock + max_blocks,
+					allocated - max_blocks);
+	}
 map_out:
 	set_buffer_mapped(bh_result);
 out1:
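A sketch of what the two hunks above arrange, outside the kernel: when the extent code converts more blocks than the caller asked for, any cached buffers still attached to the extra on-disk blocks must be invalidated so stale dirty metadata cannot later be written over them. The types and the discard_cached_block() helper below are stand-ins for struct block_device and unmap_underlying_metadata(), not the real API.

    #include <stdio.h>

    struct bdev { const char *name; };      /* stand-in for struct block_device */

    /* Stand-in for unmap_underlying_metadata(): drop any cached buffer for 'block'. */
    static void discard_cached_block(struct bdev *bdev, unsigned long long block)
    {
            printf("%s: invalidate cached buffer for block %llu\n", bdev->name, block);
    }

    /* Mirror of the new helper: invalidate 'count' blocks starting at 'block'. */
    static void discard_cached_blocks(struct bdev *bdev, unsigned long long block,
                                      int count)
    {
            int i;

            for (i = 0; i < count; i++)
                    discard_cached_block(bdev, block + i);
    }

    int main(void)
    {
            struct bdev dev = { "sda1" };
            unsigned long long newblock = 1000;     /* first physical block returned */
            unsigned int max_blocks = 8;            /* blocks the caller asked for   */
            unsigned int allocated = 12;            /* blocks actually converted     */

            /* Only the tail the caller did not ask for needs explicit invalidation;
             * the requested range is handled when its buffer_head is seen as new. */
            if (allocated > max_blocks)
                    discard_cached_blocks(&dev, newblock + max_blocks,
                                          allocated - max_blocks);
            return 0;
    }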
@@ -3190,7 +3225,13 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 	 * this situation is possible, though, _during_ tree modification;
 	 * this is why assert can't be put in ext4_ext_find_extent()
 	 */
-	BUG_ON(path[depth].p_ext == NULL && depth != 0);
+	if (path[depth].p_ext == NULL && depth != 0) {
+		ext4_error(inode->i_sb, __func__, "bad extent address "
+			   "inode: %lu, iblock: %d, depth: %d",
+			   inode->i_ino, iblock, depth);
+		err = -EIO;
+		goto out2;
+	}
 	eh = path[depth].p_hdr;
 
 	ex = path[depth].p_ext;
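The last hunk turns an assertion into a reported filesystem error: a corrupted extent tree now logs the inode, logical block, and depth and fails the mapping with -EIO instead of taking the whole machine down with BUG_ON(). A rough user-space illustration of that pattern follows; the types and the logging are placeholders, not the ext4_error() API.

    #include <errno.h>
    #include <stdio.h>

    struct ext_path { void *p_ext; };       /* placeholder for struct ext4_ext_path */

    /* Validate a looked-up path; report corruption and fail instead of aborting. */
    static int check_extent_path(const struct ext_path *path, int depth,
                                 unsigned long ino, unsigned int iblock)
    {
            if (path[depth].p_ext == NULL && depth != 0) {
                    fprintf(stderr,
                            "bad extent address inode: %lu, iblock: %u, depth: %d\n",
                            ino, iblock, depth);
                    return -EIO;    /* caller unwinds via its error path */
            }
            return 0;
    }

    int main(void)
    {
            struct ext_path path[2] = { { NULL }, { NULL } };

            /* Depth 1 with a NULL extent pointer: reported and refused, no crash. */
            printf("check_extent_path() returned %d\n",
                   check_extent_path(path, 1, 12, 42));
            return 0;
    }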