author		Theodore Ts'o <tytso@mit.edu>	2014-09-04 18:07:25 -0400
committer	Theodore Ts'o <tytso@mit.edu>	2014-09-04 18:07:25 -0400
commit		e3cf5d5d9a86df1c5e413bdd3725c25a16ff854c (patch)
tree		77a44d159efb7807419e6443584cc67f28929c10 /fs/ext4
parent		a521100231f816f8cdd9c8e77da14ff1e42c2b17 (diff)
ext4: prepare to drop EXT4_STATE_DELALLOC_RESERVED
The EXT4_STATE_DELALLOC_RESERVED flag was originally implemented because it was too hard to make sure the mballoc and get_block flags could be reliably passed down through all of the codepaths that end up calling ext4_mb_new_blocks().

Since then, we have mb_flags passed down through most of the code paths, so getting rid of EXT4_STATE_DELALLOC_RESERVED isn't as tricky as it used to be.

This commit plumbs in the last of what is required, and then adds a WARN_ON check to make sure we haven't missed anything.  If this passes a full regression test run, we can then drop EXT4_STATE_DELALLOC_RESERVED.

Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Reviewed-by: Jan Kara <jack@suse.cz>
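As a rough illustration of the pattern this patch relies on, here is a minimal, self-contained C sketch. The struct and function names (alloc_request, mb_new_blocks, map_blocks) are illustrative stand-ins rather than the kernel API; only the flag names mirror the real EXT4_GET_BLOCKS_DELALLOC_RESERVE / EXT4_MB_DELALLOC_RESERVED. Callers translate the get_block flag into an mballoc flag before allocating, and the allocator warns during the transition if the per-inode state and the plumbed flag ever disagree.

#include <stdio.h>

/* Simplified stand-ins for the kernel's flag words; the surrounding
 * types are illustrative only, not the real ext4 structures. */
#define GET_BLOCKS_DELALLOC_RESERVE  0x01   /* caller-visible get_block flag */
#define MB_DELALLOC_RESERVED         0x01   /* allocator-visible mballoc flag */

struct alloc_request {
	unsigned int flags;          /* plays the role of ar.flags in ext4 */
};

/* Allocator side: trust the flag the caller passed down instead of
 * consulting per-inode state, and warn if the two ever disagree while
 * both mechanisms coexist. */
static void mb_new_blocks(struct alloc_request *ar, int inode_has_delalloc_state)
{
	if (inode_has_delalloc_state) {
		/* transition check: every delalloc path should already set the flag */
		if (!(ar->flags & MB_DELALLOC_RESERVED))
			fprintf(stderr, "WARN: delalloc state set but flag not plumbed\n");
		ar->flags |= MB_DELALLOC_RESERVED;
	}
	if (!(ar->flags & MB_DELALLOC_RESERVED))
		printf("non-delalloc path: check free blocks and quota up front\n");
	else
		printf("delalloc path: blocks/quota were reserved at write time\n");
}

/* Caller side: translate the get_block flag into the mballoc flag,
 * in the spirit of what the patch does in the map_blocks paths. */
static void map_blocks(unsigned int gb_flags, int inode_has_delalloc_state)
{
	struct alloc_request ar = { .flags = 0 };

	if (gb_flags & GET_BLOCKS_DELALLOC_RESERVE)
		ar.flags |= MB_DELALLOC_RESERVED;
	mb_new_blocks(&ar, inode_has_delalloc_state);
}

int main(void)
{
	map_blocks(0, 0);                              /* ordinary allocation */
	map_blocks(GET_BLOCKS_DELALLOC_RESERVE, 1);    /* delalloc writeback */
	return 0;
}

Once every caller sets the flag, the per-inode state (and the WARN_ON) can be removed without changing behavior, which is exactly the follow-up this commit prepares for.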
Diffstat (limited to 'fs/ext4')
-rw-r--r--	fs/ext4/balloc.c	3
-rw-r--r--	fs/ext4/extents.c	6
-rw-r--r--	fs/ext4/indirect.c	6
-rw-r--r--	fs/ext4/mballoc.c	10
-rw-r--r--	fs/ext4/xattr.c	6
5 files changed, 17 insertions, 14 deletions
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 581ef40fbe90..d70f154f6da3 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -636,8 +636,7 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
 	 * Account for the allocated meta blocks.  We will never
 	 * fail EDQUOT for metdata, but we do account for it.
 	 */
-	if (!(*errp) &&
-	    ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
+	if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) {
 		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
 		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 		dquot_alloc_block_nofail(inode,
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 3ac1686efff8..8170b3254767 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -1933,6 +1933,8 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
 	ext4_lblk_t next;
 	int mb_flags = 0, unwritten;
 
+	if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
+		mb_flags |= EXT4_MB_DELALLOC_RESERVED;
 	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
 		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
 		return -EIO;
@@ -2054,7 +2056,7 @@ prepend:
 	 * We're gonna add a new leaf in the tree.
 	 */
 	if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
-		mb_flags = EXT4_MB_USE_RESERVED;
+		mb_flags |= EXT4_MB_USE_RESERVED;
 	err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
 				       ppath, newext);
 	if (err)
@@ -4438,6 +4440,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 	ar.flags = 0;
 	if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
 		ar.flags |= EXT4_MB_HINT_NOPREALLOC;
+	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
+		ar.flags |= EXT4_MB_DELALLOC_RESERVED;
 	newblock = ext4_mb_new_blocks(handle, &ar, &err);
 	if (!newblock)
 		goto out2;
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 69af0cd64724..36b369697a13 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -333,7 +333,9 @@ static int ext4_alloc_branch(handle_t *handle,
 			new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
 		} else
 			ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
-					ar->inode, ar->goal, 0, NULL, &err);
+					ar->inode, ar->goal,
+					ar->flags & EXT4_MB_DELALLOC_RESERVED,
+					NULL, &err);
 		if (err) {
 			i--;
 			goto failed;
@@ -572,6 +574,8 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
 	ar.logical = map->m_lblk;
 	if (S_ISREG(inode->i_mode))
 		ar.flags = EXT4_MB_HINT_DATA;
+	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
+		ar.flags |= EXT4_MB_DELALLOC_RESERVED;
 
 	ar.goal = ext4_find_goal(inode, map->m_lblk, partial);
 
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 8b0f9ef517d6..15dffdac5907 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -4415,9 +4415,12 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
 	 * EDQUOT check, as blocks and quotas have been already
 	 * reserved when data being copied into pagecache.
 	 */
-	if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED))
+	if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED)) {
+		WARN_ON((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0);
 		ar->flags |= EXT4_MB_DELALLOC_RESERVED;
-	else {
+	}
+
+	if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
 		/* Without delayed allocation we need to verify
 		 * there is enough free blocks to do block allocation
 		 * and verify allocation doesn't exceed the quota limits.
@@ -4528,8 +4531,7 @@ out:
 	if (inquota && ar->len < inquota)
 		dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
 	if (!ar->len) {
-		if (!ext4_test_inode_state(ar->inode,
-					EXT4_STATE_DELALLOC_RESERVED))
+		if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
 			/* release all the reserved blocks if non delalloc */
 			percpu_counter_sub(&sbi->s_dirtyclusters_counter,
 						reserv_clstrs);
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index e7387337060c..da4df703c211 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -899,14 +899,8 @@ inserted:
 			if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
 				goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
 
-			/*
-			 * take i_data_sem because we will test
-			 * i_delalloc_reserved_flag in ext4_mb_new_blocks
-			 */
-			down_read(&EXT4_I(inode)->i_data_sem);
 			block = ext4_new_meta_blocks(handle, inode, goal, 0,
 						     NULL, &error);
-			up_read((&EXT4_I(inode)->i_data_sem));
 			if (error)
 				goto cleanup;
 