author     Lukas Czerner <lczerner@redhat.com>   2013-03-10 22:46:30 -0400
committer  Theodore Ts'o <tytso@mit.edu>         2013-03-10 22:46:30 -0400
commit     232ec8720d4e45405e37144c67053042c6b886d3 (patch)
tree       fdbf80788cb229f586d38549ca0655d1843022c0   /fs/ext4/extents.c
parent     bb8b20ed94bc69120e31399c43cb336300dea109 (diff)
ext4: update reserved space after the 'correction'
Currently in ext4_ext_map_blocks(), in the delayed allocation writeback path, we would update the reservation and only after that check whether we claimed a cluster outside of the range of the allocation; if so, we would give the block back to the reservation pool.

However, this also means that if the number of reserved data blocks dropped to zero before the correction, we would release all the metadata reservation as well, even though we might still need it because we are not done with the delayed allocation and there might be more blocks to come. This results in error messages such as:

EXT4-fs warning (device sdb): ext4_da_update_reserve_space:361: ino 12, allocated 1 with only 0 reserved metadata blocks (releasing 1 blocks with reserved 1 data blocks)

This only happens on bigalloc file systems and it can be easily reproduced using fiemap-tester from xfstests like this:

./src/fiemap-tester -m DHDHDHDHD -S -p0 /mnt/test/file

or by running xfstests such as 225.

Fix this by doing the correction first and updating the reservation after that, so that we do not accidentally decrease i_reserved_data_blocks to zero.

Signed-off-by: Lukas Czerner <lczerner@redhat.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
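The fix is purely a matter of ordering. The stand-alone C model below is an illustrative sketch only, not the kernel code: the struct, the counters, and the helpers update_reserve_space() and correction() are simplified stand-ins for ext4_da_update_reserve_space() and the give-back logic in ext4_ext_map_blocks(). It shows why claiming quota before the correction can transiently drive the data reservation to zero and drop the metadata reservation, while doing the correction first keeps it.

#include <stdio.h>

/* Simplified per-inode counters; the names only mirror ext4_inode_info. */
struct rsv {
	int data;	/* reserved data clusters (like i_reserved_data_blocks) */
	int meta;	/* reserved metadata blocks                              */
};

/* Claim quota for the allocated clusters.  In this model, once the data
 * reservation reaches zero the whole metadata reservation is released too. */
static void update_reserve_space(struct rsv *r, int allocated)
{
	r->data -= allocated;
	if (r->data <= 0) {
		r->data = 0;
		r->meta = 0;	/* released even though delalloc continues */
	}
}

/* The "correction": give back a cluster that was claimed outside the
 * requested range (possible on bigalloc file systems). */
static void correction(struct rsv *r, int extra)
{
	r->data += extra;
}

int main(void)
{
	/* One cluster still reserved for pending delayed allocation; the
	 * current allocation claimed one cluster outside the requested
	 * range, so the correction gives one cluster back. */
	struct rsv old_order = { .data = 1, .meta = 3 };
	struct rsv new_order = { .data = 1, .meta = 3 };

	/* Old order: claim first (data transiently hits 0, meta is lost),
	 * correct afterwards. */
	update_reserve_space(&old_order, 1);
	correction(&old_order, 1);

	/* New order (this patch): correct first, then claim. */
	correction(&new_order, 1);
	update_reserve_space(&new_order, 1);

	printf("old order: data=%d meta=%d\n", old_order.data, old_order.meta);
	printf("new order: data=%d meta=%d\n", new_order.data, new_order.meta);
	return 0;
}

With the old order the metadata reservation ends up at zero even though a data reservation remains, which is exactly the situation the warning above complains about; with the new order both counters stay consistent.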
Diffstat (limited to 'fs/ext4/extents.c')
-rw-r--r--  fs/ext4/extents.c  12
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 69df02ff96aa..bd69e906bd91 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4165,9 +4165,6 @@ got_allocated_blocks:
 			}
 		} else {
 			BUG_ON(allocated_clusters < reserved_clusters);
-			/* We will claim quota for all newly allocated blocks.*/
-			ext4_da_update_reserve_space(inode, allocated_clusters,
-							1);
 			if (reserved_clusters < allocated_clusters) {
 				struct ext4_inode_info *ei = EXT4_I(inode);
 				int reservation = allocated_clusters -
@@ -4218,6 +4215,15 @@ got_allocated_blocks:
 				ei->i_reserved_data_blocks += reservation;
 				spin_unlock(&ei->i_block_reservation_lock);
 			}
+			/*
+			 * We will claim quota for all newly allocated blocks.
+			 * We're updating the reserved space *after* the
+			 * correction above so we do not accidentally free
+			 * all the metadata reservation because we might
+			 * actually need it later on.
+			 */
+			ext4_da_update_reserve_space(inode, allocated_clusters,
+							1);
 		}
 	}
 