about summary refs log tree commit diff stats
path: root/fs/ext4
diff options
context:
space:
mode:
Diffstat (limited to 'fs/ext4')
-rw-r--r--fs/ext4/inode.c32
1 file changed, 21 insertions(+), 11 deletions(-)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 25f809dc45a3..89b59cb7f9b8 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1182,6 +1182,17 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	unsigned int md_needed;
 	int ret;
+	ext4_lblk_t save_last_lblock;
+	int save_len;
+
+	/*
+	 * We will charge metadata quota at writeout time; this saves
+	 * us from metadata over-estimation, though we may go over by
+	 * a small amount in the end.  Here we just reserve for data.
+	 */
+	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
+	if (ret)
+		return ret;
 
 	/*
 	 * recalculate the amount of metadata blocks to reserve
@@ -1190,32 +1201,31 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
 	 */
 repeat:
 	spin_lock(&ei->i_block_reservation_lock);
+	/*
+	 * ext4_calc_metadata_amount() has side effects, which we have
+	 * to be prepared undo if we fail to claim space.
+	 */
+	save_len = ei->i_da_metadata_calc_len;
+	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
 	md_needed = EXT4_NUM_B2C(sbi,
 				 ext4_calc_metadata_amount(inode, lblock));
 	trace_ext4_da_reserve_space(inode, md_needed);
-	spin_unlock(&ei->i_block_reservation_lock);
 
 	/*
-	 * We will charge metadata quota at writeout time; this saves
-	 * us from metadata over-estimation, though we may go over by
-	 * a small amount in the end.  Here we just reserve for data.
-	 */
-	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
-	if (ret)
-		return ret;
-	/*
 	 * We do still charge estimated metadata to the sb though;
 	 * we cannot afford to run out of free blocks.
 	 */
 	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
-		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
+		ei->i_da_metadata_calc_len = save_len;
+		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
+		spin_unlock(&ei->i_block_reservation_lock);
 		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
 			yield();
 			goto repeat;
 		}
+		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
 		return -ENOSPC;
 	}
-	spin_lock(&ei->i_block_reservation_lock);
 	ei->i_reserved_data_blocks++;
 	ei->i_reserved_meta_blocks += md_needed;
 	spin_unlock(&ei->i_block_reservation_lock);