author		Jaegeuk Kim <jaegeuk@kernel.org>	2016-07-19 22:20:11 -0400
committer	Jaegeuk Kim <jaegeuk@kernel.org>	2016-07-20 17:53:21 -0400
commit		dd11a5df5219b4d3c4d3f38b9cae48c3518d3152 (patch)
tree		1146c8143ab7d871e45342d230a6050e5b45cc48
parent		4dd6f977fc778e5a0da604e5f8cb2f36d163d27b (diff)
f2fs: avoid data race when deciding checkpoint in f2fs_sync_file
When fs utilization is almost full, f2fs_sync_file should do a checkpoint
if there is not enough space for roll-forward later
(i.e. space_for_roll_forward).

Currently we have no lock for sbi->alloc_valid_block_count, resulting in a
race condition. In a rare case, we can get -ENOSPC when doing roll-forward,
which triggers

	if (is_valid_blkaddr(sbi, dest, META_POR)) {
		if (src == NULL_ADDR) {
			err = reserve_new_block(&dn);
			f2fs_bug_on(sbi, err);
			...
		}
		...
	}

in do_recover_data. So, this patch avoids that situation in advance.

Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
-rw-r--r--	fs/f2fs/f2fs.h	13
1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 7a57279b2c54..30981094dff8 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1147,24 +1147,33 @@ static inline void f2fs_i_blocks_write(struct inode *, blkcnt_t, bool);
 static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
 				struct inode *inode, blkcnt_t *count)
 {
+	blkcnt_t diff;
+
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(FAULT_BLOCK))
 		return false;
 #endif
+	/*
+	 * let's increase this in prior to actual block count change in order
+	 * for f2fs_sync_file to avoid data races when deciding checkpoint.
+	 */
+	percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
+
 	spin_lock(&sbi->stat_lock);
 	sbi->total_valid_block_count += (block_t)(*count);
 	if (unlikely(sbi->total_valid_block_count > sbi->user_block_count)) {
-		*count -= sbi->total_valid_block_count - sbi->user_block_count;
+		diff = sbi->total_valid_block_count - sbi->user_block_count;
+		*count -= diff;
 		sbi->total_valid_block_count = sbi->user_block_count;
 		if (!*count) {
 			spin_unlock(&sbi->stat_lock);
+			percpu_counter_sub(&sbi->alloc_valid_block_count, diff);
 			return false;
 		}
 	}
 	spin_unlock(&sbi->stat_lock);
 
 	f2fs_i_blocks_write(inode, *count, true);
-	percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
 	return true;
 }
 
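To make the ordering concrete, below is a minimal user-space sketch of the
same pattern, not f2fs code: a C11 atomic stands in for the kernel's percpu
alloc_valid_block_count, a pthread mutex stands in for sbi->stat_lock, and
the names struct sb_counts and try_reserve_blocks are hypothetical. The
reservation is published in the lockless counter before the locked update,
so a concurrent lockless reader (like the space_for_roll_forward check in
f2fs_sync_file) can only over-estimate usage, never under-estimate it; the
reservation is withdrawn only when nothing could be granted, mirroring the
patch.

/* build: cc -std=c11 -pthread sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the sbi fields used by the patch. */
struct sb_counts {
	_Atomic long alloc_valid;	/* lockless fast path, cf. alloc_valid_block_count */
	long total_valid;		/* authoritative count, guarded by lock */
	long user_blocks;		/* capacity, cf. user_block_count */
	pthread_mutex_t lock;		/* cf. sbi->stat_lock */
};

static bool try_reserve_blocks(struct sb_counts *sb, long *count)
{
	long diff;

	/*
	 * Publish the reservation before the locked update, so a racing
	 * lockless reader never sees less usage than will be committed.
	 */
	atomic_fetch_add(&sb->alloc_valid, *count);

	pthread_mutex_lock(&sb->lock);
	sb->total_valid += *count;
	if (sb->total_valid > sb->user_blocks) {
		diff = sb->total_valid - sb->user_blocks;
		*count -= diff;			/* grant only what fits */
		sb->total_valid = sb->user_blocks;
		if (*count == 0) {
			pthread_mutex_unlock(&sb->lock);
			/*
			 * Nothing granted: here diff equals the original
			 * request, so this withdraws the whole reservation.
			 */
			atomic_fetch_sub(&sb->alloc_valid, diff);
			return false;
		}
	}
	pthread_mutex_unlock(&sb->lock);
	/*
	 * Like the patch, a partial grant leaves the clamped part in the
	 * lockless counter; over-counting is the safe direction here.
	 */
	return true;
}

int main(void)
{
	struct sb_counts sb = {
		.total_valid = 98, .user_blocks = 100,
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};
	long want = 5;

	if (try_reserve_blocks(&sb, &want))	/* clamped: grants 2 of 5 */
		printf("granted %ld blocks, in use (lockless view): %ld\n",
		       want, atomic_load(&sb.alloc_valid));
	return 0;
}

The design point is that over-counting in the race window is harmless for
this check: it can only make f2fs_sync_file checkpoint more eagerly, while
under-counting is what allowed the -ENOSPC during roll-forward recovery.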