author    Jaegeuk Kim <jaegeuk.kim@samsung.com>    2013-01-25 04:33:41 -0500
committer Jaegeuk Kim <jaegeuk.kim@samsung.com>    2013-02-11 17:15:00 -0500
commit    bd43df021ac37247f2db58ff376fb4032170f754 (patch)
tree      e22482138f5ea62d84a1cbe327e0f546670b1a06 /fs/f2fs
parent    577e349514452fa3fcd99fd06e587b02d3d1cf28 (diff)
f2fs: cover global locks for reserve_new_block
fill_zero(), called from fallocate(), calls get_new_data_page(), which in turn calls reserve_new_block(). The reserve_new_block() call should be covered by *DATA_NEW*, one of the global locks. In addition, before taking the lock, we should check the free sections by calling f2fs_balance_fs().

If we break this rule, f2fs can run into out-of-control free space management and fall into an infinite loop, as in the following scenario.

[f2fs_sync_fs()]                  [fallocate()]
 - write_checkpoint()              - fill_zero()
  - block_operations()              - get_new_data_page()
   : grab NODE_NEW                   - get_dnode_of_data()
                                      : get locked dirty node page
   - sync_node_pages()
    : try to grab NODE_NEW for data allocation
    : trylock and skip the dirty node page
    : call sync_node_pages() repeatedly in order to flush all the
      dirty node pages!

In order to avoid this, we should grab another global lock, such as DATA_NEW, before calling get_new_data_page() in fill_zero().

Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
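For context, a minimal sketch of the locking rule this patch enforces, using only the calls visible in the diff below: any path that may reach reserve_new_block() first makes room with f2fs_balance_fs() and then holds the DATA_NEW global lock across the allocation. The wrapper function name here is hypothetical and not part of the kernel sources; the actual patch open-codes this sequence directly in fill_zero().

/*
 * Hypothetical wrapper illustrating the rule above (a sketch, not the
 * upstream code): balance free sections first, then cover the data
 * block reservation with the DATA_NEW global lock.
 */
static struct page *get_new_data_page_covered(struct inode *inode,
						pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;

	/* Check free sections before taking a global lock. */
	f2fs_balance_fs(sbi);

	mutex_lock_op(sbi, DATA_NEW);	/* covers reserve_new_block() */
	page = get_new_data_page(inode, index, false);
	mutex_unlock_op(sbi, DATA_NEW);

	return page;	/* may be an ERR_PTR(); callers check IS_ERR() */
}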
Diffstat (limited to 'fs/f2fs')
-rw-r--r--    fs/f2fs/file.c    5
1 file changed, 5 insertions, 0 deletions
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 3191b52aafb0..6cdab2c64fc6 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -387,12 +387,17 @@ const struct inode_operations f2fs_file_inode_operations = {
 static void fill_zero(struct inode *inode, pgoff_t index,
 						loff_t start, loff_t len)
 {
+	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 	struct page *page;
 
 	if (!len)
 		return;
 
+	f2fs_balance_fs(sbi);
+
+	mutex_lock_op(sbi, DATA_NEW);
 	page = get_new_data_page(inode, index, false);
+	mutex_unlock_op(sbi, DATA_NEW);
 
 	if (!IS_ERR(page)) {
 		wait_on_page_writeback(page);