aboutsummaryrefslogtreecommitdiffstats
path: root/fs/f2fs/recovery.c
diff options
context:
space:
mode:
authorGu Zheng <guz.fnst@cn.fujitsu.com>2013-09-27 06:08:30 -0400
committerJaegeuk Kim <jaegeuk.kim@samsung.com>2013-10-06 22:33:05 -0400
commite479556bfdd136669854292eb57ed0139d7253d5 (patch)
tree95772ba1ac8cf1e79c89145daf40c417814896da /fs/f2fs/recovery.c
parent2e5558f4a5cf16a7394fd5770087303db8912c66 (diff)
f2fs: use rw_sem instead of fs_lock(locks mutex)
The fs_lock is used to block other ops (e.g. recovery) when doing checkpoint. Every other operation routine (besides checkpoint) needs to acquire an fs_lock, and there is a serious problem here: if there are too many concurrent threads acquiring fs_lock, they will block each other, which may lead to performance problems — and that is not the behavior we want to see. Though some optimization patches have been introduced to enhance the usage of fs_lock, the thorough solution is to use an *rw_sem* to replace the fs_lock. The checkpoint routine takes the write semaphore, and other ops take the read semaphore, so that we can block other ops (e.g. recovery) when doing checkpoint, while other ops do not disturb each other; this avoids the problem described above completely. Because of a weakness of rw_sem, the above change may introduce a potential problem: the checkpoint thread might get starved if other threads are intensively locking the read semaphore for I/O. (Pointed out by Xu Jin.) In order to avoid this, a wait_list is introduced: pending read-semaphore ops will be dropped into the wait_list if the checkpoint thread is waiting for the write semaphore, and will be woken up when the checkpoint thread gives up the write semaphore. Thanks to Kim's previous review and test; we will be very glad to see other guys' performance tests of this patch. V2: -fix the potential starvation problem. -use more suitable func name suggested by Xu Jin. Signed-off-by: Gu Zheng <guz.fnst@cn.fujitsu.com> [Jaegeuk Kim: adjust minor coding standard] Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Diffstat (limited to 'fs/f2fs/recovery.c')
-rw-r--r--fs/f2fs/recovery.c7
1 files changed, 3 insertions, 4 deletions
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index a15d122fdc50..353cf4f66c7b 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -292,7 +292,6 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
292 struct f2fs_summary sum; 292 struct f2fs_summary sum;
293 struct node_info ni; 293 struct node_info ni;
294 int err = 0, recovered = 0; 294 int err = 0, recovered = 0;
295 int ilock;
296 295
297 start = start_bidx_of_node(ofs_of_node(page), fi); 296 start = start_bidx_of_node(ofs_of_node(page), fi);
298 if (IS_INODE(page)) 297 if (IS_INODE(page))
@@ -300,12 +299,12 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
300 else 299 else
301 end = start + ADDRS_PER_BLOCK; 300 end = start + ADDRS_PER_BLOCK;
302 301
303 ilock = mutex_lock_op(sbi); 302 f2fs_lock_op(sbi);
304 set_new_dnode(&dn, inode, NULL, NULL, 0); 303 set_new_dnode(&dn, inode, NULL, NULL, 0);
305 304
306 err = get_dnode_of_data(&dn, start, ALLOC_NODE); 305 err = get_dnode_of_data(&dn, start, ALLOC_NODE);
307 if (err) { 306 if (err) {
308 mutex_unlock_op(sbi, ilock); 307 f2fs_unlock_op(sbi);
309 return err; 308 return err;
310 } 309 }
311 310
@@ -356,7 +355,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
356 recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr); 355 recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
357err: 356err:
358 f2fs_put_dnode(&dn); 357 f2fs_put_dnode(&dn);
359 mutex_unlock_op(sbi, ilock); 358 f2fs_unlock_op(sbi);
360 359
361 f2fs_msg(sbi->sb, KERN_NOTICE, "recover_data: ino = %lx, " 360 f2fs_msg(sbi->sb, KERN_NOTICE, "recover_data: ino = %lx, "
362 "recovered_data = %d blocks, err = %d", 361 "recovered_data = %d blocks, err = %d",