author     Jaegeuk Kim <jaegeuk.kim@samsung.com>  2013-02-04 01:11:17 -0500
committer  Jaegeuk Kim <jaegeuk.kim@samsung.com>  2013-02-11 17:15:02 -0500
commit     437275272f9e635673f065300e5d95226a25cb06 (patch)
tree       bb0b8945d70971c200d098106b8879c1f84e109a /fs
parent     b1f1daf8c72d615b64163e26488d8effeed29b60 (diff)
f2fs: clarify and enhance the f2fs_gc flow
This patch clarifies the previously ambiguous f2fs_gc flow as follows.

1. Remove the intermediate checkpoint condition during f2fs_gc
   (i.e., should_do_checkpoint() and GC_BLOCKED).
2. Remove the now-unnecessary return values of f2fs_gc because of #1
   (i.e., GC_NONE, GC_OK, etc.).
3. Simplify write_checkpoint() because of #2.
4. Clarify the main f2fs_gc flow:
   o monitor how many sections are freed during one iteration of do_garbage_collect(),
   o keep doing GC without checkpoints if we cannot get enough free sections,
   o do a checkpoint once we have got enough free sections through foreground GCs.
5. Adopt the thread-logging (Slack Space Recycling) scheme more aggressively for
   data log types. See get_ssr_segment().

Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
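For reference, the resulting f2fs_gc() after this patch reads as below (reconstructed from the fs/f2fs/gc.c hunk further down; the inline comments are added here for illustration and are not part of the patch):

int f2fs_gc(struct f2fs_sb_info *sbi)
{
	struct list_head ilist;
	unsigned int segno, i;
	int gc_type = BG_GC;
	int nfree = 0;		/* sections freed so far by this call */
	int ret = -1;		/* stays -1 if no victim was selected */

	INIT_LIST_HEAD(&ilist);
gc_more:
	if (!(sbi->sb->s_flags & MS_ACTIVE))
		goto stop;

	/* switch to foreground GC while free sections, counting what
	 * has already been freed, are still not enough */
	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree))
		gc_type = FG_GC;

	if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
		goto stop;
	ret = 0;

	for (i = 0; i < sbi->segs_per_sec; i++)
		do_garbage_collect(sbi, segno + i, &ilist, gc_type);

	/* monitor how many sections were freed in this iteration */
	if (gc_type == FG_GC &&
			get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
		nfree++;

	/* keep collecting, without intermediate checkpoints, until
	 * enough sections have been freed */
	if (has_not_enough_free_secs(sbi, nfree))
		goto gc_more;

	/* checkpoint once, only after foreground GC */
	if (gc_type == FG_GC)
		write_checkpoint(sbi, false);
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&ilist);
	return ret;
}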
Diffstat (limited to 'fs')
-rw-r--r--  fs/f2fs/checkpoint.c  10
-rw-r--r--  fs/f2fs/f2fs.h        3
-rw-r--r--  fs/f2fs/gc.c          107
-rw-r--r--  fs/f2fs/gc.h          16
-rw-r--r--  fs/f2fs/node.c        2
-rw-r--r--  fs/f2fs/recovery.c    2
-rw-r--r--  fs/f2fs/segment.c     21
-rw-r--r--  fs/f2fs/segment.h     12
-rw-r--r--  fs/f2fs/super.c       4
9 files changed, 72 insertions(+), 105 deletions(-)
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 2887c196b0a2..2b6fc131e2ce 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -539,7 +539,7 @@ retry:
 /*
  * Freeze all the FS-operations for checkpoint.
  */
-void block_operations(struct f2fs_sb_info *sbi)
+static void block_operations(struct f2fs_sb_info *sbi)
 {
 	int t;
 	struct writeback_control wbc = {
@@ -722,15 +722,13 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
 /*
  * We guarantee that this checkpoint procedure should not fail.
  */
-void write_checkpoint(struct f2fs_sb_info *sbi, bool blocked, bool is_umount)
+void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
 {
 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
 	unsigned long long ckpt_ver;
 
-	if (!blocked) {
-		mutex_lock(&sbi->cp_mutex);
-		block_operations(sbi);
-	}
+	mutex_lock(&sbi->cp_mutex);
+	block_operations(sbi);
 
 	f2fs_submit_bio(sbi, DATA, true);
 	f2fs_submit_bio(sbi, NODE, true);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 87840bccaf21..e7e7a29767d6 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -969,8 +969,7 @@ int get_valid_checkpoint(struct f2fs_sb_info *);
 void set_dirty_dir_page(struct inode *, struct page *);
 void remove_dirty_dir_inode(struct inode *);
 void sync_dirty_dir_inodes(struct f2fs_sb_info *);
-void block_operations(struct f2fs_sb_info *);
-void write_checkpoint(struct f2fs_sb_info *, bool, bool);
+void write_checkpoint(struct f2fs_sb_info *, bool);
 void init_orphan_info(struct f2fs_sb_info *);
 int __init create_checkpoint_caches(void);
 void destroy_checkpoint_caches(void);
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 16fdec355201..52d3a391b922 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -78,7 +78,8 @@ static int gc_thread_func(void *data)
 
 		sbi->bg_gc++;
 
-		if (f2fs_gc(sbi) == GC_NONE)
+		/* if return value is not zero, no victim was selected */
+		if (f2fs_gc(sbi))
 			wait_ms = GC_THREAD_NOGC_SLEEP_TIME;
 		else if (wait_ms == GC_THREAD_NOGC_SLEEP_TIME)
 			wait_ms = GC_THREAD_MAX_SLEEP_TIME;
@@ -360,7 +361,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
 	sentry = get_seg_entry(sbi, segno);
 	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
 	mutex_unlock(&sit_i->sentry_lock);
-	return ret ? GC_OK : GC_NEXT;
+	return ret;
 }
 
 /*
@@ -368,7 +369,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
  * On validity, copy that node with cold status, otherwise (invalid node)
  * ignore that.
  */
-static int gc_node_segment(struct f2fs_sb_info *sbi,
+static void gc_node_segment(struct f2fs_sb_info *sbi,
 		struct f2fs_summary *sum, unsigned int segno, int gc_type)
 {
 	bool initial = true;
@@ -380,21 +381,12 @@ next_step:
 	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
 		nid_t nid = le32_to_cpu(entry->nid);
 		struct page *node_page;
-		int err;
 
-		/*
-		 * It makes sure that free segments are able to write
-		 * all the dirty node pages before CP after this CP.
-		 * So let's check the space of dirty node pages.
-		 */
-		if (should_do_checkpoint(sbi)) {
-			mutex_lock(&sbi->cp_mutex);
-			block_operations(sbi);
-			return GC_BLOCKED;
-		}
+		/* stop BG_GC if there is not enough free sections. */
+		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
+			return;
 
-		err = check_valid_map(sbi, segno, off);
-		if (err == GC_NEXT)
+		if (check_valid_map(sbi, segno, off) == 0)
 			continue;
 
 		if (initial) {
@@ -424,7 +416,6 @@ next_step:
 		};
 		sync_node_pages(sbi, 0, &wbc);
 	}
-	return GC_DONE;
 }
 
 /*
@@ -467,13 +458,13 @@ static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 
 	node_page = get_node_page(sbi, nid);
 	if (IS_ERR(node_page))
-		return GC_NEXT;
+		return 0;
 
 	get_node_info(sbi, nid, dni);
 
 	if (sum->version != dni->version) {
 		f2fs_put_page(node_page, 1);
-		return GC_NEXT;
+		return 0;
 	}
 
 	*nofs = ofs_of_node(node_page);
@@ -481,8 +472,8 @@ static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 	f2fs_put_page(node_page, 1);
 
 	if (source_blkaddr != blkaddr)
-		return GC_NEXT;
-	return GC_OK;
+		return 0;
+	return 1;
 }
 
 static void move_data_page(struct inode *inode, struct page *page, int gc_type)
@@ -523,13 +514,13 @@ out:
  * If the parent node is not valid or the data block address is different,
  * the victim data block is ignored.
  */
-static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 		struct list_head *ilist, unsigned int segno, int gc_type)
 {
 	struct super_block *sb = sbi->sb;
 	struct f2fs_summary *entry;
 	block_t start_addr;
-	int err, off;
+	int off;
 	int phase = 0;
 
 	start_addr = START_BLOCK(sbi, segno);
@@ -543,20 +534,11 @@ next_step:
 		unsigned int ofs_in_node, nofs;
 		block_t start_bidx;
 
-		/*
-		 * It makes sure that free segments are able to write
-		 * all the dirty node pages before CP after this CP.
-		 * So let's check the space of dirty node pages.
-		 */
-		if (should_do_checkpoint(sbi)) {
-			mutex_lock(&sbi->cp_mutex);
-			block_operations(sbi);
-			err = GC_BLOCKED;
-			goto stop;
-		}
+		/* stop BG_GC if there is not enough free sections. */
+		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
+			return;
 
-		err = check_valid_map(sbi, segno, off);
-		if (err == GC_NEXT)
+		if (check_valid_map(sbi, segno, off) == 0)
 			continue;
 
 		if (phase == 0) {
@@ -565,8 +547,7 @@ next_step:
 		}
 
 		/* Get an inode by ino with checking validity */
-		err = check_dnode(sbi, entry, &dni, start_addr + off, &nofs);
-		if (err == GC_NEXT)
+		if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
 			continue;
 
 		if (phase == 1) {
@@ -606,11 +587,9 @@ next_iput:
 	}
 	if (++phase < 4)
 		goto next_step;
-	err = GC_DONE;
-stop:
+
 	if (gc_type == FG_GC)
 		f2fs_submit_bio(sbi, DATA, true);
-	return err;
 }
 
 static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
@@ -624,17 +603,16 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
 	return ret;
 }
 
-static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
+static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
 		struct list_head *ilist, int gc_type)
 {
 	struct page *sum_page;
 	struct f2fs_summary_block *sum;
-	int ret = GC_DONE;
 
 	/* read segment summary of victim */
 	sum_page = get_sum_page(sbi, segno);
 	if (IS_ERR(sum_page))
-		return GC_ERROR;
+		return;
 
 	/*
 	 * CP needs to lock sum_page. In this time, we don't need
@@ -646,17 +624,16 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
 
 	switch (GET_SUM_TYPE((&sum->footer))) {
 	case SUM_TYPE_NODE:
-		ret = gc_node_segment(sbi, sum->entries, segno, gc_type);
+		gc_node_segment(sbi, sum->entries, segno, gc_type);
 		break;
 	case SUM_TYPE_DATA:
-		ret = gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
+		gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
 		break;
 	}
 	stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
 	stat_inc_call_count(sbi->stat_info);
 
 	f2fs_put_page(sum_page, 0);
-	return ret;
 }
 
 int f2fs_gc(struct f2fs_sb_info *sbi)
@@ -664,40 +641,38 @@ int f2fs_gc(struct f2fs_sb_info *sbi)
 	struct list_head ilist;
 	unsigned int segno, i;
 	int gc_type = BG_GC;
-	int gc_status = GC_NONE;
+	int nfree = 0;
+	int ret = -1;
 
 	INIT_LIST_HEAD(&ilist);
 gc_more:
 	if (!(sbi->sb->s_flags & MS_ACTIVE))
 		goto stop;
 
-	if (gc_type == BG_GC && has_not_enough_free_secs(sbi))
+	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree))
 		gc_type = FG_GC;
 
 	if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
 		goto stop;
+	ret = 0;
 
-	for (i = 0; i < sbi->segs_per_sec; i++) {
-		/*
-		 * do_garbage_collect will give us three gc_status:
-		 * GC_ERROR, GC_DONE, and GC_BLOCKED.
-		 * If GC is finished uncleanly, we have to return
-		 * the victim to dirty segment list.
-		 */
-		gc_status = do_garbage_collect(sbi, segno + i, &ilist, gc_type);
-		if (gc_status != GC_DONE)
-			break;
-	}
-	if (has_not_enough_free_secs(sbi)) {
-		write_checkpoint(sbi, (gc_status == GC_BLOCKED), false);
-		if (has_not_enough_free_secs(sbi))
-			goto gc_more;
-	}
+	for (i = 0; i < sbi->segs_per_sec; i++)
+		do_garbage_collect(sbi, segno + i, &ilist, gc_type);
+
+	if (gc_type == FG_GC &&
+			get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
+		nfree++;
+
+	if (has_not_enough_free_secs(sbi, nfree))
+		goto gc_more;
+
+	if (gc_type == FG_GC)
+		write_checkpoint(sbi, false);
 stop:
 	mutex_unlock(&sbi->gc_mutex);
 
 	put_gc_inode(&ilist);
-	return gc_status;
+	return ret;
 }
 
 void build_gc_manager(struct f2fs_sb_info *sbi)
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
index c407a75a7daa..30b2db003acd 100644
--- a/fs/f2fs/gc.h
+++ b/fs/f2fs/gc.h
@@ -22,15 +22,6 @@
 /* Search max. number of dirty segments to select a victim segment */
 #define MAX_VICTIM_SEARCH 20
 
-enum {
-	GC_NONE = 0,
-	GC_ERROR,
-	GC_OK,
-	GC_NEXT,
-	GC_BLOCKED,
-	GC_DONE,
-};
-
 struct f2fs_gc_kthread {
 	struct task_struct *f2fs_gc_task;
 	wait_queue_head_t gc_wait_queue_head;
@@ -103,10 +94,3 @@ static inline int is_idle(struct f2fs_sb_info *sbi)
 	struct request_list *rl = &q->root_rl;
 	return !(rl->count[BLK_RW_SYNC]) && !(rl->count[BLK_RW_ASYNC]);
 }
-
-static inline bool should_do_checkpoint(struct f2fs_sb_info *sbi)
-{
-	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
-	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
-	return free_sections(sbi) <= (node_secs + 2 * dent_secs + 2);
-}
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 33fa6d506d94..43ce16422b75 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1135,7 +1135,7 @@ static int f2fs_write_node_pages(struct address_space *mapping,
 
 	/* First check balancing cached NAT entries */
 	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
-		write_checkpoint(sbi, false, false);
+		write_checkpoint(sbi, false);
 		return 0;
 	}
 
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index e2a3e1a8eae9..01e1a03b54c8 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -373,5 +373,5 @@ void recover_fsync_data(struct f2fs_sb_info *sbi)
 out:
 	destroy_fsync_dnodes(sbi, &inode_list);
 	kmem_cache_destroy(fsync_entry_slab);
-	write_checkpoint(sbi, false, false);
+	write_checkpoint(sbi, false);
 }
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 7aa270f3538a..777f17e496e6 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -29,7 +29,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi)
 	 * We should do GC or end up with checkpoint, if there are so many dirty
 	 * dir/node pages without enough free segments.
 	 */
-	if (has_not_enough_free_secs(sbi)) {
+	if (has_not_enough_free_secs(sbi, 0)) {
 		mutex_lock(&sbi->gc_mutex);
 		f2fs_gc(sbi);
 	}
@@ -308,7 +308,7 @@ static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi,
 	 * If there is not enough reserved sections,
 	 * we should not reuse prefree segments.
 	 */
-	if (has_not_enough_free_secs(sbi))
+	if (has_not_enough_free_secs(sbi, 0))
 		return NULL_SEGNO;
 
 	/*
@@ -536,6 +536,23 @@ static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
 	}
 }
 
+static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
+{
+	struct curseg_info *curseg = CURSEG_I(sbi, type);
+	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
+
+	if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
+		return v_ops->get_victim(sbi,
+				&(curseg)->next_segno, BG_GC, type, SSR);
+
+	/* For data segments, let's do SSR more intensively */
+	for (; type >= CURSEG_HOT_DATA; type--)
+		if (v_ops->get_victim(sbi, &(curseg)->next_segno,
+						BG_GC, type, SSR))
+			return 1;
+	return 0;
+}
+
 /*
  * flush out current segment and replace it with new segment
  * This function should be returned with success, otherwise BUG
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 458bf5c726f7..552dadbb2327 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -450,21 +450,15 @@ static inline bool need_SSR(struct f2fs_sb_info *sbi)
 	return (free_sections(sbi) < overprovision_sections(sbi));
 }
 
-static inline int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
-{
-	struct curseg_info *curseg = CURSEG_I(sbi, type);
-	return DIRTY_I(sbi)->v_ops->get_victim(sbi,
-			&(curseg)->next_segno, BG_GC, type, SSR);
-}
-
-static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi)
+static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
 {
 	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
 	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
+
 	if (sbi->por_doing)
 		return false;
 
-	return (free_sections(sbi) <= (node_secs + 2 * dent_secs +
+	return ((free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
 			reserved_sections(sbi)));
 }
 
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index ddb665f54d17..8c117649a035 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -112,7 +112,7 @@ static void f2fs_put_super(struct super_block *sb)
 	f2fs_destroy_stats(sbi);
 	stop_gc_thread(sbi);
 
-	write_checkpoint(sbi, false, true);
+	write_checkpoint(sbi, true);
 
 	iput(sbi->node_inode);
 	iput(sbi->meta_inode);
@@ -136,7 +136,7 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
 		return 0;
 
 	if (sync)
-		write_checkpoint(sbi, false, false);
+		write_checkpoint(sbi, false);
 	else
 		f2fs_balance_fs(sbi);
 