Diffstat (limited to 'fs/f2fs/gc.c')
-rw-r--r-- fs/f2fs/gc.c | 245
1 file changed, 143 insertions(+), 102 deletions(-)
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index f610c2a9bdde..b0051a97824c 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -245,6 +245,18 @@ static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
 	return get_cb_cost(sbi, segno);
 }
 
+static unsigned int count_bits(const unsigned long *addr,
+				unsigned int offset, unsigned int len)
+{
+	unsigned int end = offset + len, sum = 0;
+
+	while (offset < end) {
+		if (test_bit(offset++, addr))
+			++sum;
+	}
+	return sum;
+}
+
 /*
  * This function is called from two paths.
  * One is garbage collection and the other is SSR segment selection.
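
A note between hunks: count_bits() simply counts the set bits in the window [offset, offset + len) of a segment bitmap; the victim-selection hunk below uses it to charge the search budget once per dirty segment in a section rather than once per candidate. A minimal user-space sketch of the same logic, assuming a flat unsigned long bitmap and a test_bit()-style helper (illustrative names, not the kernel API):

	#include <stdio.h>
	#include <limits.h>

	#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

	/* user-space stand-in for the kernel's test_bit() */
	static int test_bit_us(unsigned int nr, const unsigned long *addr)
	{
		return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1UL;
	}

	/* same loop as count_bits() above: set bits in [offset, offset + len) */
	static unsigned int count_bits_us(const unsigned long *addr,
					unsigned int offset, unsigned int len)
	{
		unsigned int end = offset + len, sum = 0;

		while (offset < end) {
			if (test_bit_us(offset++, addr))
				++sum;
		}
		return sum;
	}

	int main(void)
	{
		/* bits 2, 4, 5 and 7 dirty in an 8-segment "section" */
		unsigned long dirty_segmap[1] = { 0xb4 };

		printf("%u\n", count_bits_us(dirty_segmap, 0, 8)); /* prints 4 */
		return 0;
	}
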
@@ -258,9 +270,9 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
 {
 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 	struct victim_sel_policy p;
-	unsigned int secno, max_cost;
+	unsigned int secno, max_cost, last_victim;
 	unsigned int last_segment = MAIN_SEGS(sbi);
-	int nsearched = 0;
+	unsigned int nsearched = 0;
 
 	mutex_lock(&dirty_i->seglist_lock);
 
@@ -273,6 +285,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
 	if (p.max_search == 0)
 		goto out;
 
+	last_victim = sbi->last_victim[p.gc_mode];
 	if (p.alloc_mode == LFS && gc_type == FG_GC) {
 		p.min_segno = check_bg_victims(sbi);
 		if (p.min_segno != NULL_SEGNO)
@@ -295,27 +308,35 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
 		}
 
 		p.offset = segno + p.ofs_unit;
-		if (p.ofs_unit > 1)
+		if (p.ofs_unit > 1) {
 			p.offset -= segno % p.ofs_unit;
+			nsearched += count_bits(p.dirty_segmap,
+						p.offset - p.ofs_unit,
+						p.ofs_unit);
+		} else {
+			nsearched++;
+		}
+
 
 		secno = GET_SECNO(sbi, segno);
 
 		if (sec_usage_check(sbi, secno))
-			continue;
+			goto next;
 		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
-			continue;
+			goto next;
 
 		cost = get_gc_cost(sbi, segno, &p);
 
 		if (p.min_cost > cost) {
 			p.min_segno = segno;
 			p.min_cost = cost;
-		} else if (unlikely(cost == max_cost)) {
-			continue;
 		}
-
-		if (nsearched++ >= p.max_search) {
-			sbi->last_victim[p.gc_mode] = segno;
+next:
+		if (nsearched >= p.max_search) {
+			if (!sbi->last_victim[p.gc_mode] && segno <= last_victim)
+				sbi->last_victim[p.gc_mode] = last_victim + 1;
+			else
+				sbi->last_victim[p.gc_mode] = segno + 1;
 			break;
 		}
 	}
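
Why the hunk above keeps a last_victim snapshot: the old exit path stored the victim segment itself in sbi->last_victim, so the next scan re-examined it, and a search that wrapped past MAIN_SEGS() could keep revisiting the same range. The new exit stores segno + 1, or one past the saved cursor when the scan wrapped without advancing, so successive calls make forward progress around the ring. A simplified user-space model of that circular cursor (hypothetical names, not the kernel code):

	#include <stdio.h>

	#define NSEGS 16

	/* circular scan: resume at *cursor, wrap at most once, and leave
	 * *cursor one past the victim so the next call doesn't rescan it */
	static int next_victim(const int dirty[NSEGS], int *cursor)
	{
		int start = *cursor, i;

		for (i = 0; i < NSEGS; i++) {
			int seg = (start + i) % NSEGS;

			if (dirty[seg]) {
				*cursor = (seg + 1) % NSEGS;
				return seg;
			}
		}
		return -1; /* no dirty segment anywhere */
	}

	int main(void)
	{
		int dirty[NSEGS] = { 0 };
		int cursor = 10, a, b, c;

		dirty[3] = dirty[9] = dirty[14] = 1;
		a = next_victim(dirty, &cursor); /* 14: starts at 10 */
		b = next_victim(dirty, &cursor); /* 3: wrapped past the end */
		c = next_victim(dirty, &cursor); /* 9 */
		printf("%d %d %d\n", a, b, c);
		return 0;
	}
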
@@ -399,7 +420,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
  * On validity, copy that node with cold status, otherwise (invalid node)
  * ignore that.
  */
-static int gc_node_segment(struct f2fs_sb_info *sbi,
+static void gc_node_segment(struct f2fs_sb_info *sbi,
 		struct f2fs_summary *sum, unsigned int segno, int gc_type)
 {
 	bool initial = true;
@@ -419,7 +440,7 @@ next_step:
 
 		/* stop BG_GC if there is not enough free sections. */
 		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
-			return 0;
+			return;
 
 		if (check_valid_map(sbi, segno, off) == 0)
 			continue;
@@ -446,7 +467,7 @@ next_step:
 
 		/* set page dirty and write it */
 		if (gc_type == FG_GC) {
-			f2fs_wait_on_page_writeback(node_page, NODE);
+			f2fs_wait_on_page_writeback(node_page, NODE, true);
 			set_page_dirty(node_page);
 		} else {
 			if (!PageWriteback(node_page))
@@ -460,20 +481,6 @@ next_step:
 		initial = false;
 		goto next_step;
 	}
-
-	if (gc_type == FG_GC) {
-		struct writeback_control wbc = {
-			.sync_mode = WB_SYNC_ALL,
-			.nr_to_write = LONG_MAX,
-			.for_reclaim = 0,
-		};
-		sync_node_pages(sbi, 0, &wbc);
-
-		/* return 1 only if FG_GC succefully reclaimed one */
-		if (get_valid_blocks(sbi, segno, 1) == 0)
-			return 1;
-	}
-	return 0;
 }
 
 /*
@@ -483,7 +490,7 @@ next_step:
  * as indirect or double indirect node blocks, are given, it must be a caller's
  * bug.
  */
-block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
+block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
 {
 	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
 	unsigned int bidx;
@@ -500,7 +507,7 @@ block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
 		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
 		bidx = node_ofs - 5 - dec;
 	}
-	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
+	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
 }
 
 static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
@@ -546,6 +553,7 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
 	struct f2fs_summary sum;
 	struct node_info ni;
 	struct page *page;
+	block_t newaddr;
 	int err;
 
 	/* do not read out */
@@ -567,21 +575,24 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
 	 * don't cache encrypted data into meta inode until previous dirty
 	 * data were writebacked to avoid racing between GC and flush.
 	 */
-	f2fs_wait_on_page_writeback(page, DATA);
+	f2fs_wait_on_page_writeback(page, DATA, true);
 
 	get_node_info(fio.sbi, dn.nid, &ni);
 	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
 
 	/* read page */
 	fio.page = page;
-	fio.blk_addr = dn.data_blkaddr;
+	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
 
-	fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi),
-					fio.blk_addr,
-					FGP_LOCK|FGP_CREAT,
-					GFP_NOFS);
-	if (!fio.encrypted_page)
-		goto put_out;
+	allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
+							&sum, CURSEG_COLD_DATA);
+
+	fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi), newaddr,
+					FGP_LOCK | FGP_CREAT, GFP_NOFS);
+	if (!fio.encrypted_page) {
+		err = -ENOMEM;
+		goto recover_block;
+	}
 
 	err = f2fs_submit_page_bio(&fio);
 	if (err)
@@ -590,33 +601,39 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
 	/* write page */
 	lock_page(fio.encrypted_page);
 
-	if (unlikely(!PageUptodate(fio.encrypted_page)))
-		goto put_page_out;
-	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi)))
-		goto put_page_out;
+	if (unlikely(!PageUptodate(fio.encrypted_page))) {
+		err = -EIO;
+		goto put_page_out;
+	}
+	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
+		err = -EIO;
+		goto put_page_out;
+	}
 
 	set_page_dirty(fio.encrypted_page);
-	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA);
+	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
 	if (clear_page_dirty_for_io(fio.encrypted_page))
 		dec_page_count(fio.sbi, F2FS_DIRTY_META);
 
 	set_page_writeback(fio.encrypted_page);
 
 	/* allocate block address */
-	f2fs_wait_on_page_writeback(dn.node_page, NODE);
-	allocate_data_block(fio.sbi, NULL, fio.blk_addr,
-					&fio.blk_addr, &sum, CURSEG_COLD_DATA);
+	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
+
 	fio.rw = WRITE_SYNC;
+	fio.new_blkaddr = newaddr;
 	f2fs_submit_page_mbio(&fio);
 
-	dn.data_blkaddr = fio.blk_addr;
-	set_data_blkaddr(&dn);
-	f2fs_update_extent_cache(&dn);
+	f2fs_update_data_blkaddr(&dn, newaddr);
 	set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
 	if (page->index == 0)
 		set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
 put_page_out:
 	f2fs_put_page(fio.encrypted_page, 1);
+recover_block:
+	if (err)
+		__f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
+								true, true);
 put_out:
 	f2fs_put_dnode(&dn);
 out:
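
The reshuffle in move_encrypted_block() is easier to see as a pattern: the replacement block is now reserved up front via allocate_data_block(), every later failure sets err and funnels to recover_block:, and __f2fs_replace_block() hands the reservation back, so the on-disk mapping only moves once the copy has succeeded. A generic user-space sketch of that allocate/attempt/rollback shape (stand-in helpers, not the f2fs API):

	#include <stdio.h>

	static int alloc_block(void)        { return 42; }            /* reserve */
	static void release_block(int blk)  { (void)blk; }            /* rollback */
	static int copy_into(int blk)       { (void)blk; return -1; } /* may fail */
	static void commit_mapping(int blk) { (void)blk; }            /* remap */

	/* reserve first, do the fallible work, undo the reservation on error */
	static int move_block(void)
	{
		int newblk = alloc_block();
		int err;

		err = copy_into(newblk);
		if (err)
			goto recover;

		commit_mapping(newblk); /* metadata now points at the new block */
		return 0;

	recover:
		release_block(newblk);  /* mirrors the __f2fs_replace_block() undo */
		return err;
	}

	int main(void)
	{
		printf("move_block() -> %d\n", move_block()); /* -1: rolled back */
		return 0;
	}
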
@@ -645,7 +662,7 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
 			.encrypted_page = NULL,
 		};
 		set_page_dirty(page);
-		f2fs_wait_on_page_writeback(page, DATA);
+		f2fs_wait_on_page_writeback(page, DATA, true);
 		if (clear_page_dirty_for_io(page))
 			inode_dec_dirty_pages(inode);
 		set_cold_data(page);
@@ -663,7 +680,7 @@ out:
  * If the parent node is not valid or the data block address is different,
  * the victim data block is ignored.
  */
-static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
 {
 	struct super_block *sb = sbi->sb;
@@ -686,7 +703,7 @@ next_step:
 
 		/* stop BG_GC if there is not enough free sections. */
 		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
-			return 0;
+			return;
 
 		if (check_valid_map(sbi, segno, off) == 0)
 			continue;
@@ -719,7 +736,7 @@ next_step:
 			continue;
 		}
 
-		start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
+		start_bidx = start_bidx_of_node(nofs, inode);
 		data_page = get_read_data_page(inode,
 					start_bidx + ofs_in_node, READA, true);
 		if (IS_ERR(data_page)) {
@@ -735,7 +752,7 @@ next_step:
 		/* phase 3 */
 		inode = find_gc_inode(gc_list, dni.ino);
 		if (inode) {
-			start_bidx = start_bidx_of_node(nofs, F2FS_I(inode))
+			start_bidx = start_bidx_of_node(nofs, inode)
 								+ ofs_in_node;
 			if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
 				move_encrypted_block(inode, start_bidx);
@@ -747,15 +764,6 @@ next_step:
 
 	if (++phase < 4)
 		goto next_step;
-
-	if (gc_type == FG_GC) {
-		f2fs_submit_merged_bio(sbi, DATA, WRITE);
-
-		/* return 1 only if FG_GC succefully reclaimed one */
-		if (get_valid_blocks(sbi, segno, 1) == 0)
-			return 1;
-	}
-	return 0;
 }
 
 static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
@@ -771,53 +779,92 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
 	return ret;
 }
 
-static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
+static int do_garbage_collect(struct f2fs_sb_info *sbi,
+					unsigned int start_segno,
 				struct gc_inode_list *gc_list, int gc_type)
 {
 	struct page *sum_page;
 	struct f2fs_summary_block *sum;
 	struct blk_plug plug;
-	int nfree = 0;
+	unsigned int segno = start_segno;
+	unsigned int end_segno = start_segno + sbi->segs_per_sec;
+	int seg_freed = 0;
+	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
+						SUM_TYPE_DATA : SUM_TYPE_NODE;
 
-	/* read segment summary of victim */
-	sum_page = get_sum_page(sbi, segno);
+	/* readahead multi ssa blocks those have contiguous address */
+	if (sbi->segs_per_sec > 1)
+		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
+					sbi->segs_per_sec, META_SSA, true);
+
+	/* reference all summary page */
+	while (segno < end_segno) {
+		sum_page = get_sum_page(sbi, segno++);
+		unlock_page(sum_page);
+	}
 
 	blk_start_plug(&plug);
 
-	sum = page_address(sum_page);
+	for (segno = start_segno; segno < end_segno; segno++) {
+		/* find segment summary of victim */
+		sum_page = find_get_page(META_MAPPING(sbi),
+					GET_SUM_BLOCK(sbi, segno));
+		f2fs_bug_on(sbi, !PageUptodate(sum_page));
+		f2fs_put_page(sum_page, 0);
 
-	/*
-	 * this is to avoid deadlock:
-	 * - lock_page(sum_page) - f2fs_replace_block
-	 *  - check_valid_map() - mutex_lock(sentry_lock)
-	 * - mutex_lock(sentry_lock) - change_curseg()
-	 *  - lock_page(sum_page)
-	 */
-	unlock_page(sum_page);
-
-	switch (GET_SUM_TYPE((&sum->footer))) {
-	case SUM_TYPE_NODE:
-		nfree = gc_node_segment(sbi, sum->entries, segno, gc_type);
-		break;
-	case SUM_TYPE_DATA:
-		nfree = gc_data_segment(sbi, sum->entries, gc_list,
-							segno, gc_type);
-		break;
+		sum = page_address(sum_page);
+		f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));
+
+		/*
+		 * this is to avoid deadlock:
+		 * - lock_page(sum_page) - f2fs_replace_block
+		 *  - check_valid_map() - mutex_lock(sentry_lock)
+		 * - mutex_lock(sentry_lock) - change_curseg()
+		 *  - lock_page(sum_page)
+		 */
+
+		if (type == SUM_TYPE_NODE)
+			gc_node_segment(sbi, sum->entries, segno, gc_type);
+		else
+			gc_data_segment(sbi, sum->entries, gc_list, segno,
+								gc_type);
+
+		stat_inc_seg_count(sbi, type, gc_type);
+
+		f2fs_put_page(sum_page, 0);
+	}
+
+	if (gc_type == FG_GC) {
+		if (type == SUM_TYPE_NODE) {
+			struct writeback_control wbc = {
+				.sync_mode = WB_SYNC_ALL,
+				.nr_to_write = LONG_MAX,
+				.for_reclaim = 0,
+			};
+			sync_node_pages(sbi, 0, &wbc);
+		} else {
+			f2fs_submit_merged_bio(sbi, DATA, WRITE);
+		}
 	}
 
 	blk_finish_plug(&plug);
 
-	stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)), gc_type);
+	if (gc_type == FG_GC) {
+		while (start_segno < end_segno)
+			if (get_valid_blocks(sbi, start_segno++, 1) == 0)
+				seg_freed++;
+	}
+
 	stat_inc_call_count(sbi->stat_info);
 
-	f2fs_put_page(sum_page, 0);
-	return nfree;
+	return seg_freed;
 }
 
 int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
 {
-	unsigned int segno, i;
+	unsigned int segno;
 	int gc_type = sync ? FG_GC : BG_GC;
-	int sec_freed = 0;
+	int sec_freed = 0, seg_freed;
 	int ret = -EINVAL;
 	struct cp_control cpc;
 	struct gc_inode_list gc_list = {
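
do_garbage_collect() now works on a whole section: it readaheads the section's SSA blocks in one batch, takes a reference on every summary page, GCs segment by segment, and only afterwards counts how many segments actually emptied so f2fs_gc() can decide whether a full section was freed. A condensed user-space model of that structure (hypothetical helpers, fake data):

	#include <stdio.h>

	#define SEGS_PER_SEC 4

	static void readahead_summaries(int start, int n) { (void)start; (void)n; }
	static void gc_one_segment(int segno)             { (void)segno; }
	/* fake: pretend odd segments still hold valid blocks after GC */
	static int valid_blocks(int segno)                { return segno & 1; }

	/* returns how many segments in the section ended up empty */
	static int do_garbage_collect_model(int start_segno)
	{
		int end_segno = start_segno + SEGS_PER_SEC;
		int segno, seg_freed = 0;

		/* batch the metadata reads for the whole section up front */
		readahead_summaries(start_segno, SEGS_PER_SEC);

		for (segno = start_segno; segno < end_segno; segno++)
			gc_one_segment(segno);

		/* count freed segments only after the whole section is done */
		for (segno = start_segno; segno < end_segno; segno++)
			if (valid_blocks(segno) == 0)
				seg_freed++;

		return seg_freed;
	}

	int main(void)
	{
		int freed = do_garbage_collect_model(0);

		/* the caller credits a section only if every segment was freed */
		printf("freed %d/%d, section %s\n", freed, SEGS_PER_SEC,
			freed == SEGS_PER_SEC ? "reclaimed" : "not reclaimed");
		return 0;
	}
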
@@ -838,30 +885,24 @@ gc_more:
 
 	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed)) {
 		gc_type = FG_GC;
+		/*
+		 * If there is no victim and no prefree segment but still not
+		 * enough free sections, we should flush dent/node blocks and do
+		 * garbage collections.
+		 */
 		if (__get_victim(sbi, &segno, gc_type) || prefree_segments(sbi))
 			write_checkpoint(sbi, &cpc);
+		else if (has_not_enough_free_secs(sbi, 0))
+			write_checkpoint(sbi, &cpc);
 	}
 
 	if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
 		goto stop;
 	ret = 0;
 
-	/* readahead multi ssa blocks those have contiguous address */
-	if (sbi->segs_per_sec > 1)
-		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), sbi->segs_per_sec,
-							META_SSA, true);
-
-	for (i = 0; i < sbi->segs_per_sec; i++) {
-		/*
-		 * for FG_GC case, halt gcing left segments once failed one
-		 * of segments in selected section to avoid long latency.
-		 */
-		if (!do_garbage_collect(sbi, segno + i, &gc_list, gc_type) &&
-				gc_type == FG_GC)
-			break;
-	}
+	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
 
-	if (i == sbi->segs_per_sec && gc_type == FG_GC)
+	if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
 		sec_freed++;
 
 	if (gc_type == FG_GC)