From 437275272f9e635673f065300e5d95226a25cb06 Mon Sep 17 00:00:00 2001
From: Jaegeuk Kim
Date: Mon, 4 Feb 2013 15:11:17 +0900
Subject: f2fs: clarify and enhance the f2fs_gc flow

This patch makes the ambiguous f2fs_gc flow clearer, as follows.

1. Remove the intermediate checkpoint condition during f2fs_gc
   (i.e., should_do_checkpoint() and GC_BLOCKED).

2. Remove the unnecessary return values of f2fs_gc because of #1
   (i.e., GC_NONE, GC_OK, etc.).

3. Simplify write_checkpoint() because of #2.

4. Clarify the main f2fs_gc flow.
   o monitor how many sections were freed during one iteration of
     do_garbage_collect().
   o do more GC without checkpoints if we can't get enough free sections.
   o do a checkpoint once we've got enough free sections through foreground GCs.

5. Adopt the thread-logging (Slack-Space-Recycle) scheme more aggressively for
   data log types. See get_ssr_segment().

Signed-off-by: Jaegeuk Kim
---
 fs/f2fs/gc.c | 107 +++++++++++++++++++++++------------------------------------
 1 file changed, 41 insertions(+), 66 deletions(-)

(limited to 'fs/f2fs/gc.c')

diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 16fdec355201..52d3a391b922 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -78,7 +78,8 @@ static int gc_thread_func(void *data)
 
 		sbi->bg_gc++;
 
-		if (f2fs_gc(sbi) == GC_NONE)
+		/* if return value is not zero, no victim was selected */
+		if (f2fs_gc(sbi))
 			wait_ms = GC_THREAD_NOGC_SLEEP_TIME;
 		else if (wait_ms == GC_THREAD_NOGC_SLEEP_TIME)
 			wait_ms = GC_THREAD_MAX_SLEEP_TIME;
@@ -360,7 +361,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
 	sentry = get_seg_entry(sbi, segno);
 	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
 	mutex_unlock(&sit_i->sentry_lock);
-	return ret ? GC_OK : GC_NEXT;
+	return ret;
 }
 
 /*
@@ -368,7 +369,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
  * On validity, copy that node with cold status, otherwise (invalid node)
  * ignore that.
  */
-static int gc_node_segment(struct f2fs_sb_info *sbi,
+static void gc_node_segment(struct f2fs_sb_info *sbi,
 		struct f2fs_summary *sum, unsigned int segno, int gc_type)
 {
 	bool initial = true;
@@ -380,21 +381,12 @@ next_step:
 	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
 		nid_t nid = le32_to_cpu(entry->nid);
 		struct page *node_page;
-		int err;
 
-		/*
-		 * It makes sure that free segments are able to write
-		 * all the dirty node pages before CP after this CP.
-		 * So let's check the space of dirty node pages.
-		 */
-		if (should_do_checkpoint(sbi)) {
-			mutex_lock(&sbi->cp_mutex);
-			block_operations(sbi);
-			return GC_BLOCKED;
-		}
+		/* stop BG_GC if there is not enough free sections. */
+		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
+			return;
 
-		err = check_valid_map(sbi, segno, off);
-		if (err == GC_NEXT)
+		if (check_valid_map(sbi, segno, off) == 0)
 			continue;
 
 		if (initial) {
@@ -424,7 +416,6 @@ next_step:
 		};
 		sync_node_pages(sbi, 0, &wbc);
 	}
-	return GC_DONE;
 }
 
 /*
@@ -467,13 +458,13 @@ static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 
 	node_page = get_node_page(sbi, nid);
 	if (IS_ERR(node_page))
-		return GC_NEXT;
+		return 0;
 
 	get_node_info(sbi, nid, dni);
 
 	if (sum->version != dni->version) {
 		f2fs_put_page(node_page, 1);
-		return GC_NEXT;
+		return 0;
 	}
 
 	*nofs = ofs_of_node(node_page);
@@ -481,8 +472,8 @@ static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 	f2fs_put_page(node_page, 1);
 
 	if (source_blkaddr != blkaddr)
-		return GC_NEXT;
-	return GC_OK;
+		return 0;
+	return 1;
 }
 
 static void move_data_page(struct inode *inode, struct page *page, int gc_type)
@@ -523,13 +514,13 @@ out:
  * If the parent node is not valid or the data block address is different,
  * the victim data block is ignored.
  */
-static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 		struct list_head *ilist, unsigned int segno, int gc_type)
 {
 	struct super_block *sb = sbi->sb;
 	struct f2fs_summary *entry;
 	block_t start_addr;
-	int err, off;
+	int off;
 	int phase = 0;
 
 	start_addr = START_BLOCK(sbi, segno);
@@ -543,20 +534,11 @@ next_step:
 		unsigned int ofs_in_node, nofs;
 		block_t start_bidx;
 
-		/*
-		 * It makes sure that free segments are able to write
-		 * all the dirty node pages before CP after this CP.
-		 * So let's check the space of dirty node pages.
-		 */
-		if (should_do_checkpoint(sbi)) {
-			mutex_lock(&sbi->cp_mutex);
-			block_operations(sbi);
-			err = GC_BLOCKED;
-			goto stop;
-		}
+		/* stop BG_GC if there is not enough free sections. */
+		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
+			return;
 
-		err = check_valid_map(sbi, segno, off);
-		if (err == GC_NEXT)
+		if (check_valid_map(sbi, segno, off) == 0)
 			continue;
 
 		if (phase == 0) {
@@ -565,8 +547,7 @@ next_step:
 		}
 
 		/* Get an inode by ino with checking validity */
-		err = check_dnode(sbi, entry, &dni, start_addr + off, &nofs);
-		if (err == GC_NEXT)
+		if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
 			continue;
 
 		if (phase == 1) {
@@ -606,11 +587,9 @@ next_iput:
 	}
 	if (++phase < 4)
 		goto next_step;
-	err = GC_DONE;
-stop:
+
 	if (gc_type == FG_GC)
 		f2fs_submit_bio(sbi, DATA, true);
-	return err;
 }
 
 static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
@@ -624,17 +603,16 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
 	return ret;
 }
 
-static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
+static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
 				struct list_head *ilist, int gc_type)
 {
 	struct page *sum_page;
 	struct f2fs_summary_block *sum;
-	int ret = GC_DONE;
 
 	/* read segment summary of victim */
 	sum_page = get_sum_page(sbi, segno);
 	if (IS_ERR(sum_page))
-		return GC_ERROR;
+		return;
 
 	/*
 	 * CP needs to lock sum_page. In this time, we don't need
@@ -646,17 +624,16 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
 
 	switch (GET_SUM_TYPE((&sum->footer))) {
 	case SUM_TYPE_NODE:
-		ret = gc_node_segment(sbi, sum->entries, segno, gc_type);
+		gc_node_segment(sbi, sum->entries, segno, gc_type);
 		break;
 	case SUM_TYPE_DATA:
-		ret = gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
+		gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
 		break;
 	}
 	stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
 	stat_inc_call_count(sbi->stat_info);
 
 	f2fs_put_page(sum_page, 0);
-	return ret;
 }
 
 int f2fs_gc(struct f2fs_sb_info *sbi)
@@ -664,40 +641,38 @@ int f2fs_gc(struct f2fs_sb_info *sbi)
 	struct list_head ilist;
 	unsigned int segno, i;
 	int gc_type = BG_GC;
-	int gc_status = GC_NONE;
+	int nfree = 0;
+	int ret = -1;
 
 	INIT_LIST_HEAD(&ilist);
 gc_more:
 	if (!(sbi->sb->s_flags & MS_ACTIVE))
 		goto stop;
 
-	if (gc_type == BG_GC && has_not_enough_free_secs(sbi))
+	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree))
 		gc_type = FG_GC;
 
 	if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
 		goto stop;
+	ret = 0;
 
-	for (i = 0; i < sbi->segs_per_sec; i++) {
-		/*
-		 * do_garbage_collect will give us three gc_status:
-		 * GC_ERROR, GC_DONE, and GC_BLOCKED.
-		 * If GC is finished uncleanly, we have to return
-		 * the victim to dirty segment list.
-		 */
-		gc_status = do_garbage_collect(sbi, segno + i, &ilist, gc_type);
-		if (gc_status != GC_DONE)
-			break;
-	}
-	if (has_not_enough_free_secs(sbi)) {
-		write_checkpoint(sbi, (gc_status == GC_BLOCKED), false);
-		if (has_not_enough_free_secs(sbi))
-			goto gc_more;
-	}
+	for (i = 0; i < sbi->segs_per_sec; i++)
+		do_garbage_collect(sbi, segno + i, &ilist, gc_type);
+
+	if (gc_type == FG_GC &&
+			get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
+		nfree++;
+
+	if (has_not_enough_free_secs(sbi, nfree))
+		goto gc_more;
+
+	if (gc_type == FG_GC)
+		write_checkpoint(sbi, false);
stop:
 	mutex_unlock(&sbi->gc_mutex);
 
 	put_gc_inode(&ilist);
-	return gc_status;
+	return ret;
 }
 
 void build_gc_manager(struct f2fs_sb_info *sbi)
--
cgit v1.2.2
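
To make the new main loop of f2fs_gc() easier to follow outside the fragmented
hunks above, here is a small stand-alone C model of the post-patch control
flow. It is only a sketch: has_not_enough_free_secs(), get_victim(),
do_garbage_collect(), free_secs and needed_secs below are invented stand-ins
with made-up numbers, not the real f2fs functions (the real code operates on
struct f2fs_sb_info, checks get_valid_blocks() before counting a freed section,
and calls write_checkpoint()). Only the loop structure mirrors the patch: pick
a victim, collect it, count freed sections, retry without a checkpoint until
enough sections are free, then checkpoint once for foreground GC.

/* Stand-alone model of the new f2fs_gc() control flow (not kernel code). */
#include <stdbool.h>
#include <stdio.h>

enum gc_type { BG_GC, FG_GC };

static int free_secs = 2;          /* pretend current free-section count */
static const int needed_secs = 5;  /* pretend reclaim target */

static bool has_not_enough_free_secs(int nfree)
{
	return free_secs + nfree < needed_secs;
}

static bool get_victim(unsigned int *segno)
{
	static unsigned int next_segno;
	*segno = next_segno++;
	return true;               /* this model always finds a victim */
}

static void do_garbage_collect(unsigned int segno, enum gc_type type)
{
	printf("  gc segment %u (%s)\n", segno, type == FG_GC ? "FG" : "BG");
}

/* Mirrors the post-patch flow: no intermediate checkpoints, freed sections
 * counted per round, one checkpoint at the end of a foreground GC. */
static int model_f2fs_gc(void)
{
	enum gc_type gc_type = BG_GC;
	unsigned int segno;
	int nfree = 0;
	int ret = -1;

gc_more:
	if (gc_type == BG_GC && has_not_enough_free_secs(nfree))
		gc_type = FG_GC;

	if (!get_victim(&segno))
		goto stop;
	ret = 0;

	do_garbage_collect(segno, gc_type);

	if (gc_type == FG_GC)
		nfree++;           /* assume the whole section was freed */

	if (has_not_enough_free_secs(nfree))
		goto gc_more;

	if (gc_type == FG_GC)
		printf("checkpoint after freeing %d sections\n", nfree);
stop:
	return ret;
}

int main(void)
{
	return model_f2fs_gc() ? 1 : 0;
}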