author	Chao Yu <yuchao0@huawei.com>	2016-10-12 07:28:29 -0400
committer	Jaegeuk Kim <jaegeuk@kernel.org>	2016-11-23 15:11:00 -0500
commit	b8559dc242d1d47dcf99660a4d6afded727e0cc0 (patch)
tree	58d9b26dcfad551706a894d8c4fff80a5bdbff6f /fs/f2fs/debug.c
parent	a11b9f65eae766b17ec3451a6a1766f0a9d1dbff (diff)
f2fs: split free nid list
During free nid allocation, in order to support preallocation, we tag a free nid entry as allocated but still leave it on the free nid list. Other allocators that want to grab a free nid then have to traverse past those entries during lookup, which becomes an overhead when multiple threads allocate free nids intensively.

This patch splits the free nid list into two lists, {free,alloc}_nid_list, keeping free nids and preallocated nids separate; after that, the traversal latency is gone. It also splits nid_cnt into per-list counters for separate statistics.

Additionally, introduce __insert_nid_to_list and __remove_nid_from_list for cleanup.

Signed-off-by: Chao Yu <yuchao0@huawei.com>
[Jaegeuk Kim: modify f2fs_bug_on to avoid needless branches]
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
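For readers seeing only this debug.c view, here is a minimal sketch of the split-list idea. The list indices (FREE_NID_LIST, ALLOC_NID_LIST), the nid_cnt[] counters, and the helper names are taken from the commit message and the hunks below; the exact field layout, the simplified struct free_nid, and the stand-in name f2fs_nm_info_sketch are illustrative assumptions, since node.h/node.c are outside this diffstat-limited page.

#include <linux/list.h>
#include <linux/spinlock.h>

/* Simplified stand-in; the real struct free_nid lives in f2fs.h. */
struct free_nid {
	struct list_head list;	/* link into one of the nid lists */
	nid_t nid;		/* the cached node id */
};

enum nid_list {
	FREE_NID_LIST,		/* nids ready to be handed out */
	ALLOC_NID_LIST,		/* nids preallocated but not yet committed */
	MAX_NID_LIST,
};

struct f2fs_nm_info_sketch {			/* stand-in for f2fs_nm_info */
	struct list_head nid_list[MAX_NID_LIST];/* two separate lists */
	unsigned int nid_cnt[MAX_NID_LIST];	/* per-list counters */
	spinlock_t nid_list_lock;		/* protects lists and counters */
};

/* Add an entry to the chosen list and bump the matching counter. */
static void __insert_nid_to_list(struct f2fs_nm_info_sketch *nm_i,
				struct free_nid *i, enum nid_list list)
{
	list_add_tail(&i->list, &nm_i->nid_list[list]);
	nm_i->nid_cnt[list]++;
}

/* Detach an entry from its list and drop the matching counter. */
static void __remove_nid_from_list(struct f2fs_nm_info_sketch *nm_i,
				struct free_nid *i, enum nid_list list)
{
	list_del(&i->list);
	nm_i->nid_cnt[list]--;
}

With this split, preallocating a nid becomes a move from FREE_NID_LIST to ALLOC_NID_LIST (remove + insert) instead of tagging the entry in place, so grabbing a free nid is a constant-time pick from the head of FREE_NID_LIST rather than a scan that skips already-allocated entries.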
Diffstat (limited to 'fs/f2fs/debug.c')
-rw-r--r--  fs/f2fs/debug.c  11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index fb245bd302e4..6af146c48644 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -74,7 +74,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
 	si->dirty_nats = NM_I(sbi)->dirty_nat_cnt;
 	si->sits = MAIN_SEGS(sbi);
 	si->dirty_sits = SIT_I(sbi)->dirty_sentries;
-	si->fnids = NM_I(sbi)->fcnt;
+	si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID_LIST];
+	si->alloc_nids = NM_I(sbi)->nid_cnt[ALLOC_NID_LIST];
 	si->bg_gc = sbi->bg_gc;
 	si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg)
 		* 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
@@ -194,7 +195,9 @@ get_cache:
 	si->cache_mem += sizeof(struct flush_cmd_control);
 
 	/* free nids */
-	si->cache_mem += NM_I(sbi)->fcnt * sizeof(struct free_nid);
+	si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID_LIST] +
+				NM_I(sbi)->nid_cnt[ALLOC_NID_LIST]) *
+				sizeof(struct free_nid);
 	si->cache_mem += NM_I(sbi)->nat_cnt * sizeof(struct nat_entry);
 	si->cache_mem += NM_I(sbi)->dirty_nat_cnt *
 					sizeof(struct nat_entry_set);
@@ -324,8 +327,8 @@ static int stat_show(struct seq_file *s, void *v)
 			si->ndirty_imeta);
 	seq_printf(s, " - NATs: %9d/%9d\n - SITs: %9d/%9d\n",
 			si->dirty_nats, si->nats, si->dirty_sits, si->sits);
-	seq_printf(s, " - free_nids: %9d\n",
-		si->fnids);
+	seq_printf(s, " - free_nids: %9d, alloc_nids: %9d\n",
+		si->free_nids, si->alloc_nids);
 	seq_puts(s, "\nDistribution of User Blocks:");
 	seq_puts(s, " [ valid | invalid | free ]\n");
 	seq_puts(s, " [");
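With the stat_show() change above, the f2fs status file under debugfs (typically /sys/kernel/debug/f2fs/status) reports both counters on one line; the values below are illustrative only, not taken from the patch:

 - free_nids:      3071, alloc_nids:         2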