diff options
author | Chao Yu <yuchao0@huawei.com> | 2018-08-05 11:08:59 -0400 |
---|---|---|
committer | Jaegeuk Kim <jaegeuk@kernel.org> | 2018-08-13 13:48:17 -0400 |
commit | 22969158083c9e5c92f66718dde1a372baa1a49d (patch) | |
tree | 6a5f4a7921b9bca47068352fc0b6cdb92f49bd06 | |
parent | a33c150237a20d97a174243bc658c86502f9d370 (diff) |
f2fs: refresh recent accessed nat entry in lru list
Introduce nat_list_lock to protect nm_i->nat_entries list, and manage
it as an LRU list, refreshing the position of recently accessed entries
in the list.
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
-rw-r--r-- | fs/f2fs/f2fs.h | 1 | ||||
-rw-r--r-- | fs/f2fs/node.c | 46 |
2 files changed, 43 insertions, 4 deletions
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 1647a13be7f9..d9df58163f29 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h | |||
@@ -780,6 +780,7 @@ struct f2fs_nm_info { | |||
780 | struct radix_tree_root nat_set_root;/* root of the nat set cache */ | 780 | struct radix_tree_root nat_set_root;/* root of the nat set cache */ |
781 | struct rw_semaphore nat_tree_lock; /* protect nat_tree_lock */ | 781 | struct rw_semaphore nat_tree_lock; /* protect nat_tree_lock */ |
782 | struct list_head nat_entries; /* cached nat entry list (clean) */ | 782 | struct list_head nat_entries; /* cached nat entry list (clean) */ |
783 | spinlock_t nat_list_lock; /* protect clean nat entry list */ | ||
783 | unsigned int nat_cnt; /* the # of cached nat entries */ | 784 | unsigned int nat_cnt; /* the # of cached nat entries */ |
784 | unsigned int dirty_nat_cnt; /* total num of nat entries in set */ | 785 | unsigned int dirty_nat_cnt; /* total num of nat entries in set */ |
785 | unsigned int nat_blocks; /* # of nat blocks */ | 786 | unsigned int nat_blocks; /* # of nat blocks */ |
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 81fb2f3edb52..472dd643b074 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c | |||
@@ -174,14 +174,30 @@ static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i, | |||
174 | 174 | ||
175 | if (raw_ne) | 175 | if (raw_ne) |
176 | node_info_from_raw_nat(&ne->ni, raw_ne); | 176 | node_info_from_raw_nat(&ne->ni, raw_ne); |
177 | |||
178 | spin_lock(&nm_i->nat_list_lock); | ||
177 | list_add_tail(&ne->list, &nm_i->nat_entries); | 179 | list_add_tail(&ne->list, &nm_i->nat_entries); |
180 | spin_unlock(&nm_i->nat_list_lock); | ||
181 | |||
178 | nm_i->nat_cnt++; | 182 | nm_i->nat_cnt++; |
179 | return ne; | 183 | return ne; |
180 | } | 184 | } |
181 | 185 | ||
182 | static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n) | 186 | static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n) |
183 | { | 187 | { |
184 | return radix_tree_lookup(&nm_i->nat_root, n); | 188 | struct nat_entry *ne; |
189 | |||
190 | ne = radix_tree_lookup(&nm_i->nat_root, n); | ||
191 | |||
192 | /* for recent accessed nat entry, move it to tail of lru list */ | ||
193 | if (ne && !get_nat_flag(ne, IS_DIRTY)) { | ||
194 | spin_lock(&nm_i->nat_list_lock); | ||
195 | if (!list_empty(&ne->list)) | ||
196 | list_move_tail(&ne->list, &nm_i->nat_entries); | ||
197 | spin_unlock(&nm_i->nat_list_lock); | ||
198 | } | ||
199 | |||
200 | return ne; | ||
185 | } | 201 | } |
186 | 202 | ||
187 | static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i, | 203 | static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i, |
@@ -192,7 +208,6 @@ static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i, | |||
192 | 208 | ||
193 | static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e) | 209 | static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e) |
194 | { | 210 | { |
195 | list_del(&e->list); | ||
196 | radix_tree_delete(&nm_i->nat_root, nat_get_nid(e)); | 211 | radix_tree_delete(&nm_i->nat_root, nat_get_nid(e)); |
197 | nm_i->nat_cnt--; | 212 | nm_i->nat_cnt--; |
198 | __free_nat_entry(e); | 213 | __free_nat_entry(e); |
@@ -243,16 +258,21 @@ static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i, | |||
243 | nm_i->dirty_nat_cnt++; | 258 | nm_i->dirty_nat_cnt++; |
244 | set_nat_flag(ne, IS_DIRTY, true); | 259 | set_nat_flag(ne, IS_DIRTY, true); |
245 | refresh_list: | 260 | refresh_list: |
261 | spin_lock(&nm_i->nat_list_lock); | ||
246 | if (new_ne) | 262 | if (new_ne) |
247 | list_del_init(&ne->list); | 263 | list_del_init(&ne->list); |
248 | else | 264 | else |
249 | list_move_tail(&ne->list, &head->entry_list); | 265 | list_move_tail(&ne->list, &head->entry_list); |
266 | spin_unlock(&nm_i->nat_list_lock); | ||
250 | } | 267 | } |
251 | 268 | ||
252 | static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i, | 269 | static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i, |
253 | struct nat_entry_set *set, struct nat_entry *ne) | 270 | struct nat_entry_set *set, struct nat_entry *ne) |
254 | { | 271 | { |
272 | spin_lock(&nm_i->nat_list_lock); | ||
255 | list_move_tail(&ne->list, &nm_i->nat_entries); | 273 | list_move_tail(&ne->list, &nm_i->nat_entries); |
274 | spin_unlock(&nm_i->nat_list_lock); | ||
275 | |||
256 | set_nat_flag(ne, IS_DIRTY, false); | 276 | set_nat_flag(ne, IS_DIRTY, false); |
257 | set->entry_cnt--; | 277 | set->entry_cnt--; |
258 | nm_i->dirty_nat_cnt--; | 278 | nm_i->dirty_nat_cnt--; |
@@ -469,13 +489,25 @@ int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) | |||
469 | if (!down_write_trylock(&nm_i->nat_tree_lock)) | 489 | if (!down_write_trylock(&nm_i->nat_tree_lock)) |
470 | return 0; | 490 | return 0; |
471 | 491 | ||
472 | while (nr_shrink && !list_empty(&nm_i->nat_entries)) { | 492 | spin_lock(&nm_i->nat_list_lock); |
493 | while (nr_shrink) { | ||
473 | struct nat_entry *ne; | 494 | struct nat_entry *ne; |
495 | |||
496 | if (list_empty(&nm_i->nat_entries)) | ||
497 | break; | ||
498 | |||
474 | ne = list_first_entry(&nm_i->nat_entries, | 499 | ne = list_first_entry(&nm_i->nat_entries, |
475 | struct nat_entry, list); | 500 | struct nat_entry, list); |
501 | list_del(&ne->list); | ||
502 | spin_unlock(&nm_i->nat_list_lock); | ||
503 | |||
476 | __del_from_nat_cache(nm_i, ne); | 504 | __del_from_nat_cache(nm_i, ne); |
477 | nr_shrink--; | 505 | nr_shrink--; |
506 | |||
507 | spin_lock(&nm_i->nat_list_lock); | ||
478 | } | 508 | } |
509 | spin_unlock(&nm_i->nat_list_lock); | ||
510 | |||
479 | up_write(&nm_i->nat_tree_lock); | 511 | up_write(&nm_i->nat_tree_lock); |
480 | return nr - nr_shrink; | 512 | return nr - nr_shrink; |
481 | } | 513 | } |
@@ -2906,6 +2938,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi) | |||
2906 | INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO); | 2938 | INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO); |
2907 | INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO); | 2939 | INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO); |
2908 | INIT_LIST_HEAD(&nm_i->nat_entries); | 2940 | INIT_LIST_HEAD(&nm_i->nat_entries); |
2941 | spin_lock_init(&nm_i->nat_list_lock); | ||
2909 | 2942 | ||
2910 | mutex_init(&nm_i->build_lock); | 2943 | mutex_init(&nm_i->build_lock); |
2911 | spin_lock_init(&nm_i->nid_list_lock); | 2944 | spin_lock_init(&nm_i->nid_list_lock); |
@@ -3024,8 +3057,13 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi) | |||
3024 | unsigned idx; | 3057 | unsigned idx; |
3025 | 3058 | ||
3026 | nid = nat_get_nid(natvec[found - 1]) + 1; | 3059 | nid = nat_get_nid(natvec[found - 1]) + 1; |
3027 | for (idx = 0; idx < found; idx++) | 3060 | for (idx = 0; idx < found; idx++) { |
3061 | spin_lock(&nm_i->nat_list_lock); | ||
3062 | list_del(&natvec[idx]->list); | ||
3063 | spin_unlock(&nm_i->nat_list_lock); | ||
3064 | |||
3028 | __del_from_nat_cache(nm_i, natvec[idx]); | 3065 | __del_from_nat_cache(nm_i, natvec[idx]); |
3066 | } | ||
3029 | } | 3067 | } |
3030 | f2fs_bug_on(sbi, nm_i->nat_cnt); | 3068 | f2fs_bug_on(sbi, nm_i->nat_cnt); |
3031 | 3069 | ||