author    Yunlei He <heyunlei@huawei.com>    2017-11-10 16:36:51 -0500
committer Jaegeuk Kim <jaegeuk@kernel.org>  2017-11-13 21:28:48 -0500
commit    12f9ef379a5039b8271b4636362b965267a78dda
tree      d4dd79eac1898d120cca9f5e0ba61a4980864483
parent    0dd99ca76f473d488fa9acac67f6a42ca1d7d2b1
f2fs: separate nat entry mem alloc from nat_tree_lock
This patch splits the nat_entry memory allocation out from under
nat_tree_lock to avoid lock contention.

Signed-off-by: Yunlei He <heyunlei@huawei.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
 fs/f2fs/node.c | 98 ++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 59 insertions(+), 39 deletions(-)
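The change is an instance of the classic allocate-before-lock pattern: allocate the cache entry with no lock held, take nat_tree_lock only for the lookup and the radix-tree insert, then free the preallocated entry if the lookup found an existing one. The payoff is that the slab allocation, which may sleep or kick reclaim under GFP_NOFS, no longer happens while other users of nat_tree_lock are blocked. Below is a minimal, compilable userspace sketch of the same pattern; all names are hypothetical, with tsearch() and a pthread rwlock standing in for the radix tree and nat_tree_lock:

#include <pthread.h>
#include <search.h>	/* tsearch(): stand-in for the radix tree */
#include <stdint.h>
#include <stdlib.h>

struct entry {
	uint32_t nid;
	/* cached node info would live here */
};

static void *root;	/* tsearch() tree root */
static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;

static int cmp_nid(const void *a, const void *b)
{
	const struct entry *x = a, *y = b;

	return (x->nid > y->nid) - (x->nid < y->nid);
}

static void cache_entry(uint32_t nid)
{
	/* allocate before taking the lock, as __alloc_nat_entry() now does */
	struct entry *new = calloc(1, sizeof(*new));
	struct entry **found;

	if (!new)
		return;	/* the may-fail flavor simply gives up */
	new->nid = nid;

	pthread_rwlock_wrlock(&tree_lock);
	/* tsearch() returns the existing node if nid is already cached */
	found = (struct entry **)tsearch(new, &root, cmp_nid);
	pthread_rwlock_unlock(&tree_lock);

	/* not inserted (lost to an existing entry): drop the preallocation,
	 * mirroring the new "if (e != new) __free_nat_entry(new);" tail */
	if (!found || *found != new)
		free(new);
}

int main(void)
{
	cache_entry(42);
	cache_entry(42);	/* hits the cache, frees its preallocation */
	return 0;
}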
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 9abfdbb5aae5..fe1fc662af2a 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -138,6 +138,44 @@ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
 	return dst_page;
 }
 
+static struct nat_entry *__alloc_nat_entry(nid_t nid, bool no_fail)
+{
+	struct nat_entry *new;
+
+	if (no_fail)
+		new = f2fs_kmem_cache_alloc(nat_entry_slab,
+						GFP_NOFS | __GFP_ZERO);
+	else
+		new = kmem_cache_alloc(nat_entry_slab,
+						GFP_NOFS | __GFP_ZERO);
+	if (new) {
+		nat_set_nid(new, nid);
+		nat_reset_flag(new);
+	}
+	return new;
+}
+
+static void __free_nat_entry(struct nat_entry *e)
+{
+	kmem_cache_free(nat_entry_slab, e);
+}
+
+/* must be locked by nat_tree_lock */
+static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
+	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
+{
+	if (no_fail)
+		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
+	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
+		return NULL;
+
+	if (raw_ne)
+		node_info_from_raw_nat(&ne->ni, raw_ne);
+	list_add_tail(&ne->list, &nm_i->nat_entries);
+	nm_i->nat_cnt++;
+	return ne;
+}
+
 static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
 {
 	return radix_tree_lookup(&nm_i->nat_root, n);
@@ -154,7 +192,7 @@ static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
 	list_del(&e->list);
 	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
 	nm_i->nat_cnt--;
-	kmem_cache_free(nat_entry_slab, e);
+	__free_nat_entry(e);
 }
 
 static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
@@ -250,49 +288,29 @@ bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
 	return need_update;
 }
 
-static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
-								bool no_fail)
-{
-	struct nat_entry *new;
-
-	if (no_fail) {
-		new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_NOFS);
-		f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
-	} else {
-		new = kmem_cache_alloc(nat_entry_slab, GFP_NOFS);
-		if (!new)
-			return NULL;
-		if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
-			kmem_cache_free(nat_entry_slab, new);
-			return NULL;
-		}
-	}
-
-	memset(new, 0, sizeof(struct nat_entry));
-	nat_set_nid(new, nid);
-	nat_reset_flag(new);
-	list_add_tail(&new->list, &nm_i->nat_entries);
-	nm_i->nat_cnt++;
-	return new;
-}
-
+/* must be locked by nat_tree_lock */
 static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
 						struct f2fs_nat_entry *ne)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
-	struct nat_entry *e;
+	struct nat_entry *new, *e;
 
+	new = __alloc_nat_entry(nid, false);
+	if (!new)
+		return;
+
+	down_write(&nm_i->nat_tree_lock);
 	e = __lookup_nat_cache(nm_i, nid);
-	if (!e) {
-		e = grab_nat_entry(nm_i, nid, false);
-		if (e)
-			node_info_from_raw_nat(&e->ni, ne);
-	} else {
+	if (!e)
+		e = __init_nat_entry(nm_i, new, ne, false);
+	else
 		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
 				nat_get_blkaddr(e) !=
 					le32_to_cpu(ne->block_addr) ||
 				nat_get_version(e) != ne->version);
-	}
+	up_write(&nm_i->nat_tree_lock);
+	if (e != new)
+		__free_nat_entry(new);
 }
 
 static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
@@ -300,11 +318,12 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct nat_entry *e;
+	struct nat_entry *new = __alloc_nat_entry(ni->nid, true);
 
 	down_write(&nm_i->nat_tree_lock);
 	e = __lookup_nat_cache(nm_i, ni->nid);
 	if (!e) {
-		e = grab_nat_entry(nm_i, ni->nid, true);
+		e = __init_nat_entry(nm_i, new, NULL, true);
 		copy_node_info(&e->ni, ni);
 		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
 	} else if (new_blkaddr == NEW_ADDR) {
@@ -316,6 +335,9 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
 		copy_node_info(&e->ni, ni);
 		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
 	}
+	/* let's free early to reduce memory consumption */
+	if (e != new)
+		__free_nat_entry(new);
 
 	/* sanity check */
 	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
@@ -424,9 +446,7 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
 	f2fs_put_page(page, 1);
 cache:
 	/* cache nat entry */
-	down_write(&nm_i->nat_tree_lock);
 	cache_nat_entry(sbi, nid, &ne);
-	up_write(&nm_i->nat_tree_lock);
 }
 
 /*
@@ -2374,8 +2394,8 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
 
 		ne = __lookup_nat_cache(nm_i, nid);
 		if (!ne) {
-			ne = grab_nat_entry(nm_i, nid, true);
-			node_info_from_raw_nat(&ne->ni, &raw_ne);
+			ne = __alloc_nat_entry(nid, true);
+			__init_nat_entry(nm_i, ne, &raw_ne, true);
 		}
 
 		/*
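A note on the no_fail paths: set_node_addr() and remove_nats_in_journal() never check the returned entry for NULL because __alloc_nat_entry(nid, true) uses f2fs_kmem_cache_alloc(), which cannot fail. The sketch below approximates that helper as it appeared in f2fs.h around this time (an assumption; verify against the matching tree). Note also that the GFP_NOFS | __GFP_ZERO flags in __alloc_nat_entry() replace the explicit memset() that grab_nat_entry() used to do.

/* approximation of f2fs_kmem_cache_alloc() from f2fs.h of this era */
static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
						gfp_t flags)
{
	void *entry;

	entry = kmem_cache_alloc(cachep, flags);
	if (!entry)
		/* __GFP_NOFAIL blocks and retries inside the allocator,
		 * so this second attempt never returns NULL */
		entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
	return entry;
}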