diff options
author | Jaegeuk Kim <jaegeuk.kim@samsung.com> | 2014-04-15 21:47:06 -0400 |
---|---|---|
committer | Jaegeuk Kim <jaegeuk.kim@samsung.com> | 2014-05-06 21:21:55 -0400 |
commit | 6fb03f3a40805a412c9b285010ffdc2e7563f81b (patch) | |
tree | 05698049e0f21bc265952aaa75a708ea006ca56a /fs/f2fs/node.c | |
parent | e8271fa3908de52937d298b339f9f7984c491cc6 (diff) |
f2fs: adjust free mem size to flush dentry blocks
If too many dirty dentry blocks are cached without reaching the flush condition,
we could fall into a livelock in balance_dirty_pages.
So, let's take the memory footprint of cached dentry blocks into account for the condition.
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Diffstat (limited to 'fs/f2fs/node.c')
-rw-r--r-- | fs/f2fs/node.c | 44 |
1 file changed, 26 insertions(+), 18 deletions(-)
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 64755f49d6e7..2803ef6cf533 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c | |||
@@ -26,20 +26,26 @@ | |||
26 | static struct kmem_cache *nat_entry_slab; | 26 | static struct kmem_cache *nat_entry_slab; |
27 | static struct kmem_cache *free_nid_slab; | 27 | static struct kmem_cache *free_nid_slab; |
28 | 28 | ||
29 | static inline bool available_free_memory(struct f2fs_nm_info *nm_i, int type) | 29 | bool available_free_memory(struct f2fs_sb_info *sbi, int type) |
30 | { | 30 | { |
31 | struct f2fs_nm_info *nm_i = NM_I(sbi); | ||
31 | struct sysinfo val; | 32 | struct sysinfo val; |
32 | unsigned long mem_size = 0; | 33 | unsigned long mem_size = 0; |
34 | bool res = false; | ||
33 | 35 | ||
34 | si_meminfo(&val); | 36 | si_meminfo(&val); |
35 | if (type == FREE_NIDS) | 37 | /* give 25%, 25%, 50% memory for each components respectively */ |
36 | mem_size = nm_i->fcnt * sizeof(struct free_nid); | 38 | if (type == FREE_NIDS) { |
37 | else if (type == NAT_ENTRIES) | 39 | mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >> 12; |
38 | mem_size += nm_i->nat_cnt * sizeof(struct nat_entry); | 40 | res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2); |
39 | mem_size >>= 12; | 41 | } else if (type == NAT_ENTRIES) { |
40 | 42 | mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> 12; | |
41 | /* give 50:50 memory for free nids and nat caches respectively */ | 43 | res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2); |
42 | return (mem_size < ((val.totalram * nm_i->ram_thresh) >> 11)); | 44 | } else if (type == DIRTY_DENTS) { |
45 | mem_size = get_pages(sbi, F2FS_DIRTY_DENTS); | ||
46 | res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 1); | ||
47 | } | ||
48 | return res; | ||
43 | } | 49 | } |
44 | 50 | ||
45 | static void clear_node_page_dirty(struct page *page) | 51 | static void clear_node_page_dirty(struct page *page) |
@@ -241,7 +247,7 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) | |||
241 | { | 247 | { |
242 | struct f2fs_nm_info *nm_i = NM_I(sbi); | 248 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
243 | 249 | ||
244 | if (available_free_memory(nm_i, NAT_ENTRIES)) | 250 | if (available_free_memory(sbi, NAT_ENTRIES)) |
245 | return 0; | 251 | return 0; |
246 | 252 | ||
247 | write_lock(&nm_i->nat_tree_lock); | 253 | write_lock(&nm_i->nat_tree_lock); |
@@ -1310,13 +1316,14 @@ static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i, | |||
1310 | radix_tree_delete(&nm_i->free_nid_root, i->nid); | 1316 | radix_tree_delete(&nm_i->free_nid_root, i->nid); |
1311 | } | 1317 | } |
1312 | 1318 | ||
1313 | static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build) | 1319 | static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) |
1314 | { | 1320 | { |
1321 | struct f2fs_nm_info *nm_i = NM_I(sbi); | ||
1315 | struct free_nid *i; | 1322 | struct free_nid *i; |
1316 | struct nat_entry *ne; | 1323 | struct nat_entry *ne; |
1317 | bool allocated = false; | 1324 | bool allocated = false; |
1318 | 1325 | ||
1319 | if (!available_free_memory(nm_i, FREE_NIDS)) | 1326 | if (!available_free_memory(sbi, FREE_NIDS)) |
1320 | return -1; | 1327 | return -1; |
1321 | 1328 | ||
1322 | /* 0 nid should not be used */ | 1329 | /* 0 nid should not be used */ |
@@ -1369,9 +1376,10 @@ static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid) | |||
1369 | kmem_cache_free(free_nid_slab, i); | 1376 | kmem_cache_free(free_nid_slab, i); |
1370 | } | 1377 | } |
1371 | 1378 | ||
1372 | static void scan_nat_page(struct f2fs_nm_info *nm_i, | 1379 | static void scan_nat_page(struct f2fs_sb_info *sbi, |
1373 | struct page *nat_page, nid_t start_nid) | 1380 | struct page *nat_page, nid_t start_nid) |
1374 | { | 1381 | { |
1382 | struct f2fs_nm_info *nm_i = NM_I(sbi); | ||
1375 | struct f2fs_nat_block *nat_blk = page_address(nat_page); | 1383 | struct f2fs_nat_block *nat_blk = page_address(nat_page); |
1376 | block_t blk_addr; | 1384 | block_t blk_addr; |
1377 | int i; | 1385 | int i; |
@@ -1386,7 +1394,7 @@ static void scan_nat_page(struct f2fs_nm_info *nm_i, | |||
1386 | blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr); | 1394 | blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr); |
1387 | f2fs_bug_on(blk_addr == NEW_ADDR); | 1395 | f2fs_bug_on(blk_addr == NEW_ADDR); |
1388 | if (blk_addr == NULL_ADDR) { | 1396 | if (blk_addr == NULL_ADDR) { |
1389 | if (add_free_nid(nm_i, start_nid, true) < 0) | 1397 | if (add_free_nid(sbi, start_nid, true) < 0) |
1390 | break; | 1398 | break; |
1391 | } | 1399 | } |
1392 | } | 1400 | } |
@@ -1410,7 +1418,7 @@ static void build_free_nids(struct f2fs_sb_info *sbi) | |||
1410 | while (1) { | 1418 | while (1) { |
1411 | struct page *page = get_current_nat_page(sbi, nid); | 1419 | struct page *page = get_current_nat_page(sbi, nid); |
1412 | 1420 | ||
1413 | scan_nat_page(nm_i, page, nid); | 1421 | scan_nat_page(sbi, page, nid); |
1414 | f2fs_put_page(page, 1); | 1422 | f2fs_put_page(page, 1); |
1415 | 1423 | ||
1416 | nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK)); | 1424 | nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK)); |
@@ -1430,7 +1438,7 @@ static void build_free_nids(struct f2fs_sb_info *sbi) | |||
1430 | block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr); | 1438 | block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr); |
1431 | nid = le32_to_cpu(nid_in_journal(sum, i)); | 1439 | nid = le32_to_cpu(nid_in_journal(sum, i)); |
1432 | if (addr == NULL_ADDR) | 1440 | if (addr == NULL_ADDR) |
1433 | add_free_nid(nm_i, nid, true); | 1441 | add_free_nid(sbi, nid, true); |
1434 | else | 1442 | else |
1435 | remove_free_nid(nm_i, nid); | 1443 | remove_free_nid(nm_i, nid); |
1436 | } | 1444 | } |
@@ -1507,7 +1515,7 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid) | |||
1507 | spin_lock(&nm_i->free_nid_list_lock); | 1515 | spin_lock(&nm_i->free_nid_list_lock); |
1508 | i = __lookup_free_nid_list(nm_i, nid); | 1516 | i = __lookup_free_nid_list(nm_i, nid); |
1509 | f2fs_bug_on(!i || i->state != NID_ALLOC); | 1517 | f2fs_bug_on(!i || i->state != NID_ALLOC); |
1510 | if (!available_free_memory(nm_i, FREE_NIDS)) { | 1518 | if (!available_free_memory(sbi, FREE_NIDS)) { |
1511 | __del_from_free_nid_list(nm_i, i); | 1519 | __del_from_free_nid_list(nm_i, i); |
1512 | need_free = true; | 1520 | need_free = true; |
1513 | } else { | 1521 | } else { |
@@ -1835,7 +1843,7 @@ flush_now: | |||
1835 | } | 1843 | } |
1836 | 1844 | ||
1837 | if (nat_get_blkaddr(ne) == NULL_ADDR && | 1845 | if (nat_get_blkaddr(ne) == NULL_ADDR && |
1838 | add_free_nid(NM_I(sbi), nid, false) <= 0) { | 1846 | add_free_nid(sbi, nid, false) <= 0) { |
1839 | write_lock(&nm_i->nat_tree_lock); | 1847 | write_lock(&nm_i->nat_tree_lock); |
1840 | __del_from_nat_cache(nm_i, ne); | 1848 | __del_from_nat_cache(nm_i, ne); |
1841 | write_unlock(&nm_i->nat_tree_lock); | 1849 | write_unlock(&nm_i->nat_tree_lock); |