Diffstat (limited to 'fs/f2fs/node.c'):
 fs/f2fs/node.c | 154 ++++++++++++++++++++++---------------------------------
 1 file changed, 67 insertions(+), 87 deletions(-)
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index f83326ca32ef..97bd9d3db882 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -19,6 +19,7 @@
 #include "f2fs.h"
 #include "node.h"
 #include "segment.h"
+#include "trace.h"
 #include <trace/events/f2fs.h>
 
 #define on_build_free_nids(nmi) mutex_is_locked(&nm_i->build_lock)
@@ -57,12 +58,13 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
 	} else if (type == INO_ENTRIES) {
 		int i;
 
-		if (sbi->sb->s_bdi->dirty_exceeded)
-			return false;
 		for (i = 0; i <= UPDATE_INO; i++)
 			mem_size += (sbi->im[i].ino_num *
 				sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT;
 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
+	} else {
+		if (sbi->sb->s_bdi->dirty_exceeded)
+			return false;
 	}
 	return res;
 }
@@ -268,7 +270,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
 	e = __lookup_nat_cache(nm_i, ni->nid);
 	if (!e) {
 		e = grab_nat_entry(nm_i, ni->nid);
-		e->ni = *ni;
+		copy_node_info(&e->ni, ni);
 		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
 	} else if (new_blkaddr == NEW_ADDR) {
 		/*
@@ -276,7 +278,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
 		 * previous nat entry can be remained in nat cache.
 		 * So, reinitialize it with new information.
 		 */
-		e->ni = *ni;
+		copy_node_info(&e->ni, ni);
 		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
 	}
 
@@ -346,7 +348,6 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
 	struct nat_entry *e;
 	int i;
 
-	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
 	ni->nid = nid;
 
 	/* Check nat cache */
@@ -361,6 +362,8 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
 	if (e)
 		return;
 
+	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
+
 	/* Check current segment summary */
 	mutex_lock(&curseg->curseg_mutex);
 	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
@@ -471,7 +474,7 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 	struct page *npage[4];
-	struct page *parent;
+	struct page *parent = NULL;
 	int offset[4];
 	unsigned int noffset[4];
 	nid_t nids[4];
@@ -488,6 +491,14 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
 		if (IS_ERR(npage[0]))
 			return PTR_ERR(npage[0]);
 	}
+
+	/* if inline_data is set, should not report any block indices */
+	if (f2fs_has_inline_data(dn->inode) && index) {
+		err = -EINVAL;
+		f2fs_put_page(npage[0], 1);
+		goto release_out;
+	}
+
 	parent = npage[0];
 	if (level != 0)
 		nids[1] = get_nid(parent, offset[0], true);
@@ -585,7 +596,7 @@ static void truncate_node(struct dnode_of_data *dn)
 	}
 invalidate:
 	clear_node_page_dirty(dn->node_page);
-	F2FS_SET_SB_DIRT(sbi);
+	set_sbi_flag(sbi, SBI_IS_DIRTY);
 
 	f2fs_put_page(dn->node_page, 1);
 
@@ -976,6 +987,10 @@ static int read_node_page(struct page *page, int rw)
 {
 	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
 	struct node_info ni;
+	struct f2fs_io_info fio = {
+		.type = NODE,
+		.rw = rw,
+	};
 
 	get_node_info(sbi, page->index, &ni);
 
@@ -987,7 +1002,8 @@ static int read_node_page(struct page *page, int rw)
 	if (PageUptodate(page))
 		return LOCKED_PAGE;
 
-	return f2fs_submit_page_bio(sbi, page, ni.blk_addr, rw);
+	fio.blk_addr = ni.blk_addr;
+	return f2fs_submit_page_bio(sbi, page, &fio);
 }
 
 /*
@@ -1028,11 +1044,11 @@ repeat:
 	err = read_node_page(page, READ_SYNC);
 	if (err < 0)
 		return ERR_PTR(err);
-	else if (err == LOCKED_PAGE)
-		goto got_it;
+	else if (err != LOCKED_PAGE)
+		lock_page(page);
 
-	lock_page(page);
 	if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) {
+		ClearPageUptodate(page);
 		f2fs_put_page(page, 1);
 		return ERR_PTR(-EIO);
 	}
@@ -1040,7 +1056,6 @@ repeat:
 		f2fs_put_page(page, 1);
 		goto repeat;
 	}
-got_it:
 	return page;
 }
 
@@ -1268,7 +1283,6 @@ static int f2fs_write_node_page(struct page *page,
 {
 	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
 	nid_t nid;
-	block_t new_addr;
 	struct node_info ni;
 	struct f2fs_io_info fio = {
 		.type = NODE,
@@ -1277,7 +1291,7 @@
 
 	trace_f2fs_writepage(page, NODE);
 
-	if (unlikely(sbi->por_doing))
+	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		goto redirty_out;
 	if (unlikely(f2fs_cp_error(sbi)))
 		goto redirty_out;
@@ -1303,9 +1317,11 @@
 	} else {
 		down_read(&sbi->node_write);
 	}
+
 	set_page_writeback(page);
-	write_node_page(sbi, page, &fio, nid, ni.blk_addr, &new_addr);
-	set_node_addr(sbi, &ni, new_addr, is_fsync_dnode(page));
+	fio.blk_addr = ni.blk_addr;
+	write_node_page(sbi, page, nid, &fio);
+	set_node_addr(sbi, &ni, fio.blk_addr, is_fsync_dnode(page));
 	dec_page_count(sbi, F2FS_DIRTY_NODES);
 	up_read(&sbi->node_write);
 	unlock_page(page);
@@ -1355,26 +1371,12 @@ static int f2fs_set_node_page_dirty(struct page *page)
 		__set_page_dirty_nobuffers(page);
 		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
 		SetPagePrivate(page);
+		f2fs_trace_pid(page);
 		return 1;
 	}
 	return 0;
 }
 
-static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
-				      unsigned int length)
-{
-	struct inode *inode = page->mapping->host;
-	if (PageDirty(page))
-		dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_NODES);
-	ClearPagePrivate(page);
-}
-
-static int f2fs_release_node_page(struct page *page, gfp_t wait)
-{
-	ClearPagePrivate(page);
-	return 1;
-}
-
 /*
  * Structure of the f2fs node operations
  */
@@ -1382,8 +1384,8 @@ const struct address_space_operations f2fs_node_aops = {
 	.writepage = f2fs_write_node_page,
 	.writepages = f2fs_write_node_pages,
 	.set_page_dirty = f2fs_set_node_page_dirty,
-	.invalidatepage = f2fs_invalidate_node_page,
-	.releasepage = f2fs_release_node_page,
+	.invalidatepage = f2fs_invalidate_page,
+	.releasepage = f2fs_release_page,
 };
 
 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
@@ -1726,80 +1728,41 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
 	return 0;
 }
 
-/*
- * ra_sum_pages() merge contiguous pages into one bio and submit.
- * these pre-read pages are allocated in bd_inode's mapping tree.
- */
-static int ra_sum_pages(struct f2fs_sb_info *sbi, struct page **pages,
-				int start, int nrpages)
-{
-	struct inode *inode = sbi->sb->s_bdev->bd_inode;
-	struct address_space *mapping = inode->i_mapping;
-	int i, page_idx = start;
-	struct f2fs_io_info fio = {
-		.type = META,
-		.rw = READ_SYNC | REQ_META | REQ_PRIO
-	};
-
-	for (i = 0; page_idx < start + nrpages; page_idx++, i++) {
-		/* alloc page in bd_inode for reading node summary info */
-		pages[i] = grab_cache_page(mapping, page_idx);
-		if (!pages[i])
-			break;
-		f2fs_submit_page_mbio(sbi, pages[i], page_idx, &fio);
-	}
-
-	f2fs_submit_merged_bio(sbi, META, READ);
-	return i;
-}
-
 int restore_node_summary(struct f2fs_sb_info *sbi,
 			unsigned int segno, struct f2fs_summary_block *sum)
 {
 	struct f2fs_node *rn;
 	struct f2fs_summary *sum_entry;
-	struct inode *inode = sbi->sb->s_bdev->bd_inode;
 	block_t addr;
 	int bio_blocks = MAX_BIO_BLOCKS(sbi);
-	struct page *pages[bio_blocks];
-	int i, idx, last_offset, nrpages, err = 0;
+	int i, idx, last_offset, nrpages;
 
 	/* scan the node segment */
 	last_offset = sbi->blocks_per_seg;
 	addr = START_BLOCK(sbi, segno);
 	sum_entry = &sum->entries[0];
 
-	for (i = 0; !err && i < last_offset; i += nrpages, addr += nrpages) {
+	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
 		nrpages = min(last_offset - i, bio_blocks);
 
 		/* readahead node pages */
-		nrpages = ra_sum_pages(sbi, pages, addr, nrpages);
-		if (!nrpages)
-			return -ENOMEM;
+		ra_meta_pages(sbi, addr, nrpages, META_POR);
 
-		for (idx = 0; idx < nrpages; idx++) {
-			if (err)
-				goto skip;
+		for (idx = addr; idx < addr + nrpages; idx++) {
+			struct page *page = get_meta_page(sbi, idx);
 
-			lock_page(pages[idx]);
-			if (unlikely(!PageUptodate(pages[idx]))) {
-				err = -EIO;
-			} else {
-				rn = F2FS_NODE(pages[idx]);
-				sum_entry->nid = rn->footer.nid;
-				sum_entry->version = 0;
-				sum_entry->ofs_in_node = 0;
-				sum_entry++;
-			}
-			unlock_page(pages[idx]);
-skip:
-			page_cache_release(pages[idx]);
+			rn = F2FS_NODE(page);
+			sum_entry->nid = rn->footer.nid;
+			sum_entry->version = 0;
+			sum_entry->ofs_in_node = 0;
+			sum_entry++;
+			f2fs_put_page(page, 1);
 		}
 
-		invalidate_mapping_pages(inode->i_mapping, addr,
-							addr + nrpages);
+		invalidate_mapping_pages(META_MAPPING(sbi), addr,
+							addr + nrpages);
 	}
-	return err;
+	return 0;
 }
 
 static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
@@ -1923,7 +1886,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
 	struct f2fs_summary_block *sum = curseg->sum_blk;
-	struct nat_entry_set *setvec[NATVEC_SIZE];
+	struct nat_entry_set *setvec[SETVEC_SIZE];
 	struct nat_entry_set *set, *tmp;
 	unsigned int found;
 	nid_t set_idx = 0;
@@ -1940,7 +1903,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
 	remove_nats_in_journal(sbi);
 
 	while ((found = __gang_lookup_nat_set(nm_i,
-					set_idx, NATVEC_SIZE, setvec))) {
+					set_idx, SETVEC_SIZE, setvec))) {
 		unsigned idx;
 		set_idx = setvec[found - 1]->set + 1;
 		for (idx = 0; idx < found; idx++)
@@ -2020,6 +1983,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct free_nid *i, *next_i;
 	struct nat_entry *natvec[NATVEC_SIZE];
+	struct nat_entry_set *setvec[SETVEC_SIZE];
 	nid_t nid = 0;
 	unsigned int found;
 
@@ -2044,11 +2008,27 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
 	while ((found = __gang_lookup_nat_cache(nm_i,
 			nid, NATVEC_SIZE, natvec))) {
 		unsigned idx;
+
 		nid = nat_get_nid(natvec[found - 1]) + 1;
 		for (idx = 0; idx < found; idx++)
 			__del_from_nat_cache(nm_i, natvec[idx]);
 	}
 	f2fs_bug_on(sbi, nm_i->nat_cnt);
+
+	/* destroy nat set cache */
+	nid = 0;
+	while ((found = __gang_lookup_nat_set(nm_i,
+			nid, SETVEC_SIZE, setvec))) {
+		unsigned idx;
+
+		nid = setvec[found - 1]->set + 1;
+		for (idx = 0; idx < found; idx++) {
+			/* entry_cnt is not zero, when cp_error was occurred */
+			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
+			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
+			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
+		}
+	}
 	up_write(&nm_i->nat_tree_lock);
 
 	kfree(nm_i->nat_bitmap);