author    Jaegeuk Kim <jaegeuk@kernel.org>    2014-09-11 16:49:55 -0400
committer Jaegeuk Kim <jaegeuk@kernel.org>    2014-09-23 14:10:12 -0400
commit    4c521f493b625c7982cf2eae246e86c893f62dfa (patch)
tree      cb88bac09865787eb17fb7e98854e25906729717 /fs/f2fs/checkpoint.c
parent    60979115a69e0e7916a1c1796f902264f1350977 (diff)
f2fs: use meta_inode cache to improve roll-forward speed
Previously, all the dnode pages had to be read during roll-forward recovery. Even worse, the whole chain was traversed twice. This patch removes those redundant and costly read operations by using the page cache of meta_inode together with the readahead function.

Reviewed-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
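The pattern the patch introduces in get_meta_page_ra() below is: probe the page cache first, trigger a wide readahead only on a miss, then do the usual synchronous read. A minimal user-space sketch of that idea (not the kernel code; BLKSIZE, RA_WINDOW, and the mincore()-based cache probe are illustrative assumptions standing in for find_get_page()/PageUptodate()):

/*
 * Hypothetical user-space analogue of get_meta_page_ra(), for
 * illustration only: probe the cache, prefetch on a miss, then read.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define BLKSIZE   4096          /* one "meta page" (assumed) */
#define RA_WINDOW (256 * 1024)  /* prefetch window, like one large bio (assumed) */

static void read_meta_block(int fd, off_t index, char *buf)
{
	off_t off = index * BLKSIZE;
	unsigned char vec = 0;
	void *map;

	/* Cache probe: is this block already resident? */
	map = mmap(NULL, BLKSIZE, PROT_READ, MAP_SHARED, fd, off);
	if (map != MAP_FAILED) {
		if (mincore(map, BLKSIZE, &vec) == 0 && !(vec & 1))
			readahead(fd, off, RA_WINDOW);	/* miss: prefetch ahead */
		munmap(map, BLKSIZE);
	}

	/* Synchronous read; after the readahead it should hit the cache. */
	if (pread(fd, buf, BLKSIZE, off) < 0)
		perror("pread");
}

int main(int argc, char **argv)
{
	char buf[BLKSIZE];
	off_t i;
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Sequential scan, as roll-forward recovery walks the log area. */
	for (i = 0; i < 8; i++)
		read_meta_block(fd, i, buf);
	close(fd);
	return 0;
}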
Diffstat (limited to 'fs/f2fs/checkpoint.c')
-rw-r--r--  fs/f2fs/checkpoint.c  33
1 file changed, 28 insertions(+), 5 deletions(-)
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index d44d287cdae9..a1786d680906 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -72,7 +72,23 @@ out:
 	return page;
 }
 
-static inline int get_max_meta_blks(struct f2fs_sb_info *sbi, int type)
+struct page *get_meta_page_ra(struct f2fs_sb_info *sbi, pgoff_t index)
+{
+	bool readahead = false;
+	struct page *page;
+
+	page = find_get_page(META_MAPPING(sbi), index);
+	if (!page || (page && !PageUptodate(page)))
+		readahead = true;
+	f2fs_put_page(page, 0);
+
+	if (readahead)
+		ra_meta_pages(sbi, index,
+				MAX_BIO_BLOCKS(max_hw_blocks(sbi)), META_POR);
+	return get_meta_page(sbi, index);
+}
+
+static inline block_t get_max_meta_blks(struct f2fs_sb_info *sbi, int type)
 {
 	switch (type) {
 	case META_NAT:
@@ -82,6 +98,8 @@ static inline int get_max_meta_blks(struct f2fs_sb_info *sbi, int type)
 	case META_SSA:
 	case META_CP:
 		return 0;
+	case META_POR:
+		return SM_I(sbi)->seg0_blkaddr + TOTAL_BLKS(sbi);
 	default:
 		BUG();
 	}
@@ -90,12 +108,13 @@ static inline int get_max_meta_blks(struct f2fs_sb_info *sbi, int type)
 /*
  * Readahead CP/NAT/SIT/SSA pages
  */
-int ra_meta_pages(struct f2fs_sb_info *sbi, int start, int nrpages, int type)
+int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type)
 {
 	block_t prev_blk_addr = 0;
 	struct page *page;
-	int blkno = start;
-	int max_blks = get_max_meta_blks(sbi, type);
+	block_t blkno = start;
+	block_t max_blks = get_max_meta_blks(sbi, type);
+	block_t min_blks = SM_I(sbi)->seg0_blkaddr;
 
 	struct f2fs_io_info fio = {
 		.type = META,
@@ -125,7 +144,11 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, int start, int nrpages, int type)
 		break;
 	case META_SSA:
 	case META_CP:
-		/* get ssa/cp block addr */
+	case META_POR:
+		if (unlikely(blkno >= max_blks))
+			goto out;
+		if (unlikely(blkno < min_blks))
+			goto out;
 		blk_addr = blkno;
 		break;
 	default:
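The new META_POR case only trusts block numbers inside the valid meta area. Stated as a stand-alone helper (hypothetical, not part of the patch; the bounds correspond to seg0_blkaddr and seg0_blkaddr + TOTAL_BLKS from the hunks above):

/* Hypothetical restatement of the guard added above: a META_POR
 * block address is usable only if it lies in [min_blks, max_blks). */
static inline int meta_por_blkno_valid(unsigned long long blkno,
				       unsigned long long min_blks,
				       unsigned long long max_blks)
{
	return blkno >= min_blks && blkno < max_blks;
}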