aboutsummaryrefslogtreecommitdiffstats
path: root/fs/f2fs
diff options
context:
space:
mode:
authorChao Yu <chao2.yu@samsung.com>2014-12-08 02:02:52 -0500
committerJaegeuk Kim <jaegeuk@kernel.org>2014-12-08 17:19:09 -0500
commit635aee1fefef921ae4124b127fced62ea6008839 (patch)
tree92559fbad6271bbb0db0c7047b0e6bb370a9f1bc /fs/f2fs
parent66b00c186764e29765e8962a03556c329dee48e5 (diff)
f2fs: avoid to ra unneeded blocks in recover flow
To improve recovery speed, f2fs tries to readahead many contiguous blocks in the warm node segment, but most of the time an abnormal power-off does not occur frequently, so when mounting a normally powered-off f2fs image, reading ahead so many blocks and then invalidating them will, on the contrary, hurt mount performance. It's better to just readahead the first next-block for the normal condition. Signed-off-by: Chao Yu <chao2.yu@samsung.com> Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Diffstat (limited to 'fs/f2fs')
-rw-r--r--fs/f2fs/checkpoint.c29
-rw-r--r--fs/f2fs/f2fs.h2
-rw-r--r--fs/f2fs/recovery.c10
3 files changed, 23 insertions, 18 deletions
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index b2d5431d6850..e6c271fefaca 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -72,21 +72,6 @@ out:
72 return page; 72 return page;
73} 73}
74 74
75struct page *get_meta_page_ra(struct f2fs_sb_info *sbi, pgoff_t index)
76{
77 bool readahead = false;
78 struct page *page;
79
80 page = find_get_page(META_MAPPING(sbi), index);
81 if (!page || (page && !PageUptodate(page)))
82 readahead = true;
83 f2fs_put_page(page, 0);
84
85 if (readahead)
86 ra_meta_pages(sbi, index, MAX_BIO_BLOCKS(sbi), META_POR);
87 return get_meta_page(sbi, index);
88}
89
90static inline bool is_valid_blkaddr(struct f2fs_sb_info *sbi, 75static inline bool is_valid_blkaddr(struct f2fs_sb_info *sbi,
91 block_t blkaddr, int type) 76 block_t blkaddr, int type)
92{ 77{
@@ -181,6 +166,20 @@ out:
181 return blkno - start; 166 return blkno - start;
182} 167}
183 168
169void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
170{
171 struct page *page;
172 bool readahead = false;
173
174 page = find_get_page(META_MAPPING(sbi), index);
175 if (!page || (page && !PageUptodate(page)))
176 readahead = true;
177 f2fs_put_page(page, 0);
178
179 if (readahead)
180 ra_meta_pages(sbi, index, MAX_BIO_BLOCKS(sbi), META_POR);
181}
182
184static int f2fs_write_meta_page(struct page *page, 183static int f2fs_write_meta_page(struct page *page,
185 struct writeback_control *wbc) 184 struct writeback_control *wbc)
186{ 185{
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 2695d78d57dd..ec58bb2373fc 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1431,8 +1431,8 @@ void destroy_segment_manager_caches(void);
1431 */ 1431 */
1432struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t); 1432struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
1433struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t); 1433struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
1434struct page *get_meta_page_ra(struct f2fs_sb_info *, pgoff_t);
1435int ra_meta_pages(struct f2fs_sb_info *, block_t, int, int); 1434int ra_meta_pages(struct f2fs_sb_info *, block_t, int, int);
1435void ra_meta_pages_cond(struct f2fs_sb_info *, pgoff_t);
1436long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long); 1436long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
1437void add_dirty_inode(struct f2fs_sb_info *, nid_t, int type); 1437void add_dirty_inode(struct f2fs_sb_info *, nid_t, int type);
1438void remove_dirty_inode(struct f2fs_sb_info *, nid_t, int type); 1438void remove_dirty_inode(struct f2fs_sb_info *, nid_t, int type);
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 9a93a6e29b05..9160a37e1c7a 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -170,13 +170,15 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
170 curseg = CURSEG_I(sbi, CURSEG_WARM_NODE); 170 curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
171 blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); 171 blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
172 172
173 ra_meta_pages(sbi, blkaddr, 1, META_POR);
174
173 while (1) { 175 while (1) {
174 struct fsync_inode_entry *entry; 176 struct fsync_inode_entry *entry;
175 177
176 if (blkaddr < MAIN_BLKADDR(sbi) || blkaddr >= MAX_BLKADDR(sbi)) 178 if (blkaddr < MAIN_BLKADDR(sbi) || blkaddr >= MAX_BLKADDR(sbi))
177 return 0; 179 return 0;
178 180
179 page = get_meta_page_ra(sbi, blkaddr); 181 page = get_meta_page(sbi, blkaddr);
180 182
181 if (cp_ver != cpver_of_node(page)) 183 if (cp_ver != cpver_of_node(page))
182 break; 184 break;
@@ -227,6 +229,8 @@ next:
227 /* check next segment */ 229 /* check next segment */
228 blkaddr = next_blkaddr_of_node(page); 230 blkaddr = next_blkaddr_of_node(page);
229 f2fs_put_page(page, 1); 231 f2fs_put_page(page, 1);
232
233 ra_meta_pages_cond(sbi, blkaddr);
230 } 234 }
231 f2fs_put_page(page, 1); 235 f2fs_put_page(page, 1);
232 return err; 236 return err;
@@ -436,7 +440,9 @@ static int recover_data(struct f2fs_sb_info *sbi,
436 if (blkaddr < MAIN_BLKADDR(sbi) || blkaddr >= MAX_BLKADDR(sbi)) 440 if (blkaddr < MAIN_BLKADDR(sbi) || blkaddr >= MAX_BLKADDR(sbi))
437 break; 441 break;
438 442
439 page = get_meta_page_ra(sbi, blkaddr); 443 ra_meta_pages_cond(sbi, blkaddr);
444
445 page = get_meta_page(sbi, blkaddr);
440 446
441 if (cp_ver != cpver_of_node(page)) { 447 if (cp_ver != cpver_of_node(page)) {
442 f2fs_put_page(page, 1); 448 f2fs_put_page(page, 1);