author		Jaegeuk Kim <jaegeuk.kim@samsung.com>	2013-01-18 00:54:13 -0500
committer	Jaegeuk Kim <jaegeuk.kim@samsung.com>	2013-01-21 20:48:59 -0500
commit		a7fdffbd3ea4b3cc2993af006bde38a423b38b72 (patch)
tree		4d642d76fa47af5514eca7be710b1f8c063cd637 /fs
parent		c01e54b770e69c65525295eb2668be3dc0822406 (diff)
f2fs: avoid issuing small bios due to several dirty node pages
If small bios of dirty node pages are issued in the middle of sequential data writes, the well-formed consecutive data bios can be split by those small node bios, resulting in performance degradation. So, let's collect a number of dirty node pages until they reach a threshold before issuing them. And, by default, I set the threshold to 2MB (512 4KB pages), a segment size.

This improves sequential write performance on an i5 with a 512GB SSD (830 w/ SATA2) as follows.

Before: 231 MB/s -> After: 255 MB/s

Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Reviewed-by: Namjae Jeon <namjae.jeon@samsung.com>
Diffstat (limited to 'fs')
-rw-r--r--	fs/f2fs/node.c | 17 +++++++++++------
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index f177c018745c..9bda63c9c166 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1124,6 +1124,12 @@ static int f2fs_write_node_page(struct page *page,
 	return 0;
 }
 
+/*
+ * It is very important to gather dirty pages and write them at once, so that
+ * we can submit a big bio without interfering with other data writes.
+ * By default, 512 pages (2MB), a segment size, is quite reasonable.
+ */
+#define COLLECT_DIRTY_NODES	512
 static int f2fs_write_node_pages(struct address_space *mapping,
 		struct writeback_control *wbc)
 {
@@ -1131,17 +1137,16 @@ static int f2fs_write_node_pages(struct address_space *mapping,
 	struct block_device *bdev = sbi->sb->s_bdev;
 	long nr_to_write = wbc->nr_to_write;
 
-	if (wbc->for_kupdate)
-		return 0;
-
-	if (get_pages(sbi, F2FS_DIRTY_NODES) == 0)
-		return 0;
-
+	/* First check balancing cached NAT entries */
 	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
 		write_checkpoint(sbi, false, false);
 		return 0;
 	}
 
+	/* collect a number of dirty node pages and write together */
+	if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
+		return 0;
+
 	/* if mounting failed, skip writing node pages */
 	wbc->nr_to_write = bio_get_nr_vecs(bdev);
 	sync_node_pages(sbi, 0, wbc);
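
For illustration, a minimal user-space sketch of the batching policy above -- a toy model, not kernel code. The helper name model_should_flush and the fixed 4 KiB page size are assumptions made for this example; in the patch itself the check lives in f2fs_write_node_pages() via get_pages(sbi, F2FS_DIRTY_NODES).

/* Toy model of the COLLECT_DIRTY_NODES gate (assumes 4 KiB pages). */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE_BYTES		4096	/* assumed page size */
#define COLLECT_DIRTY_NODES	512	/* 512 * 4 KiB = 2 MiB, one segment */

/* Defer node writeback until a full segment's worth of dirty node
 * pages has accumulated, so they can be submitted as one large bio
 * instead of small bios that split concurrent sequential data bios. */
static bool model_should_flush(long dirty_node_pages)
{
	return dirty_node_pages >= COLLECT_DIRTY_NODES;
}

int main(void)
{
	long dirty;

	for (dirty = 0; dirty <= 1024; dirty += 256)
		printf("%4ld dirty node pages (%4ld KiB): %s\n",
		       dirty, dirty * PAGE_SIZE_BYTES / 1024,
		       model_should_flush(dirty) ? "flush" : "defer");
	return 0;
}

Deferring until a segment boundary (2MB) trades a little node-write latency for fewer, larger bios, which keeps the concurrent sequential data stream unbroken; that is the source of the 231 MB/s -> 255 MB/s gain quoted above.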