author | Jaegeuk Kim <jaegeuk.kim@samsung.com> | 2014-01-07 20:09:51 -0500
---|---|---
committer | Jaegeuk Kim <jaegeuk.kim@samsung.com> | 2014-01-07 21:16:20 -0500
commit | fb5566da9181d33ecdd9892e44f90320e7d4cc9f (patch) |
tree | d3b1656ec7e454ea2d831f673a538f2f78d1f85a /fs/f2fs/node.c |
parent | 04a17fb17fafada39f96bfb41ceb2dc1c11b2af6 (diff) |
f2fs: improve write performance under frequent fsync calls
When considering a bunch of data writes with very frequent fsync calls, we
can observe the following performance regression.
N: Node IO, D: Data IO, IO scheduler: cfq
Issue pending IOs
D1 D2 D3 D4
D1 D2 D3 D4 N1
D2 D3 D4 N1 N2
N1 D3 D4 N2 D1
--> N1 can be selected by cfq because N and D have the same priority.
Then D3 and D4 would be delayed, resulting in performance degradation.
So, when processing fsync calls, it is better to give data IOs higher priority
than node IOs by tagging node IOs with WRITE and data IOs with WRITE_SYNC.
This patch improves random write performance with frequent fsync calls by up
to 10%.
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Diffstat (limited to 'fs/f2fs/node.c')
-rw-r--r-- | fs/f2fs/node.c | 7
1 file changed, 6 insertions, 1 deletion
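The first hunk below builds a struct f2fs_io_info on the stack and picks the request flag from the writeback mode, so only WB_SYNC_ALL writeback marks node IO as synchronous. A rough userspace sketch of that selection (with mock flag values and names, not the kernel's actual WRITE/WRITE_SYNC encoding) might look like:

```c
#include <stdio.h>

/* Mock request flags for illustration only; the real values come from
 * the kernel headers, where WRITE_SYNC is WRITE plus a sync hint that
 * lets the cfq scheduler treat the request as synchronous. */
#define MOCK_WRITE       0x1
#define MOCK_WRITE_SYNC  (MOCK_WRITE | 0x2)

enum mock_sync_mode { MOCK_WB_SYNC_NONE, MOCK_WB_SYNC_ALL };

/* Mirrors the patch's choice: only WB_SYNC_ALL writeback tags node IO
 * as synchronous; background node flushes stay plain WRITE so that
 * fsync'ed data IO wins ties in the scheduler. */
static int node_rw_flag(enum mock_sync_mode mode)
{
	return (mode == MOCK_WB_SYNC_ALL) ? MOCK_WRITE_SYNC : MOCK_WRITE;
}

int main(void)
{
	printf("background node writeback -> 0x%x\n", node_rw_flag(MOCK_WB_SYNC_NONE));
	printf("sync node writeback       -> 0x%x\n", node_rw_flag(MOCK_WB_SYNC_ALL));
	return 0;
}
```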
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 0230326be495..b8c9301db52c 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1194,6 +1194,10 @@ static int f2fs_write_node_page(struct page *page,
 	nid_t nid;
 	block_t new_addr;
 	struct node_info ni;
+	struct f2fs_io_info fio = {
+		.type = NODE,
+		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC: WRITE,
+	};
 
 	if (unlikely(sbi->por_doing))
 		goto redirty_out;
@@ -1218,7 +1222,7 @@ static int f2fs_write_node_page(struct page *page,
 
 	mutex_lock(&sbi->node_write);
 	set_page_writeback(page);
-	write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
+	write_node_page(sbi, page, &fio, nid, ni.blk_addr, &new_addr);
 	set_node_addr(sbi, &ni, new_addr);
 	dec_page_count(sbi, F2FS_DIRTY_NODES);
 	mutex_unlock(&sbi->node_write);
@@ -1253,6 +1257,7 @@ static int f2fs_write_node_pages(struct address_space *mapping,
 
 	/* if mounting is failed, skip writing node pages */
 	wbc->nr_to_write = 3 * max_hw_blocks(sbi);
+	wbc->sync_mode = WB_SYNC_NONE;
 	sync_node_pages(sbi, 0, wbc);
 	wbc->nr_to_write = nr_to_write - (3 * max_hw_blocks(sbi) -
 		wbc->nr_to_write);
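The second hunk forces WB_SYNC_NONE before sync_node_pages() in the writepages path, so node pages flushed by regular writeback are always tagged plain WRITE. A toy model of the dispatch ordering described in the commit message (not the actual cfq algorithm; the request names and two-pass loop are purely illustrative) shows why the fsync'ed data IOs no longer wait behind the node IO:

```c
#include <stdio.h>

/* Toy model of the scenario in the commit message: the scheduler
 * services synchronous requests before asynchronous ones of the same
 * priority.  This only illustrates that ordering. */
struct req { const char *name; int sync; };

static void dispatch(struct req *q, int n)
{
	/* two passes: sync requests first, then async */
	for (int pass = 1; pass >= 0; pass--)
		for (int i = 0; i < n; i++)
			if (q[i].sync == pass)
				printf("%s ", q[i].name);
	printf("\n");
}

int main(void)
{
	/* Before the patch: node IO N1 competes at the same priority as
	 * the pending data IOs, so it can be picked ahead of D3/D4. */
	struct req before[] = { {"N1", 1}, {"D3", 1}, {"D4", 1} };
	/* After the patch: background node IO is plain WRITE (async),
	 * so the fsync'ed data IOs D3/D4 are dispatched first. */
	struct req after[]  = { {"N1", 0}, {"D3", 1}, {"D4", 1} };

	dispatch(before, 3);
	dispatch(after, 3);
	return 0;
}
```

Running the sketch prints "N1 D3 D4" for the old tagging and "D3 D4 N1" with the patch's tagging, matching the ordering the commit message argues for.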