author:    Jaegeuk Kim <jaegeuk.kim@samsung.com>  2013-04-29 03:58:39 -0400
committer: Jaegeuk Kim <jaegeuk.kim@samsung.com>  2013-04-29 23:07:32 -0400
commit:    ac5d156c78a68b39955ee9b09498ba93831c77d7
tree:      638245b8a39332d3bb2878c5a0a8574e518fcc37 /fs/f2fs
parent:    b743ba78ae4c7c6a6e08e623af824b6208f58019
f2fs: modify the number of issued pages to merge IOs
When testing f2fs on an SSD, I found that f2fs_write_node_pages issued some
128-page IOs, each followed by a 1-page IO.
This means there was a mishandled flow that degrades performance.
Previously, f2fs_write_node_pages determined the number of pages to write,
nr_to_write, as follows (a small sketch after the list makes the arithmetic concrete).
1. bio_get_nr_vecs returns 129 pages.
2. bio_alloc makes room for 128 pages.
3. The initial 128 pages go into one bio.
4. The existing bio is submitted, and a new bio is prepared for the last page.
5. Finally, sync_node_pages submits the last one-page bio.
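
To make the over-count concrete, here is a small user-space sketch. It is an
illustration only, not kernel code: the 4 KB page size and the 1024-sector
queue limit are assumed values chosen to match the 128+1 split above.

/*
 * Illustration only: why sizing writeback by bio_get_nr_vecs() split
 * each round into a 128-page bio plus a 1-page bio.  The page size and
 * queue limit below are assumptions, not values from the patch.
 */
#include <stdio.h>

#define PAGE_SECTORS	8	/* PAGE_SIZE >> 9 for 4 KB pages */

int main(void)
{
	unsigned int max_sectors = 1024;	/* assumed queue_max_sectors(q) */

	/* old: one more page was requested than a single bio can carry */
	unsigned int nr_requested = max_sectors / PAGE_SECTORS + 1;	/* 129 */

	/* a single bio is still capped by the hardware sector limit */
	unsigned int bio_capacity = max_sectors / PAGE_SECTORS;		/* 128 */

	printf("old: %u-page bio + %u-page bio\n",
		bio_capacity, nr_requested - bio_capacity);	/* 128 + 1 */
	printf("new: one %u-page bio\n", bio_capacity);		/* 128 */
	return 0;
}
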
The problem stems from the use of bio_get_nr_vecs, so this patch replaces it
with a new helper, max_hw_blocks, computed from queue_max_sectors.
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
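
For reference, the new helper boils down to a single shift. Below is a worked
example under assumed values (4 KB f2fs blocks, i.e. log_blocksize == 12, with
F2FS_LOG_SECTOR_SIZE == 9, and a device reporting 1024 max sectors); only the
SECTOR_TO_BLOCK expression itself comes from the patch.

/*
 * Illustration only: evaluating the patch's max_hw_blocks() by hand.
 * The block size and queue limit below are assumed values.
 */
#include <stdio.h>

#define F2FS_LOG_SECTOR_SIZE	9	/* 512-byte sectors */

int main(void)
{
	unsigned int log_blocksize = 12;	/* assumed: 4 KB f2fs blocks */
	unsigned int max_sectors = 1024;	/* assumed: queue_max_sectors(q) */

	/* SECTOR_TO_BLOCK(sbi, sectors) from the patch: sectors >> (12 - 9) */
	unsigned int blocks = max_sectors >> (log_blocksize - F2FS_LOG_SECTOR_SIZE);

	printf("max_hw_blocks = %u\n", blocks);	/* 128: one full-sized bio */
	return 0;
}

With nr_to_write sized this way, sync_node_pages fills exactly one
hardware-limited bio per round instead of a full bio plus a trailing
one-page bio.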
Diffstat (limited to 'fs/f2fs')
 fs/f2fs/node.c    |  6 ++----
 fs/f2fs/segment.c |  2 +-
 fs/f2fs/segment.h | 11 +++++++++++
 3 files changed, 14 insertions(+), 5 deletions(-)
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index f14eb7b8b2c4..7209d637f942 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1171,7 +1171,6 @@ static int f2fs_write_node_pages(struct address_space *mapping,
 					struct writeback_control *wbc)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
-	struct block_device *bdev = sbi->sb->s_bdev;
 	long nr_to_write = wbc->nr_to_write;
 
 	/* First check balancing cached NAT entries */
@@ -1185,10 +1184,9 @@ static int f2fs_write_node_pages(struct address_space *mapping,
 		return 0;
 
 	/* if mounting is failed, skip writing node pages */
-	wbc->nr_to_write = bio_get_nr_vecs(bdev);
+	wbc->nr_to_write = max_hw_blocks(sbi);
 	sync_node_pages(sbi, 0, wbc);
-	wbc->nr_to_write = nr_to_write -
-			(bio_get_nr_vecs(bdev) - wbc->nr_to_write);
+	wbc->nr_to_write = nr_to_write - (max_hw_blocks(sbi) - wbc->nr_to_write);
 	return 0;
 }
 
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 392ccb3d10b9..d8e84e49a5c3 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -734,7 +734,7 @@ static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page,
 	do_submit_bio(sbi, type, false);
 alloc_new:
 	if (sbi->bio[type] == NULL) {
-		sbi->bio[type] = f2fs_bio_alloc(bdev, bio_get_nr_vecs(bdev));
+		sbi->bio[type] = f2fs_bio_alloc(bdev, max_hw_blocks(sbi));
 		sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
 		/*
 		 * The end_io will be assigned at the sumbission phase.
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 26fc0540f144..062424a0e4c3 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -8,6 +8,8 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/blkdev.h>
+
 /* constant macro */
 #define NULL_SEGNO		((unsigned int)(~0))
 #define NULL_SECNO		((unsigned int)(~0))
@@ -86,6 +88,8 @@
 
 #define SECTOR_FROM_BLOCK(sbi, blk_addr)				\
 	(blk_addr << ((sbi)->log_blocksize - F2FS_LOG_SECTOR_SIZE))
+#define SECTOR_TO_BLOCK(sbi, sectors)					\
+	(sectors >> ((sbi)->log_blocksize - F2FS_LOG_SECTOR_SIZE))
 
 /* during checkpoint, bio_private is used to synchronize the last bio */
 struct bio_private {
@@ -624,3 +628,10 @@ static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
 		return true;
 	return false;
 }
+
+static inline unsigned int max_hw_blocks(struct f2fs_sb_info *sbi)
+{
+	struct block_device *bdev = sbi->sb->s_bdev;
+	struct request_queue *q = bdev_get_queue(bdev);
+	return SECTOR_TO_BLOCK(sbi, queue_max_sectors(q));
+}