aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJaegeuk Kim <jaegeuk.kim@samsung.com>2014-03-17 23:40:49 -0400
committerJaegeuk Kim <jaegeuk.kim@samsung.com>2014-03-18 00:58:59 -0400
commit87d6f890944d092c4ef5b84053f0d0d5d8137b0b (patch)
treeb66d6ff07f4e9d838f43e3ddb0eae916507ce343
parentf8b2c1f940dca2843fe13b55ba5868bac8040551 (diff)
f2fs: avoid small data writes by skipping writepages
This patch introduces nr_pages_to_skip(sbi, type) to determine whether writepages can be skipped. The dentry, node, and meta pages can be controlled by F2FS without breaking the FS consistency. Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
-rw-r--r--fs/f2fs/checkpoint.c4
-rw-r--r--fs/f2fs/data.c4
-rw-r--r--fs/f2fs/node.c8
-rw-r--r--fs/f2fs/segment.h19
4 files changed, 26 insertions, 9 deletions
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 4c0e98ddf3db..1f52b70ff9d1 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -187,7 +187,7 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
187 struct writeback_control *wbc) 187 struct writeback_control *wbc)
188{ 188{
189 struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb); 189 struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
190 int nrpages = MAX_BIO_BLOCKS(max_hw_blocks(sbi)); 190 int nrpages = nr_pages_to_skip(sbi, META);
191 long written; 191 long written;
192 192
193 if (wbc->for_kupdate) 193 if (wbc->for_kupdate)
@@ -682,7 +682,7 @@ retry:
682 inode = igrab(entry->inode); 682 inode = igrab(entry->inode);
683 spin_unlock(&sbi->dir_inode_lock); 683 spin_unlock(&sbi->dir_inode_lock);
684 if (inode) { 684 if (inode) {
685 filemap_flush(inode->i_mapping); 685 filemap_fdatawrite(inode->i_mapping);
686 iput(inode); 686 iput(inode);
687 } else { 687 } else {
688 /* 688 /*
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 101b4cd4170d..e3b7cfa17b99 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -868,6 +868,10 @@ static int f2fs_write_data_pages(struct address_space *mapping,
868 if (!mapping->a_ops->writepage) 868 if (!mapping->a_ops->writepage)
869 return 0; 869 return 0;
870 870
871 if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
872 get_dirty_dents(inode) < nr_pages_to_skip(sbi, DATA))
873 return 0;
874
871 if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) { 875 if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
872 desired_nrtw = MAX_DESIRED_PAGES_WP; 876 desired_nrtw = MAX_DESIRED_PAGES_WP;
873 excess_nrtw = desired_nrtw - wbc->nr_to_write; 877 excess_nrtw = desired_nrtw - wbc->nr_to_write;
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 3e36240d81c1..cb514f1896ab 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1198,12 +1198,6 @@ redirty_out:
1198 return AOP_WRITEPAGE_ACTIVATE; 1198 return AOP_WRITEPAGE_ACTIVATE;
1199} 1199}
1200 1200
1201/*
1202 * It is very important to gather dirty pages and write at once, so that we can
1203 * submit a big bio without interfering other data writes.
1204 * Be default, 512 pages (2MB) * 3 node types, is more reasonable.
1205 */
1206#define COLLECT_DIRTY_NODES 1536
1207static int f2fs_write_node_pages(struct address_space *mapping, 1201static int f2fs_write_node_pages(struct address_space *mapping,
1208 struct writeback_control *wbc) 1202 struct writeback_control *wbc)
1209{ 1203{
@@ -1214,7 +1208,7 @@ static int f2fs_write_node_pages(struct address_space *mapping,
1214 f2fs_balance_fs_bg(sbi); 1208 f2fs_balance_fs_bg(sbi);
1215 1209
1216 /* collect a number of dirty node pages and write together */ 1210 /* collect a number of dirty node pages and write together */
1217 if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES) 1211 if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
1218 return 0; 1212 return 0;
1219 1213
1220 /* if mounting is failed, skip writing node pages */ 1214 /* if mounting is failed, skip writing node pages */
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index c3d5e3689ffc..bbd976100d14 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -664,3 +664,22 @@ static inline unsigned int max_hw_blocks(struct f2fs_sb_info *sbi)
664 struct request_queue *q = bdev_get_queue(bdev); 664 struct request_queue *q = bdev_get_queue(bdev);
665 return SECTOR_TO_BLOCK(sbi, queue_max_sectors(q)); 665 return SECTOR_TO_BLOCK(sbi, queue_max_sectors(q));
666} 666}
667
668/*
669 * It is very important to gather dirty pages and write at once, so that we can
670 * submit a big bio without interfering other data writes.
671 * By default, 512 pages for directory data,
672 * 512 pages (2MB) * 3 for three types of nodes, and
673 * max_bio_blocks for meta are set.
674 */
675static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
676{
677 if (type == DATA)
678 return sbi->blocks_per_seg;
679 else if (type == NODE)
680 return 3 * sbi->blocks_per_seg;
681 else if (type == META)
682 return MAX_BIO_BLOCKS(max_hw_blocks(sbi));
683 else
684 return 0;
685}