aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChao Yu <yuchao0@huawei.com>2016-08-29 11:58:34 -0400
committerJaegeuk Kim <jaegeuk@kernel.org>2016-09-07 20:27:38 -0400
commit275b66b09e85cf0520dc610dd89706952751a473 (patch)
tree9fc6b695893ea53d227b874cff443c766e56800d
parent167451efb53c7999fb72591c46f29de09cd8f8b0 (diff)
f2fs: support async discard
Like most filesystems, f2fs issues discard commands synchronously, so when a user triggers fstrim through ioctl, multiple discard commands are issued serially in sync mode, which results in poor performance. In this patch we add support for async discard, so that all discard commands can be issued and then waited on for endio in batch, improving performance. Signed-off-by: Chao Yu <yuchao0@huawei.com> Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
-rw-r--r--fs/f2fs/checkpoint.c3
-rw-r--r--fs/f2fs/f2fs.h9
-rw-r--r--fs/f2fs/recovery.c2
-rw-r--r--fs/f2fs/segment.c87
4 files changed, 99 insertions, 2 deletions
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index cd0443d25063..64a685d5c11b 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -1197,6 +1197,7 @@ int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1197 f2fs_bug_on(sbi, prefree_segments(sbi)); 1197 f2fs_bug_on(sbi, prefree_segments(sbi));
1198 flush_sit_entries(sbi, cpc); 1198 flush_sit_entries(sbi, cpc);
1199 clear_prefree_segments(sbi, cpc); 1199 clear_prefree_segments(sbi, cpc);
1200 f2fs_wait_all_discard_bio(sbi);
1200 unblock_operations(sbi); 1201 unblock_operations(sbi);
1201 goto out; 1202 goto out;
1202 } 1203 }
@@ -1216,6 +1217,8 @@ int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1216 /* unlock all the fs_lock[] in do_checkpoint() */ 1217 /* unlock all the fs_lock[] in do_checkpoint() */
1217 err = do_checkpoint(sbi, cpc); 1218 err = do_checkpoint(sbi, cpc);
1218 1219
1220 f2fs_wait_all_discard_bio(sbi);
1221
1219 unblock_operations(sbi); 1222 unblock_operations(sbi);
1220 stat_inc_cp_count(sbi->stat_info); 1223 stat_inc_cp_count(sbi->stat_info);
1221 1224
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 98c4093b50b3..c2478a19bfaa 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -211,6 +211,13 @@ struct discard_entry {
211 int len; /* # of consecutive blocks of the discard */ 211 int len; /* # of consecutive blocks of the discard */
212}; 212};
213 213
214struct bio_entry {
215 struct list_head list;
216 struct bio *bio;
217 struct completion event;
218 int error;
219};
220
214/* for the list of fsync inodes, used only during recovery */ 221/* for the list of fsync inodes, used only during recovery */
215struct fsync_inode_entry { 222struct fsync_inode_entry {
216 struct list_head list; /* list head */ 223 struct list_head list; /* list head */
@@ -645,6 +652,7 @@ struct f2fs_sm_info {
645 652
646 /* for small discard management */ 653 /* for small discard management */
647 struct list_head discard_list; /* 4KB discard list */ 654 struct list_head discard_list; /* 4KB discard list */
655 struct list_head wait_list; /* linked with issued discard bio */
648 int nr_discards; /* # of discards in the list */ 656 int nr_discards; /* # of discards in the list */
649 int max_discards; /* max. discards to be issued */ 657 int max_discards; /* max. discards to be issued */
650 658
@@ -2026,6 +2034,7 @@ void destroy_flush_cmd_control(struct f2fs_sb_info *);
2026void invalidate_blocks(struct f2fs_sb_info *, block_t); 2034void invalidate_blocks(struct f2fs_sb_info *, block_t);
2027bool is_checkpointed_data(struct f2fs_sb_info *, block_t); 2035bool is_checkpointed_data(struct f2fs_sb_info *, block_t);
2028void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t); 2036void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t);
2037void f2fs_wait_all_discard_bio(struct f2fs_sb_info *);
2029void clear_prefree_segments(struct f2fs_sb_info *, struct cp_control *); 2038void clear_prefree_segments(struct f2fs_sb_info *, struct cp_control *);
2030void release_discard_addrs(struct f2fs_sb_info *); 2039void release_discard_addrs(struct f2fs_sb_info *);
2031bool discard_next_dnode(struct f2fs_sb_info *, block_t); 2040bool discard_next_dnode(struct f2fs_sb_info *, block_t);
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 9e652d5a659b..2f38bbbeec2c 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -636,6 +636,8 @@ out:
636 invalidate = true; 636 invalidate = true;
637 } 637 }
638 638
639 f2fs_wait_all_discard_bio(sbi);
640
639 /* Flush all the NAT/SIT pages */ 641 /* Flush all the NAT/SIT pages */
640 while (get_pages(sbi, F2FS_DIRTY_META)) 642 while (get_pages(sbi, F2FS_DIRTY_META))
641 sync_meta_pages(sbi, META, LONG_MAX); 643 sync_meta_pages(sbi, META, LONG_MAX);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index d0f74eb521ae..93c5e26c7fc5 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -26,6 +26,7 @@
26#define __reverse_ffz(x) __reverse_ffs(~(x)) 26#define __reverse_ffz(x) __reverse_ffs(~(x))
27 27
28static struct kmem_cache *discard_entry_slab; 28static struct kmem_cache *discard_entry_slab;
29static struct kmem_cache *bio_entry_slab;
29static struct kmem_cache *sit_entry_set_slab; 30static struct kmem_cache *sit_entry_set_slab;
30static struct kmem_cache *inmem_entry_slab; 31static struct kmem_cache *inmem_entry_slab;
31 32
@@ -580,6 +581,74 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
580 mutex_unlock(&dirty_i->seglist_lock); 581 mutex_unlock(&dirty_i->seglist_lock);
581} 582}
582 583
584static struct bio_entry *__add_bio_entry(struct f2fs_sb_info *sbi,
585 struct bio *bio)
586{
587 struct list_head *wait_list = &(SM_I(sbi)->wait_list);
588 struct bio_entry *be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS);
589
590 INIT_LIST_HEAD(&be->list);
591 be->bio = bio;
592 init_completion(&be->event);
593 list_add_tail(&be->list, wait_list);
594
595 return be;
596}
597
598void f2fs_wait_all_discard_bio(struct f2fs_sb_info *sbi)
599{
600 struct list_head *wait_list = &(SM_I(sbi)->wait_list);
601 struct bio_entry *be, *tmp;
602
603 list_for_each_entry_safe(be, tmp, wait_list, list) {
604 struct bio *bio = be->bio;
605 int err;
606
607 wait_for_completion_io(&be->event);
608 err = be->error;
609 if (err == -EOPNOTSUPP)
610 err = 0;
611
612 if (err)
613 f2fs_msg(sbi->sb, KERN_INFO,
614 "Issue discard failed, ret: %d", err);
615
616 bio_put(bio);
617 list_del(&be->list);
618 kmem_cache_free(bio_entry_slab, be);
619 }
620}
621
622static void f2fs_submit_bio_wait_endio(struct bio *bio)
623{
624 struct bio_entry *be = (struct bio_entry *)bio->bi_private;
625
626 be->error = bio->bi_error;
627 complete(&be->event);
628}
629
630/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
631int __f2fs_issue_discard_async(struct f2fs_sb_info *sbi, sector_t sector,
632 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
633{
634 struct block_device *bdev = sbi->sb->s_bdev;
635 struct bio *bio = NULL;
636 int err;
637
638 err = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
639 &bio);
640 if (!err && bio) {
641 struct bio_entry *be = __add_bio_entry(sbi, bio);
642
643 bio->bi_private = be;
644 bio->bi_end_io = f2fs_submit_bio_wait_endio;
645 bio->bi_opf |= REQ_SYNC;
646 submit_bio(bio);
647 }
648
649 return err;
650}
651
583static int f2fs_issue_discard(struct f2fs_sb_info *sbi, 652static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
584 block_t blkstart, block_t blklen) 653 block_t blkstart, block_t blklen)
585{ 654{
@@ -597,7 +666,7 @@ static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
597 sbi->discard_blks--; 666 sbi->discard_blks--;
598 } 667 }
599 trace_f2fs_issue_discard(sbi->sb, blkstart, blklen); 668 trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
600 return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0); 669 return __f2fs_issue_discard_async(sbi, start, len, GFP_NOFS, 0);
601} 670}
602 671
603bool discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr) 672bool discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
@@ -719,11 +788,14 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
719 struct list_head *head = &(SM_I(sbi)->discard_list); 788 struct list_head *head = &(SM_I(sbi)->discard_list);
720 struct discard_entry *entry, *this; 789 struct discard_entry *entry, *this;
721 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 790 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
791 struct blk_plug plug;
722 unsigned long *prefree_map = dirty_i->dirty_segmap[PRE]; 792 unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
723 unsigned int start = 0, end = -1; 793 unsigned int start = 0, end = -1;
724 unsigned int secno, start_segno; 794 unsigned int secno, start_segno;
725 bool force = (cpc->reason == CP_DISCARD); 795 bool force = (cpc->reason == CP_DISCARD);
726 796
797 blk_start_plug(&plug);
798
727 mutex_lock(&dirty_i->seglist_lock); 799 mutex_lock(&dirty_i->seglist_lock);
728 800
729 while (1) { 801 while (1) {
@@ -772,6 +844,8 @@ skip:
772 SM_I(sbi)->nr_discards -= entry->len; 844 SM_I(sbi)->nr_discards -= entry->len;
773 kmem_cache_free(discard_entry_slab, entry); 845 kmem_cache_free(discard_entry_slab, entry);
774 } 846 }
847
848 blk_finish_plug(&plug);
775} 849}
776 850
777static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno) 851static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
@@ -2457,6 +2531,7 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
2457 sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS; 2531 sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
2458 2532
2459 INIT_LIST_HEAD(&sm_info->discard_list); 2533 INIT_LIST_HEAD(&sm_info->discard_list);
2534 INIT_LIST_HEAD(&sm_info->wait_list);
2460 sm_info->nr_discards = 0; 2535 sm_info->nr_discards = 0;
2461 sm_info->max_discards = 0; 2536 sm_info->max_discards = 0;
2462 2537
@@ -2600,10 +2675,15 @@ int __init create_segment_manager_caches(void)
2600 if (!discard_entry_slab) 2675 if (!discard_entry_slab)
2601 goto fail; 2676 goto fail;
2602 2677
2678 bio_entry_slab = f2fs_kmem_cache_create("bio_entry",
2679 sizeof(struct bio_entry));
2680 if (!bio_entry_slab)
2681 goto destory_discard_entry;
2682
2603 sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set", 2683 sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
2604 sizeof(struct sit_entry_set)); 2684 sizeof(struct sit_entry_set));
2605 if (!sit_entry_set_slab) 2685 if (!sit_entry_set_slab)
2606 goto destory_discard_entry; 2686 goto destroy_bio_entry;
2607 2687
2608 inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry", 2688 inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
2609 sizeof(struct inmem_pages)); 2689 sizeof(struct inmem_pages));
@@ -2613,6 +2693,8 @@ int __init create_segment_manager_caches(void)
2613 2693
2614destroy_sit_entry_set: 2694destroy_sit_entry_set:
2615 kmem_cache_destroy(sit_entry_set_slab); 2695 kmem_cache_destroy(sit_entry_set_slab);
2696destroy_bio_entry:
2697 kmem_cache_destroy(bio_entry_slab);
2616destory_discard_entry: 2698destory_discard_entry:
2617 kmem_cache_destroy(discard_entry_slab); 2699 kmem_cache_destroy(discard_entry_slab);
2618fail: 2700fail:
@@ -2622,6 +2704,7 @@ fail:
2622void destroy_segment_manager_caches(void) 2704void destroy_segment_manager_caches(void)
2623{ 2705{
2624 kmem_cache_destroy(sit_entry_set_slab); 2706 kmem_cache_destroy(sit_entry_set_slab);
2707 kmem_cache_destroy(bio_entry_slab);
2625 kmem_cache_destroy(discard_entry_slab); 2708 kmem_cache_destroy(discard_entry_slab);
2626 kmem_cache_destroy(inmem_entry_slab); 2709 kmem_cache_destroy(inmem_entry_slab);
2627} 2710}