author     Changman Lee <cm224.lee@samsung.com>   2014-11-28 10:49:40 -0500
committer  Jaegeuk Kim <jaegeuk@kernel.org>       2014-12-02 14:02:50 -0500
commit     7dda2af83b2b7593458828d4f15443167b3da8c4 (patch)
tree       0ecb8d2e1a2ef4a0745bd704ff09d86ed3cccb3f /fs/f2fs
parent     9c01503f4da3ff9c327d37249fe148ed7c188b20 (diff)
f2fs: more fast lookup for gc_inode list
If there are many inodes that have data blocks in the victim segment,
it takes a long time to find an inode in the gc_inode list.
Let's use a radix tree to reduce the lookup time.
Signed-off-by: Changman Lee <cm224.lee@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
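
[Editor's note] For orientation before the diff: the GC remembers every inode whose data blocks it will migrate, and phase 3 of gc_data_segment() looks those inodes up again by inode number. Previously that lookup walked a plain list (O(n) per block); the patch keeps the list for the final release walk but pairs it with a radix tree keyed by i_ino, so find_gc_inode() becomes a single radix_tree_lookup(). The sketch below is illustrative only: it condenses how the three helpers introduced/changed by the patch (add_gc_inode, find_gc_inode, put_gc_inode) are meant to be used together, reuses the names from the hunks that follow, and assumes the f2fs/kernel headers; it is not a standalone compilable unit.

	#include <linux/list.h>
	#include <linux/radix-tree.h>
	#include <linux/fs.h>

	/* Illustrative sketch of the pattern the patch introduces. */
	static void gc_inode_list_example(struct inode *inode, nid_t ino)
	{
		/* One list + one radix tree, both rooted on the GC's stack,
		 * exactly as f2fs_gc() now does. */
		struct gc_inode_list gc_list = {
			.ilist = LIST_HEAD_INIT(gc_list.ilist),
			.iroot = RADIX_TREE_INIT(GFP_ATOMIC),
		};

		/* Phase 2: cache the inode; it is inserted into both the
		 * radix tree (for lookup) and the list (for teardown). */
		add_gc_inode(&gc_list, inode);

		/* Phase 3: direct lookup by inode number, no list walk. */
		if (find_gc_inode(&gc_list, ino)) {
			/* ... migrate the data block ... */
		}

		/* Done: walk the list once, delete each entry from the tree,
		 * and drop the inode reference taken earlier. */
		put_gc_inode(&gc_list);
	}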
Diffstat (limited to 'fs/f2fs'):
 -rw-r--r--  fs/f2fs/gc.c | 48
 -rw-r--r--  fs/f2fs/gc.h |  5
 2 files changed, 34 insertions, 19 deletions
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 6acd5f240224..a1af74f1a1d9 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -338,34 +338,42 @@ static const struct victim_selection default_v_ops = {
 	.get_victim = get_victim_by_default,
 };
 
-static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist)
+static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
 {
 	struct inode_entry *ie;
 
-	list_for_each_entry(ie, ilist, list)
-		if (ie->inode->i_ino == ino)
-			return ie->inode;
+	ie = radix_tree_lookup(&gc_list->iroot, ino);
+	if (ie)
+		return ie->inode;
 	return NULL;
 }
 
-static void add_gc_inode(struct inode *inode, struct list_head *ilist)
+static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
 {
 	struct inode_entry *new_ie;
+	int ret;
 
-	if (inode == find_gc_inode(inode->i_ino, ilist)) {
+	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
 		iput(inode);
 		return;
 	}
-
+retry:
 	new_ie = f2fs_kmem_cache_alloc(winode_slab, GFP_NOFS);
 	new_ie->inode = inode;
-	list_add_tail(&new_ie->list, ilist);
+
+	ret = radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
+	if (ret) {
+		kmem_cache_free(winode_slab, new_ie);
+		goto retry;
+	}
+	list_add_tail(&new_ie->list, &gc_list->ilist);
 }
 
-static void put_gc_inode(struct list_head *ilist)
+static void put_gc_inode(struct gc_inode_list *gc_list)
 {
 	struct inode_entry *ie, *next_ie;
-	list_for_each_entry_safe(ie, next_ie, ilist, list) {
+	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
+		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
 		iput(ie->inode);
 		list_del(&ie->list);
 		kmem_cache_free(winode_slab, ie);
@@ -551,7 +559,7 @@ out:
  * the victim data block is ignored.
  */
 static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
-		struct list_head *ilist, unsigned int segno, int gc_type)
+		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
 {
 	struct super_block *sb = sbi->sb;
 	struct f2fs_summary *entry;
@@ -609,12 +617,12 @@ next_step:
 			}
 
 			f2fs_put_page(data_page, 0);
-			add_gc_inode(inode, ilist);
+			add_gc_inode(gc_list, inode);
 			continue;
 		}
 
 		/* phase 3 */
-		inode = find_gc_inode(dni.ino, ilist);
+		inode = find_gc_inode(gc_list, dni.ino);
 		if (inode) {
 			start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
 			data_page = get_lock_data_page(inode,
@@ -657,7 +665,7 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
 }
 
 static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
-				struct list_head *ilist, int gc_type)
+				struct gc_inode_list *gc_list, int gc_type)
 {
 	struct page *sum_page;
 	struct f2fs_summary_block *sum;
@@ -675,7 +683,7 @@ static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
 		gc_node_segment(sbi, sum->entries, segno, gc_type);
 		break;
 	case SUM_TYPE_DATA:
-		gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
+		gc_data_segment(sbi, sum->entries, gc_list, segno, gc_type);
 		break;
 	}
 	blk_finish_plug(&plug);
@@ -688,16 +696,18 @@ static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
 
 int f2fs_gc(struct f2fs_sb_info *sbi)
 {
-	struct list_head ilist;
 	unsigned int segno, i;
 	int gc_type = BG_GC;
 	int nfree = 0;
 	int ret = -1;
 	struct cp_control cpc;
+	struct gc_inode_list gc_list = {
+		.ilist = LIST_HEAD_INIT(gc_list.ilist),
+		.iroot = RADIX_TREE_INIT(GFP_ATOMIC),
+	};
 
 	cpc.reason = test_opt(sbi, FASTBOOT) ? CP_UMOUNT : CP_SYNC;
 
-	INIT_LIST_HEAD(&ilist);
 gc_more:
 	if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
 		goto stop;
@@ -719,7 +729,7 @@ gc_more:
 							META_SSA);
 
 	for (i = 0; i < sbi->segs_per_sec; i++)
-		do_garbage_collect(sbi, segno + i, &ilist, gc_type);
+		do_garbage_collect(sbi, segno + i, &gc_list, gc_type);
 
 	if (gc_type == FG_GC) {
 		sbi->cur_victim_sec = NULL_SEGNO;
@@ -735,7 +745,7 @@ gc_more:
 stop:
 	mutex_unlock(&sbi->gc_mutex);
 
-	put_gc_inode(&ilist);
+	put_gc_inode(&gc_list);
 	return ret;
 }
 
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
index 16f0b2b22999..6ff7ad38463e 100644
--- a/fs/f2fs/gc.h
+++ b/fs/f2fs/gc.h
@@ -40,6 +40,11 @@ struct inode_entry {
 	struct inode *inode;
 };
 
+struct gc_inode_list {
+	struct list_head ilist;
+	struct radix_tree_root iroot;
+};
+
 /*
  * inline functions
  */