author      Miao Xie <miaox@cn.fujitsu.com>        2012-11-20 21:21:28 -0500
committer   Josef Bacik <jbacik@fusionio.com>      2013-02-20 09:36:34 -0500
commit      78a6184a3ff9041280ee56273c01e5679a831b39 (patch)
tree        0310eb020cf6ed7a8ff71feb8c468bb6e42488fa /fs/btrfs/delayed-ref.c
parent      6f60cbd3ae442cb35861bb522f388db123d42ec1 (diff)
Btrfs: use slabs for delayed reference allocation
The delayed reference allocation is in the fast path of the I/O, so use slabs to improve the speed of the allocation. Besides that, it also lets us check for leaked objects when the module is removed.

Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Diffstat (limited to 'fs/btrfs/delayed-ref.c')
-rw-r--r--   fs/btrfs/delayed-ref.c   74
1 file changed, 63 insertions(+), 11 deletions(-)
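For context, the patch follows the standard kmem_cache lifecycle: create one dedicated cache per object type at module init, allocate and free objects from that cache on the hot path, and destroy the cache at module exit, which is also where the kernel reports objects that were never freed. Below is a minimal, self-contained sketch of that pattern; the struct my_ref module is hypothetical and only stands in for the btrfs delayed-ref structures.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical object type standing in for the btrfs delayed-ref structs. */
struct my_ref {
	u64 bytenr;
	int ref_mod;
};

static struct kmem_cache *my_ref_cachep;

static int __init my_ref_init(void)
{
	struct my_ref *ref;

	/* One cache per type; the name shows up in /proc/slabinfo. */
	my_ref_cachep = kmem_cache_create("my_ref", sizeof(struct my_ref),
					  0, SLAB_RECLAIM_ACCOUNT, NULL);
	if (!my_ref_cachep)
		return -ENOMEM;

	/* Hot-path usage: GFP_NOFS because the callers sit on the I/O path. */
	ref = kmem_cache_alloc(my_ref_cachep, GFP_NOFS);
	if (!ref) {
		kmem_cache_destroy(my_ref_cachep);
		return -ENOMEM;
	}
	kmem_cache_free(my_ref_cachep, ref);
	return 0;
}

static void __exit my_ref_exit(void)
{
	/*
	 * kmem_cache_destroy() warns if objects are still allocated, which
	 * is the leak check the commit message refers to.
	 */
	kmem_cache_destroy(my_ref_cachep);
}

module_init(my_ref_init);
module_exit(my_ref_exit);
MODULE_LICENSE("GPL");

Compared with kmalloc(), a per-type cache avoids size-class rounding and keeps same-sized objects together, which is presumably what makes it attractive on the delayed-ref fast path.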
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index ae9411773397..455894f1ca3b 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -23,6 +23,10 @@
 #include "delayed-ref.h"
 #include "transaction.h"
 
+struct kmem_cache *btrfs_delayed_ref_head_cachep;
+struct kmem_cache *btrfs_delayed_tree_ref_cachep;
+struct kmem_cache *btrfs_delayed_data_ref_cachep;
+struct kmem_cache *btrfs_delayed_extent_op_cachep;
 /*
  * delayed back reference update tracking. For subvolume trees
  * we queue up extent allocations and backref maintenance for
@@ -511,7 +515,7 @@ update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
 					ref->extent_op->flags_to_set;
 				existing_ref->extent_op->update_flags = 1;
 			}
-			kfree(ref->extent_op);
+			btrfs_free_delayed_extent_op(ref->extent_op);
 		}
 	}
 	/*
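Note that btrfs_free_delayed_extent_op() is not defined in this file; presumably it is a small inline wrapper added to delayed-ref.h next to the new cache declarations (that hunk is not part of this diff). A plausible sketch of such a wrapper, offered only as an assumption about the header change:

/*
 * Hypothetical helper as it might appear in delayed-ref.h; the real
 * definition is outside this file's diff.
 */
static inline void
btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
{
	/* Tolerate NULL so callers keep the error paths kfree() allowed. */
	if (op)
		kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
}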
@@ -592,7 +596,7 @@ static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
 		 * we've updated the existing ref, free the newly
 		 * allocated ref
 		 */
-		kfree(head_ref);
+		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
 	} else {
 		delayed_refs->num_heads++;
 		delayed_refs->num_heads_ready++;
@@ -653,7 +657,7 @@ static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
 		 * we've updated the existing ref, free the newly
 		 * allocated ref
 		 */
-		kfree(full_ref);
+		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
 	} else {
 		delayed_refs->num_entries++;
 		trans->delayed_ref_updates++;
@@ -714,7 +718,7 @@ static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
 		 * we've updated the existing ref, free the newly
 		 * allocated ref
 		 */
-		kfree(full_ref);
+		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
 	} else {
 		delayed_refs->num_entries++;
 		trans->delayed_ref_updates++;
@@ -738,13 +742,13 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
 	struct btrfs_delayed_ref_root *delayed_refs;
 
 	BUG_ON(extent_op && extent_op->is_data);
-	ref = kmalloc(sizeof(*ref), GFP_NOFS);
+	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
 	if (!ref)
 		return -ENOMEM;
 
-	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
 	if (!head_ref) {
-		kfree(ref);
+		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
 		return -ENOMEM;
 	}
 
@@ -786,13 +790,13 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
 	struct btrfs_delayed_ref_root *delayed_refs;
 
 	BUG_ON(extent_op && !extent_op->is_data);
-	ref = kmalloc(sizeof(*ref), GFP_NOFS);
+	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
 	if (!ref)
 		return -ENOMEM;
 
-	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
 	if (!head_ref) {
-		kfree(ref);
+		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
 		return -ENOMEM;
 	}
 
@@ -826,7 +830,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
 	struct btrfs_delayed_ref_head *head_ref;
 	struct btrfs_delayed_ref_root *delayed_refs;
 
-	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
 	if (!head_ref)
 		return -ENOMEM;
 
@@ -860,3 +864,51 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
 		return btrfs_delayed_node_to_head(ref);
 	return NULL;
 }
+
+void btrfs_delayed_ref_exit(void)
+{
+	if (btrfs_delayed_ref_head_cachep)
+		kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
+	if (btrfs_delayed_tree_ref_cachep)
+		kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
+	if (btrfs_delayed_data_ref_cachep)
+		kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
+	if (btrfs_delayed_extent_op_cachep)
+		kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
+}
+
+int btrfs_delayed_ref_init(void)
+{
+	btrfs_delayed_ref_head_cachep = kmem_cache_create(
+				"btrfs_delayed_ref_head",
+				sizeof(struct btrfs_delayed_ref_head), 0,
+				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+	if (!btrfs_delayed_ref_head_cachep)
+		goto fail;
+
+	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
+				"btrfs_delayed_tree_ref",
+				sizeof(struct btrfs_delayed_tree_ref), 0,
+				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+	if (!btrfs_delayed_tree_ref_cachep)
+		goto fail;
+
+	btrfs_delayed_data_ref_cachep = kmem_cache_create(
+				"btrfs_delayed_data_ref",
+				sizeof(struct btrfs_delayed_data_ref), 0,
+				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+	if (!btrfs_delayed_data_ref_cachep)
+		goto fail;
+
+	btrfs_delayed_extent_op_cachep = kmem_cache_create(
+				"btrfs_delayed_extent_op",
+				sizeof(struct btrfs_delayed_extent_op), 0,
+				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+	if (!btrfs_delayed_extent_op_cachep)
+		goto fail;
+
+	return 0;
+fail:
+	btrfs_delayed_ref_exit();
+	return -ENOMEM;
+}