Diffstat (limited to 'fs/btrfs/delayed-ref.c')
-rw-r--r--	fs/btrfs/delayed-ref.c	82
1 file changed, 71 insertions(+), 11 deletions(-)

diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index ae9411773397..b7a0641ead77 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -23,6 +23,10 @@
 #include "delayed-ref.h"
 #include "transaction.h"
 
+struct kmem_cache *btrfs_delayed_ref_head_cachep;
+struct kmem_cache *btrfs_delayed_tree_ref_cachep;
+struct kmem_cache *btrfs_delayed_data_ref_cachep;
+struct kmem_cache *btrfs_delayed_extent_op_cachep;
 /*
  * delayed back reference update tracking. For subvolume trees
  * we queue up extent allocations and backref maintenance for
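The header side of this change is not shown on this page. Presumably fs/btrfs/delayed-ref.h gains matching declarations for the new caches and the init/exit helpers, along the lines of the sketch below; the placement and the inline free wrapper are assumptions, not part of this diff:

/* assumed counterpart declarations in fs/btrfs/delayed-ref.h */
extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;

int btrfs_delayed_ref_init(void);
void btrfs_delayed_ref_exit(void);

/*
 * btrfs_free_delayed_extent_op(), used in the hunks below, is presumably
 * a thin wrapper that frees back into btrfs_delayed_extent_op_cachep and
 * tolerates a NULL pointer.
 */
static inline void
btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
{
	if (op)
		kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
}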
@@ -422,6 +426,14 @@ again:
 	return 1;
 }
 
+void btrfs_release_ref_cluster(struct list_head *cluster)
+{
+	struct list_head *pos, *q;
+
+	list_for_each_safe(pos, q, cluster)
+		list_del_init(pos);
+}
+
 /*
  * helper function to update an extent delayed ref in the
  * rbtree. existing and update must both have the same
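The new btrfs_release_ref_cluster() walks a cluster list with the safe iterator and unlinks every delayed-ref head via list_del_init(), so each node's list_head is left re-initialized (empty) rather than dangling. A sketch of how an error path in a caller might use it; the caller lives outside this file and the helper name below is illustrative, not from this patch:

/* hypothetical caller; process_cluster() is illustrative only */
static int run_one_cluster(struct btrfs_trans_handle *trans,
			   struct list_head *cluster)
{
	int ret;

	ret = process_cluster(trans, cluster);
	if (ret < 0)
		/* drop every head we had queued before unwinding */
		btrfs_release_ref_cluster(cluster);
	return ret;
}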
@@ -511,7 +523,7 @@ update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
 				ref->extent_op->flags_to_set;
 			existing_ref->extent_op->update_flags = 1;
 		}
-		kfree(ref->extent_op);
+		btrfs_free_delayed_extent_op(ref->extent_op);
 	}
 }
 /*
@@ -592,7 +604,7 @@ static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
 		 * we've updated the existing ref, free the newly
 		 * allocated ref
 		 */
-		kfree(head_ref);
+		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
 	} else {
 		delayed_refs->num_heads++;
 		delayed_refs->num_heads_ready++;
@@ -653,7 +665,7 @@ static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
 		 * we've updated the existing ref, free the newly
 		 * allocated ref
 		 */
-		kfree(full_ref);
+		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
 	} else {
 		delayed_refs->num_entries++;
 		trans->delayed_ref_updates++;
@@ -714,7 +726,7 @@ static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
 		 * we've updated the existing ref, free the newly
 		 * allocated ref
 		 */
-		kfree(full_ref);
+		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
 	} else {
 		delayed_refs->num_entries++;
 		trans->delayed_ref_updates++;
@@ -738,13 +750,13 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
 	struct btrfs_delayed_ref_root *delayed_refs;
 
 	BUG_ON(extent_op && extent_op->is_data);
-	ref = kmalloc(sizeof(*ref), GFP_NOFS);
+	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
 	if (!ref)
 		return -ENOMEM;
 
-	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
 	if (!head_ref) {
-		kfree(ref);
+		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
 		return -ENOMEM;
 	}
 
@@ -786,13 +798,13 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
 	struct btrfs_delayed_ref_root *delayed_refs;
 
 	BUG_ON(extent_op && !extent_op->is_data);
-	ref = kmalloc(sizeof(*ref), GFP_NOFS);
+	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
 	if (!ref)
 		return -ENOMEM;
 
-	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
 	if (!head_ref) {
-		kfree(ref);
+		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
 		return -ENOMEM;
 	}
 
@@ -826,7 +838,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
 	struct btrfs_delayed_ref_head *head_ref;
 	struct btrfs_delayed_ref_root *delayed_refs;
 
-	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
 	if (!head_ref)
 		return -ENOMEM;
 
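All delayed-ref allocation and free sites above now name the slab cache that owns the object. With per-type caches, a free must return the object to the same cache it was allocated from, and GFP_NOFS is kept because these allocations happen in transaction context, where recursing into the filesystem from reclaim would deadlock. A minimal, self-contained sketch of the pattern, with illustrative names rather than btrfs code:

#include <linux/slab.h>
#include <linux/types.h>

struct example_obj {
	u64 bytenr;
};

static struct kmem_cache *example_cachep;

static int example_cache_init(void)
{
	example_cachep = kmem_cache_create("example_obj",
					   sizeof(struct example_obj), 0,
					   SLAB_RECLAIM_ACCOUNT, NULL);
	return example_cachep ? 0 : -ENOMEM;
}

static struct example_obj *example_alloc(void)
{
	/* NOFS: may be called while a filesystem transaction is held */
	return kmem_cache_alloc(example_cachep, GFP_NOFS);
}

static void example_free(struct example_obj *obj)
{
	/* must pair with the cache the object was allocated from */
	kmem_cache_free(example_cachep, obj);
}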
@@ -860,3 +872,51 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
 		return btrfs_delayed_node_to_head(ref);
 	return NULL;
 }
+
+void btrfs_delayed_ref_exit(void)
+{
+	if (btrfs_delayed_ref_head_cachep)
+		kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
+	if (btrfs_delayed_tree_ref_cachep)
+		kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
+	if (btrfs_delayed_data_ref_cachep)
+		kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
+	if (btrfs_delayed_extent_op_cachep)
+		kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
+}
+
+int btrfs_delayed_ref_init(void)
+{
+	btrfs_delayed_ref_head_cachep = kmem_cache_create(
+				"btrfs_delayed_ref_head",
+				sizeof(struct btrfs_delayed_ref_head), 0,
+				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+	if (!btrfs_delayed_ref_head_cachep)
+		goto fail;
+
+	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
+				"btrfs_delayed_tree_ref",
+				sizeof(struct btrfs_delayed_tree_ref), 0,
+				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+	if (!btrfs_delayed_tree_ref_cachep)
+		goto fail;
+
+	btrfs_delayed_data_ref_cachep = kmem_cache_create(
+				"btrfs_delayed_data_ref",
+				sizeof(struct btrfs_delayed_data_ref), 0,
+				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+	if (!btrfs_delayed_data_ref_cachep)
+		goto fail;
+
+	btrfs_delayed_extent_op_cachep = kmem_cache_create(
+				"btrfs_delayed_extent_op",
+				sizeof(struct btrfs_delayed_extent_op), 0,
+				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+	if (!btrfs_delayed_extent_op_cachep)
+		goto fail;
+
+	return 0;
+fail:
+	btrfs_delayed_ref_exit();
+	return -ENOMEM;
+}
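btrfs_delayed_ref_init() creates the four caches and, on partial failure, reuses btrfs_delayed_ref_exit() to tear down whatever was already created, which is why the exit helper checks each pointer for NULL before destroying it. The callers of these two helpers are not in this diff; presumably they are invoked from the filesystem module's init and exit paths in fs/btrfs/super.c, roughly like the sketch below (function names and ordering are assumptions):

static int __init example_btrfs_module_init(void)
{
	int err;

	err = btrfs_delayed_ref_init();
	if (err)
		return err;

	/* ... set up the remaining caches and register the filesystem ... */
	return 0;
}

static void __exit example_btrfs_module_exit(void)
{
	/* ... unregister the filesystem ... */
	btrfs_delayed_ref_exit();
}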