|   |   |   |
|---|---|---|
| author | Miao Xie <miaox@cn.fujitsu.com> | 2012-09-06 06:01:51 -0400 |
| committer | Chris Mason <chris.mason@fusionio.com> | 2012-10-01 15:19:11 -0400 |
| commit | 6352b91da1a2108bb8cc5115e8714f90d706f15f (patch) | |
| tree | 41897bbe836bb7f8b2bbfef24528407d1c79b8b0 /fs/btrfs/ordered-data.c | |
| parent | b9a8cc5bef963b76c5b6c3016b7e91988a3e758b (diff) | |
Btrfs: use a slab for ordered extents allocation
The ordered extent allocation is in the fast path of the IO, so use a slab
to improve the speed of the allocation.
"Size of the struct is 280, so this will fall into the size-512 bucket,
giving 8 objects per page, while own slab will pack 14 objects into a page.
Another benefit I see is to check for leaked objects when the module is
removed (and the cache destroy takes place)."
-- David Sterba
Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
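For illustration, here is a minimal, self-contained sketch of the slab-cache pattern this patch applies: create a dedicated kmem_cache at init time, allocate zeroed objects from it on the hot path with kmem_cache_zalloc(), return them with kmem_cache_free(), and destroy the cache at exit so slab debugging can flag leaked objects. The struct my_record and the my_record_* names are hypothetical stand-ins, not part of the btrfs code.

```c
/* Hypothetical example module; my_record and my_record_* are illustrative names. */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_record {			/* stand-in for struct btrfs_ordered_extent */
	u64 start;
	u64 len;
};

static struct kmem_cache *my_record_cache;

/* Hot-path allocation: a zeroed object from the dedicated cache. */
static struct my_record *my_record_alloc(void)
{
	return kmem_cache_zalloc(my_record_cache, GFP_NOFS);
}

/* Return the object to its cache (not kfree(), which is for kmalloc memory). */
static void my_record_free(struct my_record *rec)
{
	kmem_cache_free(my_record_cache, rec);
}

static int __init my_record_init(void)
{
	struct my_record *rec;

	my_record_cache = kmem_cache_create("my_record",
					    sizeof(struct my_record), 0,
					    SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					    NULL);
	if (!my_record_cache)
		return -ENOMEM;

	/* Quick self-test: allocate and free one object. */
	rec = my_record_alloc();
	if (!rec) {
		kmem_cache_destroy(my_record_cache);
		return -ENOMEM;
	}
	my_record_free(rec);
	return 0;
}

static void __exit my_record_exit(void)
{
	/* Destroying the cache reports any objects that were leaked. */
	kmem_cache_destroy(my_record_cache);
}

module_init(my_record_init);
module_exit(my_record_exit);
MODULE_LICENSE("GPL");
```

With the 280-byte btrfs_ordered_extent mentioned in the quote above, a dedicated cache like this packs roughly 4096 / 280 ≈ 14 objects per 4 KiB page, versus 8 per page from the generic size-512 kmalloc bucket.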
Diffstat (limited to 'fs/btrfs/ordered-data.c')
-rw-r--r-- | fs/btrfs/ordered-data.c | 23 |
1 file changed, 21 insertions, 2 deletions
```diff
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index cd8ecb73c05c..e2b3d994ec01 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -25,6 +25,8 @@
 #include "btrfs_inode.h"
 #include "extent_io.h"
 
+static struct kmem_cache *btrfs_ordered_extent_cache;
+
 static u64 entry_end(struct btrfs_ordered_extent *entry)
 {
 	if (entry->file_offset + entry->len < entry->file_offset)
@@ -187,7 +189,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 	struct btrfs_ordered_extent *entry;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
-	entry = kzalloc(sizeof(*entry), GFP_NOFS);
+	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
 	if (!entry)
 		return -ENOMEM;
 
@@ -421,7 +423,7 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
 			list_del(&sum->list);
 			kfree(sum);
 		}
-		kfree(entry);
+		kmem_cache_free(btrfs_ordered_extent_cache, entry);
 	}
 }
 
@@ -958,3 +960,20 @@ void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
 	}
 	spin_unlock(&root->fs_info->ordered_extent_lock);
 }
+
+int __init ordered_data_init(void)
+{
+	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
+				     sizeof(struct btrfs_ordered_extent), 0,
+				     SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+				     NULL);
+	if (!btrfs_ordered_extent_cache)
+		return -ENOMEM;
+	return 0;
+}
+
+void ordered_data_exit(void)
+{
+	if (btrfs_ordered_extent_cache)
+		kmem_cache_destroy(btrfs_ordered_extent_cache);
+}
```