author		Miao Xie <miaox@cn.fujitsu.com>	2011-04-22 06:12:22 -0400
committer	Chris Mason <chris.mason@oracle.com>	2011-05-21 09:30:56 -0400
commit		16cdcec736cd214350cdb591bf1091f8beedefa0 (patch)
tree		5598d4561660c4d7a1d4de8b3703d6dd3cc7f9e7 /fs/btrfs/extent-tree.c
parent		61c4f2c81c61f73549928dfd9f3e8f26aa36a8cf (diff)
btrfs: implement delayed inode items operation
Changelog V5 -> V6:
- Fix an OOM when the memory load is high, by storing the delayed nodes
  in the root's radix tree and letting the btrfs inodes go.

Changelog V4 -> V5:
- Fix the race on adding the delayed node to the inode, spotted by
  Chris Mason.
- Merge Chris Mason's incremental patch into this patch.
- Fix a deadlock between readdir() and memory fault, reported by
  Itaru Kitayama.

Changelog V3 -> V4:
- Fix a nested lock, reported by Itaru Kitayama, by updating the space
  cache inode in time.

Changelog V2 -> V3:
- Fix the race between the delayed worker and the task that balances the
  delayed items, reported by Tsutomu Itoh.
- Modify the patch to address David Sterba's comments.
- Fix the CPU-recursion spinlock bug reported by Chris Mason.

Changelog V1 -> V2:
- Break up the global rb-tree: use a list to manage the delayed nodes,
  which are created for every directory and file and are used to manage
  the delayed directory name index items and the delayed inode item.
- Introduce a worker to deal with the delayed nodes.

Compared with ext3/4, the performance of file creation and deletion on
btrfs is very poor, because btrfs must do many b+ tree insertions, such
as the inode item, the directory name item, the directory name index and
so on. If we can delay some of these b+ tree insertions or deletions, we
can improve performance, so this patch implements delayed directory name
index insertion/deletion and delayed inode updates.

Implementation (a rough structural sketch follows after this message):
- Introduce a delayed root object into the filesystem that uses two lists
  to manage the delayed nodes created for every file/directory. One list
  manages all delayed nodes that have delayed items; the other manages
  the delayed nodes that are waiting to be handled by the worker thread.
- Every delayed node has two rb-trees: one manages the directory name
  index items that are going to be inserted into the b+ tree, the other
  manages the directory name index keys that are going to be deleted from
  the b+ tree.
- Introduce a worker to carry out the delayed operations: the delayed
  directory name index insertions and deletions and the delayed inode
  updates. When the number of delayed items exceeds the lower limit, we
  create work items for some delayed nodes, insert them into the worker's
  queue and return. When the number of delayed items exceeds the upper
  bound, we create work items for all delayed nodes that have not been
  handled yet, insert them into the worker's queue and then wait until
  the number of untreated items drops below a threshold.
- When we want to insert a directory name index into the b+ tree, we just
  add the information to the delayed insertion rb-tree, then check the
  number of delayed items and balance them (the balance policy is above).
- When we want to delete a directory name index from the b+ tree, we
  first look it up in the insertion rb-tree. If it is found there, we
  simply drop it. If not, we add its key to the delayed deletion rb-tree.
  As with insertion, we then check the number of delayed items and
  balance them.
- When we want to update the metadata of an inode, we cache the data in
  the delayed node. The worker flushes it into the b+ tree after handling
  the delayed insertions and deletions.
- We move the delayed node to the tail of the list after accessing it, so
  that we can cache more delayed items and merge more inode updates.
- When we commit a transaction, we handle all the delayed nodes.
- The delayed node is freed when we free the btrfs inode.
- Before we log the inode items, we commit all the directory name index
  items and the delayed inode update.

I did a quick test with the benchmark tool [1] and found that this patch
improves the performance of file creation by ~15% and file deletion by
~20%.

Before applying this patch:
Create files:
	Total files: 50000
	Total time: 1.096108
	Average time: 0.000022
Delete files:
	Total files: 50000
	Total time: 1.510403
	Average time: 0.000030

After applying this patch:
Create files:
	Total files: 50000
	Total time: 0.932899
	Average time: 0.000019
Delete files:
	Total files: 50000
	Total time: 1.215732
	Average time: 0.000024

[1] http://marc.info/?l=linux-btrfs&m=128212635122920&q=p3

Many thanks for Kitayama-san's help!

Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Reviewed-by: David Sterba <dave@jikos.cz>
Tested-by: Tsutomu Itoh <t-itoh@jp.fujitsu.com>
Tested-by: Itaru Kitayama <kitayama@cl.bb4u.ne.jp>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
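As a reading aid (not part of the original submission), here is a minimal
C sketch of the layout the implementation notes above describe: one
delayed root holding two lists of delayed nodes, and per-node rb-trees
for pending directory name index insertions and deletions plus a cached
inode update. All names and threshold values are hypothetical stand-ins,
not the definitions this patch series actually adds.

	#include <linux/types.h>
	#include <linux/list.h>
	#include <linux/rbtree.h>
	#include <linux/spinlock.h>
	#include <linux/atomic.h>

	/*
	 * Illustrative sketch only: the identifiers and thresholds below
	 * are made up and are not the ones added by this patch series.
	 */
	struct example_delayed_root {
		spinlock_t lock;
		struct list_head node_list;	/* every delayed node with pending items */
		struct list_head prepare_list;	/* nodes already handed to the worker */
		atomic_t items;			/* total pending items, drives balancing */
	};

	struct example_delayed_node {
		u64 inode_id;
		struct list_head n_list;	/* link into delayed_root->node_list */
		struct list_head p_list;	/* link into delayed_root->prepare_list */
		struct rb_root ins_root;	/* dir name index items to insert into the b+ tree */
		struct rb_root del_root;	/* dir name index keys to delete from the b+ tree */
		bool inode_dirty;		/* cached inode update, flushed after both rb-trees */
	};

	/*
	 * Balance policy from the message, with made-up thresholds: below the
	 * lower limit nothing happens; between the limits some nodes are queued
	 * for the worker asynchronously; above the upper limit all untreated
	 * nodes are queued and the caller waits for the backlog to drain.
	 */
	#define EXAMPLE_DELAYED_LOWER_LIMIT	64
	#define EXAMPLE_DELAYED_UPPER_LIMIT	256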
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--	fs/btrfs/extent-tree.c	18
1 file changed, 6 insertions(+), 12 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9ee6bd55e16c..7b0433866f36 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3973,12 +3973,6 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
 	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
 }
 
-static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items)
-{
-	return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
-		3 * num_items;
-}
-
 int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
 				 struct btrfs_root *root,
 				 int num_items)
@@ -3989,7 +3983,7 @@ int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
 	if (num_items == 0 || root->fs_info->chunk_root == root)
 		return 0;
 
-	num_bytes = calc_trans_metadata_size(root, num_items);
+	num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
 	ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv,
 				  num_bytes);
 	if (!ret) {
@@ -4028,14 +4022,14 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
 	 * If all of the metadata space is used, we can commit
 	 * transaction and use space it freed.
 	 */
-	u64 num_bytes = calc_trans_metadata_size(root, 4);
+	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4);
 	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
 }
 
 void btrfs_orphan_release_metadata(struct inode *inode)
 {
 	struct btrfs_root *root = BTRFS_I(inode)->root;
-	u64 num_bytes = calc_trans_metadata_size(root, 4);
+	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4);
 	btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
 }
 
@@ -4049,7 +4043,7 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
 	 * two for root back/forward refs, two for directory entries
 	 * and one for root of the snapshot.
 	 */
-	u64 num_bytes = calc_trans_metadata_size(root, 5);
+	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
 	dst_rsv->space_info = src_rsv->space_info;
 	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
 }
@@ -4078,7 +4072,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 
 	if (nr_extents > reserved_extents) {
 		nr_extents -= reserved_extents;
-		to_reserve = calc_trans_metadata_size(root, nr_extents);
+		to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
 	} else {
 		nr_extents = 0;
 		to_reserve = 0;
@@ -4132,7 +4126,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
 
 	to_free = calc_csum_metadata_size(inode, num_bytes);
 	if (nr_extents > 0)
-		to_free += calc_trans_metadata_size(root, nr_extents);
 
+		to_free += btrfs_calc_trans_metadata_size(root, nr_extents);
 
 	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
 				  to_free);
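
A side note on the helper renamed throughout these hunks: the removed
static calc_trans_metadata_size() reserved, per item, the cost of CoWing
a full-height tree path, i.e. (leafsize + nodesize * (BTRFS_MAX_LEVEL - 1))
* 3 * num_items bytes; the callers now use btrfs_calc_trans_metadata_size(),
presumably so the new delayed-item code outside this file can reserve space
with the same formula. Below is a quick standalone sanity check of that
arithmetic, assuming the then-default 4 KiB leaf/node size and a
BTRFS_MAX_LEVEL of 8 (both values are assumptions, not taken from this diff):

	#include <stdio.h>

	/* Assumed values, not taken from this diff: 4 KiB leaves/nodes, 8 levels. */
	#define EXAMPLE_LEAFSIZE	4096ULL
	#define EXAMPLE_NODESIZE	4096ULL
	#define EXAMPLE_MAX_LEVEL	8

	/* Same arithmetic as the removed calc_trans_metadata_size() helper. */
	static unsigned long long example_trans_metadata_size(int num_items)
	{
		return (EXAMPLE_LEAFSIZE +
			EXAMPLE_NODESIZE * (EXAMPLE_MAX_LEVEL - 1)) * 3 * num_items;
	}

	int main(void)
	{
		/* One item: (4096 + 4096 * 7) * 3 = 98304 bytes, i.e. 96 KiB. */
		printf("1 item:  %llu bytes\n", example_trans_metadata_size(1));
		/* The orphan and snapshot callers above pass 4 and 5 items. */
		printf("5 items: %llu bytes\n", example_trans_metadata_size(5));
		return 0;
	}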