author    Miao Xie <miaox@cn.fujitsu.com>        2013-09-09 01:19:42 -0400
committer Chris Mason <chris.mason@fusionio.com> 2013-09-21 11:05:23 -0400
commit    a482039889b85c45fc9616e65d560db7a35d4f54
tree      58482aa49aa50c30cdd2e7b496f30dd1c11ac860 /fs/btrfs/extent-tree.c
parent    13fd8da98f79317d26277360d510caa1edf9bab3
Btrfs: allocate the free space by the existing max extent size when ENOSPC
With the current code, if the requested size is very large and all the extents
in the free space cache are small, we waste a lot of CPU time cutting the
requested size in half and searching the cache again and again until it gets
down to a size the allocator can return. In fact, we already know the max
extent size in the cache after the first search, so we needn't cut the size in
half repeatedly; we can just use that max extent size directly. This saves a
lot of CPU time and improves performance when the free space cache contains
only small fragments.

According to my test, if there are only 4KB free space extents in the fs and
the total size of those extents is 256MB, we can reduce the execution time of
the following test from 5.4s to 1.4s.

	dd if=/dev/zero of=<testfile> bs=1MB count=1 oflag=sync

Changelog v2 -> v3:
- fix the problem that we skip block groups with less space than we need

Changelog v1 -> v2:
- fix the problem that we return a wrong start position when searching
  the free space in a bitmap

Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
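The caller-side retry logic (shown in the last hunk of the diff below) can be
illustrated with a small, self-contained sketch. This is a hypothetical
userspace mock-up, not the kernel code: find_free_extent_sketch(), the
4KB-only free space cache, and the MIN/MAX macros are stand-ins invented for
illustration. The only behaviour carried over from the patch is that the
allocator reports the largest free extent it saw through ins->offset on
ENOSPC, so the caller can clamp the next attempt to that size instead of
blindly halving the request.

	#include <stdint.h>
	#include <stdio.h>

	#define ENOSPC		28
	#define MIN(a, b)	((a) < (b) ? (a) : (b))
	#define MAX(a, b)	((a) > (b) ? (a) : (b))

	/* Hypothetical stand-in for struct btrfs_key, just for this sketch. */
	struct btrfs_key {
		uint64_t objectid;	/* start position of the hole */
		uint64_t offset;	/* hole size, or max free extent on ENOSPC */
	};

	/*
	 * Toy allocator: pretend the free space cache holds nothing but 4KB
	 * extents (the fragmented case from the commit message).  On failure
	 * it records the largest extent it saw in ins->offset, as the patch
	 * does.
	 */
	static int find_free_extent_sketch(uint64_t num_bytes, struct btrfs_key *ins)
	{
		const uint64_t max_extent_size = 4096;

		if (num_bytes <= max_extent_size) {
			ins->objectid = 0x100000;	/* arbitrary start position */
			ins->offset = num_bytes;
			return 0;
		}
		ins->offset = max_extent_size;		/* report the best we saw */
		return -ENOSPC;
	}

	int main(void)
	{
		struct btrfs_key ins = { 0, 0 };
		uint64_t num_bytes = 1024 * 1024;	/* requested size: 1MB */
		const uint64_t min_alloc_size = 4096;
		const uint64_t sectorsize = 4096;
		int final_tried = 0, tries = 0, ret;

		while ((ret = find_free_extent_sketch(num_bytes, &ins)) == -ENOSPC) {
			if (final_tried || !ins.offset)
				break;
			tries++;
			/*
			 * The patched retry: drop straight down to the largest
			 * free extent the allocator reported instead of halving
			 * the request over and over until it happens to fit.
			 */
			num_bytes = MIN(num_bytes >> 1, ins.offset);
			num_bytes &= ~(sectorsize - 1);		/* round_down() */
			num_bytes = MAX(num_bytes, min_alloc_size);
			if (num_bytes == min_alloc_size)
				final_tried = 1;
		}

		printf("allocated %llu bytes after %d retries\n",
		       (unsigned long long)(ret ? 0 : ins.offset), tries);
		return 0;
	}

With a 1MB request against nothing but 4KB fragments, this sketch succeeds
after a single retry, whereas halving alone would need eight passes over the
free space cache before the request fits.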
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--  fs/btrfs/extent-tree.c | 33 ++++++++++++++++++++++++---------
1 file changed, 24 insertions(+), 9 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index cfb3cf711b34..2f03181b777f 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -6117,10 +6117,13 @@ enum btrfs_loop_type {
 /*
  * walks the btree of allocated extents and find a hole of a given size.
  * The key ins is changed to record the hole:
- * ins->objectid == block start
+ * ins->objectid == start position
  * ins->flags = BTRFS_EXTENT_ITEM_KEY
- * ins->offset == number of blocks
+ * ins->offset == the size of the hole.
  * Any available blocks before search_start are skipped.
+ *
+ * If there is no suitable free space, we will record the max size of
+ * the free space extent currently.
  */
 static noinline int find_free_extent(struct btrfs_root *orig_root,
				     u64 num_bytes, u64 empty_size,
@@ -6133,6 +6136,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
 	struct btrfs_block_group_cache *block_group = NULL;
 	struct btrfs_block_group_cache *used_block_group;
 	u64 search_start = 0;
+	u64 max_extent_size = 0;
 	int empty_cluster = 2 * 1024 * 1024;
 	struct btrfs_space_info *space_info;
 	int loop = 0;
@@ -6292,7 +6296,10 @@ have_block_group:
 			btrfs_get_block_group(used_block_group);

 			offset = btrfs_alloc_from_cluster(used_block_group,
-				last_ptr, num_bytes, used_block_group->key.objectid);
+						last_ptr,
+						num_bytes,
+						used_block_group->key.objectid,
+						&max_extent_size);
 			if (offset) {
 				/* we have a block, we're done */
 				spin_unlock(&last_ptr->refill_lock);
@@ -6355,8 +6362,10 @@ refill_cluster:
 			 * cluster
 			 */
 			offset = btrfs_alloc_from_cluster(block_group,
-					last_ptr, num_bytes,
-					search_start);
+					last_ptr,
+					num_bytes,
+					search_start,
+					&max_extent_size);
 			if (offset) {
 				/* we found one, proceed */
 				spin_unlock(&last_ptr->refill_lock);
@@ -6391,13 +6400,18 @@ unclustered_alloc:
 		if (cached &&
 		    block_group->free_space_ctl->free_space <
 		    num_bytes + empty_cluster + empty_size) {
+			if (block_group->free_space_ctl->free_space >
+			    max_extent_size)
+				max_extent_size =
+					block_group->free_space_ctl->free_space;
 			spin_unlock(&block_group->free_space_ctl->tree_lock);
 			goto loop;
 		}
 		spin_unlock(&block_group->free_space_ctl->tree_lock);

 		offset = btrfs_find_space_for_alloc(block_group, search_start,
-						    num_bytes, empty_size);
+						    num_bytes, empty_size,
+						    &max_extent_size);
 		/*
 		 * If we didn't find a chunk, and we haven't failed on this
 		 * block group before, and this block group is in the middle of
@@ -6515,7 +6529,8 @@ loop:
 		ret = 0;
 	}
 out:
-
+	if (ret == -ENOSPC)
+		ins->offset = max_extent_size;
 	return ret;
 }

@@ -6573,8 +6588,8 @@ again:
 					       flags);

 	if (ret == -ENOSPC) {
-		if (!final_tried) {
-			num_bytes = num_bytes >> 1;
+		if (!final_tried && ins->offset) {
+			num_bytes = min(num_bytes >> 1, ins->offset);
 			num_bytes = round_down(num_bytes, root->sectorsize);
 			num_bytes = max(num_bytes, min_alloc_size);
 			if (num_bytes == min_alloc_size)