author    Josef Bacik <jbacik@fb.com>  2015-10-01 14:54:10 -0400
committer Chris Mason <clm@fb.com>     2015-10-21 21:55:37 -0400
commit    a5e681d9bd641c4f0677e87d3a0c92a8f4f16293
tree      f10e2f374d3309bdcb5afb24ec0018d882a4537c
parent    2968b1f48bd7366dd7310acde1ee6d1bf7791142
Btrfs: cut down on loops through the allocator
We try really really hard to make allocations, but sometimes it is just
not going to happen, especially when free space is extremely fragmented.
So add a few shortcuts through the looping states. For example, if we
couldn't allocate a chunk, just go straight to the NO_EMPTY_SIZE loop.
If there are no uncached block groups and we've done a full search, go
straight to the ALLOC_CHUNK stage. And finally, if we already have
empty_size and empty_cluster set to 0, go ahead and return -ENOSPC.
Thanks,

Signed-off-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Chris Mason <clm@fb.com>
 fs/btrfs/extent-tree.c | 39 ++++++++++++++++++++++++++++++++++---
 1 file changed, 36 insertions(+), 3 deletions(-)
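
To make the three shortcuts easier to follow, here is a minimal user-space
sketch of the retry-state progression. It is not the kernel code itself: the
LOOP_* names mirror the stages in fs/btrfs/extent-tree.c, but
simulate_find_free_extent() and its parameters are hypothetical, and every
simulated search pass is assumed to fail so that only the state transitions
are visible.

/*
 * Minimal sketch only: the LOOP_* names mirror fs/btrfs/extent-tree.c,
 * everything else here (function name, parameters) is hypothetical
 * illustration of the shortcuts described in the commit message.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum loop_state {
	LOOP_CACHING_NOWAIT,	/* only consider fully cached block groups */
	LOOP_CACHING_WAIT,	/* wait for block groups still being cached */
	LOOP_ALLOC_CHUNK,	/* force-allocate a new chunk and retry */
	LOOP_NO_EMPTY_SIZE,	/* final retry without empty_size/empty_cluster */
};

static const char *loop_name(enum loop_state s)
{
	static const char * const names[] = {
		"LOOP_CACHING_NOWAIT", "LOOP_CACHING_WAIT",
		"LOOP_ALLOC_CHUNK", "LOOP_NO_EMPTY_SIZE",
	};
	return names[s];
}

/* Every simulated search pass fails; only the state transitions matter. */
static int simulate_find_free_extent(bool have_caching_bg, bool full_search,
				     bool chunk_alloc_fails,
				     unsigned long empty_size,
				     unsigned long empty_cluster)
{
	enum loop_state loop = LOOP_CACHING_NOWAIT;

	for (;;) {
		printf("  search pass at %s\n", loop_name(loop));

		if (loop >= LOOP_NO_EMPTY_SIZE)
			return -ENOSPC;	/* every stage has been tried */

		if (loop == LOOP_CACHING_NOWAIT) {
			/*
			 * Shortcut: skip LOOP_CACHING_WAIT when nothing is
			 * still caching and the first pass already covered
			 * every raid index (full_search).
			 */
			if (have_caching_bg || !full_search)
				loop = LOOP_CACHING_WAIT;
			else
				loop = LOOP_ALLOC_CHUNK;
		} else {
			loop++;
		}

		/*
		 * Shortcut: if the forced chunk allocation itself returns
		 * -ENOSPC, jump straight to the NO_EMPTY_SIZE stage.
		 */
		if (loop == LOOP_ALLOC_CHUNK && chunk_alloc_fails)
			loop = LOOP_NO_EMPTY_SIZE;

		if (loop == LOOP_NO_EMPTY_SIZE) {
			/*
			 * Shortcut: with no empty_size/empty_cluster padding
			 * left to drop, another pass cannot succeed.
			 */
			if (empty_size == 0 && empty_cluster == 0)
				return -ENOSPC;
			empty_size = 0;
			empty_cluster = 0;
		}
	}
}

int main(void)
{
	puts("fragmented fs, chunk allocation fails, empty_size = 4096:");
	printf("  -> ret = %d\n",
	       simulate_find_free_extent(false, true, true, 4096, 0));

	puts("same, but empty_size and empty_cluster already 0:");
	printf("  -> ret = %d\n",
	       simulate_find_free_extent(false, true, true, 0, 0));
	return 0;
}

With no uncached block groups and a full first search, the sequence skips
LOOP_CACHING_WAIT entirely, and a failed chunk allocation jumps straight to
LOOP_NO_EMPTY_SIZE; with empty_size and empty_cluster already 0 the sketch
returns -ENOSPC after a single pass, which is the behaviour the patch below
adds.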
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 3185c457f025..9f18eb0e86b6 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -6921,6 +6921,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
 	bool failed_alloc = false;
 	bool use_cluster = true;
 	bool have_caching_bg = false;
+	bool full_search = false;
 
 	WARN_ON(num_bytes < root->sectorsize);
 	ins->type = BTRFS_EXTENT_ITEM_KEY;
@@ -7023,6 +7024,8 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
 	}
 search:
 	have_caching_bg = false;
+	if (index == 0 || index == __get_raid_index(flags))
+		full_search = true;
 	down_read(&space_info->groups_sem);
 	list_for_each_entry(block_group, &space_info->block_groups[index],
 			    list) {
@@ -7056,6 +7059,7 @@ search:
 have_block_group:
 		cached = block_group_cache_done(block_group);
 		if (unlikely(!cached)) {
+			have_caching_bg = true;
 			ret = cache_block_group(block_group, 0);
 			BUG_ON(ret < 0);
 			ret = 0;
@@ -7228,8 +7232,6 @@ unclustered_alloc:
 			failed_alloc = true;
 			goto have_block_group;
 		} else if (!offset) {
-			if (!cached)
-				have_caching_bg = true;
 			goto loop;
 		}
 checks:
@@ -7286,7 +7288,20 @@ loop:
 	 */
 	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
 		index = 0;
-		loop++;
+		if (loop == LOOP_CACHING_NOWAIT) {
+			/*
+			 * We want to skip the LOOP_CACHING_WAIT step if we
+			 * don't have any uncached bgs and we've already done a
+			 * full search through.
+			 */
+			if (have_caching_bg || !full_search)
+				loop = LOOP_CACHING_WAIT;
+			else
+				loop = LOOP_ALLOC_CHUNK;
+		} else {
+			loop++;
+		}
+
 		if (loop == LOOP_ALLOC_CHUNK) {
 			struct btrfs_trans_handle *trans;
 			int exist = 0;
@@ -7304,6 +7319,15 @@ loop:
 
 			ret = do_chunk_alloc(trans, root, flags,
 					     CHUNK_ALLOC_FORCE);
+
+			/*
+			 * If we can't allocate a new chunk we've already
+			 * looped through at least once, so move on to the
+			 * NO_EMPTY_SIZE case.
+			 */
+			if (ret == -ENOSPC)
+				loop = LOOP_NO_EMPTY_SIZE;
+
 			/*
 			 * Do not bail out on ENOSPC since we
 			 * can do more things.
@@ -7320,6 +7344,15 @@ loop:
 		}
 
 		if (loop == LOOP_NO_EMPTY_SIZE) {
+			/*
+			 * Don't loop again if we already have no empty_size
+			 * and no empty_cluster.
+			 */
+			if (empty_size == 0 &&
+			    empty_cluster == 0) {
+				ret = -ENOSPC;
+				goto out;
+			}
 			empty_size = 0;
 			empty_cluster = 0;
 		}