Diffstat (limited to 'fs/btrfs')
-rw-r--r--	fs/btrfs/extent-tree.c	93
1 file changed, 8 insertions(+), 85 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 37e0a800d34e..eccef6c06237 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -5260,11 +5260,10 @@ static int get_block_group_index(struct btrfs_block_group_cache *cache)
 }
 
 enum btrfs_loop_type {
-	LOOP_FIND_IDEAL = 0,
-	LOOP_CACHING_NOWAIT = 1,
-	LOOP_CACHING_WAIT = 2,
-	LOOP_ALLOC_CHUNK = 3,
-	LOOP_NO_EMPTY_SIZE = 4,
+	LOOP_CACHING_NOWAIT = 0,
+	LOOP_CACHING_WAIT = 1,
+	LOOP_ALLOC_CHUNK = 2,
+	LOOP_NO_EMPTY_SIZE = 3,
 };
 
 /*
@@ -5300,8 +5299,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 	bool failed_alloc = false;
 	bool use_cluster = true;
 	bool have_caching_bg = false;
-	u64 ideal_cache_percent = 0;
-	u64 ideal_cache_offset = 0;
 
 	WARN_ON(num_bytes < root->sectorsize);
 	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
@@ -5351,7 +5348,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 		empty_cluster = 0;
 
 	if (search_start == hint_byte) {
-ideal_cache:
 		block_group = btrfs_lookup_block_group(root->fs_info,
 						       search_start);
 		used_block_group = block_group;
@@ -5363,8 +5359,7 @@ ideal_cache:
 		 * picked out then we don't care that the block group is cached.
 		 */
 		if (block_group && block_group_bits(block_group, data) &&
-		    (block_group->cached != BTRFS_CACHE_NO ||
-		     search_start == ideal_cache_offset)) {
+		    block_group->cached != BTRFS_CACHE_NO) {
 			down_read(&space_info->groups_sem);
 			if (list_empty(&block_group->list) ||
 			    block_group->ro) {
@@ -5418,44 +5413,12 @@ search:
 have_block_group:
 		cached = block_group_cache_done(block_group);
 		if (unlikely(!cached)) {
-			u64 free_percent;
-
 			found_uncached_bg = true;
 			ret = cache_block_group(block_group, trans,
-						orig_root, 1);
-			if (block_group->cached == BTRFS_CACHE_FINISHED)
-				goto alloc;
-
-			free_percent = btrfs_block_group_used(&block_group->item);
-			free_percent *= 100;
-			free_percent = div64_u64(free_percent,
-						 block_group->key.offset);
-			free_percent = 100 - free_percent;
-			if (free_percent > ideal_cache_percent &&
-			    likely(!block_group->ro)) {
-				ideal_cache_offset = block_group->key.objectid;
-				ideal_cache_percent = free_percent;
-			}
-
-			/*
-			 * The caching workers are limited to 2 threads, so we
-			 * can queue as much work as we care to.
-			 */
-			if (loop > LOOP_FIND_IDEAL) {
-				ret = cache_block_group(block_group, trans,
-							orig_root, 0);
-				BUG_ON(ret);
-			}
-
-			/*
-			 * If loop is set for cached only, try the next block
-			 * group.
-			 */
-			if (loop == LOOP_FIND_IDEAL)
-				goto loop;
+						orig_root, 0);
+			BUG_ON(ret);
 		}
 
-alloc:
 		if (unlikely(block_group->ro))
 			goto loop;
 
@@ -5661,9 +5624,7 @@ loop:
 	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
 		goto search;
 
-	/* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait for
-	 *			for them to make caching progress.  Also
-	 *			determine the best possible bg to cache
+	/*
 	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
 	 *			caching kthreads as we move along
 	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
@@ -5673,45 +5634,7 @@ loop:
 	 */
 	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
 		index = 0;
-		if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
-			found_uncached_bg = false;
-			loop++;
-			if (!ideal_cache_percent)
-				goto search;
-
-			/*
-			 * 1 of the following 2 things have happened so far
-			 *
-			 * 1) We found an ideal block group for caching that
-			 * is mostly full and will cache quickly, so we might
-			 * as well wait for it.
-			 *
-			 * 2) We searched for cached only and we didn't find
-			 * anything, and we didn't start any caching kthreads
-			 * either, so chances are we will loop through and
-			 * start a couple caching kthreads, and then come back
-			 * around and just wait for them.  This will be slower
-			 * because we will have 2 caching kthreads reading at
-			 * the same time when we could have just started one
-			 * and waited for it to get far enough to give us an
-			 * allocation, so go ahead and go to the wait caching
-			 * loop.
-			 */
-			loop = LOOP_CACHING_WAIT;
-			search_start = ideal_cache_offset;
-			ideal_cache_percent = 0;
-			goto ideal_cache;
-		} else if (loop == LOOP_FIND_IDEAL) {
-			/*
-			 * Didn't find a uncached bg, wait on anything we find
-			 * next.
-			 */
-			loop = LOOP_CACHING_WAIT;
-			goto search;
-		}
-
 		loop++;
-
 		if (loop == LOOP_ALLOC_CHUNK) {
 			if (allowed_chunk_alloc) {
 				ret = do_chunk_alloc(trans, root, num_bytes +
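
The net effect is that find_free_extent() is left with a four-stage retry ladder instead of five: caching is always kicked off asynchronously (cache_block_group(..., 0)) on the first miss, and on failure the loop escalates directly from not waiting, to waiting, to forcing a chunk allocation, to retrying with no empty-size padding. Below is a minimal standalone C sketch of that control flow, not btrfs code; try_alloc() is a hypothetical stand-in for a full pass over the block groups.

/*
 * Standalone model of the post-patch retry ladder in find_free_extent().
 * try_alloc() is a hypothetical helper, not a btrfs function; here it
 * only "succeeds" once the caller is willing to wait for caching.
 */
#include <stdbool.h>
#include <stdio.h>

enum btrfs_loop_type {			/* the enum as of this patch */
	LOOP_CACHING_NOWAIT = 0,	/* use cached/caching bgs, don't wait */
	LOOP_CACHING_WAIT = 1,		/* wait for caching to make progress */
	LOOP_ALLOC_CHUNK = 2,		/* force a chunk allocation */
	LOOP_NO_EMPTY_SIZE = 3,		/* drop empty_size/empty_cluster */
};

static bool try_alloc(int loop)
{
	return loop >= LOOP_CACHING_WAIT;	/* pretend waiting suffices */
}

int main(void)
{
	int loop = LOOP_CACHING_NOWAIT;

	/*
	 * Mirrors "if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) loop++":
	 * with LOOP_FIND_IDEAL gone there is no scoring-only pre-pass, each
	 * failed pass simply escalates to the next, more expensive strategy.
	 */
	while (!try_alloc(loop) && loop < LOOP_NO_EMPTY_SIZE)
		loop++;

	printf("allocated at loop stage %d\n", loop);
	return 0;
}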