author     Filipe Manana <fdmanana@suse.com>    2014-08-29 08:35:13 -0400
committer  Chris Mason <clm@fb.com>             2014-09-17 16:38:13 -0400
commit     200055239604cf4bfaed40d8f404228ea606b4f9 (patch)
tree       155a427ba103e04a892aaef1fbd993b9775d5868 /fs
parent     3c1dbdf54a31f4f049a33214c3096595988786bf (diff)
Btrfs: improve free space cache management and space allocation
While under random IO, a block group's free space cache eventually reaches
a state where it has a mix of extent entries and bitmap entries representing
free space regions. As later free space regions are returned to the cache,
some of them are merged with existing extent entries if they are contiguous
with them. But others are not merged, because despite the existence of
adjacent free space regions in the cache, the merging doesn't happen when
the existing free space regions are represented in bitmap entries.

Even when new free space regions are merged with existing extent entries
(enlarging the free space range they represent), we can end up with an
enlarged region that is contiguous with some other region represented in a
bitmap entry.

Both clustered and non-clustered space allocation work by iterating over
our extent and bitmap entries and skipping any that represents a region
smaller than the allocation request (and giving preference to extent
entries before bitmap entries).

By having a contiguous free space region that is represented by 2 (or more)
entries (a mix of extent and bitmap entries), we end up not satisfying an
allocation request with a size larger than the size of any of the entries
but no larger than the sum of their sizes. This makes the caller assume
we're under an ENOSPC condition, or forces it to allocate multiple smaller
space regions (as we do for file data writes), which adds extra overhead
and more chances of causing fragmentation due to the smaller regions being
spread apart from each other (more likely when under concurrency).

For example, if we have the following in the cache:

* extent entry representing the free space range: [128Mb - 256Kb, 128Mb[

* bitmap entry covering the range [128Mb, 256Mb[, but only with the bits
  representing the range [128Mb, 128Mb + 768Kb[ set - that is, only that
  space in this 128Mb area is marked as free

An allocation request for 1Mb, starting at an offset not greater than
128Mb - 256Kb, would fail before this change, despite the existence of such
a contiguous free space area in the cache. The caller could only allocate
up to 768Kb of space at once and later another 256Kb (or vice versa). In
between each smaller allocation request, another task working on a
different file/inode might come in and take that space, preventing the
former task from getting a contiguous 1Mb region of free space.

Therefore this change implements the ability to move free space from bitmap
entries into existing and new free space regions represented with extent
entries. This is done when a space region is added to the cache. A test
that explains the issue in detail was also added to the sanity tests.

Some performance test results with compilebench on a 4-core machine, with
32Gb of RAM and an HDD, follow.

Test: compilebench -D /mnt -i 30 -r 1000 --makej

Before this change:

   intial create total runs 30 avg 69.02 MB/s (user 0.28s sys 0.57s)
   compile total runs 30 avg 314.96 MB/s (user 0.12s sys 0.25s)
   read compiled tree total runs 3 avg 27.14 MB/s (user 1.52s sys 0.90s)
   delete compiled tree total runs 30 avg 3.14 seconds (user 0.15s sys 0.66s)

After this change:

   intial create total runs 30 avg 68.37 MB/s (user 0.29s sys 0.55s)
   compile total runs 30 avg 382.83 MB/s (user 0.12s sys 0.24s)
   read compiled tree total runs 3 avg 27.82 MB/s (user 1.45s sys 0.97s)
   delete compiled tree total runs 30 avg 3.18 seconds (user 0.17s sys 0.65s)

Signed-off-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Chris Mason <clm@fb.com>
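To see the idea in isolation, here is a minimal user-space C sketch of the
end-direction steal. It is not the kernel code: struct extent,
steal_to_end(), UNIT and NBITS are invented stand-ins for the patch's
btrfs_free_space entries, steal_from_bitmap_to_end(), ctl->unit and
BITS_PER_BITMAP. A set bit marks a free unit; the extent absorbs the run of
free bits that starts exactly where it ends, so a single cache entry can
cover the whole contiguous region:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define UNIT  4096ULL  /* one bitmap bit covers 4Kb, as in the test below */
    #define NBITS 64       /* toy bitmap: 64 bits in a single u64 word */

    struct extent {
            uint64_t offset;
            uint64_t bytes;
    };

    /*
     * Extend the extent over the run of free (set) bits that starts exactly
     * where the extent ends, clearing those bits from the bitmap word.
     */
    static bool steal_to_end(struct extent *e, uint64_t bitmap_base,
                             uint64_t *bitmap)
    {
            uint64_t end = e->offset + e->bytes;

            if (end < bitmap_base || end >= bitmap_base + NBITS * UNIT)
                    return false;

            unsigned int i = (unsigned int)((end - bitmap_base) / UNIT);
            unsigned int j = i;

            while (j < NBITS && (*bitmap & (1ULL << j)))  /* free bits are set */
                    j++;
            if (j == i)
                    return false;

            for (unsigned int k = i; k < j; k++)          /* steal: clear them */
                    *bitmap &= ~(1ULL << k);
            e->bytes += (uint64_t)(j - i) * UNIT;
            return true;
    }

    int main(void)
    {
            /*
             * Extent [128Kb, 192Kb) next to a bitmap based at 192Kb whose
             * first 8 bits (32Kb) are free: without the steal these stay 2
             * cache entries, so a single 96Kb allocation cannot be satisfied.
             */
            struct extent e = { 128 * 1024, 64 * 1024 };
            uint64_t bitmap = 0xffULL;

            steal_to_end(&e, 192 * 1024, &bitmap);
            printf("extent now [%llu, %llu): one entry covers all 96Kb\n",
                   (unsigned long long)e.offset,
                   (unsigned long long)(e.offset + e.bytes));
            return 0;
    }

The patch does the same against real bitmap entries, in both directions
(see steal_from_bitmap_to_front() in the diff), and then re-runs
try_merge_free_space() on the enlarged extent entry.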
Diffstat (limited to 'fs')

 fs/btrfs/free-space-cache.c       | 140
 fs/btrfs/tests/free-space-tests.c | 514
 2 files changed, 653 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 2f0fe1028e51..33848196550e 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1997,6 +1997,128 @@ static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
         return merged;
 }
 
+static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl,
+                                     struct btrfs_free_space *info,
+                                     bool update_stat)
+{
+        struct btrfs_free_space *bitmap;
+        unsigned long i;
+        unsigned long j;
+        const u64 end = info->offset + info->bytes;
+        const u64 bitmap_offset = offset_to_bitmap(ctl, end);
+        u64 bytes;
+
+        bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
+        if (!bitmap)
+                return false;
+
+        i = offset_to_bit(bitmap->offset, ctl->unit, end);
+        j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i);
+        if (j == i)
+                return false;
+        bytes = (j - i) * ctl->unit;
+        info->bytes += bytes;
+
+        if (update_stat)
+                bitmap_clear_bits(ctl, bitmap, end, bytes);
+        else
+                __bitmap_clear_bits(ctl, bitmap, end, bytes);
+
+        if (!bitmap->bytes)
+                free_bitmap(ctl, bitmap);
+
+        return true;
+}
+
+static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl,
+                                       struct btrfs_free_space *info,
+                                       bool update_stat)
+{
+        struct btrfs_free_space *bitmap;
+        u64 bitmap_offset;
+        unsigned long i;
+        unsigned long j;
+        unsigned long prev_j;
+        u64 bytes;
+
+        bitmap_offset = offset_to_bitmap(ctl, info->offset);
+        /* If we're on a boundary, try the previous logical bitmap. */
+        if (bitmap_offset == info->offset) {
+                if (info->offset == 0)
+                        return false;
+                bitmap_offset = offset_to_bitmap(ctl, info->offset - 1);
+        }
+
+        bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
+        if (!bitmap)
+                return false;
+
+        i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1;
+        j = 0;
+        prev_j = (unsigned long)-1;
+        for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) {
+                if (j > i)
+                        break;
+                prev_j = j;
+        }
+        if (prev_j == i)
+                return false;
+
+        if (prev_j == (unsigned long)-1)
+                bytes = (i + 1) * ctl->unit;
+        else
+                bytes = (i - prev_j) * ctl->unit;
+
+        info->offset -= bytes;
+        info->bytes += bytes;
+
+        if (update_stat)
+                bitmap_clear_bits(ctl, bitmap, info->offset, bytes);
+        else
+                __bitmap_clear_bits(ctl, bitmap, info->offset, bytes);
+
+        if (!bitmap->bytes)
+                free_bitmap(ctl, bitmap);
+
+        return true;
+}
+
+/*
+ * We always prefer to allocate from extent entries, both for clustered and
+ * non-clustered allocation requests. So when attempting to add a new extent
+ * entry, try to see if there's adjacent free space in bitmap entries, and if
+ * there is, migrate that space from the bitmaps to the extent.
+ * This way we get better chances of satisfying space allocation requests
+ * because we attempt to satisfy them based on a single cache entry, and never
+ * on 2 or more entries - even if the entries represent a contiguous free space
+ * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry
+ * ends).
+ */
+static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl,
+                              struct btrfs_free_space *info,
+                              bool update_stat)
+{
+        /*
+         * Only work with disconnected entries, as we can change their offset,
+         * and they must be extent entries.
+         */
+        ASSERT(!info->bitmap);
+        ASSERT(RB_EMPTY_NODE(&info->offset_index));
+
+        if (ctl->total_bitmaps > 0) {
+                bool stole_end;
+                bool stole_front = false;
+
+                stole_end = steal_from_bitmap_to_end(ctl, info, update_stat);
+                if (ctl->total_bitmaps > 0)
+                        stole_front = steal_from_bitmap_to_front(ctl, info,
+                                                                 update_stat);
+
+                if (stole_end || stole_front)
+                        try_merge_free_space(ctl, info, update_stat);
+        }
+}
+
 int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
                            u64 offset, u64 bytes)
 {
@@ -2009,6 +2131,7 @@ int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
 
         info->offset = offset;
         info->bytes = bytes;
+        RB_CLEAR_NODE(&info->offset_index);
 
         spin_lock(&ctl->tree_lock);
 
@@ -2028,6 +2151,14 @@ int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
                 goto out;
         }
 link:
+        /*
+         * Only steal free space from adjacent bitmaps if we're sure we're not
+         * going to add the new free space to existing bitmap entries - because
+         * that would mean unnecessary work that would be reverted. Therefore
+         * attempt to steal space from bitmaps if we're adding an extent entry.
+         */
+        steal_from_bitmap(ctl, info, true);
+
         ret = link_free_space(ctl, info);
         if (ret)
                 kmem_cache_free(btrfs_free_space_cachep, info);
@@ -2204,10 +2335,13 @@ __btrfs_return_cluster_to_free_space(
                 entry = rb_entry(node, struct btrfs_free_space, offset_index);
                 node = rb_next(&entry->offset_index);
                 rb_erase(&entry->offset_index, &cluster->root);
+                RB_CLEAR_NODE(&entry->offset_index);
 
                 bitmap = (entry->bitmap != NULL);
-                if (!bitmap)
+                if (!bitmap) {
                         try_merge_free_space(ctl, entry, false);
+                        steal_from_bitmap(ctl, entry, false);
+                }
                 tree_insert_offset(&ctl->free_space_offset,
                                    entry->offset, &entry->offset_index, bitmap);
         }
@@ -3175,6 +3309,7 @@ again:
                 map = NULL;
                 add_new_bitmap(ctl, info, offset);
                 bitmap_info = info;
+                info = NULL;
         }
 
         bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
@@ -3185,6 +3320,8 @@ again:
         if (bytes)
                 goto again;
 
+        if (info)
+                kmem_cache_free(btrfs_free_space_cachep, info);
         if (map)
                 kfree(map);
         return 0;
@@ -3259,6 +3396,7 @@ have_info:
                         goto have_info;
                 }
 
+                ret = 0;
                 goto out;
         }
 
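As a companion to the sketch above, here is the front-direction steal in the
same toy model (again user-space C with invented names, not the kernel code):
the run of free bits that ends exactly where the extent starts is cleared
from the bitmap and moved under the extent by lowering its offset, mirroring
steal_from_bitmap_to_front():

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define UNIT  4096ULL
    #define NBITS 64

    struct extent {
            uint64_t offset;
            uint64_t bytes;
    };

    /*
     * Lower the extent's offset over the run of free (set) bits that ends
     * exactly where the extent starts, clearing those bits from the bitmap.
     */
    static bool steal_to_front(struct extent *e, uint64_t bitmap_base,
                               uint64_t *bitmap)
    {
            if (e->offset <= bitmap_base ||
                e->offset > bitmap_base + NBITS * UNIT)
                    return false;

            long i = (long)((e->offset - bitmap_base) / UNIT) - 1;
            long j = i;

            while (j >= 0 && (*bitmap & (1ULL << j)))  /* walk the run backwards */
                    j--;
            if (j == i)
                    return false;

            for (long k = j + 1; k <= i; k++)          /* steal: clear the run */
                    *bitmap &= ~(1ULL << k);

            uint64_t bytes = (uint64_t)(i - j) * UNIT;
            e->offset -= bytes;
            e->bytes += bytes;
            return true;
    }

    int main(void)
    {
            /*
             * Extent [256Kb, 320Kb); the bitmap covering [0, 256Kb) has only
             * its last 4 bits (16Kb) free, contiguous with the extent's start.
             */
            struct extent e = { 256 * 1024, 64 * 1024 };
            uint64_t bitmap = 0xfULL << 60;

            steal_to_front(&e, 0, &bitmap);
            printf("extent now [%llu, %llu)\n",
                   (unsigned long long)e.offset,
                   (unsigned long long)(e.offset + e.bytes));
            return 0;
    }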
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
index c8d9ddf84c69..d78ae10d0446 100644
--- a/fs/btrfs/tests/free-space-tests.c
+++ b/fs/btrfs/tests/free-space-tests.c
@@ -40,6 +40,7 @@ static struct btrfs_block_group_cache *init_test_block_group(void)
         cache->key.offset = 1024 * 1024 * 1024;
         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
         cache->sectorsize = 4096;
+        cache->full_stripe_len = 4096;
 
         spin_lock_init(&cache->lock);
         INIT_LIST_HEAD(&cache->list);
@@ -364,6 +365,517 @@ static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache)
         return 0;
 }
 
+/* Used by test_steal_space_from_bitmap_to_extent(). */
+static bool test_use_bitmap(struct btrfs_free_space_ctl *ctl,
+                            struct btrfs_free_space *info)
+{
+        return ctl->free_extents > 0;
+}
+
+/* Used by test_steal_space_from_bitmap_to_extent(). */
+static int
+check_num_extents_and_bitmaps(const struct btrfs_block_group_cache *cache,
+                              const int num_extents,
+                              const int num_bitmaps)
+{
+        if (cache->free_space_ctl->free_extents != num_extents) {
+                test_msg("Incorrect # of extent entries in the cache: %d, expected %d\n",
+                         cache->free_space_ctl->free_extents, num_extents);
+                return -EINVAL;
+        }
+        if (cache->free_space_ctl->total_bitmaps != num_bitmaps) {
+                test_msg("Incorrect # of bitmap entries in the cache: %d, expected %d\n",
+                         cache->free_space_ctl->total_bitmaps, num_bitmaps);
+                return -EINVAL;
+        }
+        return 0;
+}
+
+/* Used by test_steal_space_from_bitmap_to_extent(). */
+static int check_cache_empty(struct btrfs_block_group_cache *cache)
+{
+        u64 offset;
+        u64 max_extent_size;
+
+        /*
+         * Now let's confirm that there's absolutely no free space left to
+         * allocate.
+         */
+        if (cache->free_space_ctl->free_space != 0) {
+                test_msg("Cache free space is not 0\n");
+                return -EINVAL;
+        }
+
+        /* And any allocation request, no matter how small, should fail now. */
+        offset = btrfs_find_space_for_alloc(cache, 0, 4096, 0,
+                                            &max_extent_size);
+        if (offset != 0) {
+                test_msg("Space allocation did not fail, returned offset: %llu\n",
+                         offset);
+                return -EINVAL;
+        }
+
+        /* And no extent nor bitmap entries in the cache anymore. */
+        return check_num_extents_and_bitmaps(cache, 0, 0);
+}
+
+/*
+ * Before we were able to steal free space from a bitmap entry to an extent
+ * entry, we could end up with 2 entries representing a contiguous free space
+ * region. One would be an extent entry and the other a bitmap entry. Since in
+ * order to allocate space to a caller we use only 1 entry, we couldn't return
+ * that whole range to the caller if it was requested. This forced the caller
+ * to either assume ENOSPC or perform several smaller space allocations, which
+ * wasn't optimal as they could be spread all over the block group while under
+ * concurrency (extra overhead and fragmentation).
+ *
+ * This stealing approach is beneficial, since we always prefer to allocate
+ * from extent entries, both for clustered and non-clustered allocation
+ * requests.
+ */
+static int
+test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
+{
+        int ret;
+        u64 offset;
+        u64 max_extent_size;
+
+        bool (*use_bitmap_op)(struct btrfs_free_space_ctl *,
+                              struct btrfs_free_space *);
+
+        test_msg("Running space stealing from bitmap to extent\n");
+
+        /*
+         * For this test, we want to ensure we end up with an extent entry
+         * immediately adjacent to a bitmap entry, where the bitmap starts
+         * at an offset where the extent entry ends. We keep adding and
+         * removing free space to reach into this state, but to get there
+         * we need to reach a point where marking new free space doesn't
+         * result in adding new extent entries or merging the new space
+         * with existing extent entries - the space ends up being marked
+         * in an existing bitmap that covers the new free space range.
+         *
+         * To get there, we need to reach the threshold defined at
+         * cache->free_space_ctl->extents_thresh, which currently is
+         * 256 extents on an x86_64 system at least, and a few other
+         * conditions (check free_space_cache.c). Instead of making the
+         * test much longer and complicated, use a "use_bitmap" operation
+         * that forces use of bitmaps as soon as we have at least 1
+         * extent entry.
+         */
+        use_bitmap_op = cache->free_space_ctl->op->use_bitmap;
+        cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
+
+        /*
+         * Extent entry covering free space range [128Mb - 256Kb, 128Mb - 128Kb[
+         */
+        ret = test_add_free_space_entry(cache, 128 * 1024 * 1024 - 256 * 1024,
+                                        128 * 1024, 0);
+        if (ret) {
+                test_msg("Couldn't add extent entry %d\n", ret);
+                return ret;
+        }
+
+        /* Bitmap entry covering free space range [128Mb + 512Kb, 256Mb[ */
+        ret = test_add_free_space_entry(cache, 128 * 1024 * 1024 + 512 * 1024,
+                                        128 * 1024 * 1024 - 512 * 1024, 1);
+        if (ret) {
+                test_msg("Couldn't add bitmap entry %d\n", ret);
+                return ret;
+        }
+
+        ret = check_num_extents_and_bitmaps(cache, 2, 1);
+        if (ret)
+                return ret;
+
+        /*
+         * Now make only the first 256Kb of the bitmap marked as free, so that
+         * we end up with only the following ranges marked as free space:
+         *
+         * [128Mb - 256Kb, 128Mb - 128Kb[
+         * [128Mb + 512Kb, 128Mb + 768Kb[
+         */
+        ret = btrfs_remove_free_space(cache,
+                                      128 * 1024 * 1024 + 768 * 1024,
+                                      128 * 1024 * 1024 - 768 * 1024);
+        if (ret) {
+                test_msg("Failed to free part of bitmap space %d\n", ret);
+                return ret;
+        }
+
+        /* Confirm that only those 2 ranges are marked as free. */
+        if (!test_check_exists(cache, 128 * 1024 * 1024 - 256 * 1024,
+                               128 * 1024)) {
+                test_msg("Free space range missing\n");
+                return -ENOENT;
+        }
+        if (!test_check_exists(cache, 128 * 1024 * 1024 + 512 * 1024,
+                               256 * 1024)) {
+                test_msg("Free space range missing\n");
+                return -ENOENT;
+        }
+
+        /*
+         * Confirm that the bitmap range [128Mb + 768Kb, 256Mb[ isn't marked
+         * as free anymore.
+         */
+        if (test_check_exists(cache, 128 * 1024 * 1024 + 768 * 1024,
+                              128 * 1024 * 1024 - 768 * 1024)) {
+                test_msg("Bitmap region not removed from space cache\n");
+                return -EINVAL;
+        }
+
+        /*
+         * Confirm that the region [128Mb + 256Kb, 128Mb + 512Kb[, which is
+         * covered by the bitmap, isn't marked as free.
+         */
+        if (test_check_exists(cache, 128 * 1024 * 1024 + 256 * 1024,
+                              256 * 1024)) {
+                test_msg("Invalid bitmap region marked as free\n");
+                return -EINVAL;
+        }
+
+        /*
+         * Confirm that the region [128Mb, 128Mb + 256Kb[, which is covered
+         * by the bitmap too, isn't marked as free either.
+         */
+        if (test_check_exists(cache, 128 * 1024 * 1024,
+                              256 * 1024)) {
+                test_msg("Invalid bitmap region marked as free\n");
+                return -EINVAL;
+        }
+
+        /*
+         * Now let's mark the region [128Mb, 128Mb + 512Kb[ as free too. But,
+         * let's make sure the free space cache marks it as free in the bitmap,
+         * and doesn't insert a new extent entry to represent this region.
+         */
+        ret = btrfs_add_free_space(cache, 128 * 1024 * 1024, 512 * 1024);
+        if (ret) {
+                test_msg("Error adding free space: %d\n", ret);
+                return ret;
+        }
+        /* Confirm the region is marked as free. */
+        if (!test_check_exists(cache, 128 * 1024 * 1024, 512 * 1024)) {
+                test_msg("Bitmap region not marked as free\n");
+                return -ENOENT;
+        }
+
+        /*
+         * Confirm that no new extent entries or bitmap entries were added to
+         * the cache after adding that free space region.
+         */
+        ret = check_num_extents_and_bitmaps(cache, 2, 1);
+        if (ret)
+                return ret;
+
+        /*
+         * Now let's add a small free space region to the right of the previous
+         * one, which is not contiguous with it and is part of the bitmap too.
+         * The goal is to test that the bitmap entry space stealing doesn't
+         * steal this space region.
+         */
+        ret = btrfs_add_free_space(cache, 128 * 1024 * 1024 + 16 * 1024 * 1024,
+                                   4096);
+        if (ret) {
+                test_msg("Error adding free space: %d\n", ret);
+                return ret;
+        }
+
+        /*
+         * Confirm that no new extent entries or bitmap entries were added to
+         * the cache after adding that free space region.
+         */
+        ret = check_num_extents_and_bitmaps(cache, 2, 1);
+        if (ret)
+                return ret;
+
+        /*
+         * Now mark the region [128Mb - 128Kb, 128Mb[ as free too. This will
+         * expand the range covered by the existing extent entry that represents
+         * the free space [128Mb - 256Kb, 128Mb - 128Kb[.
+         */
+        ret = btrfs_add_free_space(cache, 128 * 1024 * 1024 - 128 * 1024,
+                                   128 * 1024);
+        if (ret) {
+                test_msg("Error adding free space: %d\n", ret);
+                return ret;
+        }
+        /* Confirm the region is marked as free. */
+        if (!test_check_exists(cache, 128 * 1024 * 1024 - 128 * 1024,
+                               128 * 1024)) {
+                test_msg("Extent region not marked as free\n");
+                return -ENOENT;
+        }
+
+        /*
+         * Confirm that our extent entry didn't steal all the free space from
+         * the bitmap, because of the small 4Kb free space region.
+         */
+        ret = check_num_extents_and_bitmaps(cache, 2, 1);
+        if (ret)
+                return ret;
+
+        /*
+         * So now we have the range [128Mb - 256Kb, 128Mb + 768Kb[ as free
+         * space. Without stealing bitmap free space into extent entry space,
+         * we would have all this free space represented by 2 entries in the
+         * cache:
+         *
+         * extent entry covering range: [128Mb - 256Kb, 128Mb[
+         * bitmap entry covering range: [128Mb, 128Mb + 768Kb[
+         *
+         * Attempting to allocate the whole free space (1Mb) would fail, because
+         * we can't allocate from multiple entries.
+         * With the bitmap free space stealing, we get a single extent entry
+         * that represents the 1Mb free space, and therefore we're able to
+         * allocate the whole free space at once.
+         */
+        if (!test_check_exists(cache, 128 * 1024 * 1024 - 256 * 1024,
+                               1 * 1024 * 1024)) {
+                test_msg("Expected region not marked as free\n");
+                return -ENOENT;
+        }
+
+        if (cache->free_space_ctl->free_space != (1 * 1024 * 1024 + 4096)) {
+                test_msg("Cache free space is not 1Mb + 4Kb\n");
+                return -EINVAL;
+        }
+
+        offset = btrfs_find_space_for_alloc(cache,
+                                            0, 1 * 1024 * 1024, 0,
+                                            &max_extent_size);
+        if (offset != (128 * 1024 * 1024 - 256 * 1024)) {
+                test_msg("Failed to allocate 1Mb from space cache, returned offset is: %llu\n",
+                         offset);
+                return -EINVAL;
+        }
+
+        /* All that remains is a 4Kb free space region in a bitmap. Confirm. */
+        ret = check_num_extents_and_bitmaps(cache, 1, 1);
+        if (ret)
+                return ret;
+
+        if (cache->free_space_ctl->free_space != 4096) {
+                test_msg("Cache free space is not 4Kb\n");
+                return -EINVAL;
+        }
+
+        offset = btrfs_find_space_for_alloc(cache,
+                                            0, 4096, 0,
+                                            &max_extent_size);
+        if (offset != (128 * 1024 * 1024 + 16 * 1024 * 1024)) {
+                test_msg("Failed to allocate 4Kb from space cache, returned offset is: %llu\n",
+                         offset);
+                return -EINVAL;
+        }
+
+        ret = check_cache_empty(cache);
+        if (ret)
+                return ret;
+
+        __btrfs_remove_free_space_cache(cache->free_space_ctl);
+
+        /*
+         * Now test a similar scenario, but where our extent entry is located
+         * to the right of the bitmap entry, so that we can check that stealing
+         * space from a bitmap to the front of an extent entry works.
+         */
+
+        /*
+         * Extent entry covering free space range [128Mb + 128Kb, 128Mb + 256Kb[
+         */
+        ret = test_add_free_space_entry(cache, 128 * 1024 * 1024 + 128 * 1024,
+                                        128 * 1024, 0);
+        if (ret) {
+                test_msg("Couldn't add extent entry %d\n", ret);
+                return ret;
+        }
+
+        /* Bitmap entry covering free space range [0, 128Mb - 512Kb[ */
+        ret = test_add_free_space_entry(cache, 0,
+                                        128 * 1024 * 1024 - 512 * 1024, 1);
+        if (ret) {
+                test_msg("Couldn't add bitmap entry %d\n", ret);
+                return ret;
+        }
+
+        ret = check_num_extents_and_bitmaps(cache, 2, 1);
+        if (ret)
+                return ret;
+
+        /*
+         * Now make only the last 256Kb of the bitmap marked as free, so that
+         * we end up with only the following ranges marked as free space:
+         *
+         * [128Mb + 128Kb, 128Mb + 256Kb[
+         * [128Mb - 768Kb, 128Mb - 512Kb[
+         */
+        ret = btrfs_remove_free_space(cache,
+                                      0,
+                                      128 * 1024 * 1024 - 768 * 1024);
+        if (ret) {
+                test_msg("Failed to free part of bitmap space %d\n", ret);
+                return ret;
+        }
+
+        /* Confirm that only those 2 ranges are marked as free. */
+        if (!test_check_exists(cache, 128 * 1024 * 1024 + 128 * 1024,
+                               128 * 1024)) {
+                test_msg("Free space range missing\n");
+                return -ENOENT;
+        }
+        if (!test_check_exists(cache, 128 * 1024 * 1024 - 768 * 1024,
+                               256 * 1024)) {
+                test_msg("Free space range missing\n");
+                return -ENOENT;
+        }
+
+        /*
+         * Confirm that the bitmap range [0, 128Mb - 768Kb[ isn't marked
+         * as free anymore.
+         */
+        if (test_check_exists(cache, 0,
+                              128 * 1024 * 1024 - 768 * 1024)) {
+                test_msg("Bitmap region not removed from space cache\n");
+                return -EINVAL;
+        }
+
+        /*
+         * Confirm that the region [128Mb - 512Kb, 128Mb[, which is
+         * covered by the bitmap, isn't marked as free.
+         */
+        if (test_check_exists(cache, 128 * 1024 * 1024 - 512 * 1024,
+                              512 * 1024)) {
+                test_msg("Invalid bitmap region marked as free\n");
+                return -EINVAL;
+        }
+
+        /*
+         * Now let's mark the region [128Mb - 512Kb, 128Mb[ as free too. But,
+         * let's make sure the free space cache marks it as free in the bitmap,
+         * and doesn't insert a new extent entry to represent this region.
+         */
+        ret = btrfs_add_free_space(cache, 128 * 1024 * 1024 - 512 * 1024,
+                                   512 * 1024);
+        if (ret) {
+                test_msg("Error adding free space: %d\n", ret);
+                return ret;
+        }
+        /* Confirm the region is marked as free. */
+        if (!test_check_exists(cache, 128 * 1024 * 1024 - 512 * 1024,
+                               512 * 1024)) {
+                test_msg("Bitmap region not marked as free\n");
+                return -ENOENT;
+        }
+
+        /*
+         * Confirm that no new extent entries or bitmap entries were added to
+         * the cache after adding that free space region.
+         */
+        ret = check_num_extents_and_bitmaps(cache, 2, 1);
+        if (ret)
+                return ret;
+
+        /*
+         * Now let's add a small free space region to the left of the previous
+         * one, which is not contiguous with it and is part of the bitmap too.
+         * The goal is to test that the bitmap entry space stealing doesn't
+         * steal this space region.
+         */
+        ret = btrfs_add_free_space(cache, 32 * 1024 * 1024, 8192);
+        if (ret) {
+                test_msg("Error adding free space: %d\n", ret);
+                return ret;
+        }
+
+        /*
+         * Now mark the region [128Mb, 128Mb + 128Kb[ as free too. This will
+         * expand the range covered by the existing extent entry that represents
+         * the free space [128Mb + 128Kb, 128Mb + 256Kb[.
+         */
+        ret = btrfs_add_free_space(cache, 128 * 1024 * 1024, 128 * 1024);
+        if (ret) {
+                test_msg("Error adding free space: %d\n", ret);
+                return ret;
+        }
+        /* Confirm the region is marked as free. */
+        if (!test_check_exists(cache, 128 * 1024 * 1024, 128 * 1024)) {
+                test_msg("Extent region not marked as free\n");
+                return -ENOENT;
+        }
+
+        /*
+         * Confirm that our extent entry didn't steal all the free space from
+         * the bitmap, because of the small 8Kb free space region.
+         */
+        ret = check_num_extents_and_bitmaps(cache, 2, 1);
+        if (ret)
+                return ret;
+
+        /*
+         * So now we have the range [128Mb - 768Kb, 128Mb + 256Kb[ as free
+         * space. Without stealing bitmap free space into extent entry space,
+         * we would have all this free space represented by 2 entries in the
+         * cache:
+         *
+         * extent entry covering range: [128Mb, 128Mb + 256Kb[
+         * bitmap entry covering range: [128Mb - 768Kb, 128Mb[
+         *
+         * Attempting to allocate the whole free space (1Mb) would fail, because
+         * we can't allocate from multiple entries.
+         * With the bitmap free space stealing, we get a single extent entry
+         * that represents the 1Mb free space, and therefore we're able to
+         * allocate the whole free space at once.
+         */
+        if (!test_check_exists(cache, 128 * 1024 * 1024 - 768 * 1024,
+                               1 * 1024 * 1024)) {
+                test_msg("Expected region not marked as free\n");
+                return -ENOENT;
+        }
+
+        if (cache->free_space_ctl->free_space != (1 * 1024 * 1024 + 8192)) {
+                test_msg("Cache free space is not 1Mb + 8Kb\n");
+                return -EINVAL;
+        }
+
+        offset = btrfs_find_space_for_alloc(cache,
+                                            0, 1 * 1024 * 1024, 0,
+                                            &max_extent_size);
+        if (offset != (128 * 1024 * 1024 - 768 * 1024)) {
+                test_msg("Failed to allocate 1Mb from space cache, returned offset is: %llu\n",
+                         offset);
+                return -EINVAL;
+        }
+
+        /* All that remains is an 8Kb free space region in a bitmap. Confirm. */
+        ret = check_num_extents_and_bitmaps(cache, 1, 1);
+        if (ret)
+                return ret;
+
+        if (cache->free_space_ctl->free_space != 8192) {
+                test_msg("Cache free space is not 8Kb\n");
+                return -EINVAL;
+        }
+
+        offset = btrfs_find_space_for_alloc(cache,
+                                            0, 8192, 0,
+                                            &max_extent_size);
+        if (offset != (32 * 1024 * 1024)) {
+                test_msg("Failed to allocate 8Kb from space cache, returned offset is: %llu\n",
+                         offset);
+                return -EINVAL;
+        }
+
+        ret = check_cache_empty(cache);
+        if (ret)
+                return ret;
+
+        cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
+        __btrfs_remove_free_space_cache(cache->free_space_ctl);
+
+        return 0;
+}
+
 int btrfs_test_free_space_cache(void)
 {
         struct btrfs_block_group_cache *cache;
@@ -386,6 +898,8 @@ int btrfs_test_free_space_cache(void)
         ret = test_bitmaps_and_extents(cache);
         if (ret)
                 goto out;
+
+        ret = test_steal_space_from_bitmap_to_extent(cache);
 out:
         __btrfs_remove_free_space_cache(cache->free_space_ctl);
         kfree(cache->free_space_ctl);
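For reference, these sanity tests run from btrfs' module init path when the
kernel is built with the self-test option enabled; a minimal .config
fragment (build configuration, not part of the patch):

    CONFIG_BTRFS_FS=y
    CONFIG_BTRFS_FS_RUN_SANITY_TESTS=y

With that enabled, the test_msg() lines above (e.g. "Running space stealing
from bitmap to extent") should appear in dmesg when the module loads, and a
failing test should cause module initialization to fail.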