Diffstat (limited to 'fs/btrfs/free-space-cache.c')
-rw-r--r--	fs/btrfs/free-space-cache.c	62
1 file changed, 51 insertions(+), 11 deletions(-)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 0be7a8742a43..1f84fc09c1a8 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1356,6 +1356,8 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
 	u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
 	int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
 
+	max_bitmaps = max(max_bitmaps, 1);
+
 	BUG_ON(ctl->total_bitmaps > max_bitmaps);
 
 	/*
@@ -1463,10 +1465,14 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
 }
 
 static struct btrfs_free_space *
-find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
+find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
+		unsigned long align)
 {
 	struct btrfs_free_space *entry;
 	struct rb_node *node;
+	u64 ctl_off;
+	u64 tmp;
+	u64 align_off;
 	int ret;
 
 	if (!ctl->free_space_offset.rb_node)
@@ -1481,15 +1487,34 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
 		if (entry->bytes < *bytes)
 			continue;
 
+		/* make sure the space returned is big enough
+		 * to match our requested alignment
+		 */
+		if (*bytes >= align) {
+			ctl_off = entry->offset - ctl->start;
+			tmp = ctl_off + align - 1;
+			do_div(tmp, align);
+			tmp = tmp * align + ctl->start;
+			align_off = tmp - entry->offset;
+		} else {
+			align_off = 0;
+			tmp = entry->offset;
+		}
+
+		if (entry->bytes < *bytes + align_off)
+			continue;
+
 		if (entry->bitmap) {
-			ret = search_bitmap(ctl, entry, offset, bytes);
-			if (!ret)
+			ret = search_bitmap(ctl, entry, &tmp, bytes);
+			if (!ret) {
+				*offset = tmp;
 				return entry;
+			}
 			continue;
 		}
 
-		*offset = entry->offset;
-		*bytes = entry->bytes;
+		*offset = tmp;
+		*bytes = entry->bytes - align_off;
 		return entry;
 	}
 
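The rounding above is done relative to ctl->start rather than to absolute disk offset zero, so alignment is measured from the start of the block group. A minimal userspace sketch of the same arithmetic, assuming illustrative values for ctl->start and the alignment, and using plain 64-bit division in place of the kernel's do_div():

#include <stdint.h>
#include <stdio.h>

/* Round 'offset' up to the next 'align' boundary measured from 'base';
 * plain division stands in for the kernel's do_div() helper. */
static uint64_t align_up_from_base(uint64_t offset, uint64_t base, uint64_t align)
{
	uint64_t off = offset - base;			/* ctl_off in the patch */
	uint64_t tmp = (off + align - 1) / align;	/* whole stripes, rounded up */

	return tmp * align + base;			/* back to an absolute offset */
}

int main(void)
{
	uint64_t base  = 1ULL << 20;	/* hypothetical ctl->start */
	uint64_t align = 64ULL << 10;	/* hypothetical full_stripe_len (64KiB) */

	/* unaligned extent start: a 60416-byte gap must be skipped */
	uint64_t a = align_up_from_base(base + 5120, base, align);
	/* extent start already on a stripe boundary: the gap is zero */
	uint64_t b = align_up_from_base(base + 2 * align, base, align);

	printf("align_off: %llu\n", (unsigned long long)(a - (base + 5120)));
	printf("align_off: %llu\n", (unsigned long long)(b - (base + 2 * align)));
	return 0;
}

Note the *bytes >= align guard in the hunk: requests smaller than the alignment skip the rounding entirely, so the align_off bookkeeping only kicks in for allocations at least one full stripe long.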
@@ -1636,10 +1661,14 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
 	}
 
 	/*
-	 * some block groups are so tiny they can't be enveloped by a bitmap, so
-	 * don't even bother to create a bitmap for this
+	 * The original block groups from mkfs can be really small, like 8
+	 * megabytes, so don't bother with a bitmap for those entries.
+	 * However, some block groups can be smaller than what a bitmap
+	 * would cover but are still large enough that they could overflow
+	 * the 32k memory limit, so still allow those block groups to have
+	 * a bitmap entry.
 	 */
-	if (BITS_PER_BITMAP * ctl->unit > block_group->key.offset)
+	if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
 		return false;
 
 	return true;
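To see what the halved threshold buys, assume 4KiB pages and a 4KiB sectorsize, so BITS_PER_BITMAP is 32768 and ctl->unit is 4096 (both values are assumptions for illustration, not taken from the patch). One bitmap then covers 128MiB, and the size below which a block group is refused a bitmap drops from 128MiB to 64MiB:

#include <stdio.h>

int main(void)
{
	/* assumed: BITS_PER_BITMAP = 4096 * 8 bits, ctl->unit = 4096 bytes */
	unsigned long long coverage = 4096ULL * 8 * 4096;

	printf("bitmap coverage: %llu MiB\n", coverage >> 20);	/* 128 */
	printf("old cutoff:      %llu MiB\n", coverage >> 20);	/* 128 */
	printf("new cutoff:      %llu MiB\n", coverage >> 21);	/* 64 */
	return 0;
}

Under these assumed sizes, an 8MiB mkfs block group still falls below either cutoff and keeps plain extent entries, while a 100MiB block group now qualifies for a bitmap.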
@@ -2095,9 +2124,12 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 	struct btrfs_free_space *entry = NULL;
 	u64 bytes_search = bytes + empty_size;
 	u64 ret = 0;
+	u64 align_gap = 0;
+	u64 align_gap_len = 0;
 
 	spin_lock(&ctl->tree_lock);
-	entry = find_free_space(ctl, &offset, &bytes_search);
+	entry = find_free_space(ctl, &offset, &bytes_search,
+				block_group->full_stripe_len);
 	if (!entry)
 		goto out;
2103 2135
@@ -2107,9 +2139,15 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 		if (!entry->bytes)
 			free_bitmap(ctl, entry);
 	} else {
+
 		unlink_free_space(ctl, entry);
-		entry->offset += bytes;
-		entry->bytes -= bytes;
+		align_gap_len = offset - entry->offset;
+		align_gap = entry->offset;
+
+		entry->offset = offset + bytes;
+		WARN_ON(entry->bytes < bytes + align_gap_len);
+
+		entry->bytes -= bytes + align_gap_len;
 		if (!entry->bytes)
 			kmem_cache_free(btrfs_free_space_cachep, entry);
 		else
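For a non-bitmap entry the allocation may no longer begin at entry->offset, so the unaligned head of the extent is carved off as align_gap and the entry keeps only the tail past the allocation. A minimal sketch of that split, with every concrete value hypothetical:

#include <stdint.h>
#include <stdio.h>

/* toy stand-in for struct btrfs_free_space, for illustration only */
struct toy_entry {
	uint64_t offset;
	uint64_t bytes;
};

int main(void)
{
	struct toy_entry entry = { .offset = 5120, .bytes = 256ULL << 10 };
	uint64_t offset = 65536;	/* aligned result from find_free_space() */
	uint64_t bytes  = 16ULL << 10;	/* size of this allocation */

	/* same bookkeeping as the hunk above */
	uint64_t align_gap_len = offset - entry.offset;	/* 60416-byte head */
	uint64_t align_gap = entry.offset;

	entry.offset = offset + bytes;
	entry.bytes -= bytes + align_gap_len;

	printf("gap       [%llu, +%llu) goes back to the tree\n",
	       (unsigned long long)align_gap, (unsigned long long)align_gap_len);
	printf("remainder [%llu, +%llu) stays in the entry\n",
	       (unsigned long long)entry.offset, (unsigned long long)entry.bytes);
	return 0;
}

The final hunk below then hands the gap back via __btrfs_add_free_space() only after spin_unlock(), presumably because that helper acquires ctl->tree_lock itself.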
@@ -2119,6 +2157,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 out:
 	spin_unlock(&ctl->tree_lock);
 
+	if (align_gap_len)
+		__btrfs_add_free_space(ctl, align_gap, align_gap_len);
 	return ret;
 }
 