Diffstat (limited to 'fs/btrfs/free-space-cache.c')

 fs/btrfs/free-space-cache.c | 161 ++++++++++++++++++++++++------------
 1 file changed, 99 insertions(+), 62 deletions(-)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 60d684266959..a5501edc3c9f 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -987,11 +987,18 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
 	return entry;
 }
 
-static void unlink_free_space(struct btrfs_block_group_cache *block_group,
-			      struct btrfs_free_space *info)
+static inline void
+__unlink_free_space(struct btrfs_block_group_cache *block_group,
+		    struct btrfs_free_space *info)
 {
 	rb_erase(&info->offset_index, &block_group->free_space_offset);
 	block_group->free_extents--;
+}
+
+static void unlink_free_space(struct btrfs_block_group_cache *block_group,
+			      struct btrfs_free_space *info)
+{
+	__unlink_free_space(block_group, info);
 	block_group->free_space -= info->bytes;
 }
 
@@ -1016,14 +1023,18 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
 	u64 max_bytes;
 	u64 bitmap_bytes;
 	u64 extent_bytes;
+	u64 size = block_group->key.offset;
 
 	/*
 	 * The goal is to keep the total amount of memory used per 1gb of space
 	 * at or below 32k, so we need to adjust how much memory we allow to be
 	 * used by extent based free space tracking
 	 */
-	max_bytes = MAX_CACHE_BYTES_PER_GIG *
-		(div64_u64(block_group->key.offset, 1024 * 1024 * 1024));
+	if (size < 1024 * 1024 * 1024)
+		max_bytes = MAX_CACHE_BYTES_PER_GIG;
+	else
+		max_bytes = MAX_CACHE_BYTES_PER_GIG *
+			div64_u64(size, 1024 * 1024 * 1024);
 
 	/*
 	 * we want to account for 1 more bitmap than what we have so we can make
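A worked example of why the new branch is needed, assuming MAX_CACHE_BYTES_PER_GIG is the 32k figure the comment above refers to: for any block group smaller than 1GiB the old expression multiplied by zero.

	/* Before/after arithmetic for a 256MiB block group (sketch). */
	u64 size = 256ULL * 1024 * 1024;
	/* old: 32768 * div64_u64(size, 1G) == 32768 * 0 == 0
	 *      -> no memory allowed for extent tracking at all
	 * new: size < 1G, so max_bytes == MAX_CACHE_BYTES_PER_GIG,
	 *      i.e. small block groups get a full gigabyte's allowance */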
@@ -1171,6 +1182,16 @@ static void add_new_bitmap(struct btrfs_block_group_cache *block_group,
 	recalculate_thresholds(block_group);
 }
 
+static void free_bitmap(struct btrfs_block_group_cache *block_group,
+			struct btrfs_free_space *bitmap_info)
+{
+	unlink_free_space(block_group, bitmap_info);
+	kfree(bitmap_info->bitmap);
+	kfree(bitmap_info);
+	block_group->total_bitmaps--;
+	recalculate_thresholds(block_group);
+}
+
 static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
 			      struct btrfs_free_space *bitmap_info,
 			      u64 *offset, u64 *bytes)
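free_bitmap() consolidates the five-step teardown that the following hunks delete from three call sites. Every caller in this patch already holds block_group->tree_lock; a short usage sketch:

	/* Release a fully drained bitmap entry through the helper. */
	if (!entry->bytes)
		free_bitmap(block_group, entry);	/* unlink, kfree the bitmap
							 * and the entry, then
							 * total_bitmaps-- and
							 * recalculate_thresholds() */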
@@ -1211,13 +1232,8 @@ again:
 
 	if (*bytes) {
 		struct rb_node *next = rb_next(&bitmap_info->offset_index);
-		if (!bitmap_info->bytes) {
-			unlink_free_space(block_group, bitmap_info);
-			kfree(bitmap_info->bitmap);
-			kfree(bitmap_info);
-			block_group->total_bitmaps--;
-			recalculate_thresholds(block_group);
-		}
+		if (!bitmap_info->bytes)
+			free_bitmap(block_group, bitmap_info);
 
 		/*
 		 * no entry after this bitmap, but we still have bytes to
@@ -1250,13 +1266,8 @@ again:
 			return -EAGAIN;
 
 		goto again;
-	} else if (!bitmap_info->bytes) {
-		unlink_free_space(block_group, bitmap_info);
-		kfree(bitmap_info->bitmap);
-		kfree(bitmap_info);
-		block_group->total_bitmaps--;
-		recalculate_thresholds(block_group);
-	}
+	} else if (!bitmap_info->bytes)
+		free_bitmap(block_group, bitmap_info);
 
 	return 0;
 }
@@ -1359,22 +1370,14 @@ out:
 	return ret;
 }
 
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
-			 u64 offset, u64 bytes)
+bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
+			  struct btrfs_free_space *info, bool update_stat)
 {
-	struct btrfs_free_space *right_info = NULL;
-	struct btrfs_free_space *left_info = NULL;
-	struct btrfs_free_space *info = NULL;
-	int ret = 0;
-
-	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
-	if (!info)
-		return -ENOMEM;
-
-	info->offset = offset;
-	info->bytes = bytes;
-
-	spin_lock(&block_group->tree_lock);
+	struct btrfs_free_space *left_info;
+	struct btrfs_free_space *right_info;
+	bool merged = false;
+	u64 offset = info->offset;
+	u64 bytes = info->bytes;
 
 	/*
 	 * first we want to see if there is free space adjacent to the range we
@@ -1388,37 +1391,62 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 	else
 		left_info = tree_search_offset(block_group, offset - 1, 0, 0);
 
-	/*
-	 * If there was no extent directly to the left or right of this new
-	 * extent then we know we're going to have to allocate a new extent, so
-	 * before we do that see if we need to drop this into a bitmap
-	 */
-	if ((!left_info || left_info->bitmap) &&
-	    (!right_info || right_info->bitmap)) {
-		ret = insert_into_bitmap(block_group, info);
-
-		if (ret < 0) {
-			goto out;
-		} else if (ret) {
-			ret = 0;
-			goto out;
-		}
-	}
-
 	if (right_info && !right_info->bitmap) {
-		unlink_free_space(block_group, right_info);
+		if (update_stat)
+			unlink_free_space(block_group, right_info);
+		else
+			__unlink_free_space(block_group, right_info);
 		info->bytes += right_info->bytes;
 		kfree(right_info);
+		merged = true;
 	}
 
 	if (left_info && !left_info->bitmap &&
 	    left_info->offset + left_info->bytes == offset) {
-		unlink_free_space(block_group, left_info);
+		if (update_stat)
+			unlink_free_space(block_group, left_info);
+		else
+			__unlink_free_space(block_group, left_info);
 		info->offset = left_info->offset;
 		info->bytes += left_info->bytes;
 		kfree(left_info);
+		merged = true;
 	}
 
+	return merged;
+}
+
+int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+			 u64 offset, u64 bytes)
+{
+	struct btrfs_free_space *info;
+	int ret = 0;
+
+	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+	if (!info)
+		return -ENOMEM;
+
+	info->offset = offset;
+	info->bytes = bytes;
+
+	spin_lock(&block_group->tree_lock);
+
+	if (try_merge_free_space(block_group, info, true))
+		goto link;
+
+	/*
+	 * There was no extent directly to the left or right of this new
+	 * extent then we know we're going to have to allocate a new extent, so
+	 * before we do that see if we need to drop this into a bitmap
+	 */
+	ret = insert_into_bitmap(block_group, info);
+	if (ret < 0) {
+		goto out;
+	} else if (ret) {
+		ret = 0;
+		goto out;
+	}
+link:
 	ret = link_free_space(block_group, info);
 	if (ret)
 		kfree(info);
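try_merge_free_space() reports whether the new range was coalesced with a physically adjacent, non-bitmap neighbor; when it was, btrfs_add_free_space() links the grown entry directly and skips the bitmap fallback. An illustrative merge with made-up offsets:

	/* With existing free extents [0,4K) and [8K,12K), adding [4K,8K)
	 * absorbs the right neighbor (offset + bytes == 8K), then the left
	 * one (left->offset + left->bytes == 4K), leaving a single entry
	 * [0,12K) in the tree. */
	ret = btrfs_add_free_space(block_group, 4096, 4096);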
@@ -1621,6 +1649,7 @@ __btrfs_return_cluster_to_free_space(
 		node = rb_next(&entry->offset_index);
 		rb_erase(&entry->offset_index, &cluster->root);
 		BUG_ON(entry->bitmap);
+		try_merge_free_space(block_group, entry, false);
 		tree_insert_offset(&block_group->free_space_offset,
 				   entry->offset, &entry->offset_index, 0);
 	}
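update_stat is false here because, as far as these hunks show, the absorbed neighbors are already counted in block_group->free_space and the grown entry is re-inserted via tree_insert_offset(), which does no accounting; unlink_free_space() would subtract the neighbors' bytes with nothing ever adding them back. In sketch form:

	/* rbtree-only unlink for any merged neighbors ... */
	try_merge_free_space(block_group, entry, false);
	/* ... because this insert leaves the free_space counter alone. */
	tree_insert_offset(&block_group->free_space_offset,
			   entry->offset, &entry->offset_index, 0);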
@@ -1685,13 +1714,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 	ret = offset;
 	if (entry->bitmap) {
 		bitmap_clear_bits(block_group, entry, offset, bytes);
-		if (!entry->bytes) {
-			unlink_free_space(block_group, entry);
-			kfree(entry->bitmap);
-			kfree(entry);
-			block_group->total_bitmaps--;
-			recalculate_thresholds(block_group);
-		}
+		if (!entry->bytes)
+			free_bitmap(block_group, entry);
 	} else {
 		unlink_free_space(block_group, entry);
 		entry->offset += bytes;
@@ -1789,6 +1813,8 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
 
 	ret = search_start;
 	bitmap_clear_bits(block_group, entry, ret, bytes);
+	if (entry->bytes == 0)
+		free_bitmap(block_group, entry);
 out:
 	spin_unlock(&cluster->lock);
 	spin_unlock(&block_group->tree_lock);
@@ -1842,15 +1868,26 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
 		entry->offset += bytes;
 		entry->bytes -= bytes;
 
-		if (entry->bytes == 0) {
+		if (entry->bytes == 0)
 			rb_erase(&entry->offset_index, &cluster->root);
-			kfree(entry);
-		}
 		break;
 	}
 out:
 	spin_unlock(&cluster->lock);
 
+	if (!ret)
+		return 0;
+
+	spin_lock(&block_group->tree_lock);
+
+	block_group->free_space -= bytes;
+	if (entry->bytes == 0) {
+		block_group->free_extents--;
+		kfree(entry);
+	}
+
+	spin_unlock(&block_group->tree_lock);
+
 	return ret;
 }
 
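The tail of btrfs_alloc_from_cluster() now updates the space counters under tree_lock, the lock that protects them elsewhere in this file, after cluster->lock is dropped. One reading of the ordering the patch relies on (inferred from these hunks only):

	/* A drained entry was already erased from the cluster rbtree under
	 * cluster->lock, so it is unreachable here; entry->bytes is checked
	 * and the entry freed only after taking tree_lock. */
	spin_lock(&block_group->tree_lock);
	block_group->free_space -= bytes;	/* cluster space stayed counted as free */
	if (entry->bytes == 0) {
		block_group->free_extents--;
		kfree(entry);
	}
	spin_unlock(&block_group->tree_lock);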