Diffstat (limited to 'fs/btrfs/free-space-cache.c')

 fs/btrfs/free-space-cache.c | 162 ++++++++++++++++++++----------
 1 file changed, 100 insertions(+), 62 deletions(-)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 60d684266959..a0390657451b 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -987,11 +987,18 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
 	return entry;
 }
 
-static void unlink_free_space(struct btrfs_block_group_cache *block_group,
-			      struct btrfs_free_space *info)
+static inline void
+__unlink_free_space(struct btrfs_block_group_cache *block_group,
+		    struct btrfs_free_space *info)
 {
 	rb_erase(&info->offset_index, &block_group->free_space_offset);
 	block_group->free_extents--;
+}
+
+static void unlink_free_space(struct btrfs_block_group_cache *block_group,
+			      struct btrfs_free_space *info)
+{
+	__unlink_free_space(block_group, info);
 	block_group->free_space -= info->bytes;
 }
 
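[Reviewer note, not part of the patch] The bare __unlink_free_space() exists for callers that unlink an entry only to fold its bytes into a neighbouring entry that stays in the tree: those bytes remain free, so block_group->free_space must not be decremented. try_merge_free_space() below picks the variant via its update_stat argument. A minimal userspace model of the hazard being avoided; every name in it is hypothetical:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t free_space = 8192;   /* stands in for block_group->free_space */

    /* stands in for unlink_free_space() vs. __unlink_free_space() */
    static void unlink_entry(uint64_t bytes, int update_stat)
    {
            /* rb_erase() and the free_extents-- would happen here */
            if (update_stat)
                    free_space -= bytes;
    }

    int main(void)
    {
            /* absorb a 4K neighbour that is about to be re-linked as part
             * of a larger extent: the 4K is still free, so the stat must
             * stay untouched */
            unlink_entry(4096, 0);
            assert(free_space == 8192);
            return 0;
    }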
@@ -1016,14 +1023,18 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
 	u64 max_bytes;
 	u64 bitmap_bytes;
 	u64 extent_bytes;
+	u64 size = block_group->key.offset;
 
 	/*
 	 * The goal is to keep the total amount of memory used per 1gb of space
 	 * at or below 32k, so we need to adjust how much memory we allow to be
 	 * used by extent based free space tracking
 	 */
-	max_bytes = MAX_CACHE_BYTES_PER_GIG *
-		(div64_u64(block_group->key.offset, 1024 * 1024 * 1024));
+	if (size < 1024 * 1024 * 1024)
+		max_bytes = MAX_CACHE_BYTES_PER_GIG;
+	else
+		max_bytes = MAX_CACHE_BYTES_PER_GIG *
+			div64_u64(size, 1024 * 1024 * 1024);
 
 	/*
 	 * we want to account for 1 more bitmap than what we have so we can make
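[Reviewer note, not part of the patch] The rewrite fixes an integer-truncation bug: div64_u64() rounds down, so for any block group smaller than 1 GiB the old expression multiplied by zero and max_bytes came out 0. A standalone sketch of the before/after arithmetic, assuming MAX_CACHE_BYTES_PER_GIG is 32 KiB as the "at or below 32k" comment suggests:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_CACHE_BYTES_PER_GIG (32 * 1024)  /* assumed value */
    #define GIG (1024ULL * 1024 * 1024)

    int main(void)
    {
            uint64_t size = 256ULL * 1024 * 1024;   /* a 256 MiB block group */

            /* old: truncating division yields 0 for any group under 1 GiB */
            uint64_t old_max = MAX_CACHE_BYTES_PER_GIG * (size / GIG);

            /* new: small groups get one full per-gigabyte allowance */
            uint64_t new_max = size < GIG ?
                    MAX_CACHE_BYTES_PER_GIG :
                    MAX_CACHE_BYTES_PER_GIG * (size / GIG);

            /* prints: old max_bytes = 0, new max_bytes = 32768 */
            printf("old max_bytes = %llu, new max_bytes = %llu\n",
                   (unsigned long long)old_max, (unsigned long long)new_max);
            return 0;
    }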
@@ -1171,6 +1182,16 @@ static void add_new_bitmap(struct btrfs_block_group_cache *block_group,
 	recalculate_thresholds(block_group);
 }
 
+static void free_bitmap(struct btrfs_block_group_cache *block_group,
+			struct btrfs_free_space *bitmap_info)
+{
+	unlink_free_space(block_group, bitmap_info);
+	kfree(bitmap_info->bitmap);
+	kfree(bitmap_info);
+	block_group->total_bitmaps--;
+	recalculate_thresholds(block_group);
+}
+
 static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
 				       struct btrfs_free_space *bitmap_info,
 				       u64 *offset, u64 *bytes)
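[Reviewer note, not part of the patch] free_bitmap() factors out the identical five-line teardown (unlink, free the bitmap page, free the entry, drop total_bitmaps, recalculate thresholds) that this patch deletes from two branches of remove_from_bitmap() and from btrfs_find_space_for_alloc() below; btrfs_alloc_from_bitmap() gains a fourth call site.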
@@ -1195,6 +1216,7 @@ again:
 	 */
 	search_start = *offset;
 	search_bytes = *bytes;
+	search_bytes = min(search_bytes, end - search_start + 1);
 	ret = search_bitmap(block_group, bitmap_info, &search_start,
 			    &search_bytes);
 	BUG_ON(ret < 0 || search_start != *offset);
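[Reviewer note, not part of the patch] The min() clamp appears to keep the search within the range this bitmap covers (end being its last byte): without it, a removal that straddles the boundary into the next bitmap would make search_bitmap() fail and trip the BUG_ON, even though the again: loop already handles the straddling case by retrying on the following bitmap.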
@@ -1211,13 +1233,8 @@ again:
 
 	if (*bytes) {
 		struct rb_node *next = rb_next(&bitmap_info->offset_index);
-		if (!bitmap_info->bytes) {
-			unlink_free_space(block_group, bitmap_info);
-			kfree(bitmap_info->bitmap);
-			kfree(bitmap_info);
-			block_group->total_bitmaps--;
-			recalculate_thresholds(block_group);
-		}
+		if (!bitmap_info->bytes)
+			free_bitmap(block_group, bitmap_info);
 
 		/*
 		 * no entry after this bitmap, but we still have bytes to
@@ -1250,13 +1267,8 @@ again:
 			return -EAGAIN;
 
 		goto again;
-	} else if (!bitmap_info->bytes) {
-		unlink_free_space(block_group, bitmap_info);
-		kfree(bitmap_info->bitmap);
-		kfree(bitmap_info);
-		block_group->total_bitmaps--;
-		recalculate_thresholds(block_group);
-	}
+	} else if (!bitmap_info->bytes)
+		free_bitmap(block_group, bitmap_info);
 
 	return 0;
 }
@@ -1359,22 +1371,14 @@ out:
 	return ret;
 }
 
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
-			 u64 offset, u64 bytes)
+bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
+			  struct btrfs_free_space *info, bool update_stat)
 {
-	struct btrfs_free_space *right_info = NULL;
-	struct btrfs_free_space *left_info = NULL;
-	struct btrfs_free_space *info = NULL;
-	int ret = 0;
-
-	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
-	if (!info)
-		return -ENOMEM;
-
-	info->offset = offset;
-	info->bytes = bytes;
-
-	spin_lock(&block_group->tree_lock);
+	struct btrfs_free_space *left_info;
+	struct btrfs_free_space *right_info;
+	bool merged = false;
+	u64 offset = info->offset;
+	u64 bytes = info->bytes;
 
 	/*
 	 * first we want to see if there is free space adjacent to the range we
@@ -1388,37 +1392,62 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 	else
 		left_info = tree_search_offset(block_group, offset - 1, 0, 0);
 
-	/*
-	 * If there was no extent directly to the left or right of this new
-	 * extent then we know we're going to have to allocate a new extent, so
-	 * before we do that see if we need to drop this into a bitmap
-	 */
-	if ((!left_info || left_info->bitmap) &&
-	    (!right_info || right_info->bitmap)) {
-		ret = insert_into_bitmap(block_group, info);
-
-		if (ret < 0) {
-			goto out;
-		} else if (ret) {
-			ret = 0;
-			goto out;
-		}
-	}
-
 	if (right_info && !right_info->bitmap) {
-		unlink_free_space(block_group, right_info);
+		if (update_stat)
+			unlink_free_space(block_group, right_info);
+		else
+			__unlink_free_space(block_group, right_info);
 		info->bytes += right_info->bytes;
 		kfree(right_info);
+		merged = true;
 	}
 
 	if (left_info && !left_info->bitmap &&
 	    left_info->offset + left_info->bytes == offset) {
-		unlink_free_space(block_group, left_info);
+		if (update_stat)
+			unlink_free_space(block_group, left_info);
+		else
+			__unlink_free_space(block_group, left_info);
 		info->offset = left_info->offset;
 		info->bytes += left_info->bytes;
 		kfree(left_info);
+		merged = true;
 	}
 
+	return merged;
+}
+
+int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+			 u64 offset, u64 bytes)
+{
+	struct btrfs_free_space *info;
+	int ret = 0;
+
+	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+	if (!info)
+		return -ENOMEM;
+
+	info->offset = offset;
+	info->bytes = bytes;
+
+	spin_lock(&block_group->tree_lock);
+
+	if (try_merge_free_space(block_group, info, true))
+		goto link;
+
+	/*
+	 * There was no extent directly to the left or right of this new
+	 * extent then we know we're going to have to allocate a new extent, so
+	 * before we do that see if we need to drop this into a bitmap
+	 */
+	ret = insert_into_bitmap(block_group, info);
+	if (ret < 0) {
+		goto out;
+	} else if (ret) {
+		ret = 0;
+		goto out;
+	}
+link:
 	ret = link_free_space(block_group, info);
 	if (ret)
 		kfree(info);
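[Reviewer note, not part of the patch] try_merge_free_space() absorbs a physically adjacent extent on each side of the new one before anything is inserted, so btrfs_add_free_space() only falls through to the bitmap path when no merge happened. A minimal userspace sketch of just the coalescing rule, with hypothetical types (the real function additionally skips bitmap entries and chooses unlink_free_space() vs. __unlink_free_space() via update_stat):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct span { uint64_t offset, bytes; bool valid; };

    /* absorb 'right' then 'left' into 'info' when they touch it exactly */
    static bool try_merge(struct span *info, struct span *left,
                          struct span *right)
    {
            bool merged = false;

            if (right->valid && info->offset + info->bytes == right->offset) {
                    info->bytes += right->bytes;
                    right->valid = false;
                    merged = true;
            }
            if (left->valid && left->offset + left->bytes == info->offset) {
                    info->offset = left->offset;
                    info->bytes += left->bytes;
                    left->valid = false;
                    merged = true;
            }
            return merged;
    }

    int main(void)
    {
            struct span left  = { 0,    4096, true };
            struct span right = { 8192, 4096, true };
            struct span info  = { 4096, 4096, true };

            try_merge(&info, &left, &right);
            /* prints: offset=0 bytes=12288 -- one extent instead of three */
            printf("offset=%llu bytes=%llu\n",
                   (unsigned long long)info.offset,
                   (unsigned long long)info.bytes);
            return 0;
    }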
@@ -1621,6 +1650,7 @@ __btrfs_return_cluster_to_free_space(
 		node = rb_next(&entry->offset_index);
 		rb_erase(&entry->offset_index, &cluster->root);
 		BUG_ON(entry->bitmap);
+		try_merge_free_space(block_group, entry, false);
 		tree_insert_offset(&block_group->free_space_offset,
 				   entry->offset, &entry->offset_index, 0);
 	}
@@ -1685,13 +1715,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 	ret = offset;
 	if (entry->bitmap) {
 		bitmap_clear_bits(block_group, entry, offset, bytes);
-		if (!entry->bytes) {
-			unlink_free_space(block_group, entry);
-			kfree(entry->bitmap);
-			kfree(entry);
-			block_group->total_bitmaps--;
-			recalculate_thresholds(block_group);
-		}
+		if (!entry->bytes)
+			free_bitmap(block_group, entry);
 	} else {
 		unlink_free_space(block_group, entry);
 		entry->offset += bytes;
@@ -1789,6 +1814,8 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
 
 	ret = search_start;
 	bitmap_clear_bits(block_group, entry, ret, bytes);
+	if (entry->bytes == 0)
+		free_bitmap(block_group, entry);
 out:
 	spin_unlock(&cluster->lock);
 	spin_unlock(&block_group->tree_lock);
@@ -1842,15 +1869,26 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
 		entry->offset += bytes;
 		entry->bytes -= bytes;
 
-		if (entry->bytes == 0) {
+		if (entry->bytes == 0)
 			rb_erase(&entry->offset_index, &cluster->root);
-			kfree(entry);
-		}
 		break;
 	}
 out:
 	spin_unlock(&cluster->lock);
 
+	if (!ret)
+		return 0;
+
+	spin_lock(&block_group->tree_lock);
+
+	block_group->free_space -= bytes;
+	if (entry->bytes == 0) {
+		block_group->free_extents--;
+		kfree(entry);
+	}
+
+	spin_unlock(&block_group->tree_lock);
+
 	return ret;
 }
 
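[Reviewer note, not part of the patch] Two behavioural changes in this last hunk: the free_space/free_extents accounting and the kfree() of an exhausted entry now happen after cluster->lock is dropped, under tree_lock; and block_group->free_space is charged at cluster-allocation time rather than when entries enter the cluster. The latter pairs with the update_stat == false merge in __btrfs_return_cluster_to_free_space() above, since bytes still sitting in a cluster continue to count as free.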