author     Miao Xie <miaox@cn.fujitsu.com>          2013-09-09 01:19:42 -0400
committer  Chris Mason <chris.mason@fusionio.com>   2013-09-21 11:05:23 -0400
commit     a482039889b85c45fc9616e65d560db7a35d4f54 (patch)
tree       58482aa49aa50c30cdd2e7b496f30dd1c11ac860 /fs
parent     13fd8da98f79317d26277360d510caa1edf9bab3 (diff)
Btrfs: allocate the free space by the existed max extent size when ENOSPC
With the current code, if the requested size is very large and all the extents
in the free space cache are small, we waste a lot of CPU time cutting the
requested size in half and searching the cache again and again until it gets
down to a size the allocator can return. In fact, we already know the max
extent size in the cache after the first search, so we need not cut the size
in half repeatedly; we can just use that max extent size directly. This saves
a lot of CPU time and improves performance when the free space cache contains
only fragments.
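As a rough illustration of the retry behaviour (a standalone user-space sketch,
not btrfs code: MIN_ALLOC, SECTOR and both helper functions are made-up
stand-ins), the model below compares the old halve-and-retry loop with clamping
the next request to the max extent size that find_free_extent now reports back
through ins->offset on -ENOSPC:

/*
 * Standalone model, not kernel code.  MIN_ALLOC and SECTOR are illustrative
 * constants; the two helpers only count how many times the allocator would
 * have to shrink the request and search the free space cache again.
 */
#include <stdio.h>

#define MIN_ALLOC	(256 * 1024)	/* stand-in for min_alloc_size */
#define SECTOR		4096		/* stand-in for root->sectorsize */

static unsigned long long round_down_to(unsigned long long n,
					unsigned long long align)
{
	return n - (n % align);
}

/* Old strategy: keep halving the request until it fits or hits the minimum. */
static int retries_halving(unsigned long long req, unsigned long long max_extent)
{
	int tries = 0;

	while (req > max_extent && req > MIN_ALLOC) {
		req = round_down_to(req >> 1, SECTOR);
		if (req < MIN_ALLOC)
			req = MIN_ALLOC;
		tries++;
	}
	return tries;
}

/*
 * New strategy: clamp the next request to the reported max extent size,
 * mirroring num_bytes = min(num_bytes >> 1, ins->offset) in the patch.
 */
static int retries_clamped(unsigned long long req, unsigned long long max_extent)
{
	int tries = 0;

	while (req > max_extent && req > MIN_ALLOC) {
		unsigned long long next = req >> 1;

		if (next > max_extent)
			next = max_extent;
		req = round_down_to(next, SECTOR);
		if (req < MIN_ALLOC)
			req = MIN_ALLOC;
		tries++;
	}
	return tries;
}

int main(void)
{
	unsigned long long req = 128ULL * 1024 * 1024;	/* 128MB request */
	unsigned long long frag = 4096;			/* only 4KB free extents */

	printf("halving: %d retries, clamped: %d retries\n",
	       retries_halving(req, frag), retries_clamped(req, frag));
	return 0;
}

With a 128MB request and nothing but 4KB fragments, the halving loop walks
through nine sizes before bottoming out at the minimum, while the clamped
version falls back to the minimum after a single failed search; that is the
effect behind the timing numbers below.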
According to my test, if there are only 4KB free space extents in the fs and
the total size of those extents is 256MB, the execution time of the following
test drops from 5.4s to 1.4s.
dd if=/dev/zero of=<testfile> bs=1MB count=1 oflag=sync
Changelog v2 -> v3:
- fix the problem that we skipped block groups whose free space is less than
  we need.
Changelog v1 -> v2:
- address the problem that we returned a wrong start position when searching
  for free space in a bitmap.
Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/extent-tree.c      | 33
-rw-r--r--  fs/btrfs/free-space-cache.c | 67
-rw-r--r--  fs/btrfs/free-space-cache.h |  5
3 files changed, 74 insertions(+), 31 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index cfb3cf711b34..2f03181b777f 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -6117,10 +6117,13 @@ enum btrfs_loop_type {
 /*
  * walks the btree of allocated extents and find a hole of a given size.
  * The key ins is changed to record the hole:
- * ins->objectid == block start
+ * ins->objectid == start position
  * ins->flags = BTRFS_EXTENT_ITEM_KEY
- * ins->offset == number of blocks
+ * ins->offset == the size of the hole.
  * Any available blocks before search_start are skipped.
+ *
+ * If there is no suitable free space, we will record the max size of
+ * the free space extent currently.
  */
 static noinline int find_free_extent(struct btrfs_root *orig_root,
				     u64 num_bytes, u64 empty_size,
@@ -6133,6 +6136,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
	struct btrfs_block_group_cache *block_group = NULL;
	struct btrfs_block_group_cache *used_block_group;
	u64 search_start = 0;
+	u64 max_extent_size = 0;
	int empty_cluster = 2 * 1024 * 1024;
	struct btrfs_space_info *space_info;
	int loop = 0;
@@ -6292,7 +6296,10 @@ have_block_group:
			btrfs_get_block_group(used_block_group);

			offset = btrfs_alloc_from_cluster(used_block_group,
-					last_ptr, num_bytes, used_block_group->key.objectid);
+						last_ptr,
+						num_bytes,
+						used_block_group->key.objectid,
+						&max_extent_size);
			if (offset) {
				/* we have a block, we're done */
				spin_unlock(&last_ptr->refill_lock);
@@ -6355,8 +6362,10 @@ refill_cluster:
				 * cluster
				 */
				offset = btrfs_alloc_from_cluster(block_group,
-							last_ptr, num_bytes,
-							search_start);
+							last_ptr,
+							num_bytes,
+							search_start,
+							&max_extent_size);
				if (offset) {
					/* we found one, proceed */
					spin_unlock(&last_ptr->refill_lock);
@@ -6391,13 +6400,18 @@ unclustered_alloc:
		if (cached &&
		    block_group->free_space_ctl->free_space <
		    num_bytes + empty_cluster + empty_size) {
+			if (block_group->free_space_ctl->free_space >
+			    max_extent_size)
+				max_extent_size =
+					block_group->free_space_ctl->free_space;
			spin_unlock(&block_group->free_space_ctl->tree_lock);
			goto loop;
		}
		spin_unlock(&block_group->free_space_ctl->tree_lock);

		offset = btrfs_find_space_for_alloc(block_group, search_start,
-						    num_bytes, empty_size);
+						    num_bytes, empty_size,
+						    &max_extent_size);
		/*
		 * If we didn't find a chunk, and we haven't failed on this
		 * block group before, and this block group is in the middle of
@@ -6515,7 +6529,8 @@ loop:
		ret = 0;
	}
 out:
-
+	if (ret == -ENOSPC)
+		ins->offset = max_extent_size;
	return ret;
 }

@@ -6573,8 +6588,8 @@ again:
					       flags);

	if (ret == -ENOSPC) {
-		if (!final_tried) {
-			num_bytes = num_bytes >> 1;
+		if (!final_tried && ins->offset) {
+			num_bytes = min(num_bytes >> 1, ins->offset);
			num_bytes = round_down(num_bytes, root->sectorsize);
			num_bytes = max(num_bytes, min_alloc_size);
			if (num_bytes == min_alloc_size)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index ef3bea7bb257..4f419bafd071 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1433,13 +1433,19 @@ static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
	ctl->free_space += bytes;
 }

+/*
+ * If we can not find suitable extent, we will use bytes to record
+ * the size of the max extent.
+ */
 static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes)
 {
	unsigned long found_bits = 0;
+	unsigned long max_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;
+	unsigned long extent_bits;

	i = offset_to_bit(bitmap_info->offset, ctl->unit,
			  max_t(u64, *offset, bitmap_info->offset));
@@ -1448,9 +1454,12 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
	for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
-		if ((next_zero - i) >= bits) {
-			found_bits = next_zero - i;
+		extent_bits = next_zero - i;
+		if (extent_bits >= bits) {
+			found_bits = extent_bits;
			break;
+		} else if (extent_bits > max_bits) {
+			max_bits = extent_bits;
		}
		i = next_zero;
	}
@@ -1461,38 +1470,41 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
		return 0;
	}

+	*bytes = (u64)(max_bits) * ctl->unit;
	return -1;
 }

+/* Cache the size of the max extent in bytes */
 static struct btrfs_free_space *
 find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
-		unsigned long align)
+		unsigned long align, u64 *max_extent_size)
 {
	struct btrfs_free_space *entry;
	struct rb_node *node;
-	u64 ctl_off;
	u64 tmp;
	u64 align_off;
	int ret;

	if (!ctl->free_space_offset.rb_node)
-		return NULL;
+		goto out;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
	if (!entry)
-		return NULL;
+		goto out;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
-		if (entry->bytes < *bytes)
+		if (entry->bytes < *bytes) {
+			if (entry->bytes > *max_extent_size)
+				*max_extent_size = entry->bytes;
			continue;
+		}

		/* make sure the space returned is big enough
		 * to match our requested alignment
		 */
		if (*bytes >= align) {
-			ctl_off = entry->offset - ctl->start;
-			tmp = ctl_off + align - 1;;
+			tmp = entry->offset - ctl->start + align - 1;
			do_div(tmp, align);
			tmp = tmp * align + ctl->start;
			align_off = tmp - entry->offset;
@@ -1501,14 +1513,22 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
			tmp = entry->offset;
		}

-		if (entry->bytes < *bytes + align_off)
+		if (entry->bytes < *bytes + align_off) {
+			if (entry->bytes > *max_extent_size)
+				*max_extent_size = entry->bytes;
			continue;
+		}

		if (entry->bitmap) {
-			ret = search_bitmap(ctl, entry, &tmp, bytes);
+			u64 size = *bytes;
+
+			ret = search_bitmap(ctl, entry, &tmp, &size);
			if (!ret) {
				*offset = tmp;
+				*bytes = size;
				return entry;
+			} else if (size > *max_extent_size) {
+				*max_extent_size = size;
			}
			continue;
		}
@@ -1517,7 +1537,7 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
		*bytes = entry->bytes - align_off;
		return entry;
	}
-
+out:
	return NULL;
 }

@@ -2118,7 +2138,8 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
 }

 u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
-			       u64 offset, u64 bytes, u64 empty_size)
+			       u64 offset, u64 bytes, u64 empty_size,
+			       u64 *max_extent_size)
 {
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
@@ -2129,7 +2150,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,

	spin_lock(&ctl->tree_lock);
	entry = find_free_space(ctl, &offset, &bytes_search,
-				block_group->full_stripe_len);
+				block_group->full_stripe_len, max_extent_size);
	if (!entry)
		goto out;

@@ -2139,7 +2160,6 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
		if (!entry->bytes)
			free_bitmap(ctl, entry);
	} else {
-
		unlink_free_space(ctl, entry);
		align_gap_len = offset - entry->offset;
		align_gap = entry->offset;
@@ -2153,7 +2173,6 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
		else
			link_free_space(ctl, entry);
	}
-
 out:
	spin_unlock(&ctl->tree_lock);

@@ -2208,7 +2227,8 @@ int btrfs_return_cluster_to_free_space(
 static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
				   struct btrfs_free_cluster *cluster,
				   struct btrfs_free_space *entry,
-				   u64 bytes, u64 min_start)
+				   u64 bytes, u64 min_start,
+				   u64 *max_extent_size)
 {
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int err;
@@ -2220,8 +2240,11 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
	search_bytes = bytes;

	err = search_bitmap(ctl, entry, &search_start, &search_bytes);
-	if (err)
+	if (err) {
+		if (search_bytes > *max_extent_size)
+			*max_extent_size = search_bytes;
		return 0;
+	}

	ret = search_start;
	__bitmap_clear_bits(ctl, entry, ret, bytes);
@@ -2236,7 +2259,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
  */
 u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
-			     u64 min_start)
+			     u64 min_start, u64 *max_extent_size)
 {
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
@@ -2256,6 +2279,9 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,

	entry = rb_entry(node, struct btrfs_free_space, offset_index);
	while(1) {
+		if (entry->bytes < bytes && entry->bytes > *max_extent_size)
+			*max_extent_size = entry->bytes;
+
		if (entry->bytes < bytes ||
		    (!entry->bitmap && entry->offset < min_start)) {
			node = rb_next(&entry->offset_index);
@@ -2269,7 +2295,8 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
		if (entry->bitmap) {
			ret = btrfs_alloc_from_bitmap(block_group,
						      cluster, entry, bytes,
-						      cluster->window_start);
+						      cluster->window_start,
+						      max_extent_size);
			if (ret == 0) {
				node = rb_next(&entry->offset_index);
				if (!node)
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index c74904167476..e737f92cf6d0 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -94,7 +94,8 @@ void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl);
 void btrfs_remove_free_space_cache(struct btrfs_block_group_cache
				   *block_group);
 u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
-			       u64 offset, u64 bytes, u64 empty_size);
+			       u64 offset, u64 bytes, u64 empty_size,
+			       u64 *max_extent_size);
 u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root);
 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes);
@@ -105,7 +106,7 @@ int btrfs_find_space_cluster(struct btrfs_root *root,
 void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster);
 u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
-			     u64 min_start);
+			     u64 min_start, u64 *max_extent_size);
 int btrfs_return_cluster_to_free_space(
				struct btrfs_block_group_cache *block_group,
				struct btrfs_free_cluster *cluster);