path: root/fs/btrfs/free-space-cache.c
author	Miao Xie <miaox@cn.fujitsu.com>	2013-09-09 01:19:42 -0400
committer	Chris Mason <chris.mason@fusionio.com>	2013-09-21 11:05:23 -0400
commit	a482039889b85c45fc9616e65d560db7a35d4f54 (patch)
tree	58482aa49aa50c30cdd2e7b496f30dd1c11ac860 /fs/btrfs/free-space-cache.c
parent	13fd8da98f79317d26277360d510caa1edf9bab3 (diff)
Btrfs: allocate the free space by the existed max extent size when ENOSPC
With the current code, if the requested size is very large and all the extents in the free space cache are small, we waste lots of CPU time cutting the requested size in half and searching the cache again and again until it gets down to a size the allocator can return. In fact, we already know the max extent size in the cache after the first search, so we need not cut the size in half repeatedly; we can just use that max extent size directly. This saves a lot of CPU time and improves performance when the free space cache contains only fragments.

According to my test, if there are only 4KB free space extents in the fs and the total size of those extents is 256MB, the execution time of the following test drops from 5.4s to 1.4s.

dd if=/dev/zero of=<testfile> bs=1MB count=1 oflag=sync

Changelog v2 -> v3:
- fix the problem that we skip the block group whose space is less than we need

Changelog v1 -> v2:
- address the problem that we return a wrong start position when searching the free space in a bitmap

Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
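To make the optimization concrete, here is a minimal, self-contained sketch in plain C; lookup_free_extent, alloc_by_halving, alloc_by_max_extent and TOY_MAX_EXTENT are hypothetical names used only for illustration and are not btrfs code. It contrasts the old strategy (halve the request after every failed search) with the new one (retry once at the max extent size reported by the first failed search), using a toy cache that only ever holds 4KiB extents, as in the test above.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Toy model of a fragmented free-space cache: only 4KiB extents exist. */
#define TOY_MAX_EXTENT	4096ULL

static unsigned int lookups;	/* number of cache searches performed */

/*
 * Hypothetical stand-in for a free-space search: returns a granted size
 * of at least `bytes` (here always TOY_MAX_EXTENT), or 0 on failure while
 * recording the largest extent it saw.
 */
static u64 lookup_free_extent(u64 bytes, u64 *max_extent_size)
{
	lookups++;
	if (bytes <= TOY_MAX_EXTENT)
		return TOY_MAX_EXTENT;
	if (TOY_MAX_EXTENT > *max_extent_size)
		*max_extent_size = TOY_MAX_EXTENT;
	return 0;
}

/* Old behaviour: keep halving the request until something fits. */
static u64 alloc_by_halving(u64 bytes, u64 min_alloc_size)
{
	u64 unused = 0;

	while (bytes >= min_alloc_size) {
		u64 found = lookup_free_extent(bytes, &unused);
		if (found)
			return found;
		bytes >>= 1;	/* try again with half the size */
	}
	return 0;
}

/*
 * New behaviour: a failed search reports the max extent size, so a single
 * retry with exactly that size is enough.
 */
static u64 alloc_by_max_extent(u64 bytes, u64 min_alloc_size)
{
	u64 max_extent_size = 0;
	u64 found = lookup_free_extent(bytes, &max_extent_size);

	if (found)
		return found;
	if (max_extent_size >= min_alloc_size)
		return lookup_free_extent(max_extent_size, &max_extent_size);
	return 0;
}

int main(void)
{
	u64 request = 1024 * 1024;	/* 1MiB request, as in the dd test */

	lookups = 0;
	printf("halving:    got %llu bytes after %u searches\n",
	       (unsigned long long)alloc_by_halving(request, 4096), lookups);

	lookups = 0;
	printf("max extent: got %llu bytes after %u searches\n",
	       (unsigned long long)alloc_by_max_extent(request, 4096), lookups);
	return 0;
}

With the 1MiB request, the halving strategy needs nine searches to get down to 4KiB, while the max-extent strategy needs two; the real allocator saves the repeated tree/bitmap walks in the same way.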
Diffstat (limited to 'fs/btrfs/free-space-cache.c')
-rw-r--r--	fs/btrfs/free-space-cache.c	67
1 file changed, 47 insertions, 20 deletions
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index ef3bea7bb257..4f419bafd071 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1433,13 +1433,19 @@ static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
 	ctl->free_space += bytes;
 }
 
+/*
+ * If we can not find suitable extent, we will use bytes to record
+ * the size of the max extent.
+ */
 static int search_bitmap(struct btrfs_free_space_ctl *ctl,
 			 struct btrfs_free_space *bitmap_info, u64 *offset,
 			 u64 *bytes)
 {
 	unsigned long found_bits = 0;
+	unsigned long max_bits = 0;
 	unsigned long bits, i;
 	unsigned long next_zero;
+	unsigned long extent_bits;
 
 	i = offset_to_bit(bitmap_info->offset, ctl->unit,
 			  max_t(u64, *offset, bitmap_info->offset));
@@ -1448,9 +1454,12 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
 	for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
 		next_zero = find_next_zero_bit(bitmap_info->bitmap,
 					       BITS_PER_BITMAP, i);
-		if ((next_zero - i) >= bits) {
-			found_bits = next_zero - i;
+		extent_bits = next_zero - i;
+		if (extent_bits >= bits) {
+			found_bits = extent_bits;
 			break;
+		} else if (extent_bits > max_bits) {
+			max_bits = extent_bits;
 		}
 		i = next_zero;
 	}
@@ -1461,38 +1470,41 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
 		return 0;
 	}
 
+	*bytes = (u64)(max_bits) * ctl->unit;
 	return -1;
 }
 
+/* Cache the size of the max extent in bytes */
 static struct btrfs_free_space *
 find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
-		unsigned long align)
+		unsigned long align, u64 *max_extent_size)
 {
 	struct btrfs_free_space *entry;
 	struct rb_node *node;
-	u64 ctl_off;
 	u64 tmp;
 	u64 align_off;
 	int ret;
 
 	if (!ctl->free_space_offset.rb_node)
-		return NULL;
+		goto out;
 
 	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
 	if (!entry)
-		return NULL;
+		goto out;
 
 	for (node = &entry->offset_index; node; node = rb_next(node)) {
 		entry = rb_entry(node, struct btrfs_free_space, offset_index);
-		if (entry->bytes < *bytes)
+		if (entry->bytes < *bytes) {
+			if (entry->bytes > *max_extent_size)
+				*max_extent_size = entry->bytes;
 			continue;
+		}
 
 		/* make sure the space returned is big enough
 		 * to match our requested alignment
 		 */
 		if (*bytes >= align) {
-			ctl_off = entry->offset - ctl->start;
-			tmp = ctl_off + align - 1;;
+			tmp = entry->offset - ctl->start + align - 1;
 			do_div(tmp, align);
 			tmp = tmp * align + ctl->start;
 			align_off = tmp - entry->offset;
@@ -1501,14 +1513,22 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
 			tmp = entry->offset;
 		}
 
-		if (entry->bytes < *bytes + align_off)
+		if (entry->bytes < *bytes + align_off) {
+			if (entry->bytes > *max_extent_size)
+				*max_extent_size = entry->bytes;
 			continue;
+		}
 
 		if (entry->bitmap) {
-			ret = search_bitmap(ctl, entry, &tmp, bytes);
+			u64 size = *bytes;
+
+			ret = search_bitmap(ctl, entry, &tmp, &size);
 			if (!ret) {
 				*offset = tmp;
+				*bytes = size;
 				return entry;
+			} else if (size > *max_extent_size) {
+				*max_extent_size = size;
 			}
 			continue;
 		}
@@ -1517,7 +1537,7 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
 		*bytes = entry->bytes - align_off;
 		return entry;
 	}
-
+out:
 	return NULL;
 }
 
@@ -2118,7 +2138,8 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
 }
 
 u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
-			       u64 offset, u64 bytes, u64 empty_size)
+			       u64 offset, u64 bytes, u64 empty_size,
+			       u64 *max_extent_size)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct btrfs_free_space *entry = NULL;
@@ -2129,7 +2150,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 
 	spin_lock(&ctl->tree_lock);
 	entry = find_free_space(ctl, &offset, &bytes_search,
-				block_group->full_stripe_len);
+				block_group->full_stripe_len, max_extent_size);
 	if (!entry)
 		goto out;
 
@@ -2139,7 +2160,6 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 		if (!entry->bytes)
 			free_bitmap(ctl, entry);
 	} else {
-
 		unlink_free_space(ctl, entry);
 		align_gap_len = offset - entry->offset;
 		align_gap = entry->offset;
@@ -2153,7 +2173,6 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 		else
 			link_free_space(ctl, entry);
 	}
-
 out:
 	spin_unlock(&ctl->tree_lock);
 
@@ -2208,7 +2227,8 @@ int btrfs_return_cluster_to_free_space(
 static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
 				   struct btrfs_free_cluster *cluster,
 				   struct btrfs_free_space *entry,
-				   u64 bytes, u64 min_start)
+				   u64 bytes, u64 min_start,
+				   u64 *max_extent_size)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	int err;
@@ -2220,8 +2240,11 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
 	search_bytes = bytes;
 
 	err = search_bitmap(ctl, entry, &search_start, &search_bytes);
-	if (err)
+	if (err) {
+		if (search_bytes > *max_extent_size)
+			*max_extent_size = search_bytes;
 		return 0;
+	}
 
 	ret = search_start;
 	__bitmap_clear_bits(ctl, entry, ret, bytes);
@@ -2236,7 +2259,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
  */
 u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
 			     struct btrfs_free_cluster *cluster, u64 bytes,
-			     u64 min_start)
+			     u64 min_start, u64 *max_extent_size)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct btrfs_free_space *entry = NULL;
@@ -2256,6 +2279,9 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
 
 	entry = rb_entry(node, struct btrfs_free_space, offset_index);
 	while(1) {
+		if (entry->bytes < bytes && entry->bytes > *max_extent_size)
+			*max_extent_size = entry->bytes;
+
 		if (entry->bytes < bytes ||
 		    (!entry->bitmap && entry->offset < min_start)) {
 			node = rb_next(&entry->offset_index);
@@ -2269,7 +2295,8 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
 		if (entry->bitmap) {
 			ret = btrfs_alloc_from_bitmap(block_group,
 						      cluster, entry, bytes,
-						      cluster->window_start);
+						      cluster->window_start,
+						      max_extent_size);
 			if (ret == 0) {
 				node = rb_next(&entry->offset_index);
 				if (!node)
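
As a usage note, here is a hedged sketch of how a caller could consume the new max_extent_size out-parameter after a failed allocation; the caller shown here and the variable names (search_start, num_bytes, empty_size) are illustrative assumptions and are not part of this patch:

	u64 max_extent_size = 0;
	u64 offset;

	/* Hypothetical caller: on failure, retry with the size that is
	 * actually available instead of repeatedly halving the request. */
	offset = btrfs_find_space_for_alloc(block_group, search_start,
					    num_bytes, empty_size,
					    &max_extent_size);
	if (offset == 0 && max_extent_size)
		num_bytes = max_extent_size;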