Diffstat (limited to 'fs/btrfs/free-space-cache.c')
 fs/btrfs/free-space-cache.c | 82 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 63 insertions(+), 19 deletions(-)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 59ea2e4349c9..1f84fc09c1a8 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1356,6 +1356,8 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
 	u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
 	int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
 
+	max_bitmaps = max(max_bitmaps, 1);
+
 	BUG_ON(ctl->total_bitmaps > max_bitmaps);
 
 	/*
@@ -1463,10 +1465,14 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
1463} 1465}
1464 1466
1465static struct btrfs_free_space * 1467static struct btrfs_free_space *
1466find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes) 1468find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
1469 unsigned long align)
1467{ 1470{
1468 struct btrfs_free_space *entry; 1471 struct btrfs_free_space *entry;
1469 struct rb_node *node; 1472 struct rb_node *node;
1473 u64 ctl_off;
1474 u64 tmp;
1475 u64 align_off;
1470 int ret; 1476 int ret;
1471 1477
1472 if (!ctl->free_space_offset.rb_node) 1478 if (!ctl->free_space_offset.rb_node)
@@ -1481,15 +1487,34 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
 		if (entry->bytes < *bytes)
 			continue;
 
+		/* make sure the space returned is big enough
+		 * to match our requested alignment
+		 */
+		if (*bytes >= align) {
+			ctl_off = entry->offset - ctl->start;
+			tmp = ctl_off + align - 1;
+			do_div(tmp, align);
+			tmp = tmp * align + ctl->start;
+			align_off = tmp - entry->offset;
+		} else {
+			align_off = 0;
+			tmp = entry->offset;
+		}
+
+		if (entry->bytes < *bytes + align_off)
+			continue;
+
 		if (entry->bitmap) {
-			ret = search_bitmap(ctl, entry, offset, bytes);
-			if (!ret)
+			ret = search_bitmap(ctl, entry, &tmp, bytes);
+			if (!ret) {
+				*offset = tmp;
 				return entry;
+			}
 			continue;
 		}
 
-		*offset = entry->offset;
-		*bytes = entry->bytes;
+		*offset = tmp;
+		*bytes = entry->bytes - align_off;
 		return entry;
 	}
 
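[Note: the added block rounds entry->offset up to the next 'align' boundary,
measured from ctl->start. A minimal userspace sketch of the same arithmetic,
where align_up() is a made-up name, plain 64-bit division stands in for the
kernel's do_div(), and the example values are invented for illustration:

#include <stdio.h>
#include <stdint.h>

/* Round entry_offset up to the next 'align' boundary relative to
 * ctl_start and report how many bytes that skips. */
static uint64_t align_up(uint64_t ctl_start, uint64_t entry_offset,
			 uint64_t align, uint64_t *align_off)
{
	uint64_t tmp = entry_offset - ctl_start + align - 1;

	tmp /= align;			/* do_div(tmp, align) in the kernel */
	tmp = tmp * align + ctl_start;
	*align_off = tmp - entry_offset;
	return tmp;
}

int main(void)
{
	uint64_t skipped;
	/* entry at 1 MiB + 4 KiB, 64 KiB full-stripe alignment */
	uint64_t aligned = align_up(0, (1 << 20) + 4096, 64 << 10, &skipped);

	/* prints: aligned start 1114112, 61440 bytes skipped */
	printf("aligned start %llu, %llu bytes skipped\n",
	       (unsigned long long)aligned, (unsigned long long)skipped);
	return 0;
}

The loop then re-checks entry->bytes against *bytes + align_off, since the
skipped bytes still have to fit inside the same free-space extent.]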
@@ -1636,10 +1661,14 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
 	}
 
 	/*
-	 * some block groups are so tiny they can't be enveloped by a bitmap, so
-	 * don't even bother to create a bitmap for this
+	 * The original block groups from mkfs can be really small, like 8
+	 * megabytes, so don't bother with a bitmap for those entries.
+	 * However, some block groups can be smaller than what a bitmap
+	 * would cover but are still large enough that they could overflow
+	 * the 32k memory limit, so still allow those block groups to have
+	 * a bitmap entry.
 	 */
-	if (BITS_PER_BITMAP * ctl->unit > block_group->key.offset)
+	if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
 		return false;
 
 	return true;
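[Note: worked numbers for the new cutoff, assuming 4 KiB pages and a 4 KiB
unit: BITS_PER_BITMAP = 4096 * 8 = 32768 bits, so one bitmap covers
32768 * 4096 = 128 MiB. The old test refused bitmaps for any block group
smaller than 128 MiB; shifting right by one halves the cutoff to 64 MiB, so
block groups between 64 MiB and 128 MiB can now use bitmap entries instead
of risking overflow of the 32k extent-entry memory limit.]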
@@ -1862,11 +1891,13 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct btrfs_free_space *info;
-	int ret = 0;
+	int ret;
+	bool re_search = false;
 
 	spin_lock(&ctl->tree_lock);
 
 again:
+	ret = 0;
 	if (!bytes)
 		goto out_lock;
 
@@ -1879,17 +1910,17 @@ again:
 		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
 					  1, 0);
 		if (!info) {
-			/* the tree logging code might be calling us before we
-			 * have fully loaded the free space rbtree for this
-			 * block group.  So it is possible the entry won't
-			 * be in the rbtree yet at all.  The caching code
-			 * will make sure not to put it in the rbtree if
-			 * the logging code has pinned it.
+			/*
+			 * If we found a partial bit of our free space in a
+			 * bitmap but then couldn't find the other part, this
+			 * may be a problem, so WARN about it.
 			 */
+			WARN_ON(re_search);
 			goto out_lock;
 		}
 	}
 
+	re_search = false;
 	if (!info->bitmap) {
 		unlink_free_space(ctl, info);
 		if (offset == info->offset) {
@@ -1935,8 +1966,10 @@ again:
 	}
 
 	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
-	if (ret == -EAGAIN)
+	if (ret == -EAGAIN) {
+		re_search = true;
 		goto again;
+	}
 	BUG_ON(ret); /* logic error */
 out_lock:
 	spin_unlock(&ctl->tree_lock);
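[Note: the re_search flag ties these two hunks together. remove_from_bitmap()
returns -EAGAIN when the range being removed continues past the bitmap it
found, so the code jumps back to 'again:' to remove the remainder from the
next entry. Setting re_search on that path means a failed lookup on the
second pass is a real inconsistency (half of the free space was already
removed), hence the WARN_ON(re_search) above, while a failed first lookup
stays silent as before. Resetting ret to 0 under 'again:' keeps a successful
retry from returning the stale -EAGAIN.]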
@@ -2091,9 +2124,12 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 	struct btrfs_free_space *entry = NULL;
 	u64 bytes_search = bytes + empty_size;
 	u64 ret = 0;
+	u64 align_gap = 0;
+	u64 align_gap_len = 0;
 
 	spin_lock(&ctl->tree_lock);
-	entry = find_free_space(ctl, &offset, &bytes_search);
+	entry = find_free_space(ctl, &offset, &bytes_search,
+				block_group->full_stripe_len);
 	if (!entry)
 		goto out;
 
@@ -2103,9 +2139,15 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 		if (!entry->bytes)
 			free_bitmap(ctl, entry);
 	} else {
+
 		unlink_free_space(ctl, entry);
-		entry->offset += bytes;
-		entry->bytes -= bytes;
+		align_gap_len = offset - entry->offset;
+		align_gap = entry->offset;
+
+		entry->offset = offset + bytes;
+		WARN_ON(entry->bytes < bytes + align_gap_len);
+
+		entry->bytes -= bytes + align_gap_len;
 		if (!entry->bytes)
 			kmem_cache_free(btrfs_free_space_cachep, entry);
 		else
@@ -2115,6 +2157,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 out:
 	spin_unlock(&ctl->tree_lock);
 
+	if (align_gap_len)
+		__btrfs_add_free_space(ctl, align_gap, align_gap_len);
 	return ret;
 }
 
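[Note: a standalone check of the split bookkeeping in
btrfs_find_space_for_alloc(). fake_entry and take_from_entry() are made-up
names, and the values reuse the invented example from the earlier sketch
(a 1 MiB extent at offset 1052672, 4 KiB allocated at the aligned offset
1114112):

#include <assert.h>
#include <stdint.h>

struct fake_entry { uint64_t offset, bytes; };

/* Mirror the else branch above: carve the aligned allocation out of the
 * extent and report the alignment gap that must be re-added afterwards. */
static void take_from_entry(struct fake_entry *e, uint64_t aligned_off,
			    uint64_t bytes, uint64_t *gap, uint64_t *gap_len)
{
	*gap_len = aligned_off - e->offset;	/* bytes skipped for alignment */
	*gap = e->offset;
	e->offset = aligned_off + bytes;
	e->bytes -= bytes + *gap_len;		/* tail that stays in the tree */
}

int main(void)
{
	struct fake_entry e = { 1052672, 1 << 20 };
	uint64_t gap, gap_len;

	take_from_entry(&e, 1114112, 4096, &gap, &gap_len);
	assert(gap == 1052672 && gap_len == 61440);
	/* gap + allocation + remaining tail == the original extent */
	assert(gap_len + 4096 + e.bytes == 1 << 20);
	return 0;
}

As the final hunk shows, __btrfs_add_free_space() runs only after
ctl->tree_lock is dropped, which is why the gap is stashed in locals rather
than re-added inside the else branch.]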