author		Josef Bacik <josef@redhat.com>		2011-01-28 17:05:48 -0500
committer	Josef Bacik <josef@redhat.com>		2011-03-17 14:21:20 -0400
commit		dc89e9824464e91fa0b06267864ceabe3186fd8b (patch)
tree		82952195464518dce48cb613c74d2326f228669b	/fs/btrfs/free-space-cache.c
parent		57a45ced94fe48a701361d64230fc16eefa189dd (diff)
Btrfs: use a slab for the free space entries

Since we alloc/free free space entries a whole lot, let's use a slab to
keep track of them. This makes some of my tests slightly faster.

Thanks,

Signed-off-by: Josef Bacik <josef@redhat.com>
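For context, the pattern this patch moves to is the standard Linux slab-cache
API: create one kmem_cache sized exactly for struct btrfs_free_space, then
allocate entries with kmem_cache_zalloc() and release them with
kmem_cache_free() instead of the generic kzalloc()/kfree(). The sketch below
is illustrative only: the cache creation is not part of this diff (the
diffstat is limited to free-space-cache.c), so the cache name, the flags, and
the abbreviated struct are assumptions, not the commit's actual code.

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Stand-in for the real struct btrfs_free_space (fields abbreviated). */
struct btrfs_free_space {
	u64 offset;
	u64 bytes;
	unsigned long *bitmap;
};

static struct kmem_cache *btrfs_free_space_cachep;

/* Done once at init; the name and flags here are assumed for illustration. */
static int example_cache_init(void)
{
	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache",
					sizeof(struct btrfs_free_space), 0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	return btrfs_free_space_cachep ? 0 : -ENOMEM;
}

static void example_entry_cycle(void)
{
	struct btrfs_free_space *e;

	/* Zeroed allocation from the dedicated cache ... */
	e = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
	if (!e)
		return;

	/* ... and every former kfree(e) becomes a free back to that cache. */
	kmem_cache_free(btrfs_free_space_cachep, e);
}

Because every object in the cache has the same size, the allocator can pack
them densely and recycle recently freed (cache-hot) objects, which is
presumably where the small speedup mentioned in the commit message comes from.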
Diffstat (limited to 'fs/btrfs/free-space-cache.c')
-rw-r--r--	fs/btrfs/free-space-cache.c	34
1 file changed, 18 insertions(+), 16 deletions(-)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index a0390657451b..0282033041e1 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -393,7 +393,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
 			break;
 
 		need_loop = 1;
-		e = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+		e = kmem_cache_zalloc(btrfs_free_space_cachep,
+				      GFP_NOFS);
 		if (!e) {
 			kunmap(page);
 			unlock_page(page);
@@ -405,7 +406,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
 		e->bytes = le64_to_cpu(entry->bytes);
 		if (!e->bytes) {
 			kunmap(page);
-			kfree(e);
+			kmem_cache_free(btrfs_free_space_cachep, e);
 			unlock_page(page);
 			page_cache_release(page);
 			goto free_cache;
@@ -420,7 +421,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
 			e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
 			if (!e->bitmap) {
 				kunmap(page);
-				kfree(e);
+				kmem_cache_free(
+						btrfs_free_space_cachep, e);
 				unlock_page(page);
 				page_cache_release(page);
 				goto free_cache;
@@ -1187,7 +1189,7 @@ static void free_bitmap(struct btrfs_block_group_cache *block_group,
 {
 	unlink_free_space(block_group, bitmap_info);
 	kfree(bitmap_info->bitmap);
-	kfree(bitmap_info);
+	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
 	block_group->total_bitmaps--;
 	recalculate_thresholds(block_group);
 }
@@ -1342,8 +1344,8 @@ new_bitmap:
 
 	/* no pre-allocated info, allocate a new one */
 	if (!info) {
-		info = kzalloc(sizeof(struct btrfs_free_space),
-			       GFP_NOFS);
+		info = kmem_cache_zalloc(btrfs_free_space_cachep,
+					 GFP_NOFS);
 		if (!info) {
 			spin_lock(&block_group->tree_lock);
 			ret = -ENOMEM;
@@ -1365,7 +1367,7 @@ out:
 	if (info) {
 		if (info->bitmap)
 			kfree(info->bitmap);
-		kfree(info);
+		kmem_cache_free(btrfs_free_space_cachep, info);
 	}
 
 	return ret;
@@ -1398,7 +1400,7 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
 		else
 			__unlink_free_space(block_group, right_info);
 		info->bytes += right_info->bytes;
-		kfree(right_info);
+		kmem_cache_free(btrfs_free_space_cachep, right_info);
 		merged = true;
 	}
 
@@ -1410,7 +1412,7 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
 			__unlink_free_space(block_group, left_info);
 		info->offset = left_info->offset;
 		info->bytes += left_info->bytes;
-		kfree(left_info);
+		kmem_cache_free(btrfs_free_space_cachep, left_info);
 		merged = true;
 	}
 
@@ -1423,7 +1425,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 	struct btrfs_free_space *info;
 	int ret = 0;
 
-	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
 	if (!info)
 		return -ENOMEM;
 
@@ -1450,7 +1452,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 link:
 	ret = link_free_space(block_group, info);
 	if (ret)
-		kfree(info);
+		kmem_cache_free(btrfs_free_space_cachep, info);
 out:
 	spin_unlock(&block_group->tree_lock);
 
@@ -1520,7 +1522,7 @@ again:
 			kfree(info->bitmap);
 			block_group->total_bitmaps--;
 		}
-		kfree(info);
+		kmem_cache_free(btrfs_free_space_cachep, info);
 		goto out_lock;
 	}
 
@@ -1556,7 +1558,7 @@ again:
 			/* the hole we're creating ends at the end
 			 * of the info struct, just free the info
 			 */
-			kfree(info);
+			kmem_cache_free(btrfs_free_space_cachep, info);
 		}
 		spin_unlock(&block_group->tree_lock);
 
@@ -1689,7 +1691,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
 		unlink_free_space(block_group, info);
 		if (info->bitmap)
 			kfree(info->bitmap);
-		kfree(info);
+		kmem_cache_free(btrfs_free_space_cachep, info);
 		if (need_resched()) {
 			spin_unlock(&block_group->tree_lock);
 			cond_resched();
@@ -1722,7 +1724,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 		entry->offset += bytes;
 		entry->bytes -= bytes;
 		if (!entry->bytes)
-			kfree(entry);
+			kmem_cache_free(btrfs_free_space_cachep, entry);
 		else
 			link_free_space(block_group, entry);
 	}
@@ -1884,7 +1886,7 @@ out:
 	block_group->free_space -= bytes;
 	if (entry->bytes == 0) {
 		block_group->free_extents--;
-		kfree(entry);
+		kmem_cache_free(btrfs_free_space_cachep, entry);
 	}
 
 	spin_unlock(&block_group->tree_lock);
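Note the split the patch preserves: only the fixed-size struct
btrfs_free_space entries move to the slab cache, while the page-sized bitmaps
they may point at (e->bitmap, bitmap_info->bitmap) are still allocated with
kzalloc(PAGE_CACHE_SIZE, GFP_NOFS) and released with plain kfree(), as the
unchanged kfree(info->bitmap) lines above show. A dedicated slab cache pays
off for many small, identically sized objects; a full-page buffer gains
nothing from one.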