author    Li Zefan <lizf@cn.fujitsu.com>  2011-03-29 01:46:06 -0400
committer Li Zefan <lizf@cn.fujitsu.com>  2011-04-25 04:46:03 -0400
commit    34d52cb6c50b5a43901709998f59fb1c5a43dc4a
tree      151c61795cceefc97e48e8209dc36303274fbe10 /fs/btrfs/extent-tree.c
parent    f38b6e754d8cc4605ac21d9c1094d569d88b163b
Btrfs: Make free space cache code generic
So we can re-use the code to cache free inode numbers.

The change is quite straightforward. Two new structures are introduced.

- struct btrfs_free_space_ctl

  We move those variables that are used for caching free space from
  struct btrfs_block_group_cache to this new struct.

- struct btrfs_free_space_op

  We do block group specific work (e.g. calculation of extents threshold)
  through functions registered in this struct.

And then we can remove references to struct btrfs_block_group_cache.

Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
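For orientation, a sketch of the two structures the message describes. They are
introduced in fs/btrfs/free-space-cache.h by this same patch and do not appear
in the extent-tree.c diff below; the field list here is abbreviated and should
be read as illustrative rather than authoritative:

/*
 * Sketch: the caching state that used to be embedded in
 * struct btrfs_block_group_cache, pulled out into its own struct so the
 * same code can later cache free inode numbers as well.
 */
struct btrfs_free_space_ctl {
	spinlock_t tree_lock;		/* was cache->tree_lock */
	struct rb_root free_space_offset;
	u64 free_space;			/* was cache->free_space */
	int extents_thresh;		/* was cache->extents_thresh */
	int free_extents;
	int total_bitmaps;
	int unit;			/* sectorsize, for block groups */
	u64 start;			/* block group start offset */
	struct btrfs_free_space_op *op;
	void *private;			/* backpointer to the block group */
};

/*
 * Sketch: per-user policy hooks, so block-group-specific decisions
 * (recalculating extents_thresh, deciding when to switch to bitmaps)
 * stay out of the generic caching code.
 */
struct btrfs_free_space_op {
	void (*recalc_thresholds)(struct btrfs_free_space_ctl *ctl);
	bool (*use_bitmap)(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
};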
Diffstat (limited to 'fs/btrfs/extent-tree.c')
 fs/btrfs/extent-tree.c | 37 +++++++++++++++++++------------------
 1 file changed, 19 insertions(+), 18 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 31f33ba56fe8..904eae10ec65 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -105,6 +105,7 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
 		WARN_ON(cache->pinned > 0);
 		WARN_ON(cache->reserved > 0);
 		WARN_ON(cache->reserved_pinned > 0);
+		kfree(cache->free_space_ctl);
 		kfree(cache);
 	}
 }
@@ -4893,7 +4894,7 @@ wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
 		return 0;

 	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
-		   (cache->free_space >= num_bytes));
+		   (cache->free_space_ctl->free_space >= num_bytes));

 	put_caching_control(caching_ctl);
 	return 0;
@@ -8551,10 +8552,16 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 			ret = -ENOMEM;
 			goto error;
 		}
+		cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
+						GFP_NOFS);
+		if (!cache->free_space_ctl) {
+			kfree(cache);
+			ret = -ENOMEM;
+			goto error;
+		}

 		atomic_set(&cache->count, 1);
 		spin_lock_init(&cache->lock);
-		spin_lock_init(&cache->tree_lock);
 		cache->fs_info = info;
 		INIT_LIST_HEAD(&cache->list);
 		INIT_LIST_HEAD(&cache->cluster_list);
@@ -8562,14 +8569,6 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		if (need_clear)
 			cache->disk_cache_state = BTRFS_DC_CLEAR;

-		/*
-		 * we only want to have 32k of ram per block group for keeping
-		 * track of free space, and if we pass 1/2 of that we want to
-		 * start converting things over to using bitmaps
-		 */
-		cache->extents_thresh = ((1024 * 32) / 2) /
-			sizeof(struct btrfs_free_space);
-
 		read_extent_buffer(leaf, &cache->item,
 				   btrfs_item_ptr_offset(leaf, path->slots[0]),
 				   sizeof(cache->item));
@@ -8580,6 +8579,8 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		cache->flags = btrfs_block_group_flags(&cache->item);
 		cache->sectorsize = root->sectorsize;

+		btrfs_init_free_space_ctl(cache);
+
 		/*
 		 * We need to exclude the super stripes now so that the space
 		 * info has super bytes accounted for, otherwise we'll think
@@ -8666,6 +8667,12 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 	cache = kzalloc(sizeof(*cache), GFP_NOFS);
 	if (!cache)
 		return -ENOMEM;
+	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
+					GFP_NOFS);
+	if (!cache->free_space_ctl) {
+		kfree(cache);
+		return -ENOMEM;
+	}

 	cache->key.objectid = chunk_offset;
 	cache->key.offset = size;
@@ -8673,19 +8680,13 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 	cache->sectorsize = root->sectorsize;
 	cache->fs_info = root->fs_info;

-	/*
-	 * we only want to have 32k of ram per block group for keeping track
-	 * of free space, and if we pass 1/2 of that we want to start
-	 * converting things over to using bitmaps
-	 */
-	cache->extents_thresh = ((1024 * 32) / 2) /
-		sizeof(struct btrfs_free_space);
 	atomic_set(&cache->count, 1);
 	spin_lock_init(&cache->lock);
-	spin_lock_init(&cache->tree_lock);
 	INIT_LIST_HEAD(&cache->list);
 	INIT_LIST_HEAD(&cache->cluster_list);

+	btrfs_init_free_space_ctl(cache);
+
 	btrfs_set_block_group_used(&cache->item, bytes_used);
 	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
 	cache->flags = type;
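The extents_thresh setup deleted from both call sites above does not go away:
per the commit message it moves behind the callbacks registered in struct
btrfs_free_space_op. A sketch of the companion wiring in
fs/btrfs/free-space-cache.c, which belongs to the same patch but not to this
file's diff; the details here are illustrative:

/* Block-group flavour of the ops, registered once per block group. */
static struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};

/*
 * Replaces the open-coded setup removed from btrfs_read_block_groups()
 * and btrfs_make_block_group(): point the ctl at the block group and at
 * the block-group-specific callbacks, instead of computing thresholds
 * inline at every creation site.
 */
void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = block_group->sectorsize;
	ctl->start = block_group->key.objectid;
	ctl->private = block_group;
	ctl->op = &free_space_op;
}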