author		Yan <zheng.yan@oracle.com>	2008-07-30 16:29:20 -0400
committer	Chris Mason <chris.mason@oracle.com>	2008-09-25 11:04:05 -0400
commit		bcc63abbf3e9bf948a1b0129b3e6120ec7d7f698 (patch)
tree		1c66dc210f948f79c86786368d2c75b57482875d /fs/btrfs/ref-cache.c
parent		33958dc6d38fb4ca7e62273855fcb2db7e616263 (diff)
Btrfs: implement memory reclaim for leaf reference cache
The memory reclaim issue happens when snapshots exist. In that
case, some cache entries may never be used while dropping old
snapshots, so they remain in the cache until umount.
This patch adds a field to struct btrfs_leaf_ref to record its creation
time, and it links all dead roots of a given snapshot together in order
of creation time. After an old snapshot is completely dropped, we check
the dead root list and remove all cache entries created before the
oldest dead root in the list.
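
The generation cutoff can be sketched as follows. This is only an
illustration of the idea, not the btrfs code: the struct btrfs_leaf_ref
change lands in ref-cache.h (outside this diffstat), and the names
dead_root and reclaimable_cutoff below are hypothetical.

	#include <linux/list.h>
	#include <linux/types.h>

	/* Hypothetical stand-in for a dropped snapshot root. */
	struct dead_root {
		struct list_head list;	/* kept sorted, oldest first */
		u64 root_gen;		/* creation time of the dead root */
	};

	/*
	 * Any leaf ref created before the oldest dead root can no longer
	 * be used by a pending snapshot drop, so it is safe to reclaim.
	 */
	static u64 reclaimable_cutoff(struct list_head *dead_roots, u64 current_gen)
	{
		struct dead_root *oldest;

		if (list_empty(dead_roots))
			return current_gen;	/* nothing pending: reclaim everything */
		oldest = list_first_entry(dead_roots, struct dead_root, list);
		return oldest->root_gen - 1;	/* "created before the oldest dead root" */
	}

The resulting cutoff is what gets passed as max_root_gen to
btrfs_remove_leaf_refs() in the diff below.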
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/ref-cache.c')
-rw-r--r--	fs/btrfs/ref-cache.c	48
1 file changed, 25 insertions(+), 23 deletions(-)
diff --git a/fs/btrfs/ref-cache.c b/fs/btrfs/ref-cache.c
index ec9587784a3d..272b9890c982 100644
--- a/fs/btrfs/ref-cache.c
+++ b/fs/btrfs/ref-cache.c
@@ -21,12 +21,18 @@
 #include "ref-cache.h"
 #include "transaction.h"
 
-struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(int nr_extents)
+struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root,
+					    int nr_extents)
 {
 	struct btrfs_leaf_ref *ref;
+	size_t size = btrfs_leaf_ref_size(nr_extents);
 
-	ref = kmalloc(btrfs_leaf_ref_size(nr_extents), GFP_NOFS);
+	ref = kmalloc(size, GFP_NOFS);
 	if (ref) {
+		spin_lock(&root->fs_info->ref_cache_lock);
+		root->fs_info->total_ref_cache_size += size;
+		spin_unlock(&root->fs_info->ref_cache_lock);
+
 		memset(ref, 0, sizeof(*ref));
 		atomic_set(&ref->usage, 1);
 		INIT_LIST_HEAD(&ref->list);
@@ -34,14 +40,20 @@ struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(int nr_extents)
 	return ref;
 }
 
-void btrfs_free_leaf_ref(struct btrfs_leaf_ref *ref)
+void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
 {
 	if (!ref)
 		return;
 	WARN_ON(atomic_read(&ref->usage) == 0);
 	if (atomic_dec_and_test(&ref->usage)) {
+		size_t size = btrfs_leaf_ref_size(ref->nritems);
+
 		BUG_ON(ref->in_tree);
 		kfree(ref);
+
+		spin_lock(&root->fs_info->ref_cache_lock);
+		root->fs_info->total_ref_cache_size -= size;
+		spin_unlock(&root->fs_info->ref_cache_lock);
 	}
 }
 
@@ -64,7 +76,7 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
 		else
 			return parent;
 	}
-	
+
 	entry = rb_entry(node, struct btrfs_leaf_ref, rb_node);
 	entry->in_tree = 1;
 	rb_link_node(node, parent, p);
@@ -91,9 +103,8 @@ static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
 	return NULL;
 }
 
-int btrfs_remove_leaf_refs(struct btrfs_root *root)
+int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen)
 {
-	struct rb_node *rb;
 	struct btrfs_leaf_ref *ref = NULL;
 	struct btrfs_leaf_ref_tree *tree = root->ref_tree;
 
@@ -101,17 +112,18 @@ int btrfs_remove_leaf_refs(struct btrfs_root *root)
 		return 0;
 
 	spin_lock(&tree->lock);
-	while(!btrfs_leaf_ref_tree_empty(tree)) {
-		rb = rb_first(&tree->root);
-		ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node);
+	while(!list_empty(&tree->list)) {
+		ref = list_entry(tree->list.next, struct btrfs_leaf_ref, list);
+		BUG_ON(!ref->in_tree);
+		if (ref->root_gen > max_root_gen)
+			break;
+
 		rb_erase(&ref->rb_node, &tree->root);
 		ref->in_tree = 0;
 		list_del_init(&ref->list);
 
 		spin_unlock(&tree->lock);
-
-		btrfs_free_leaf_ref(ref);
-
+		btrfs_free_leaf_ref(root, ref);
 		cond_resched();
 		spin_lock(&tree->lock);
 	}
@@ -143,7 +155,6 @@ int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
 {
 	int ret = 0;
 	struct rb_node *rb;
-	size_t size = btrfs_leaf_ref_size(ref->nritems);
 	struct btrfs_leaf_ref_tree *tree = root->ref_tree;
 
 	spin_lock(&tree->lock);
@@ -151,9 +162,6 @@ int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
 	if (rb) {
 		ret = -EEXIST;
 	} else {
-		spin_lock(&root->fs_info->ref_cache_lock);
-		root->fs_info->total_ref_cache_size += size;
-		spin_unlock(&root->fs_info->ref_cache_lock);
 		atomic_inc(&ref->usage);
 		list_add_tail(&ref->list, &tree->list);
 	}
@@ -163,15 +171,10 @@ int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
 
 int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
 {
-	size_t size = btrfs_leaf_ref_size(ref->nritems);
 	struct btrfs_leaf_ref_tree *tree = root->ref_tree;
 
 	BUG_ON(!ref->in_tree);
 	spin_lock(&tree->lock);
-
-	spin_lock(&root->fs_info->ref_cache_lock);
-	root->fs_info->total_ref_cache_size -= size;
-	spin_unlock(&root->fs_info->ref_cache_lock);
 
 	rb_erase(&ref->rb_node, &tree->root);
 	ref->in_tree = 0;
@@ -179,7 +182,6 @@ int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
 
 	spin_unlock(&tree->lock);
 
-	btrfs_free_leaf_ref(ref);
+	btrfs_free_leaf_ref(root, ref);
 	return 0;
 }
-
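
The user-visible part of this change is the new calling convention:
btrfs_alloc_leaf_ref() and btrfs_free_leaf_ref() now take the root, so
the total_ref_cache_size accounting happens at allocation and final
free rather than at rb-tree insert/remove. A rough sketch of the
resulting call pattern (the caller function, its error handling, and
the use of trans->transid as the create time are illustrative
assumptions, not code from this patch):

	/* Hypothetical caller, for illustration only. */
	static int cache_one_leaf(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root, int nr_extents)
	{
		struct btrfs_leaf_ref *ref;
		int ret;

		/* allocation now charges fs_info->total_ref_cache_size */
		ref = btrfs_alloc_leaf_ref(root, nr_extents);
		if (!ref)
			return -ENOMEM;

		ref->root_gen = trans->transid;	/* record the create time */

		ret = btrfs_add_leaf_ref(root, ref);

		/* drop our usage count; the final put uncharges the accounting */
		btrfs_free_leaf_ref(root, ref);
		return ret;
	}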