| author | Zheng Yan <zheng.yan@oracle.com> | 2008-09-26 10:04:53 -0400 |
|---|---|---|
| committer | Chris Mason <chris.mason@oracle.com> | 2008-09-26 10:04:53 -0400 |
| commit | e465768938f95388723b0fd3c50a0ae48173edb9 (patch) | |
| tree | 0f624a9a98dedfafae902e12b384d27e1100cd3a /fs/btrfs/ref-cache.c | |
| parent | e8569813849b5da394a195c7e76b4faa452b12d1 (diff) | |
Btrfs: Add shared reference cache
Btrfs has a cache of reference counts in leaves, allowing it to
avoid reading tree leaves while deleting snapshots. To reduce
contention with multiple subvolumes, this cache is private to each
subvolume.
This patch adds shared reference cache support. The new space
balancing code works on multiple subvolumes at the same time, so
the old per-subvolume reference cache is not well suited.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
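To make the lookup fallback concrete before the diff: below is a minimal user-space sketch of the private-then-shared pattern that `btrfs_lookup_leaf_ref()` adopts in this patch. `ref_cache`, `cache_search()`, and `lookup_ref()` are simplified hypothetical stand-ins, not the real btrfs structures.

```c
/*
 * Minimal user-space sketch of the two-level lookup this patch gives
 * btrfs_lookup_leaf_ref(): search the subvolume's private cache first,
 * then fall back to the shared cache.  All names here are simplified
 * stand-ins, not the real btrfs structures.
 */
#include <stdio.h>
#include <stddef.h>

struct ref_cache {
	const char *name;
	unsigned long long cached_bytenr;	/* one-entry "tree", for brevity */
};

static struct ref_cache shared_cache = { "shared", 2048 };

/* Search a single cache; NULL means no hit (or no cache at all). */
static struct ref_cache *cache_search(struct ref_cache *cache,
				      unsigned long long bytenr)
{
	if (cache && cache->cached_bytenr == bytenr)
		return cache;
	return NULL;
}

/* Private cache first, then shared -- mirrors the "again:" loop below. */
static struct ref_cache *lookup_ref(struct ref_cache *private_cache,
				    unsigned long long bytenr)
{
	struct ref_cache *cache = private_cache;
again:
	if (cache_search(cache, bytenr))
		return cache;
	if (cache != &shared_cache) {
		cache = &shared_cache;
		goto again;
	}
	return NULL;
}

int main(void)
{
	struct ref_cache private_cache = { "private", 1024 };
	struct ref_cache *hit = lookup_ref(&private_cache, 2048);

	printf("bytenr 2048 found in: %s cache\n", hit ? hit->name : "no");
	return 0;
}
```

The real function additionally takes each tree's spinlock and bumps the ref's usage count before returning, as the third hunk of the diff below shows.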
Diffstat (limited to 'fs/btrfs/ref-cache.c')
-rw-r--r--  fs/btrfs/ref-cache.c | 58
1 file changed, 38 insertions(+), 20 deletions(-)
```diff
diff --git a/fs/btrfs/ref-cache.c b/fs/btrfs/ref-cache.c
index 272b9890c982..c5809988c875 100644
--- a/fs/btrfs/ref-cache.c
+++ b/fs/btrfs/ref-cache.c
@@ -78,7 +78,6 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
 	}
 
 	entry = rb_entry(node, struct btrfs_leaf_ref, rb_node);
-	entry->in_tree = 1;
 	rb_link_node(node, parent, p);
 	rb_insert_color(node, root);
 	return NULL;
@@ -103,23 +102,29 @@ static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
 	return NULL;
 }
 
-int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen)
+int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
+			   int shared)
 {
 	struct btrfs_leaf_ref *ref = NULL;
 	struct btrfs_leaf_ref_tree *tree = root->ref_tree;
 
+	if (shared)
+		tree = &root->fs_info->shared_ref_tree;
 	if (!tree)
 		return 0;
 
 	spin_lock(&tree->lock);
 	while(!list_empty(&tree->list)) {
 		ref = list_entry(tree->list.next, struct btrfs_leaf_ref, list);
-		BUG_ON(!ref->in_tree);
+		BUG_ON(ref->tree != tree);
 		if (ref->root_gen > max_root_gen)
 			break;
+		if (!xchg(&ref->in_tree, 0)) {
+			cond_resched_lock(&tree->lock);
+			continue;
+		}
 
 		rb_erase(&ref->rb_node, &tree->root);
-		ref->in_tree = 0;
 		list_del_init(&ref->list);
 
 		spin_unlock(&tree->lock);
@@ -137,32 +142,43 @@ struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
 	struct rb_node *rb;
 	struct btrfs_leaf_ref *ref = NULL;
 	struct btrfs_leaf_ref_tree *tree = root->ref_tree;
-
-	if (!tree)
-		return NULL;
-
-	spin_lock(&tree->lock);
-	rb = tree_search(&tree->root, bytenr);
-	if (rb)
-		ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node);
-	if (ref)
-		atomic_inc(&ref->usage);
-	spin_unlock(&tree->lock);
-	return ref;
+again:
+	if (tree) {
+		spin_lock(&tree->lock);
+		rb = tree_search(&tree->root, bytenr);
+		if (rb)
+			ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node);
+		if (ref)
+			atomic_inc(&ref->usage);
+		spin_unlock(&tree->lock);
+		if (ref)
+			return ref;
+	}
+	if (tree != &root->fs_info->shared_ref_tree) {
+		tree = &root->fs_info->shared_ref_tree;
+		goto again;
+	}
+	return NULL;
 }
 
-int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
+int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref,
+		       int shared)
 {
 	int ret = 0;
 	struct rb_node *rb;
 	struct btrfs_leaf_ref_tree *tree = root->ref_tree;
 
+	if (shared)
+		tree = &root->fs_info->shared_ref_tree;
+
 	spin_lock(&tree->lock);
 	rb = tree_insert(&tree->root, ref->bytenr, &ref->rb_node);
 	if (rb) {
 		ret = -EEXIST;
 	} else {
 		atomic_inc(&ref->usage);
+		ref->tree = tree;
+		ref->in_tree = 1;
 		list_add_tail(&ref->list, &tree->list);
 	}
 	spin_unlock(&tree->lock);
@@ -171,13 +187,15 @@ int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
 
 int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
 {
-	struct btrfs_leaf_ref_tree *tree = root->ref_tree;
+	struct btrfs_leaf_ref_tree *tree;
+
+	if (!xchg(&ref->in_tree, 0))
+		return 0;
 
-	BUG_ON(!ref->in_tree);
+	tree = ref->tree;
 	spin_lock(&tree->lock);
 
 	rb_erase(&ref->rb_node, &tree->root);
-	ref->in_tree = 0;
 	list_del_init(&ref->list);
 
 	spin_unlock(&tree->lock);
```
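A note on the `xchg()` calls the patch introduces: once a ref can live in a shared tree, `btrfs_remove_leaf_refs()` and `btrfs_remove_leaf_ref()` can race on the same ref, so `in_tree` is retired with an atomic exchange and whichever caller reads back 1 owns the removal. Below is a minimal user-space sketch of that claim idiom, assuming the GCC/Clang `__atomic_exchange_n` builtin as a stand-in for the kernel's `xchg()`; `fake_ref` and `claim_removal()` are hypothetical.

```c
/*
 * Sketch of the atomic-claim idiom used above: atomically swap the flag
 * to 0 and let only the caller that saw the old value 1 tear the ref
 * down.  fake_ref and claim_removal() are hypothetical stand-ins.
 */
#include <stdio.h>

struct fake_ref {
	int in_tree;	/* 1 while linked into a cache tree */
};

/* Returns 1 only for the caller that wins the race to remove the ref. */
static int claim_removal(struct fake_ref *ref)
{
	return __atomic_exchange_n(&ref->in_tree, 0, __ATOMIC_SEQ_CST);
}

int main(void)
{
	struct fake_ref ref = { .in_tree = 1 };

	printf("first claim:  %d\n", claim_removal(&ref));	/* 1: we do the removal */
	printf("second claim: %d\n", claim_removal(&ref));	/* 0: already claimed; back off */
	return 0;
}
```

This is why `btrfs_remove_leaf_refs()` merely calls `cond_resched_lock()` and continues when it loses the exchange: the winner is already responsible for the `rb_erase()` and `list_del_init()`.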