author    Arne Jansen <sensille@gmx.net>          2011-09-13 09:16:43 -0400
committer Jan Schmidt <list.btrfs@jan-o-sch.net>  2012-01-04 10:12:45 -0500
commit    d1270cd91f308c9d22b2804720c36ccd32dbc35e (patch)
tree      3dfa3109f7df9983ce88dad5bcb4515f7d4b26b5 /fs
parent    00f04b88791ff49dc64ada18819d40a5b0671709 (diff)
Btrfs: put back delayed refs that are too new
When processing a delayed ref, first check if there are still old refs in
the process of being added. If so, put this ref back to the tree. To avoid
looping on this ref, choose a newer one in the next loop.
btrfs_find_ref_cluster has to take care of that.

Signed-off-by: Arne Jansen <sensille@gmx.net>
Signed-off-by: Jan Schmidt <list.btrfs@jan-o-sch.net>
Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/delayed-ref.c | 43
-rw-r--r--  fs/btrfs/extent-tree.c | 27
2 files changed, 47 insertions(+), 23 deletions(-)
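The core rule this patch enforces is that a delayed ref created under
sequence number seq must not run while additions with a lower sequence
number are still in flight. As a minimal userspace sketch of that rule,
assuming hypothetical stand-ins (older_ref_in_flight() and
oldest_in_flight model btrfs_check_delayed_seq() and its bookkeeping;
this is illustrative code, not btrfs code):

#include <stdio.h>

/* stand-in for btrfs_check_delayed_seq(): true while a ref creation
 * older than 'seq' may still be in progress */
static int older_ref_in_flight(unsigned long long oldest_in_flight,
			       unsigned long long seq)
{
	return oldest_in_flight < seq;
}

int main(void)
{
	unsigned long long seq = 42, oldest_in_flight = 40;

	if (seq && older_ref_in_flight(oldest_in_flight, seq))
		printf("too new: put the head back, pick a newer one\n");
	else
		printf("safe to run this ref\n");
	return 0;
}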
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index a405db0320e8..ee181989d444 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -155,16 +155,22 @@ static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
 
 /*
  * find an head entry based on bytenr. This returns the delayed ref
- * head if it was able to find one, or NULL if nothing was in that spot
+ * head if it was able to find one, or NULL if nothing was in that spot.
+ * If return_bigger is given, the next bigger entry is returned if no exact
+ * match is found.
  */
 static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
 				  u64 bytenr,
-				  struct btrfs_delayed_ref_node **last)
+				  struct btrfs_delayed_ref_node **last,
+				  int return_bigger)
 {
-	struct rb_node *n = root->rb_node;
+	struct rb_node *n;
 	struct btrfs_delayed_ref_node *entry;
-	int cmp;
+	int cmp = 0;
 
+again:
+	n = root->rb_node;
+	entry = NULL;
 	while (n) {
 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
 		WARN_ON(!entry->in_tree);
@@ -187,6 +193,19 @@ static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
 		else
 			return entry;
 	}
+	if (entry && return_bigger) {
+		if (cmp > 0) {
+			n = rb_next(&entry->rb_node);
+			if (!n)
+				n = rb_first(root);
+			entry = rb_entry(n, struct btrfs_delayed_ref_node,
+					 rb_node);
+			bytenr = entry->bytenr;
+			return_bigger = 0;
+			goto again;
+		}
+		return entry;
+	}
 	return NULL;
 }
 
@@ -246,20 +265,8 @@ int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
 		node = rb_first(&delayed_refs->root);
 	} else {
 		ref = NULL;
-		find_ref_head(&delayed_refs->root, start, &ref);
+		find_ref_head(&delayed_refs->root, start + 1, &ref, 1);
 		if (ref) {
-			struct btrfs_delayed_ref_node *tmp;
-
-			node = rb_prev(&ref->rb_node);
-			while (node) {
-				tmp = rb_entry(node,
-					       struct btrfs_delayed_ref_node,
-					       rb_node);
-				if (tmp->bytenr < start)
-					break;
-				ref = tmp;
-				node = rb_prev(&ref->rb_node);
-			}
 			node = &ref->rb_node;
 		} else
 			node = rb_first(&delayed_refs->root);
@@ -748,7 +755,7 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
 	struct btrfs_delayed_ref_root *delayed_refs;
 
 	delayed_refs = &trans->transaction->delayed_refs;
-	ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
+	ref = find_ref_head(&delayed_refs->root, bytenr, NULL, 0);
 	if (ref)
 		return btrfs_delayed_node_to_head(ref);
 	return NULL;
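
The reworked find_ref_head() now encapsulates the "next bigger head, with
wrap-around" lookup that btrfs_find_ref_cluster() previously open-coded
via rb_prev(). A rough userspace analogy of that contract, assuming a
sorted array in place of the rbtree (find_bigger() is a made-up helper,
not kernel code):

#include <stddef.h>
#include <stdio.h>

/* return the index of the first entry >= key, wrapping to the start
 * when key is past the last entry (the rb_first() case above) */
static size_t find_bigger(const unsigned long long *v, size_t n,
			  unsigned long long key)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (v[i] >= key)
			return i;
	return 0;
}

int main(void)
{
	unsigned long long bytenrs[] = { 100, 200, 300 };

	/* searching for start + 1 = 201 skips the head at 200 */
	printf("%llu\n", bytenrs[find_bigger(bytenrs, 3, 201)]);
	/* past the last head: wrap around to the first one, 100 */
	printf("%llu\n", bytenrs[find_bigger(bytenrs, 3, 301)]);
	return 0;
}

Passing start + 1 with return_bigger set thus yields the first head
strictly beyond the one just put back, so the caller cannot loop on it.
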
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index dc8b9a834596..bbcca12fbbba 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2237,6 +2237,28 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
 		}
 
 		/*
+		 * locked_ref is the head node, so we have to go one
+		 * node back for any delayed ref updates
+		 */
+		ref = select_delayed_ref(locked_ref);
+
+		if (ref && ref->seq &&
+		    btrfs_check_delayed_seq(delayed_refs, ref->seq)) {
+			/*
+			 * there are still refs with lower seq numbers in the
+			 * process of being added. Don't run this ref yet.
+			 */
+			list_del_init(&locked_ref->cluster);
+			mutex_unlock(&locked_ref->mutex);
+			locked_ref = NULL;
+			delayed_refs->num_heads_ready++;
+			spin_unlock(&delayed_refs->lock);
+			cond_resched();
+			spin_lock(&delayed_refs->lock);
+			continue;
+		}
+
+		/*
 		 * record the must insert reserved flag before we
 		 * drop the spin lock.
 		 */
@@ -2246,11 +2268,6 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
 		extent_op = locked_ref->extent_op;
 		locked_ref->extent_op = NULL;
 
-		/*
-		 * locked_ref is the head node, so we have to go one
-		 * node back for any delayed ref updates
-		 */
-		ref = select_delayed_ref(locked_ref);
 		if (!ref) {
 			/* All delayed refs have been processed, Go ahead
 			 * and send the head node to run_one_delayed_ref,
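
The requeue path added above follows a common shape: put the head back on
the list, drop the lock, give other threads a chance to run, retake the
lock and move on to another candidate. A hypothetical pthread sketch of
that shape (ready() stands in for the seq check; none of this is btrfs
code):

#include <pthread.h>
#include <sched.h>

static pthread_mutex_t refs_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for the seq check gating each item */
static int ready(int item)
{
	return item != 0;
}

/* process items under refs_lock, deferring any that are not ready */
static void run_refs(const int *items, int n)
{
	int i;

	pthread_mutex_lock(&refs_lock);
	for (i = 0; i < n; i++) {
		if (!ready(items[i])) {
			/* back off so the adders can make progress,
			 * mirroring cond_resched() in the kernel */
			pthread_mutex_unlock(&refs_lock);
			sched_yield();
			pthread_mutex_lock(&refs_lock);
			continue;
		}
		/* ... run this item ... */
	}
	pthread_mutex_unlock(&refs_lock);
}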