about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--  fs/btrfs/delayed-ref.c  34
-rw-r--r--  fs/btrfs/delayed-ref.h  70
-rw-r--r--  fs/btrfs/transaction.c   4
3 files changed, 108 insertions, 0 deletions
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index babd37badb43..a405db0320e8 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -101,6 +101,11 @@ static int comp_entry(struct btrfs_delayed_ref_node *ref2,
101 return -1; 101 return -1;
102 if (ref1->type > ref2->type) 102 if (ref1->type > ref2->type)
103 return 1; 103 return 1;
104 /* merging of sequenced refs is not allowed */
105 if (ref1->seq < ref2->seq)
106 return -1;
107 if (ref1->seq > ref2->seq)
108 return 1;
104 if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY || 109 if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
105 ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) { 110 ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
106 return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2), 111 return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
@@ -209,6 +214,24 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
209 return 0; 214 return 0;
210} 215}
211 216
217int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
218 u64 seq)
219{
220 struct seq_list *elem;
221
222 assert_spin_locked(&delayed_refs->lock);
223 if (list_empty(&delayed_refs->seq_head))
224 return 0;
225
226 elem = list_first_entry(&delayed_refs->seq_head, struct seq_list, list);
227 if (seq >= elem->seq) {
228 pr_debug("holding back delayed_ref %llu, lowest is %llu (%p)\n",
229 seq, elem->seq, delayed_refs);
230 return 1;
231 }
232 return 0;
233}
234
212int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans, 235int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
213 struct list_head *cluster, u64 start) 236 struct list_head *cluster, u64 start)
214{ 237{
@@ -438,6 +461,7 @@ static noinline int add_delayed_ref_head(struct btrfs_fs_info *fs_info,
438 ref->action = 0; 461 ref->action = 0;
439 ref->is_head = 1; 462 ref->is_head = 1;
440 ref->in_tree = 1; 463 ref->in_tree = 1;
464 ref->seq = 0;
441 465
442 head_ref = btrfs_delayed_node_to_head(ref); 466 head_ref = btrfs_delayed_node_to_head(ref);
443 head_ref->must_insert_reserved = must_insert_reserved; 467 head_ref->must_insert_reserved = must_insert_reserved;
@@ -479,6 +503,7 @@ static noinline int add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
479 struct btrfs_delayed_ref_node *existing; 503 struct btrfs_delayed_ref_node *existing;
480 struct btrfs_delayed_tree_ref *full_ref; 504 struct btrfs_delayed_tree_ref *full_ref;
481 struct btrfs_delayed_ref_root *delayed_refs; 505 struct btrfs_delayed_ref_root *delayed_refs;
506 u64 seq = 0;
482 507
483 if (action == BTRFS_ADD_DELAYED_EXTENT) 508 if (action == BTRFS_ADD_DELAYED_EXTENT)
484 action = BTRFS_ADD_DELAYED_REF; 509 action = BTRFS_ADD_DELAYED_REF;
@@ -494,6 +519,10 @@ static noinline int add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
494 ref->is_head = 0; 519 ref->is_head = 0;
495 ref->in_tree = 1; 520 ref->in_tree = 1;
496 521
522 if (need_ref_seq(for_cow, ref_root))
523 seq = inc_delayed_seq(delayed_refs);
524 ref->seq = seq;
525
497 full_ref = btrfs_delayed_node_to_tree_ref(ref); 526 full_ref = btrfs_delayed_node_to_tree_ref(ref);
498 full_ref->parent = parent; 527 full_ref->parent = parent;
499 full_ref->root = ref_root; 528 full_ref->root = ref_root;
@@ -534,6 +563,7 @@ static noinline int add_delayed_data_ref(struct btrfs_fs_info *fs_info,
534 struct btrfs_delayed_ref_node *existing; 563 struct btrfs_delayed_ref_node *existing;
535 struct btrfs_delayed_data_ref *full_ref; 564 struct btrfs_delayed_data_ref *full_ref;
536 struct btrfs_delayed_ref_root *delayed_refs; 565 struct btrfs_delayed_ref_root *delayed_refs;
566 u64 seq = 0;
537 567
538 if (action == BTRFS_ADD_DELAYED_EXTENT) 568 if (action == BTRFS_ADD_DELAYED_EXTENT)
539 action = BTRFS_ADD_DELAYED_REF; 569 action = BTRFS_ADD_DELAYED_REF;
@@ -549,6 +579,10 @@ static noinline int add_delayed_data_ref(struct btrfs_fs_info *fs_info,
549 ref->is_head = 0; 579 ref->is_head = 0;
550 ref->in_tree = 1; 580 ref->in_tree = 1;
551 581
582 if (need_ref_seq(for_cow, ref_root))
583 seq = inc_delayed_seq(delayed_refs);
584 ref->seq = seq;
585
552 full_ref = btrfs_delayed_node_to_data_ref(ref); 586 full_ref = btrfs_delayed_node_to_data_ref(ref);
553 full_ref->parent = parent; 587 full_ref->parent = parent;
554 full_ref->root = ref_root; 588 full_ref->root = ref_root;
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index a5fb2bc83732..174416f7882b 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -33,6 +33,9 @@ struct btrfs_delayed_ref_node {
33 /* the size of the extent */ 33 /* the size of the extent */
34 u64 num_bytes; 34 u64 num_bytes;
35 35
36 /* seq number to keep track of insertion order */
37 u64 seq;
38
36 /* ref count on this data structure */ 39 /* ref count on this data structure */
37 atomic_t refs; 40 atomic_t refs;
38 41
@@ -136,6 +139,20 @@ struct btrfs_delayed_ref_root {
136 int flushing; 139 int flushing;
137 140
138 u64 run_delayed_start; 141 u64 run_delayed_start;
142
143 /*
144 * seq number of delayed refs. We need to know if a backref was being
145 * added before the currently processed ref or afterwards.
146 */
147 u64 seq;
148
149 /*
150 * seq_list holds a list of all seq numbers that are currently being
151 * added to the list. While walking backrefs (btrfs_find_all_roots,
152 * qgroups), which might take some time, no newer ref must be processed,
153 * as it might influence the outcome of the walk.
154 */
155 struct list_head seq_head;
139}; 156};
140 157
141static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref) 158static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
@@ -171,6 +188,59 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
171 struct btrfs_delayed_ref_head *head); 188 struct btrfs_delayed_ref_head *head);
172int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans, 189int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
173 struct list_head *cluster, u64 search_start); 190 struct list_head *cluster, u64 search_start);
191
/*
 * One entry on btrfs_delayed_ref_root::seq_head.  While such an entry is
 * registered, btrfs_check_delayed_seq() holds back any delayed ref whose
 * seq is >= this entry's seq (see the seq_head comment in
 * btrfs_delayed_ref_root).
 */
struct seq_list {
	struct list_head list;	/* link into delayed_refs->seq_head */
	u64 seq;		/* delayed_refs->seq sampled at registration */
};
196
197static inline u64 inc_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs)
198{
199 assert_spin_locked(&delayed_refs->lock);
200 ++delayed_refs->seq;
201 return delayed_refs->seq;
202}
203
/*
 * Register the current delayed-ref seq on delayed_refs->seq_head so that
 * newer delayed refs are held back (see btrfs_check_delayed_seq) until the
 * matching btrfs_put_delayed_seq.  Caller must hold delayed_refs->lock and
 * keep *elem alive until it is put.
 */
static inline void
btrfs_get_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
		      struct seq_list *elem)
{
	assert_spin_locked(&delayed_refs->lock);
	/* record the seq first, then publish the entry on the list */
	elem->seq = delayed_refs->seq;
	list_add_tail(&elem->list, &delayed_refs->seq_head);
}
212
/*
 * Drop a seq registration made by btrfs_get_delayed_seq, allowing delayed
 * refs that were held back by it to be processed again.  Unlike the get
 * side, this takes delayed_refs->lock itself.
 */
static inline void
btrfs_put_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
		      struct seq_list *elem)
{
	spin_lock(&delayed_refs->lock);
	list_del(&elem->list);
	spin_unlock(&delayed_refs->lock);
}
221
222int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
223 u64 seq);
224
225/*
226 * delayed refs with a ref_seq > 0 must be held back during backref walking.
227 * this only applies to items in one of the fs-trees. for_cow items never need
228 * to be held back, so they won't get a ref_seq number.
229 */
230static inline int need_ref_seq(int for_cow, u64 rootid)
231{
232 if (for_cow)
233 return 0;
234
235 if (rootid == BTRFS_FS_TREE_OBJECTID)
236 return 1;
237
238 if ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID)
239 return 1;
240
241 return 0;
242}
243
174/* 244/*
175 * a node might live in a head or a regular ref, this lets you 245 * a node might live in a head or a regular ref, this lets you
176 * test for the proper type to use. 246 * test for the proper type to use.
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index a2bfedcbcabc..31a7393af64e 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -36,6 +36,8 @@ static noinline void put_transaction(struct btrfs_transaction *transaction)
36 WARN_ON(atomic_read(&transaction->use_count) == 0); 36 WARN_ON(atomic_read(&transaction->use_count) == 0);
37 if (atomic_dec_and_test(&transaction->use_count)) { 37 if (atomic_dec_and_test(&transaction->use_count)) {
38 BUG_ON(!list_empty(&transaction->list)); 38 BUG_ON(!list_empty(&transaction->list));
39 WARN_ON(transaction->delayed_refs.root.rb_node);
40 WARN_ON(!list_empty(&transaction->delayed_refs.seq_head));
39 memset(transaction, 0, sizeof(*transaction)); 41 memset(transaction, 0, sizeof(*transaction));
40 kmem_cache_free(btrfs_transaction_cachep, transaction); 42 kmem_cache_free(btrfs_transaction_cachep, transaction);
41 } 43 }
@@ -108,8 +110,10 @@ loop:
108 cur_trans->delayed_refs.num_heads = 0; 110 cur_trans->delayed_refs.num_heads = 0;
109 cur_trans->delayed_refs.flushing = 0; 111 cur_trans->delayed_refs.flushing = 0;
110 cur_trans->delayed_refs.run_delayed_start = 0; 112 cur_trans->delayed_refs.run_delayed_start = 0;
113 cur_trans->delayed_refs.seq = 1;
111 spin_lock_init(&cur_trans->commit_lock); 114 spin_lock_init(&cur_trans->commit_lock);
112 spin_lock_init(&cur_trans->delayed_refs.lock); 115 spin_lock_init(&cur_trans->delayed_refs.lock);
116 INIT_LIST_HEAD(&cur_trans->delayed_refs.seq_head);
113 117
114 INIT_LIST_HEAD(&cur_trans->pending_snapshots); 118 INIT_LIST_HEAD(&cur_trans->pending_snapshots);
115 list_add_tail(&cur_trans->list, &root->fs_info->trans_list); 119 list_add_tail(&cur_trans->list, &root->fs_info->trans_list);