author	Jan Schmidt <list.btrfs@jan-o-sch.net>	2012-06-21 05:08:04 -0400
committer	Jan Schmidt <list.btrfs@jan-o-sch.net>	2012-07-10 09:14:41 -0400
commit	097b8a7c9e48e2cb50fd0eb9315791921beaf484 (patch)
tree	03588f0e29000e415f7177d31a8f5b4c1689d6ad /fs/btrfs/delayed-ref.h
parent	cf5388307a2b4faab4b11d732b61c85741be6169 (diff)
Btrfs: join tree mod log code with the code holding back delayed refs
We've got two mechanisms, both required for reliable backref resolving (tree mod log and holding back delayed refs). You cannot make use of one without the other. So instead of requiring the user of this mechanism to set up both correctly, we join them into a single interface.

Additionally, we stop inserting non-blockers into fs_info->tree_mod_seq_list as we did before, which was of no value.

Signed-off-by: Jan Schmidt <list.btrfs@jan-o-sch.net>
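For illustration, a minimal sketch of how a backref walker uses the joined interface after this change. The btrfs_get_tree_mod_seq()/btrfs_put_tree_mod_seq() pair comes from this series' changes to ctree.c and is not visible in this file's hunks; the walker function and do_backref_walk() are hypothetical stand-ins:

/*
 * Hypothetical caller in the style of btrfs_find_all_roots(): a single
 * get/put pair now both registers the walk in the tree mod log and
 * holds back delayed ref processing, instead of two separate setups.
 */
static int walk_backrefs(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct seq_list elem = {};
	int ret;

	/* blocks newer delayed refs and pins the tree mod log state */
	btrfs_get_tree_mod_seq(fs_info, &elem);

	ret = do_backref_walk(fs_info, bytenr, elem.seq); /* hypothetical */

	/* releases both mechanisms and wakes anyone waiting on refs */
	btrfs_put_tree_mod_seq(fs_info, &elem);

	return ret;
}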
Diffstat (limited to 'fs/btrfs/delayed-ref.h')
-rw-r--r--	fs/btrfs/delayed-ref.h	| 49
1 file changed, 2 insertions(+), 47 deletions(-)
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 413927fb9957..2b5cb27f9861 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -139,26 +139,6 @@ struct btrfs_delayed_ref_root {
 	int flushing;
 
 	u64 run_delayed_start;
-
-	/*
-	 * seq number of delayed refs. We need to know if a backref was being
-	 * added before the currently processed ref or afterwards.
-	 */
-	u64 seq;
-
-	/*
-	 * seq_list holds a list of all seq numbers that are currently being
-	 * added to the list. While walking backrefs (btrfs_find_all_roots,
-	 * qgroups), which might take some time, no newer ref must be processed,
-	 * as it might influence the outcome of the walk.
-	 */
-	struct list_head seq_head;
-
-	/*
-	 * when the only refs we have in the list must not be processed, we want
-	 * to wait for more refs to show up or for the end of backref walking.
-	 */
-	wait_queue_head_t seq_wait;
 };
 
 static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
@@ -195,33 +175,8 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
 int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
 			   struct list_head *cluster, u64 search_start);
 
-static inline u64 inc_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs)
-{
-	assert_spin_locked(&delayed_refs->lock);
-	++delayed_refs->seq;
-	return delayed_refs->seq;
-}
-
-static inline void
-btrfs_get_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
-		      struct seq_list *elem)
-{
-	assert_spin_locked(&delayed_refs->lock);
-	elem->seq = delayed_refs->seq;
-	list_add_tail(&elem->list, &delayed_refs->seq_head);
-}
-
-static inline void
-btrfs_put_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
-		      struct seq_list *elem)
-{
-	spin_lock(&delayed_refs->lock);
-	list_del(&elem->list);
-	wake_up(&delayed_refs->seq_wait);
-	spin_unlock(&delayed_refs->lock);
-}
-
-int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
+int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
+			    struct btrfs_delayed_ref_root *delayed_refs,
 			    u64 seq);
 
 /*
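The new fs_info argument exists because the blocker list the check consults is now the shared fs_info->tree_mod_seq_list rather than a list private to delayed_refs. A rough sketch of the check's logic after this patch; the field name tree_mod_seq_lock is an assumption from the surrounding series, not shown in this hunk:

/*
 * Rough sketch of the post-patch check, not the verbatim kernel code.
 * Returns nonzero when the oldest registered backref walk started at or
 * before @seq, i.e. the delayed ref carrying that seq must be held back.
 */
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);	/* assumed lock name */
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq)	/* a walker still needs this ref */
			ret = 1;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	return ret;
}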