author     Jan Schmidt <list.btrfs@jan-o-sch.net>  2012-06-21 05:08:04 -0400
committer  Jan Schmidt <list.btrfs@jan-o-sch.net>  2012-07-10 09:14:41 -0400
commit     097b8a7c9e48e2cb50fd0eb9315791921beaf484
tree       03588f0e29000e415f7177d31a8f5b4c1689d6ad /fs/btrfs/delayed-ref.h
parent     cf5388307a2b4faab4b11d732b61c85741be6169
Btrfs: join tree mod log code with the code holding back delayed refs
We have two mechanisms that are both required for reliable backref resolving
(the tree mod log and holding back delayed refs), and neither can be used
without the other. So instead of requiring the user to set up both correctly,
we join them into a single interface (sketched below).
Additionally, we stop inserting non-blockers into fs_info->tree_mod_seq_list,
which was of no value.
Signed-off-by: Jan Schmidt <list.btrfs@jan-o-sch.net>
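
The joined interface itself lives in ctree.c/ctree.h, in hunks not shown in
this view (which is limited to fs/btrfs/delayed-ref.h). Roughly, a backref
walker now brackets its work with a single get/put pair instead of managing
the tree mod log and the delayed-ref blocker separately. A minimal sketch,
assuming the btrfs_get_tree_mod_seq()/btrfs_put_tree_mod_seq() names from the
merged patch; the surrounding code is illustrative, not part of this diff:

	/*
	 * Sketch of the joined interface: one get/put pair both enables
	 * tree mod logging and registers the caller as a blocker for
	 * delayed ref processing.
	 */
	struct seq_list elem = {};

	btrfs_get_tree_mod_seq(fs_info, &elem);
	/* ... walk backrefs, e.g. btrfs_find_all_roots(), using elem.seq ... */
	btrfs_put_tree_mod_seq(fs_info, &elem);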
Diffstat (limited to 'fs/btrfs/delayed-ref.h')
-rw-r--r--  fs/btrfs/delayed-ref.h | 49 +-
1 file changed, 2 insertions(+), 47 deletions(-)
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 413927fb9957..2b5cb27f9861 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -139,26 +139,6 @@ struct btrfs_delayed_ref_root {
 	int flushing;
 
 	u64 run_delayed_start;
-
-	/*
-	 * seq number of delayed refs. We need to know if a backref was being
-	 * added before the currently processed ref or afterwards.
-	 */
-	u64 seq;
-
-	/*
-	 * seq_list holds a list of all seq numbers that are currently being
-	 * added to the list. While walking backrefs (btrfs_find_all_roots,
-	 * qgroups), which might take some time, no newer ref must be processed,
-	 * as it might influence the outcome of the walk.
-	 */
-	struct list_head seq_head;
-
-	/*
-	 * when the only refs we have in the list must not be processed, we want
-	 * to wait for more refs to show up or for the end of backref walking.
-	 */
-	wait_queue_head_t seq_wait;
 };
 
 static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
@@ -195,33 +175,8 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
 int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
 			   struct list_head *cluster, u64 search_start);
 
-static inline u64 inc_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs)
-{
-	assert_spin_locked(&delayed_refs->lock);
-	++delayed_refs->seq;
-	return delayed_refs->seq;
-}
-
-static inline void
-btrfs_get_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
-		      struct seq_list *elem)
-{
-	assert_spin_locked(&delayed_refs->lock);
-	elem->seq = delayed_refs->seq;
-	list_add_tail(&elem->list, &delayed_refs->seq_head);
-}
-
-static inline void
-btrfs_put_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
-		      struct seq_list *elem)
-{
-	spin_lock(&delayed_refs->lock);
-	list_del(&elem->list);
-	wake_up(&delayed_refs->seq_wait);
-	spin_unlock(&delayed_refs->lock);
-}
-
-int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
-			    u64 seq);
+int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
+			    struct btrfs_delayed_ref_root *delayed_refs,
+			    u64 seq);
 
 /*
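
With fs_info added to the signature, btrfs_check_delayed_seq() can answer
"is an older backref walk still in flight?" from fs_info->tree_mod_seq_list
rather than from per-delayed-ref-root state. A minimal sketch of the consuming
pattern; the surrounding code and error handling are illustrative assumptions,
not taken from this patch:

	/*
	 * Sketch: deciding whether a delayed ref must be held back while a
	 * backref walk with an older seq is in flight. fs_info, delayed_refs
	 * and ref are assumed to be in scope, as in the delayed-ref run loop.
	 */
	if (ref->seq &&
	    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
		/* hold this ref back; retry once the walker drops its seq */
		return -EAGAIN;	/* illustrative error handling */
	}
	/* otherwise the ref is safe to process now */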