Diffstat (limited to 'fs/btrfs/delayed-ref.h')
-rw-r--r--	fs/btrfs/delayed-ref.h	62
1 file changed, 18 insertions(+), 44 deletions(-)
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 413927fb9957..0d7c90c366b6 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -139,26 +139,6 @@ struct btrfs_delayed_ref_root {
 	int flushing;
 
 	u64 run_delayed_start;
-
-	/*
-	 * seq number of delayed refs. We need to know if a backref was being
-	 * added before the currently processed ref or afterwards.
-	 */
-	u64 seq;
-
-	/*
-	 * seq_list holds a list of all seq numbers that are currently being
-	 * added to the list. While walking backrefs (btrfs_find_all_roots,
-	 * qgroups), which might take some time, no newer ref must be processed,
-	 * as it might influence the outcome of the walk.
-	 */
-	struct list_head seq_head;
-
-	/*
-	 * when the only refs we have in the list must not be processed, we want
-	 * to wait for more refs to show up or for the end of backref walking.
-	 */
-	wait_queue_head_t seq_wait;
 };
 
 static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
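
The fields dropped above carried the old hold-back scheme: a backref walk pinned the then-current seq on seq_head, delayed refs newer than that were not processed while the walk ran, and seq_wait let ref processing sleep until the walk finished. The helpers that did the pinning, btrfs_get_delayed_seq() and btrfs_put_delayed_seq(), are deleted in the next hunk. A minimal sketch of how a walker would have bracketed its work under that scheme, assuming the usual trans->transaction->delayed_refs access path; the function name is made up for illustration and is not code from this commit:

/*
 * Sketch (assumed usage of the removed helpers): pin the current seq so
 * the walk sees a stable view, walk, then unpin and wake ref processing.
 */
static int walk_backrefs_pinned(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs =
		&trans->transaction->delayed_refs;
	struct seq_list elem;

	/* btrfs_get_delayed_seq() asserted that delayed_refs->lock was held */
	spin_lock(&delayed_refs->lock);
	btrfs_get_delayed_seq(delayed_refs, &elem);
	spin_unlock(&delayed_refs->lock);

	/* ... walk backrefs; refs with seq > elem.seq are held back ... */

	/* removes elem from seq_head and wakes sleepers on seq_wait */
	btrfs_put_delayed_seq(delayed_refs, &elem);
	return 0;
}
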
@@ -195,34 +175,28 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
 int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
 			   struct list_head *cluster, u64 search_start);
 
-static inline u64 inc_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs)
-{
-	assert_spin_locked(&delayed_refs->lock);
-	++delayed_refs->seq;
-	return delayed_refs->seq;
-}
+int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
+			    struct btrfs_delayed_ref_root *delayed_refs,
+			    u64 seq);
 
-static inline void
-btrfs_get_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
-		      struct seq_list *elem)
+/*
+ * delayed refs with a ref_seq > 0 must be held back during backref walking.
+ * this only applies to items in one of the fs-trees. for_cow items never need
+ * to be held back, so they won't get a ref_seq number.
+ */
+static inline int need_ref_seq(int for_cow, u64 rootid)
 {
-	assert_spin_locked(&delayed_refs->lock);
-	elem->seq = delayed_refs->seq;
-	list_add_tail(&elem->list, &delayed_refs->seq_head);
-}
+	if (for_cow)
+		return 0;
 
-static inline void
-btrfs_put_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
-		      struct seq_list *elem)
-{
-	spin_lock(&delayed_refs->lock);
-	list_del(&elem->list);
-	wake_up(&delayed_refs->seq_wait);
-	spin_unlock(&delayed_refs->lock);
-}
+	if (rootid == BTRFS_FS_TREE_OBJECTID)
+		return 1;
 
-int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
-			    u64 seq);
+	if ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID)
+		return 1;
+
+	return 0;
+}
 
 /*
  * a node might live in a head or a regular ref, this lets you
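
On the replacement side, btrfs_check_delayed_seq() now takes the fs_info as well, and need_ref_seq() decides which delayed refs get a ref_seq at all: only non-COW refs that live in an fs-tree, i.e. the default subvolume (BTRFS_FS_TREE_OBJECTID) or any subvolume at or above BTRFS_FIRST_FREE_OBJECTID. The (s64) casts presumably keep the special high root ids (such as BTRFS_TREE_RELOC_OBJECTID, a small negative number when read as s64) from matching. A minimal call-site sketch, assuming the delayed ref node carries the u64 seq field the new comment refers to; tag_delayed_ref() and its cur_seq parameter are made-up names for illustration, not this commit's code:

/*
 * Hypothetical call site: give a queued delayed ref a ref_seq only when
 * need_ref_seq() says a backref walk could care about it; everything else
 * keeps seq == 0 and is never held back.
 */
static void tag_delayed_ref(struct btrfs_delayed_ref_node *ref,
			    int for_cow, u64 rootid, u64 cur_seq)
{
	ref->seq = 0;			/* default: never held back */
	if (need_ref_seq(for_cow, rootid))
		ref->seq = cur_seq;	/* held back while older walks run */
}

During delayed ref processing, btrfs_check_delayed_seq() would then be the gate that skips refs whose seq is still visible to an in-flight backref walk.
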
