Diffstat (limited to 'fs/btrfs/delayed-ref.h')
-rw-r--r-- | fs/btrfs/delayed-ref.h | 193
1 file changed, 193 insertions, 0 deletions
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
new file mode 100644
index 000000000000..3bec2ff0b15c
--- /dev/null
+++ b/fs/btrfs/delayed-ref.h
@@ -0,0 +1,193 @@
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#ifndef __DELAYED_REF__
#define __DELAYED_REF__

/* these are the possible values of struct btrfs_delayed_ref->action */
#define BTRFS_ADD_DELAYED_REF		1 /* add one backref to the tree */
#define BTRFS_DROP_DELAYED_REF		2 /* delete one backref from the tree */
#define BTRFS_ADD_DELAYED_EXTENT	3 /* record a full extent allocation */
#define BTRFS_UPDATE_DELAYED_HEAD	4 /* not changing ref count on head ref */
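
/*
 * Illustrative example (not part of the original change): allocating a
 * brand new tree block or data extent queues BTRFS_ADD_DELAYED_EXTENT so
 * the extent item itself gets inserted later; taking an extra reference
 * on an existing extent (e.g. when a snapshot shares it) queues
 * BTRFS_ADD_DELAYED_REF, and dropping one queues BTRFS_DROP_DELAYED_REF.
 */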

struct btrfs_delayed_ref_node {
	struct rb_node rb_node;

	/* the starting bytenr of the extent */
	u64 bytenr;

	/* the parent our backref will point to */
	u64 parent;

	/* the size of the extent */
	u64 num_bytes;

	/* ref count on this data structure */
	atomic_t refs;

	/*
	 * how many refs this entry is adding or deleting.  For
	 * head refs, this may be a negative number because it is keeping
	 * track of the total mods done to the reference count.
	 * For individual refs, this will always be a positive number.
	 *
	 * It may be more than one, since it is possible for a single
	 * parent to have more than one ref on an extent.
	 */
	int ref_mod;

	/* is this node still in the rbtree? */
	unsigned int in_tree:1;
};
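
/*
 * Illustrative example (not part of the original change): if two adds and
 * one drop are queued against the same extent before the refs are run,
 * the head ref ends up with ref_mod == +1 (the net change), while each
 * individual ref node keeps ref_mod == 1 and carries its own action.
 */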

/*
 * the head refs are used to hold a lock on a given extent, which allows us
 * to make sure that only one process is running the delayed refs
 * at a time for a single extent.  They also store the sum of all the
 * reference count modifications we've queued up.
 */
struct btrfs_delayed_ref_head {
	struct btrfs_delayed_ref_node node;

	/*
	 * the mutex is held while running the refs, and it is also
	 * held when checking the sum of reference modifications.
	 */
	struct mutex mutex;

	struct list_head cluster;

	/*
	 * when a new extent is allocated, it is just reserved in memory.
	 * The actual extent isn't inserted into the extent allocation tree
	 * until the delayed ref is processed.  must_insert_reserved is
	 * used to flag a delayed ref so the accounting can be updated
	 * when a full insert is done.
	 *
	 * It is possible the extent will be freed before it is ever
	 * inserted into the extent allocation tree.  In this case
	 * we need to update the in-RAM accounting to properly reflect
	 * that the free has happened.
	 */
	unsigned int must_insert_reserved:1;
};
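
/*
 * Illustrative usage sketch (not part of the original change): a caller
 * that wants to process or inspect one extent's queued refs would
 * typically take the head's mutex via btrfs_delayed_ref_lock(), declared
 * below, do its work against head->node.bytenr, and then
 * mutex_unlock(&head->mutex).
 */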

struct btrfs_delayed_ref {
	struct btrfs_delayed_ref_node node;

	/* the root objectid our ref will point to */
	u64 root;

	/* the generation for the backref */
	u64 generation;

	/* owner_objectid of the backref */
	u64 owner_objectid;

	/* operation done by this entry in the rbtree */
	u8 action;

	/*
	 * if pin == 1, when the extent is freed it will be pinned until
	 * transaction commit
	 */
	unsigned int pin:1;
};

struct btrfs_delayed_ref_root {
	struct rb_root root;

	/* this spin lock protects the rbtree and the entries inside */
	spinlock_t lock;

	/*
	 * how many delayed ref updates we've queued, used by the
	 * throttling code
	 */
	unsigned long num_entries;

	/* total number of head nodes in tree */
	unsigned long num_heads;

	/* total number of head nodes ready for processing */
	unsigned long num_heads_ready;

	/*
	 * set when the tree is flushing before a transaction commit,
	 * used by the throttling code to decide if new updates need
	 * to be run right away
	 */
	int flushing;

	u64 run_delayed_start;
};

static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
	WARN_ON(atomic_read(&ref->refs) == 0);
	if (atomic_dec_and_test(&ref->refs)) {
		WARN_ON(ref->in_tree);
		kfree(ref);
	}
}
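
/*
 * Illustrative usage sketch (not part of the original change): code that
 * needs a node to outlive the delayed ref tree's spinlock would typically
 * grab an extra reference first, e.g.
 *
 *	atomic_inc(&ref->refs);
 *	spin_unlock(&delayed_refs->lock);
 *	...use the node...
 *	btrfs_put_delayed_ref(ref);
 */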

int btrfs_add_delayed_ref(struct btrfs_trans_handle *trans,
			  u64 bytenr, u64 num_bytes, u64 parent, u64 ref_root,
			  u64 ref_generation, u64 owner_objectid, int action,
			  int pin);

struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr);
int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 bytenr,
			    u64 num_bytes, u32 *refs);
int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, u64 orig_parent,
			     u64 parent, u64 orig_ref_root, u64 ref_root,
			     u64 orig_ref_generation, u64 ref_generation,
			     u64 owner_objectid, int pin);
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head);
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
			   struct list_head *cluster, u64 search_start);

/*
 * a node might live in a head or a regular ref; this lets you
 * test for the proper type to use.
 */
static inline int btrfs_delayed_ref_is_head(struct btrfs_delayed_ref_node *node)
{
	return node->parent == (u64)-1;
}

/*
 * helper functions to cast a node into its container
 */
static inline struct btrfs_delayed_ref *
btrfs_delayed_node_to_ref(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_ref, node);
}

static inline struct btrfs_delayed_ref_head *
btrfs_delayed_node_to_head(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(!btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_ref_head, node);
}
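
/*
 * Illustrative usage sketch (not part of the original change): code
 * walking the rbtree would typically branch on the node flavour, e.g.
 *
 *	if (btrfs_delayed_ref_is_head(node))
 *		head = btrfs_delayed_node_to_head(node);
 *	else
 *		ref = btrfs_delayed_node_to_ref(node);
 */
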
#endif