Diffstat (limited to 'fs/btrfs/inode-map.c')
-rw-r--r--   fs/btrfs/inode-map.c   428
1 file changed, 423 insertions(+), 5 deletions(-)
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index c05a08f4c41..00097051262 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -16,11 +16,430 @@
  * Boston, MA 021110-1307, USA.
  */
 
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/pagemap.h>
+
 #include "ctree.h"
 #include "disk-io.h"
+#include "free-space-cache.h"
+#include "inode-map.h"
 #include "transaction.h"
 
-int btrfs_find_highest_inode(struct btrfs_root *root, u64 *objectid)
+static int caching_kthread(void *data)
+{
+	struct btrfs_root *root = data;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+	struct btrfs_key key;
+	struct btrfs_path *path;
+	struct extent_buffer *leaf;
+	u64 last = (u64)-1;
+	int slot;
+	int ret;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	/* Since the commit root is read-only, we can safely skip locking. */
+	path->skip_locking = 1;
+	path->search_commit_root = 1;
+	path->reada = 2;
+
+	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
+	key.offset = 0;
+	key.type = BTRFS_INODE_ITEM_KEY;
+again:
+	/* need to make sure the commit_root doesn't disappear */
+	mutex_lock(&root->fs_commit_mutex);
+
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	if (ret < 0)
+		goto out;
+
+	while (1) {
+		smp_mb();
+		if (fs_info->closing > 1)
+			goto out;
+
+		leaf = path->nodes[0];
+		slot = path->slots[0];
+		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret < 0)
+				goto out;
+			else if (ret > 0)
+				break;
+
+			if (need_resched() ||
+			    btrfs_transaction_in_commit(fs_info)) {
+				leaf = path->nodes[0];
+
+				if (btrfs_header_nritems(leaf) == 0) {
+					WARN_ON(1);
+					break;
+				}
+
+				/*
+				 * Save the key so we can advance forward
+				 * in the next search.
+				 */
+				btrfs_item_key_to_cpu(leaf, &key, 0);
+				btrfs_release_path(path);
+				root->cache_progress = last;
+				mutex_unlock(&root->fs_commit_mutex);
+				schedule_timeout(1);
+				goto again;
+			} else
+				continue;
+		}
+
+		btrfs_item_key_to_cpu(leaf, &key, slot);
+
+		if (key.type != BTRFS_INODE_ITEM_KEY)
+			goto next;
+
+		if (key.objectid >= BTRFS_LAST_FREE_OBJECTID)
+			break;
+
+		if (last != (u64)-1 && last + 1 != key.objectid) {
+			__btrfs_add_free_space(ctl, last + 1,
+					       key.objectid - last - 1);
+			wake_up(&root->cache_wait);
+		}
+
+		last = key.objectid;
+next:
+		path->slots[0]++;
+	}
+
+	if (last < BTRFS_LAST_FREE_OBJECTID - 1) {
+		__btrfs_add_free_space(ctl, last + 1,
+				       BTRFS_LAST_FREE_OBJECTID - last - 1);
+	}
+
+	spin_lock(&root->cache_lock);
+	root->cached = BTRFS_CACHE_FINISHED;
+	spin_unlock(&root->cache_lock);
+
+	root->cache_progress = (u64)-1;
+	btrfs_unpin_free_ino(root);
+out:
+	wake_up(&root->cache_wait);
+	mutex_unlock(&root->fs_commit_mutex);
+
+	btrfs_free_path(path);
+
+	return ret;
+}
+
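The core of caching_kthread() is the gap arithmetic above: whenever two consecutive INODE_ITEM keys are not adjacent, every objectid between them is free, and the tail between the highest key and BTRFS_LAST_FREE_OBJECTID is free as well. A minimal user-space sketch of the same bookkeeping, with a hypothetical add_free() standing in for __btrfs_add_free_space():

	#include <stdio.h>

	static void add_free(unsigned long long start, unsigned long long count)
	{
		printf("free: start %llu, count %llu\n", start, count);
	}

	/* ids[] is the sorted list of in-use inode numbers, as the btree
	 * walk would yield them; max_id plays the role of
	 * BTRFS_LAST_FREE_OBJECTID. */
	static void scan_gaps(const unsigned long long *ids, int n,
			      unsigned long long max_id)
	{
		unsigned long long last = (unsigned long long)-1;
		int i;

		for (i = 0; i < n; i++) {
			if (last != (unsigned long long)-1 && last + 1 != ids[i])
				add_free(last + 1, ids[i] - last - 1);	/* hole */
			last = ids[i];
		}
		if (last < max_id - 1)
			add_free(last + 1, max_id - last - 1);		/* tail */
	}

	int main(void)
	{
		unsigned long long ids[] = { 256, 257, 260, 300 };

		/* prints [258,259], [261,299] and [301,999] as free */
		scan_gaps(ids, 4, 1000);
		return 0;
	}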
+static void start_caching(struct btrfs_root *root)
+{
+	struct task_struct *tsk;
+	int ret;
+
+	spin_lock(&root->cache_lock);
+	if (root->cached != BTRFS_CACHE_NO) {
+		spin_unlock(&root->cache_lock);
+		return;
+	}
+
+	root->cached = BTRFS_CACHE_STARTED;
+	spin_unlock(&root->cache_lock);
+
+	ret = load_free_ino_cache(root->fs_info, root);
+	if (ret == 1) {
+		spin_lock(&root->cache_lock);
+		root->cached = BTRFS_CACHE_FINISHED;
+		spin_unlock(&root->cache_lock);
+		return;
+	}
+
+	tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n",
+			  root->root_key.objectid);
+	BUG_ON(IS_ERR(tsk));
+}
+
+int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
+{
+again:
+	*objectid = btrfs_find_ino_for_alloc(root);
+
+	if (*objectid != 0)
+		return 0;
+
+	start_caching(root);
+
+	wait_event(root->cache_wait,
+		   root->cached == BTRFS_CACHE_FINISHED ||
+		   root->free_ino_ctl->free_space > 0);
+
+	if (root->cached == BTRFS_CACHE_FINISHED &&
+	    root->free_ino_ctl->free_space == 0)
+		return -ENOSPC;
+	else
+		goto again;
+}
+
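btrfs_find_free_ino() and btrfs_return_ino() (below) form the allocation interface for inode numbers. A rough sketch of a caller, with a hypothetical create_the_inode() standing in for the real inode-creation work:

	/* hypothetical caller, e.g. somewhere in the inode-creation path */
	static int alloc_one_inode(struct btrfs_root *root)
	{
		u64 objectid;
		int ret;

		ret = btrfs_find_free_ino(root, &objectid);
		if (ret)
			return ret;	/* -ENOSPC: number space exhausted */

		ret = create_the_inode(root, objectid);	/* hypothetical */
		if (ret)
			btrfs_return_ino(root, objectid); /* hand it back */
		return ret;
	}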
+void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
+{
+	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
+again:
+	if (root->cached == BTRFS_CACHE_FINISHED) {
+		__btrfs_add_free_space(ctl, objectid, 1);
+	} else {
+		/*
+		 * If we are in the process of caching free ino chunks,
+		 * then to avoid adding the same inode number to the
+		 * free_ino tree twice across transactions, we'll leave
+		 * it in the pinned tree until a transaction is committed
+		 * or the caching work is done.
+		 */
+
+		mutex_lock(&root->fs_commit_mutex);
+		spin_lock(&root->cache_lock);
+		if (root->cached == BTRFS_CACHE_FINISHED) {
+			spin_unlock(&root->cache_lock);
+			mutex_unlock(&root->fs_commit_mutex);
+			goto again;
+		}
+		spin_unlock(&root->cache_lock);
+
+		start_caching(root);
+
+		if (objectid <= root->cache_progress)
+			__btrfs_add_free_space(ctl, objectid, 1);
+		else
+			__btrfs_add_free_space(pinned, objectid, 1);
+
+		mutex_unlock(&root->fs_commit_mutex);
+	}
+}
+
+/*
+ * When a transaction is committed, we'll move those inode numbers which
+ * are smaller than root->cache_progress from the pinned tree to the
+ * free_ino tree, and the others will just be dropped, because the commit
+ * root we were searching has changed.
+ *
+ * Must be called with root->fs_commit_mutex held.
+ */
+void btrfs_unpin_free_ino(struct btrfs_root *root)
+{
+	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+	struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
+	struct btrfs_free_space *info;
+	struct rb_node *n;
+	u64 count;
+
+	while (1) {
+		n = rb_first(rbroot);
+		if (!n)
+			break;
+
+		info = rb_entry(n, struct btrfs_free_space, offset_index);
+		BUG_ON(info->bitmap);
+
+		if (info->offset > root->cache_progress)
+			goto free;
+		else if (info->offset + info->bytes > root->cache_progress)
+			count = root->cache_progress - info->offset + 1;
+		else
+			count = info->bytes;
+
+		__btrfs_add_free_space(ctl, info->offset, count);
+free:
+		rb_erase(&info->offset_index, rbroot);
+		kfree(info);
+	}
+}
+
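The partial-overlap branch above is worth a worked example: a pinned extent may straddle cache_progress, in which case only the numbers up to and including cache_progress are safe to move. Assuming cache_progress = 1000 and a pinned extent { offset = 990, bytes = 50 }, i.e. numbers 990..1039:

	count = 1000 - 990 + 1 = 11

so 990..1000 go to the free_ino tree, while 1001..1039 are dropped; if they are still free, the caching scan will rediscover them in the new commit root.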
+#define INIT_THRESHOLD (((1024 * 32) / 2) / sizeof(struct btrfs_free_space))
+#define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8)
+
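With 4K pages and, as an assumption, a 64-byte struct btrfs_free_space on x86-64, the two constants work out to:

	INIT_THRESHOLD    = ((1024 * 32) / 2) / 64 = 256 extent entries (16K)
	INODES_PER_BITMAP = 4096 * 8               = 32768 inode numbers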
+/*
+ * The goal is to keep the memory used by the free_ino tree from
+ * exceeding what we would use if we stored the same information in
+ * bitmaps only.
+ */
+static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
+{
+	struct btrfs_free_space *info;
+	struct rb_node *n;
+	int max_ino;
+	int max_bitmaps;
+
+	n = rb_last(&ctl->free_space_offset);
+	if (!n) {
+		ctl->extents_thresh = INIT_THRESHOLD;
+		return;
+	}
+	info = rb_entry(n, struct btrfs_free_space, offset_index);
+
+	/*
+	 * Find the maximum inode number in the filesystem. Note we
+	 * ignore the fact that this can be a bitmap, because we are
+	 * not doing a precise calculation.
+	 */
+	max_ino = info->bytes - 1;
+
+	max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP;
+	if (max_bitmaps <= ctl->total_bitmaps) {
+		ctl->extents_thresh = 0;
+		return;
+	}
+
+	ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
+				PAGE_CACHE_SIZE / sizeof(*info);
+}
+
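A worked example of the recalculation, under the same page-size and structure-size assumptions as above: suppose the last free extent implies max_ino = 100000 and two bitmaps are already in use. Then

	max_bitmaps    = ALIGN(100000, 32768) / 32768 = 131072 / 32768 = 4
	extents_thresh = (4 - 2) * 4096 / 64          = 128

i.e. up to 128 extent entries may exist before new chunks start going into bitmaps; had total_bitmaps already reached 4, the threshold would drop to 0.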
+/*
+ * We don't fall back to bitmaps if we are below the extents threshold
+ * or if this chunk of inode numbers is a big one.
+ */
+static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
+		       struct btrfs_free_space *info)
+{
+	if (ctl->free_extents < ctl->extents_thresh ||
+	    info->bytes > INODES_PER_BITMAP / 10)
+		return false;
+
+	return true;
+}
+
+static struct btrfs_free_space_op free_ino_op = {
+	.recalc_thresholds = recalculate_thresholds,
+	.use_bitmap = use_bitmap,
+};
+
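The INODES_PER_BITMAP / 10 cut-off keeps large runs of free numbers as extents even when the cache is over the threshold, since a single extent entry describes a big run far more compactly than bitmap bits would. With 4K pages (assumption, as above) the policy reads:

	chunk <= 3276 numbers and over the threshold  -> bitmap
	chunk >  3276 numbers                         -> extent entry
	under the extents threshold                   -> extent entry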
+static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
+{
+}
+
+static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl,
+			      struct btrfs_free_space *info)
+{
+	/*
+	 * We always use extents for two reasons:
+	 *
+	 * - The pinned tree is only used during the process of caching
+	 *   work.
+	 * - It keeps the code simpler. See btrfs_unpin_free_ino().
+	 */
+	return false;
+}
+
+static struct btrfs_free_space_op pinned_free_ino_op = {
+	.recalc_thresholds = pinned_recalc_thresholds,
+	.use_bitmap = pinned_use_bitmap,
+};
+
+void btrfs_init_free_ino_ctl(struct btrfs_root *root)
+{
+	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
+
+	spin_lock_init(&ctl->tree_lock);
+	ctl->unit = 1;
+	ctl->start = 0;
+	ctl->private = NULL;
+	ctl->op = &free_ino_op;
+
+	/*
+	 * Initially we allow 16K of ram for caching chunks of inode
+	 * numbers before we resort to bitmaps. This is somewhat
+	 * arbitrary, but it will be adjusted at runtime.
+	 */
+	ctl->extents_thresh = INIT_THRESHOLD;
+
+	spin_lock_init(&pinned->tree_lock);
+	pinned->unit = 1;
+	pinned->start = 0;
+	pinned->private = NULL;
+	pinned->extents_thresh = 0;
+	pinned->op = &pinned_free_ino_op;
+}
+
+int btrfs_save_ino_cache(struct btrfs_root *root,
+			 struct btrfs_trans_handle *trans)
+{
+	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+	struct btrfs_path *path;
+	struct inode *inode;
+	u64 alloc_hint = 0;
+	int ret;
+	int prealloc;
+	bool retry = false;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+again:
+	inode = lookup_free_ino_inode(root, path);
+	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
+		ret = PTR_ERR(inode);
+		goto out;
+	}
+
+	if (IS_ERR(inode)) {
+		BUG_ON(retry);
+		retry = true;
+
+		ret = create_free_ino_inode(root, trans, path);
+		if (ret)
+			goto out;
+		goto again;
+	}
+
+	BTRFS_I(inode)->generation = 0;
+	ret = btrfs_update_inode(trans, root, inode);
+	WARN_ON(ret);
+
+	if (i_size_read(inode) > 0) {
+		ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
+		if (ret)
+			goto out_put;
+	}
+
+	spin_lock(&root->cache_lock);
+	if (root->cached != BTRFS_CACHE_FINISHED) {
+		ret = -1;
+		spin_unlock(&root->cache_lock);
+		goto out_put;
+	}
+	spin_unlock(&root->cache_lock);
+
+	spin_lock(&ctl->tree_lock);
+	prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
+	prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE);
+	prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE;
+	spin_unlock(&ctl->tree_lock);
+
+	/* Just to make sure we have enough space */
+	prealloc += 8 * PAGE_CACHE_SIZE;
+
+	ret = btrfs_check_data_free_space(inode, prealloc);
+	if (ret)
+		goto out_put;
+
+	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
+					      prealloc, prealloc, &alloc_hint);
+	if (ret)
+		goto out_put;
+	btrfs_free_reserved_data_space(inode, prealloc);
+
+out_put:
+	iput(inode);
+out:
+	if (ret == 0)
+		ret = btrfs_write_out_ino_cache(root, trans, path);
+
+	btrfs_free_path(path);
+	return ret;
+}
+
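The preallocation is a deliberate over-estimate of the on-disk cache size. Under the same assumptions as above (4K pages, 64-byte entries), a cache holding 200 extent entries and 3 bitmaps would reserve:

	prealloc  = ALIGN(200 * 64, 4096) = 16384	/* extent entries */
	prealloc += 3 * 4096              = 12288	/* bitmaps */
	prealloc += 8 * 4096              = 32768	/* safety margin */

for a total of 61440 bytes, i.e. 15 pages.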
+static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
 {
 	struct btrfs_path *path;
 	int ret;
@@ -55,15 +474,14 @@ error:
 	return ret;
 }
 
-int btrfs_find_free_objectid(struct btrfs_trans_handle *trans,
-			     struct btrfs_root *root,
-			     u64 dirid, u64 *objectid)
+int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
 {
 	int ret;
 	mutex_lock(&root->objectid_mutex);
 
 	if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) {
-		ret = btrfs_find_highest_inode(root, &root->highest_objectid);
+		ret = btrfs_find_highest_objectid(root,
+						  &root->highest_objectid);
 		if (ret)
 			goto out;
 	}