Diffstat (limited to 'fs/btrfs/inode-map.c')
-rw-r--r--	fs/btrfs/inode-map.c	| 444
1 file changed, 439 insertions(+), 5 deletions(-)
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index c05a08f4c411..3262cd17a12f 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -16,11 +16,446 @@
  * Boston, MA 021110-1307, USA.
  */
 
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/pagemap.h>
+
 #include "ctree.h"
 #include "disk-io.h"
+#include "free-space-cache.h"
+#include "inode-map.h"
 #include "transaction.h"
 
-int btrfs_find_highest_inode(struct btrfs_root *root, u64 *objectid)
+static int caching_kthread(void *data)
+{
+	struct btrfs_root *root = data;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+	struct btrfs_key key;
+	struct btrfs_path *path;
+	struct extent_buffer *leaf;
+	u64 last = (u64)-1;
+	int slot;
+	int ret;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	/* Since the commit root is read-only, we can safely skip locking. */
+	path->skip_locking = 1;
+	path->search_commit_root = 1;
+	path->reada = 2;
+
+	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
+	key.offset = 0;
+	key.type = BTRFS_INODE_ITEM_KEY;
+again:
+	/* need to make sure the commit_root doesn't disappear */
+	mutex_lock(&root->fs_commit_mutex);
+
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	if (ret < 0)
+		goto out;
+
+	while (1) {
+		smp_mb();
+		if (fs_info->closing)
+			goto out;
+
+		leaf = path->nodes[0];
+		slot = path->slots[0];
+		if (slot >= btrfs_header_nritems(leaf)) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret < 0)
+				goto out;
+			else if (ret > 0)
+				break;
+
+			if (need_resched() ||
+			    btrfs_transaction_in_commit(fs_info)) {
+				leaf = path->nodes[0];
+
+				if (btrfs_header_nritems(leaf) == 0) {
+					WARN_ON(1);
+					break;
+				}
+
+				/*
+				 * Save the key so we can advance forward
+				 * in the next search.
+				 */
+				btrfs_item_key_to_cpu(leaf, &key, 0);
+				btrfs_release_path(path);
+				root->cache_progress = last;
+				mutex_unlock(&root->fs_commit_mutex);
+				schedule_timeout(1);
+				goto again;
+			} else
+				continue;
+		}
+
+		btrfs_item_key_to_cpu(leaf, &key, slot);
+
+		if (key.type != BTRFS_INODE_ITEM_KEY)
+			goto next;
+
+		if (key.objectid >= root->highest_objectid)
+			break;
+
+		if (last != (u64)-1 && last + 1 != key.objectid) {
+			__btrfs_add_free_space(ctl, last + 1,
+					       key.objectid - last - 1);
+			wake_up(&root->cache_wait);
+		}
+
+		last = key.objectid;
+next:
+		path->slots[0]++;
+	}
+
+	if (last < root->highest_objectid - 1) {
+		__btrfs_add_free_space(ctl, last + 1,
+				       root->highest_objectid - last - 1);
+	}
+
+	spin_lock(&root->cache_lock);
+	root->cached = BTRFS_CACHE_FINISHED;
+	spin_unlock(&root->cache_lock);
+
+	root->cache_progress = (u64)-1;
+	btrfs_unpin_free_ino(root);
+out:
+	wake_up(&root->cache_wait);
+	mutex_unlock(&root->fs_commit_mutex);
+
+	btrfs_free_path(path);
+
+	return ret;
+}
+
+static void start_caching(struct btrfs_root *root)
+{
+	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+	struct task_struct *tsk;
+	int ret;
+	u64 objectid;
+
+	spin_lock(&root->cache_lock);
+	if (root->cached != BTRFS_CACHE_NO) {
+		spin_unlock(&root->cache_lock);
+		return;
+	}
+
+	root->cached = BTRFS_CACHE_STARTED;
+	spin_unlock(&root->cache_lock);
+
+	ret = load_free_ino_cache(root->fs_info, root);
+	if (ret == 1) {
+		spin_lock(&root->cache_lock);
+		root->cached = BTRFS_CACHE_FINISHED;
+		spin_unlock(&root->cache_lock);
+		return;
+	}
+
+	/*
+	 * It can be quite time-consuming to fill the cache by searching
+	 * through the fs tree, and this can keep the ino allocation path
+	 * waiting. Therefore at start we quickly find out the highest
+	 * inode number and we know we can use inode numbers which fall in
+	 * [highest_ino + 1, BTRFS_LAST_FREE_OBJECTID].
+	 */
+	ret = btrfs_find_free_objectid(root, &objectid);
+	if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) {
+		__btrfs_add_free_space(ctl, objectid,
+				       BTRFS_LAST_FREE_OBJECTID - objectid + 1);
+	}
+
+	tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu",
+			  root->root_key.objectid);
+	BUG_ON(IS_ERR(tsk));
+}
+
+int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
+{
+again:
+	*objectid = btrfs_find_ino_for_alloc(root);
+
+	if (*objectid != 0)
+		return 0;
+
+	start_caching(root);
+
+	wait_event(root->cache_wait,
+		   root->cached == BTRFS_CACHE_FINISHED ||
+		   root->free_ino_ctl->free_space > 0);
+
+	if (root->cached == BTRFS_CACHE_FINISHED &&
+	    root->free_ino_ctl->free_space == 0)
+		return -ENOSPC;
+	else
+		goto again;
+}
+
+void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
+{
+	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
+again:
+	if (root->cached == BTRFS_CACHE_FINISHED) {
+		__btrfs_add_free_space(ctl, objectid, 1);
+	} else {
+		/*
+		 * If we are in the process of caching free ino chunks,
+		 * to avoid adding the same inode number to the free_ino
+		 * tree twice across transactions, we'll leave it in the
+		 * pinned tree until a transaction is committed or the
+		 * caching work is done.
+		 */
+
+		mutex_lock(&root->fs_commit_mutex);
+		spin_lock(&root->cache_lock);
+		if (root->cached == BTRFS_CACHE_FINISHED) {
+			spin_unlock(&root->cache_lock);
+			mutex_unlock(&root->fs_commit_mutex);
+			goto again;
+		}
+		spin_unlock(&root->cache_lock);
+
+		start_caching(root);
+
+		if (objectid <= root->cache_progress ||
+		    objectid > root->highest_objectid)
+			__btrfs_add_free_space(ctl, objectid, 1);
+		else
+			__btrfs_add_free_space(pinned, objectid, 1);
+
+		mutex_unlock(&root->fs_commit_mutex);
+	}
+}
+
+/*
+ * When a transaction is committed, we'll move those inode numbers which
+ * are smaller than root->cache_progress from the pinned tree to the
+ * free_ino tree, and others will just be dropped, because the commit
+ * root we were searching has changed.
+ *
+ * Must be called with root->fs_commit_mutex held.
+ */
+void btrfs_unpin_free_ino(struct btrfs_root *root)
+{
+	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+	struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
+	struct btrfs_free_space *info;
+	struct rb_node *n;
+	u64 count;
+
+	while (1) {
+		n = rb_first(rbroot);
+		if (!n)
+			break;
+
+		info = rb_entry(n, struct btrfs_free_space, offset_index);
+		BUG_ON(info->bitmap);
+
+		if (info->offset > root->cache_progress)
+			goto free;
+		else if (info->offset + info->bytes > root->cache_progress)
+			count = root->cache_progress - info->offset + 1;
+		else
+			count = info->bytes;
+
+		__btrfs_add_free_space(ctl, info->offset, count);
+free:
+		rb_erase(&info->offset_index, rbroot);
+		kfree(info);
+	}
+}
+
+#define INIT_THRESHOLD	(((1024 * 32) / 2) / sizeof(struct btrfs_free_space))
+#define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8)
+
+/*
+ * The goal is to keep the memory used by the free_ino tree from
+ * exceeding what we would use if we stored everything in bitmaps only.
+ */
+static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
+{
+	struct btrfs_free_space *info;
+	struct rb_node *n;
+	int max_ino;
+	int max_bitmaps;
+
+	n = rb_last(&ctl->free_space_offset);
+	if (!n) {
+		ctl->extents_thresh = INIT_THRESHOLD;
+		return;
+	}
+	info = rb_entry(n, struct btrfs_free_space, offset_index);
+
+	/*
+	 * Find the maximum inode number in the filesystem. Note we
+	 * ignore the fact that this can be a bitmap, because we are
+	 * not doing a precise calculation.
+	 */
+	max_ino = info->bytes - 1;
+
+	max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP;
+	if (max_bitmaps <= ctl->total_bitmaps) {
+		ctl->extents_thresh = 0;
+		return;
+	}
+
+	ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
+				PAGE_CACHE_SIZE / sizeof(*info);
+}
+
+/*
+ * We don't fall back to a bitmap if we are below the extents threshold
+ * or this chunk of inode numbers is a big one.
+ */
+static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
+		       struct btrfs_free_space *info)
+{
+	if (ctl->free_extents < ctl->extents_thresh ||
+	    info->bytes > INODES_PER_BITMAP / 10)
+		return false;
+
+	return true;
+}
+
+static struct btrfs_free_space_op free_ino_op = {
+	.recalc_thresholds	= recalculate_thresholds,
+	.use_bitmap		= use_bitmap,
+};
+
+static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
+{
+}
+
+static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl,
+			      struct btrfs_free_space *info)
+{
+	/*
+	 * We always use extents for two reasons:
+	 *
+	 * - The pinned tree is only used during the caching work.
+	 * - It keeps the code simpler. See btrfs_unpin_free_ino().
+	 */
+	return false;
+}
+
+static struct btrfs_free_space_op pinned_free_ino_op = {
+	.recalc_thresholds	= pinned_recalc_thresholds,
+	.use_bitmap		= pinned_use_bitmap,
+};
+
+void btrfs_init_free_ino_ctl(struct btrfs_root *root)
+{
+	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
+
+	spin_lock_init(&ctl->tree_lock);
+	ctl->unit = 1;
+	ctl->start = 0;
+	ctl->private = NULL;
+	ctl->op = &free_ino_op;
+
+	/*
+	 * Initially we allow 16K of RAM to cache chunks of inode
+	 * numbers before we resort to bitmaps. This is somewhat
+	 * arbitrary, but it will be adjusted at runtime.
+	 */
+	ctl->extents_thresh = INIT_THRESHOLD;
+
+	spin_lock_init(&pinned->tree_lock);
+	pinned->unit = 1;
+	pinned->start = 0;
+	pinned->private = NULL;
+	pinned->extents_thresh = 0;
+	pinned->op = &pinned_free_ino_op;
+}
+
+int btrfs_save_ino_cache(struct btrfs_root *root,
+			 struct btrfs_trans_handle *trans)
+{
+	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+	struct btrfs_path *path;
+	struct inode *inode;
+	u64 alloc_hint = 0;
+	int ret;
+	int prealloc;
+	bool retry = false;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+again:
+	inode = lookup_free_ino_inode(root, path);
+	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
+		ret = PTR_ERR(inode);
+		goto out;
+	}
+
+	if (IS_ERR(inode)) {
+		BUG_ON(retry);
+		retry = true;
+
+		ret = create_free_ino_inode(root, trans, path);
+		if (ret)
+			goto out;
+		goto again;
+	}
+
+	BTRFS_I(inode)->generation = 0;
+	ret = btrfs_update_inode(trans, root, inode);
+	WARN_ON(ret);
+
+	if (i_size_read(inode) > 0) {
+		ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
+		if (ret)
+			goto out_put;
+	}
+
+	spin_lock(&root->cache_lock);
+	if (root->cached != BTRFS_CACHE_FINISHED) {
+		ret = -1;
+		spin_unlock(&root->cache_lock);
+		goto out_put;
+	}
+	spin_unlock(&root->cache_lock);
+
+	spin_lock(&ctl->tree_lock);
+	prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
+	prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE);
+	prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE;
+	spin_unlock(&ctl->tree_lock);
+
+	/* Just to make sure we have enough space */
+	prealloc += 8 * PAGE_CACHE_SIZE;
+
+	ret = btrfs_check_data_free_space(inode, prealloc);
+	if (ret)
+		goto out_put;
+
+	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
+					      prealloc, prealloc, &alloc_hint);
+	if (ret)
+		goto out_put;
+	btrfs_free_reserved_data_space(inode, prealloc);
+
+out_put:
+	iput(inode);
+out:
+	if (ret == 0)
+		ret = btrfs_write_out_ino_cache(root, trans, path);
+
+	btrfs_free_path(path);
+	return ret;
+}
+
+static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
 {
 	struct btrfs_path *path;
 	int ret;
@@ -55,15 +490,14 @@ error:
 	return ret;
 }
 
-int btrfs_find_free_objectid(struct btrfs_trans_handle *trans,
-			     struct btrfs_root *root,
-			     u64 dirid, u64 *objectid)
+int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
 {
 	int ret;
 	mutex_lock(&root->objectid_mutex);
 
 	if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) {
-		ret = btrfs_find_highest_inode(root, &root->highest_objectid);
+		ret = btrfs_find_highest_objectid(root,
+						  &root->highest_objectid);
 		if (ret)
 			goto out;
 	}
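
The allocator this patch introduces is meant to be used in pairs: btrfs_find_free_ino() hands out a number, blocking on root->cache_wait until the caching kthread has published at least one free chunk (or returning -ENOSPC once the cache is complete and empty), and btrfs_return_ino() hands the number back if inode creation fails. A minimal sketch of a caller; do_create_inode() is a hypothetical placeholder, as the real call sites are wired up elsewhere in this patch series:

/* Hypothetical caller, for illustration only. */
static int alloc_ino_example(struct btrfs_root *root)
{
	u64 objectid;
	int ret;

	/* May sleep until the caching kthread publishes a free chunk. */
	ret = btrfs_find_free_ino(root, &objectid);
	if (ret)
		return ret;	/* -ENOSPC: inode number space exhausted */

	ret = do_create_inode(root, objectid);	/* hypothetical helper */
	if (ret)
		/* Hand the number back; depending on caching progress it
		 * lands in the free_ino tree or the pinned tree. */
		btrfs_return_ino(root, objectid);
	return ret;
}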
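The split in btrfs_unpin_free_ino() moves each pinned extent to the free_ino tree only up to and including root->cache_progress and drops the rest, because the commit root being scanned has changed. A standalone userspace model of just that count computation, not kernel code; the struct and function names here are invented for illustration:

#include <stdio.h>

struct pinned_extent {
	unsigned long long offset;	/* first ino in the extent */
	unsigned long long bytes;	/* number of inos (unit == 1) */
};

static unsigned long long unpinned_count(struct pinned_extent e,
					 unsigned long long cache_progress)
{
	if (e.offset > cache_progress)
		return 0;				/* dropped entirely */
	if (e.offset + e.bytes > cache_progress)
		return cache_progress - e.offset + 1;	/* partial move */
	return e.bytes;					/* moved entirely */
}

int main(void)
{
	struct pinned_extent e = { 100, 50 };	/* inos 100..149 */

	printf("%llu\n", unpinned_count(e, 120));	/* 21: 100..120 move */
	printf("%llu\n", unpinned_count(e, 200));	/* 50: all move */
	printf("%llu\n", unpinned_count(e, 50));	/* 0: all dropped */
	return 0;
}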
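recalculate_thresholds() sizes extents_thresh so that extent entries never cost more memory than covering the same inode range with bitmaps would: each bitmap page covers PAGE_CACHE_SIZE * 8 inode numbers, and pages not yet spent on bitmaps may be spent on extent entries instead. A userspace sketch of the arithmetic; the 4096-byte page and the 48-byte entry size are assumptions made for illustration (the kernel uses PAGE_CACHE_SIZE and sizeof(struct btrfs_free_space) directly):

#include <stdio.h>

#define PAGE_SZ			4096ULL	/* assumed PAGE_CACHE_SIZE */
#define ENTRY_SZ		48ULL	/* assumed entry size */
#define INODES_PER_BITMAP	(PAGE_SZ * 8)

static unsigned long long extents_thresh(unsigned long long max_ino,
					 unsigned long long total_bitmaps)
{
	/* round up: how many bitmap pages would cover [0, max_ino]? */
	unsigned long long max_bitmaps =
		(max_ino + INODES_PER_BITMAP - 1) / INODES_PER_BITMAP;

	if (max_bitmaps <= total_bitmaps)
		return 0;	/* already at the bitmap-only budget */

	/* pages not spent on bitmaps can hold extent entries instead */
	return (max_bitmaps - total_bitmaps) * PAGE_SZ / ENTRY_SZ;
}

int main(void)
{
	/* highest ino ~1M with one bitmap in use: 31 spare pages */
	printf("%llu\n", extents_thresh(1ULL << 20, 1));	/* 2645 */
	return 0;
}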