Diffstat (limited to 'fs/btrfs/transaction.c')
-rw-r--r--	fs/btrfs/transaction.c	1102
1 file changed, 1102 insertions, 0 deletions
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
new file mode 100644
index 000000000000..c2c3b4281962
--- /dev/null
+++ b/fs/btrfs/transaction.c
@@ -0,0 +1,1102 @@
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "ref-cache.h"
#include "tree-log.h"

static int total_trans = 0;
extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_transaction_cachep;

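/*
 * radix tree tag used in fs_roots_radix to mark the roots that were
 * modified in the currently running transaction
 */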
#define BTRFS_ROOT_TRANS_TAG 0

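/*
 * drop one reference on a transaction. When the last reference is gone,
 * the transaction is unlinked from fs_info->trans_list and freed.
 * Callers hold fs_info->trans_mutex.
 */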
static noinline void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(transaction->use_count == 0);
	transaction->use_count--;
	if (transaction->use_count == 0) {
		WARN_ON(total_trans == 0);
		total_trans--;
		list_del_init(&transaction->list);
		memset(transaction, 0, sizeof(*transaction));
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;
	cur_trans = root->fs_info->running_transaction;
	if (!cur_trans) {
		cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
					     GFP_NOFS);
		total_trans++;
		BUG_ON(!cur_trans);
		root->fs_info->generation++;
		root->fs_info->last_alloc = 0;
		root->fs_info->last_data_alloc = 0;
		cur_trans->num_writers = 1;
		cur_trans->num_joined = 0;
		cur_trans->transid = root->fs_info->generation;
		init_waitqueue_head(&cur_trans->writer_wait);
		init_waitqueue_head(&cur_trans->commit_wait);
		cur_trans->in_commit = 0;
		cur_trans->blocked = 0;
		cur_trans->use_count = 1;
		cur_trans->commit_done = 0;
		cur_trans->start_time = get_seconds();
		INIT_LIST_HEAD(&cur_trans->pending_snapshots);
		list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
		extent_io_tree_init(&cur_trans->dirty_pages,
				    root->fs_info->btree_inode->i_mapping,
				    GFP_NOFS);
		spin_lock(&root->fs_info->new_trans_lock);
		root->fs_info->running_transaction = cur_trans;
		spin_unlock(&root->fs_info->new_trans_lock);
	} else {
		cur_trans->num_writers++;
		cur_trans->num_joined++;
	}

	return 0;
}

/*
 * this does all the record keeping required to make sure that a
 * reference counted root is properly recorded in a given transaction.
 * This is required to make sure the old root from before we joined
 * the transaction is deleted when the transaction commits.
 */
noinline int btrfs_record_root_in_trans(struct btrfs_root *root)
{
	struct btrfs_dirty_root *dirty;
	u64 running_trans_id = root->fs_info->running_transaction->transid;
	if (root->ref_cows && root->last_trans < running_trans_id) {
		WARN_ON(root == root->fs_info->extent_root);
		if (root->root_item.refs != 0) {
			radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);

			dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
			BUG_ON(!dirty);
			dirty->root = kmalloc(sizeof(*dirty->root), GFP_NOFS);
			BUG_ON(!dirty->root);
			dirty->latest_root = root;
			INIT_LIST_HEAD(&dirty->list);

			root->commit_root = btrfs_root_node(root);

			memcpy(dirty->root, root, sizeof(*root));
			spin_lock_init(&dirty->root->node_lock);
			spin_lock_init(&dirty->root->list_lock);
			mutex_init(&dirty->root->objectid_mutex);
			mutex_init(&dirty->root->log_mutex);
			INIT_LIST_HEAD(&dirty->root->dead_list);
			dirty->root->node = root->commit_root;
			dirty->root->commit_root = NULL;

			spin_lock(&root->list_lock);
			list_add(&dirty->root->dead_list, &root->dead_list);
			spin_unlock(&root->list_lock);

			root->dirty_root = dirty;
		} else {
			WARN_ON(1);
		}
		root->last_trans = running_trans_id;
	}
	return 0;
}

/*
 * wait for a commit against the current transaction to become unblocked.
 * When this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		DEFINE_WAIT(wait);
		cur_trans->use_count++;
		while (1) {
			prepare_to_wait(&root->fs_info->transaction_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (cur_trans->blocked) {
				mutex_unlock(&root->fs_info->trans_mutex);
				schedule();
				mutex_lock(&root->fs_info->trans_mutex);
				finish_wait(&root->fs_info->transaction_wait,
					    &wait);
			} else {
				finish_wait(&root->fs_info->transaction_wait,
					    &wait);
				break;
			}
		}
		put_transaction(cur_trans);
	}
}

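/*
 * common transaction start code. num_blocks is the number of blocks the
 * caller expects to modify. The wait parameter selects how hard we try to
 * avoid joining a transaction that is already committing:
 *   0 - join the running transaction unconditionally
 *   1 - wait for a blocked (committing) transaction, unless a userland
 *       ioctl has the transaction pinned open
 *   2 - always wait for a blocked transaction
 * During log replay we never wait.
 */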
static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
						    int num_blocks, int wait)
{
	struct btrfs_trans_handle *h =
		kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	int ret;

	mutex_lock(&root->fs_info->trans_mutex);
	if (!root->fs_info->log_root_recovering &&
	    ((wait == 1 && !root->fs_info->open_ioctl_trans) || wait == 2))
		wait_current_trans(root);
	ret = join_transaction(root);
	BUG_ON(ret);

	btrfs_record_root_in_trans(root);
	h->transid = root->fs_info->running_transaction->transid;
	h->transaction = root->fs_info->running_transaction;
	h->blocks_reserved = num_blocks;
	h->blocks_used = 0;
	h->block_group = NULL;
	h->alloc_exclude_nr = 0;
	h->alloc_exclude_start = 0;
	root->fs_info->running_transaction->use_count++;
	mutex_unlock(&root->fs_info->trans_mutex);
	return h;
}

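/*
 * the three flavors below just pick a wait mode for start_transaction.
 * btrfs_start_transaction waits for committing transactions, which keeps
 * new writers from piling onto a commit in progress.  btrfs_join_transaction
 * never waits, and btrfs_start_ioctl_transaction waits even when a userland
 * ioctl is holding the transaction open.
 */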
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_blocks)
{
	return start_transaction(root, num_blocks, 1);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
						  int num_blocks)
{
	return start_transaction(root, num_blocks, 0);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
							  int num_blocks)
{
	return start_transaction(r, num_blocks, 2);
}

/* wait for a transaction commit to be fully complete */
static noinline int wait_for_commit(struct btrfs_root *root,
				    struct btrfs_transaction *commit)
{
	DEFINE_WAIT(wait);
	mutex_lock(&root->fs_info->trans_mutex);
	while (!commit->commit_done) {
		prepare_to_wait(&commit->commit_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (commit->commit_done)
			break;
		mutex_unlock(&root->fs_info->trans_mutex);
		schedule();
		mutex_lock(&root->fs_info->trans_mutex);
	}
	mutex_unlock(&root->fs_info->trans_mutex);
	finish_wait(&commit->commit_wait, &wait);
	return 0;
}

/*
 * rate limit against the drop_snapshot code. This helps to slow down new
 * operations if the drop_snapshot code isn't able to keep up.
 */
static void throttle_on_drops(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	int harder_count = 0;

harder:
	if (atomic_read(&info->throttles)) {
		DEFINE_WAIT(wait);
		int thr;
		thr = atomic_read(&info->throttle_gen);

		do {
			prepare_to_wait(&info->transaction_throttle,
					&wait, TASK_UNINTERRUPTIBLE);
			if (!atomic_read(&info->throttles)) {
				finish_wait(&info->transaction_throttle, &wait);
				break;
			}
			schedule();
			finish_wait(&info->transaction_throttle, &wait);
		} while (thr == atomic_read(&info->throttle_gen));
		harder_count++;

		if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
		    harder_count < 2)
			goto harder;

		if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
		    harder_count < 10)
			goto harder;

		if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
		    harder_count < 20)
			goto harder;
	}
}

void btrfs_throttle(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->trans_mutex);
	if (!root->fs_info->open_ioctl_trans)
		wait_current_trans(root);
	mutex_unlock(&root->fs_info->trans_mutex);

	throttle_on_drops(root);
}

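/*
 * drop a transaction handle. This decrements the writer count on the
 * running transaction, wakes anyone waiting for the writers to drain,
 * and frees the handle. With throttle set, it also rate limits the
 * caller against the snapshot deletion code.
 */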
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, int throttle)
{
	struct btrfs_transaction *cur_trans;
	struct btrfs_fs_info *info = root->fs_info;

	mutex_lock(&info->trans_mutex);
	cur_trans = info->running_transaction;
	WARN_ON(cur_trans != trans->transaction);
	WARN_ON(cur_trans->num_writers < 1);
	cur_trans->num_writers--;

	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);
	mutex_unlock(&info->trans_mutex);
	memset(trans, 0, sizeof(*trans));
	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (throttle)
		throttle_on_drops(root);

	return 0;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1);
}

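/*
 * a minimal usage sketch (error handling elided; trans_handle_example is
 * a hypothetical caller, not part of this file). Every btree modification
 * pairs a start with an end:
 *
 *	static void trans_handle_example(struct btrfs_root *root)
 *	{
 *		struct btrfs_trans_handle *trans;
 *
 *		trans = btrfs_start_transaction(root, 1);
 *		... modify btree items through trans ...
 *		btrfs_end_transaction(trans, root);
 *	}
 */
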
/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees. This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages)
{
	int ret;
	int err = 0;
	int werr = 0;
	struct page *page;
	struct inode *btree_inode = root->fs_info->btree_inode;
	u64 start = 0;
	u64 end;
	unsigned long index;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    EXTENT_DIRTY);
		if (ret)
			break;
		while (start <= end) {
			cond_resched();

			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_get_page(btree_inode->i_mapping, index);
			if (!page)
				continue;

			btree_lock_page_hook(page);
			if (!page->mapping) {
				unlock_page(page);
				page_cache_release(page);
				continue;
			}

			if (PageWriteback(page)) {
				if (PageDirty(page)) {
					wait_on_page_writeback(page);
				} else {
					unlock_page(page);
					page_cache_release(page);
					continue;
				}
			}
			err = write_one_page(page, 0);
			if (err)
				werr = err;
			page_cache_release(page);
		}
	}
	while (1) {
		ret = find_first_extent_bit(dirty_pages, 0, &start, &end,
					    EXTENT_DIRTY);
		if (ret)
			break;

		clear_extent_dirty(dirty_pages, start, end, GFP_NOFS);
		while (start <= end) {
			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_get_page(btree_inode->i_mapping, index);
			if (!page)
				continue;
			if (PageDirty(page)) {
				btree_lock_page_hook(page);
				wait_on_page_writeback(page);
				err = write_one_page(page, 0);
				if (err)
					werr = err;
			}
			wait_on_page_writeback(page);
			page_cache_release(page);
			cond_resched();
		}
	}
	if (err)
		werr = err;
	return werr;
}

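/*
 * write and wait on all the btree blocks dirtied by a transaction.
 * If no transaction is given, fall back to flushing the whole btree inode.
 */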
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	if (!trans || !trans->transaction) {
		struct inode *btree_inode;
		btree_inode = root->fs_info->btree_inode;
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	return btrfs_write_and_wait_marked_extents(root,
					   &trans->transaction->dirty_pages);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	btrfs_extent_post_op(trans, root);
	btrfs_write_dirty_block_groups(trans, root);
	btrfs_extent_post_op(trans, root);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start)
			break;
		btrfs_set_root_bytenr(&root->root_item,
				      root->node->start);
		btrfs_set_root_level(&root->root_item,
				     btrfs_header_level(root->node));
		btrfs_set_root_generation(&root->root_item, trans->transid);

		btrfs_extent_post_op(trans, root);

		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		BUG_ON(ret);
		btrfs_write_dirty_block_groups(trans, root);
		btrfs_extent_post_op(trans, root);
	}
	return 0;
}

/*
 * update all the cowonly tree roots on disk
 */
int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;

	btrfs_extent_post_op(trans, fs_info->tree_root);

	eb = btrfs_lock_root_node(fs_info->tree_root);
	btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb, 0);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	btrfs_extent_post_op(trans, fs_info->tree_root);

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		update_cowonly_root(trans, root);
	}
	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted. This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root, struct btrfs_root *latest)
{
	struct btrfs_dirty_root *dirty;

	dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
	if (!dirty)
		return -ENOMEM;
	dirty->root = root;
	dirty->latest_root = latest;

	mutex_lock(&root->fs_info->trans_mutex);
	list_add(&dirty->list, &latest->fs_info->dead_roots);
	mutex_unlock(&root->fs_info->trans_mutex);
	return 0;
}

/*
 * at transaction commit time we need to schedule the old roots for
 * deletion via btrfs_drop_snapshot. This runs through all the
 * reference counted roots that were modified in the current
 * transaction and puts them into the drop list
 */
static noinline int add_dirty_roots(struct btrfs_trans_handle *trans,
				    struct radix_tree_root *radix,
				    struct list_head *list)
{
	struct btrfs_dirty_root *dirty;
	struct btrfs_root *gang[8];
	struct btrfs_root *root;
	int i;
	int ret;
	int err = 0;
	u32 refs;

	while (1) {
		ret = radix_tree_gang_lookup_tag(radix, (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(radix,
				     (unsigned long)root->root_key.objectid,
				     BTRFS_ROOT_TRANS_TAG);

			BUG_ON(!root->ref_tree);
			dirty = root->dirty_root;

			btrfs_free_log(trans, root);
			btrfs_free_reloc_root(trans, root);

			if (root->commit_root == root->node) {
				WARN_ON(root->node->start !=
					btrfs_root_bytenr(&root->root_item));

				free_extent_buffer(root->commit_root);
				root->commit_root = NULL;
				root->dirty_root = NULL;

				spin_lock(&root->list_lock);
				list_del_init(&dirty->root->dead_list);
				spin_unlock(&root->list_lock);

				kfree(dirty->root);
				kfree(dirty);

				/* make sure to update the root on disk
				 * so we get any updates to the block used
				 * counts
				 */
				err = btrfs_update_root(trans,
						root->fs_info->tree_root,
						&root->root_key,
						&root->root_item);
				continue;
			}

			memset(&root->root_item.drop_progress, 0,
			       sizeof(struct btrfs_disk_key));
			root->root_item.drop_level = 0;
			root->commit_root = NULL;
			root->dirty_root = NULL;
			root->root_key.offset = root->fs_info->generation;
			btrfs_set_root_bytenr(&root->root_item,
					      root->node->start);
			btrfs_set_root_level(&root->root_item,
					     btrfs_header_level(root->node));
			btrfs_set_root_generation(&root->root_item,
						  root->root_key.offset);

			err = btrfs_insert_root(trans, root->fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			if (err)
				break;

			refs = btrfs_root_refs(&dirty->root->root_item);
			btrfs_set_root_refs(&dirty->root->root_item, refs - 1);
			err = btrfs_update_root(trans, root->fs_info->tree_root,
						&dirty->root->root_key,
						&dirty->root->root_item);

			BUG_ON(err);
			if (refs == 1) {
				list_add(&dirty->list, list);
			} else {
				WARN_ON(1);
				free_extent_buffer(dirty->root->node);
				kfree(dirty->root);
				kfree(dirty);
			}
		}
	}
	return err;
}

/*
 * defrag a given btree. If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
	struct btrfs_fs_info *info = root->fs_info;
	int ret;
	struct btrfs_trans_handle *trans;
	unsigned long nr;

	smp_mb();
	if (root->defrag_running)
		return 0;
	trans = btrfs_start_transaction(root, 1);
	while (1) {
		root->defrag_running = 1;
		ret = btrfs_defrag_leaves(trans, root, cacheonly);
		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root, nr);
		cond_resched();

		trans = btrfs_start_transaction(root, 1);
		if (root->fs_info->closing || ret != -EAGAIN)
			break;
	}
	root->defrag_running = 0;
	smp_mb();
	btrfs_end_transaction(trans, root);
	return 0;
}

/*
 * Given a list of roots that need to be deleted, call btrfs_drop_snapshot
 * on all of them
 */
static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
				     struct list_head *list)
{
	struct btrfs_dirty_root *dirty;
	struct btrfs_trans_handle *trans;
	unsigned long nr;
	u64 num_bytes;
	u64 bytes_used;
	u64 max_useless;
	int ret = 0;
	int err;

	while (!list_empty(list)) {
		struct btrfs_root *root;

		dirty = list_entry(list->prev, struct btrfs_dirty_root, list);
		list_del_init(&dirty->list);

		num_bytes = btrfs_root_used(&dirty->root->root_item);
		root = dirty->latest_root;
		atomic_inc(&root->fs_info->throttles);

		while (1) {
			trans = btrfs_start_transaction(tree_root, 1);
			mutex_lock(&root->fs_info->drop_mutex);
			ret = btrfs_drop_snapshot(trans, dirty->root);
			if (ret != -EAGAIN)
				break;
			mutex_unlock(&root->fs_info->drop_mutex);

			err = btrfs_update_root(trans,
						tree_root,
						&dirty->root->root_key,
						&dirty->root->root_item);
			if (err)
				ret = err;
			nr = trans->blocks_used;
			ret = btrfs_end_transaction(trans, tree_root);
			BUG_ON(ret);

			btrfs_btree_balance_dirty(tree_root, nr);
			cond_resched();
		}
		BUG_ON(ret);
		atomic_dec(&root->fs_info->throttles);
		wake_up(&root->fs_info->transaction_throttle);

		num_bytes -= btrfs_root_used(&dirty->root->root_item);
		bytes_used = btrfs_root_used(&root->root_item);
		if (num_bytes) {
			btrfs_record_root_in_trans(root);
			btrfs_set_root_used(&root->root_item,
					    bytes_used - num_bytes);
		}

		ret = btrfs_del_root(trans, tree_root, &dirty->root->root_key);
		if (ret) {
			BUG();
			break;
		}
		mutex_unlock(&root->fs_info->drop_mutex);

		spin_lock(&root->list_lock);
		list_del_init(&dirty->root->dead_list);
		if (!list_empty(&root->dead_list)) {
			struct btrfs_root *oldest;
			oldest = list_entry(root->dead_list.prev,
					    struct btrfs_root, dead_list);
			max_useless = oldest->root_key.offset - 1;
		} else {
			max_useless = root->root_key.offset - 1;
		}
		spin_unlock(&root->list_lock);

		nr = trans->blocks_used;
		ret = btrfs_end_transaction(trans, tree_root);
		BUG_ON(ret);

		ret = btrfs_remove_leaf_refs(root, max_useless, 0);
		BUG_ON(ret);

		free_extent_buffer(dirty->root->node);
		kfree(dirty->root);
		kfree(dirty);

		btrfs_btree_balance_dirty(tree_root, nr);
		cond_resched();
	}
	return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit. This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	int ret;
	u64 objectid;

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		ret = -ENOMEM;
		goto fail;
	}
	ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
	if (ret)
		goto fail;

	btrfs_record_root_in_trans(root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));

	key.objectid = objectid;
	key.offset = trans->transid;
	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);

	old = btrfs_lock_root_node(root);
	btrfs_cow_block(trans, root, old, NULL, 0, &old, 0);

	btrfs_copy_root(trans, root, old, &tmp, objectid);
	btrfs_tree_unlock(old);
	free_extent_buffer(old);

	btrfs_set_root_bytenr(new_root_item, tmp->start);
	btrfs_set_root_level(new_root_item, btrfs_header_level(tmp));
	btrfs_set_root_generation(new_root_item, trans->transid);
	ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
				new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret)
		goto fail;

	key.offset = (u64)-1;
	memcpy(&pending->root_key, &key, sizeof(key));
fail:
	kfree(new_root_item);
	return ret;
}

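/*
 * a pending snapshot is linked into the namespace after the commit that
 * created it. This inserts the directory item and the root back and
 * forward refs, then instantiates the snapshot's dentry.
 */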
static noinline int finish_pending_snapshot(struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	int ret;
	int namelen;
	u64 index = 0;
	struct btrfs_trans_handle *trans;
	struct inode *parent_inode;
	struct inode *inode;
	struct btrfs_root *parent_root;

	parent_inode = pending->dentry->d_parent->d_inode;
	parent_root = BTRFS_I(parent_inode)->root;
	trans = btrfs_start_transaction(parent_root, 1);

	/*
	 * insert the directory item
	 */
	namelen = strlen(pending->name);
	ret = btrfs_set_inode_index(parent_inode, &index);
	if (ret)
		goto fail;
	ret = btrfs_insert_dir_item(trans, parent_root,
				    pending->name, namelen,
				    parent_inode->i_ino,
				    &pending->root_key, BTRFS_FT_DIR, index);

	if (ret)
		goto fail;

	/* add the backref first */
	ret = btrfs_add_root_ref(trans, parent_root->fs_info->tree_root,
				 pending->root_key.objectid,
				 BTRFS_ROOT_BACKREF_KEY,
				 parent_root->root_key.objectid,
				 parent_inode->i_ino, index, pending->name,
				 namelen);

	BUG_ON(ret);

	/* now add the forward ref */
	ret = btrfs_add_root_ref(trans, parent_root->fs_info->tree_root,
				 parent_root->root_key.objectid,
				 BTRFS_ROOT_REF_KEY,
				 pending->root_key.objectid,
				 parent_inode->i_ino, index, pending->name,
				 namelen);

	inode = btrfs_lookup_dentry(parent_inode, pending->dentry);
	d_instantiate(pending->dentry, inode);
fail:
	btrfs_end_transaction(trans, fs_info->fs_root);
	return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;
	struct list_head *cur;
	int ret;

	list_for_each(cur, head) {
		pending = list_entry(cur, struct btrfs_pending_snapshot, list);
		ret = create_pending_snapshot(trans, fs_info, pending);
		BUG_ON(ret);
	}
	return 0;
}

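/*
 * run the directory insertion and dentry instantiation for every snapshot
 * created by this transaction, and free the pending records
 */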
static noinline int finish_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret;

	while (!list_empty(head)) {
		pending = list_entry(head->next,
				     struct btrfs_pending_snapshot, list);
		ret = finish_pending_snapshot(fs_info, pending);
		BUG_ON(ret);
		list_del(&pending->list);
		kfree(pending->name);
		kfree(pending);
	}
	return 0;
}

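/*
 * commit the running transaction. The caller's handle is consumed.
 *
 * If someone else is already committing, we just wait for their commit to
 * finish. Otherwise we mark the transaction blocked, wait for the other
 * writers to drain, create any pending snapshots, make the tree roots
 * consistent with each other, and write everything out before the new
 * super block goes down. Note that use_count is dropped twice at the end:
 * once for this handle's reference and once for the reference the
 * transaction held while it was on fs_info->trans_list.
 */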
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	unsigned long timeout = 1;
	struct btrfs_transaction *cur_trans;
	struct btrfs_transaction *prev_trans = NULL;
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct list_head dirty_fs_roots;
	struct extent_io_tree *pinned_copy;
	DEFINE_WAIT(wait);
	int ret;

	INIT_LIST_HEAD(&dirty_fs_roots);
	mutex_lock(&root->fs_info->trans_mutex);
	if (trans->transaction->in_commit) {
		cur_trans = trans->transaction;
		trans->transaction->use_count++;
		mutex_unlock(&root->fs_info->trans_mutex);
		btrfs_end_transaction(trans, root);

		ret = wait_for_commit(root, cur_trans);
		BUG_ON(ret);

		mutex_lock(&root->fs_info->trans_mutex);
		put_transaction(cur_trans);
		mutex_unlock(&root->fs_info->trans_mutex);

		return 0;
	}

	pinned_copy = kmalloc(sizeof(*pinned_copy), GFP_NOFS);
	if (!pinned_copy)
		return -ENOMEM;

	extent_io_tree_init(pinned_copy,
			    root->fs_info->btree_inode->i_mapping, GFP_NOFS);

	trans->transaction->in_commit = 1;
	trans->transaction->blocked = 1;
	cur_trans = trans->transaction;
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			prev_trans->use_count++;
			mutex_unlock(&root->fs_info->trans_mutex);

			wait_for_commit(root, prev_trans);

			mutex_lock(&root->fs_info->trans_mutex);
			put_transaction(prev_trans);
		}
	}

	do {
		int snap_pending = 0;
		joined = cur_trans->num_joined;
		if (!list_empty(&trans->transaction->pending_snapshots))
			snap_pending = 1;

		WARN_ON(cur_trans != trans->transaction);
		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		if (cur_trans->num_writers > 1)
			timeout = MAX_SCHEDULE_TIMEOUT;
		else
			timeout = 1;

		mutex_unlock(&root->fs_info->trans_mutex);

		if (snap_pending) {
			ret = btrfs_wait_ordered_extents(root, 1);
			BUG_ON(ret);
		}

		schedule_timeout(timeout);

		mutex_lock(&root->fs_info->trans_mutex);
		finish_wait(&cur_trans->writer_wait, &wait);
	} while (cur_trans->num_writers > 1 ||
		 (cur_trans->num_joined != joined));

	ret = create_pending_snapshots(trans, root->fs_info);
	BUG_ON(ret);

	WARN_ON(cur_trans != trans->transaction);

	/* btrfs_commit_tree_roots is responsible for getting the
	 * various roots consistent with each other. Every pointer
	 * in the tree of tree roots has to point to the most up to date
	 * root for every subvolume and other tree. So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in. By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);
	/*
	 * keep tree reloc code from adding new reloc trees
	 */
	mutex_lock(&root->fs_info->tree_reloc_mutex);

	ret = add_dirty_roots(trans, &root->fs_info->fs_roots_radix,
			      &dirty_fs_roots);
	BUG_ON(ret);

	/* add_dirty_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = btrfs_commit_tree_roots(trans, root);
	BUG_ON(ret);

	cur_trans = root->fs_info->running_transaction;
	spin_lock(&root->fs_info->new_trans_lock);
	root->fs_info->running_transaction = NULL;
	spin_unlock(&root->fs_info->new_trans_lock);
	btrfs_set_super_generation(&root->fs_info->super_copy,
				   cur_trans->transid);
	btrfs_set_super_root(&root->fs_info->super_copy,
			     root->fs_info->tree_root->node->start);
	btrfs_set_super_root_level(&root->fs_info->super_copy,
			   btrfs_header_level(root->fs_info->tree_root->node));

	btrfs_set_super_chunk_root(&root->fs_info->super_copy,
				   chunk_root->node->start);
	btrfs_set_super_chunk_root_level(&root->fs_info->super_copy,
					 btrfs_header_level(chunk_root->node));
	btrfs_set_super_chunk_root_generation(&root->fs_info->super_copy,
				btrfs_header_generation(chunk_root->node));

	if (!root->fs_info->log_root_recovering) {
		btrfs_set_super_log_root(&root->fs_info->super_copy, 0);
		btrfs_set_super_log_root_level(&root->fs_info->super_copy, 0);
	}

	memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
	       sizeof(root->fs_info->super_copy));

	btrfs_copy_pinned(root, pinned_copy);

	trans->transaction->blocked = 0;
	wake_up(&root->fs_info->transaction_throttle);
	wake_up(&root->fs_info->transaction_wait);

	mutex_unlock(&root->fs_info->trans_mutex);
	ret = btrfs_write_and_wait_transaction(trans, root);
	BUG_ON(ret);
	write_ctree_super(trans, root);

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root, pinned_copy);
	kfree(pinned_copy);

	btrfs_drop_dead_reloc_roots(root);
	mutex_unlock(&root->fs_info->tree_reloc_mutex);

	/* do the directory inserts of any pending snapshot creations */
	finish_pending_snapshots(trans, root->fs_info);

	mutex_lock(&root->fs_info->trans_mutex);

	cur_trans->commit_done = 1;
	root->fs_info->last_trans_committed = cur_trans->transid;
	wake_up(&cur_trans->commit_wait);

	put_transaction(cur_trans);
	put_transaction(cur_trans);

	list_splice_init(&dirty_fs_roots, &root->fs_info->dead_roots);
	if (root->fs_info->closing)
		list_splice_init(&root->fs_info->dead_roots, &dirty_fs_roots);

	mutex_unlock(&root->fs_info->trans_mutex);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (root->fs_info->closing)
		drop_dirty_roots(root->fs_info->tree_root, &dirty_fs_roots);
	return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for
 * deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
	struct list_head dirty_roots;
	INIT_LIST_HEAD(&dirty_roots);
again:
	mutex_lock(&root->fs_info->trans_mutex);
	list_splice_init(&root->fs_info->dead_roots, &dirty_roots);
	mutex_unlock(&root->fs_info->trans_mutex);

	if (!list_empty(&dirty_roots)) {
		drop_dirty_roots(root, &dirty_roots);
		goto again;
	}
	return 0;
}