Diffstat (limited to 'fs/btrfs')
-rw-r--r--	fs/btrfs/backref.c	783
-rw-r--r--	fs/btrfs/backref.h	5
2 files changed, 788 insertions, 0 deletions
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 22c64fff1bd5..03c30a1836f4 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -19,6 +19,9 @@
 #include "ctree.h"
 #include "disk-io.h"
 #include "backref.h"
+#include "ulist.h"
+#include "transaction.h"
+#include "delayed-ref.h"
 
 struct __data_ref {
 	struct list_head list;
@@ -32,6 +35,786 @@ struct __shared_ref {
 	u64 disk_byte;
 };
 
+/*
+ * this structure records all encountered refs on the way up to the root
+ */
+struct __prelim_ref {
+	struct list_head list;
+	u64 root_id;
+	struct btrfs_key key;
+	int level;
+	int count;
+	u64 parent;
+	u64 wanted_disk_byte;
+};
+
+static int __add_prelim_ref(struct list_head *head, u64 root_id,
+			    struct btrfs_key *key, int level, u64 parent,
+			    u64 wanted_disk_byte, int count)
+{
+	struct __prelim_ref *ref;
+
+	/* in case we're adding delayed refs, we're holding the refs spinlock */
+	ref = kmalloc(sizeof(*ref), GFP_ATOMIC);
+	if (!ref)
+		return -ENOMEM;
+
+	ref->root_id = root_id;
+	if (key)
+		ref->key = *key;
+	else
+		memset(&ref->key, 0, sizeof(ref->key));
+
+	ref->level = level;
+	ref->count = count;
+	ref->parent = parent;
+	ref->wanted_disk_byte = wanted_disk_byte;
+	list_add_tail(&ref->list, head);
+
+	return 0;
+}
+
+static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
+			   struct ulist *parents,
+			   struct extent_buffer *eb, int level,
+			   u64 wanted_objectid, u64 wanted_disk_byte)
+{
+	int ret;
+	int slot;
+	struct btrfs_file_extent_item *fi;
+	struct btrfs_key key;
+	u64 disk_byte;
+
+add_parent:
+	ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
+	if (ret < 0)
+		return ret;
+
+	if (level != 0)
+		return 0;
+
+	/*
+	 * if the current leaf is full of EXTENT_DATA items, we must
+	 * check whether the next one holds a reference as well.
+	 * ref->count cannot be used to skip this check.
+	 * repeat this until we don't find any additional EXTENT_DATA items.
+	 */
+	while (1) {
+		ret = btrfs_next_leaf(root, path);
+		if (ret < 0)
+			return ret;
+		if (ret)
+			return 0;
+
+		eb = path->nodes[0];
+		for (slot = 0; slot < btrfs_header_nritems(eb); ++slot) {
+			btrfs_item_key_to_cpu(eb, &key, slot);
+			if (key.objectid != wanted_objectid ||
+			    key.type != BTRFS_EXTENT_DATA_KEY)
+				return 0;
+			fi = btrfs_item_ptr(eb, slot,
+					    struct btrfs_file_extent_item);
+			disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
+			if (disk_byte == wanted_disk_byte)
+				goto add_parent;
+		}
+	}
+
+	return 0;
+}
+
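The loop above embodies a subtlety worth spelling out: one extent can be referenced from several EXTENT_DATA items, and those items may spill over into the following leaf, so the scan has to continue across leaf boundaries until the key stops matching. A minimal userspace sketch of that pattern (all types, sizes and values here are invented for illustration, not the on-disk format):

/* userspace sketch, not kernel code: keep scanning fixed-size "leaves"
 * of sorted (objectid, type) keys until a key no longer matches, and
 * report every slot whose disk_byte equals the wanted one */
#include <stdio.h>

#define EXTENT_DATA 108			/* stand-in for BTRFS_EXTENT_DATA_KEY */
#define SLOTS_PER_LEAF 3

struct item { unsigned long long objectid; int type; unsigned long long disk_byte; };

static void scan(struct item leaves[][SLOTS_PER_LEAF], int nr_leaves,
		 unsigned long long wanted_objectid,
		 unsigned long long wanted_disk_byte)
{
	for (int l = 0; l < nr_leaves; l++) {		/* btrfs_next_leaf() */
		for (int s = 0; s < SLOTS_PER_LEAF; s++) {
			struct item *it = &leaves[l][s];

			if (it->objectid != wanted_objectid ||
			    it->type != EXTENT_DATA)
				return;			/* past the range */
			if (it->disk_byte == wanted_disk_byte)
				printf("ref in leaf %d, slot %d\n", l, s);
		}
	}
}

int main(void)
{
	struct item leaves[2][SLOTS_PER_LEAF] = {
		{ {257, EXTENT_DATA, 4096}, {257, EXTENT_DATA, 8192},
		  {257, EXTENT_DATA, 4096} },
		{ {257, EXTENT_DATA, 4096}, {258, EXTENT_DATA, 4096}, {0} },
	};

	scan(leaves, 2, 257, 4096);	/* finds three references */
	return 0;
}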
+/*
+ * resolve an indirect backref in the form (root_id, key, level)
+ * to a logical address
+ */
+static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
+				  struct __prelim_ref *ref,
+				  struct ulist *parents)
+{
+	struct btrfs_path *path;
+	struct btrfs_root *root;
+	struct btrfs_key root_key;
+	struct btrfs_key key = {0};
+	struct extent_buffer *eb;
+	int ret = 0;
+	int root_level;
+	int level = ref->level;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	root_key.objectid = ref->root_id;
+	root_key.type = BTRFS_ROOT_ITEM_KEY;
+	root_key.offset = (u64)-1;
+	root = btrfs_read_fs_root_no_name(fs_info, &root_key);
+	if (IS_ERR(root)) {
+		ret = PTR_ERR(root);
+		goto out;
+	}
+
+	rcu_read_lock();
+	root_level = btrfs_header_level(root->node);
+	rcu_read_unlock();
+
+	if (root_level + 1 == level)
+		goto out;
+
+	path->lowest_level = level;
+	ret = btrfs_search_slot(NULL, root, &ref->key, path, 0, 0);
+	pr_debug("search slot in root %llu (level %d, ref count %d) returned "
+		 "%d for key (%llu %u %llu)\n",
+		 (unsigned long long)ref->root_id, level, ref->count, ret,
+		 (unsigned long long)ref->key.objectid, ref->key.type,
+		 (unsigned long long)ref->key.offset);
+	if (ret < 0)
+		goto out;
+
+	eb = path->nodes[level];
+	if (!eb) {
+		WARN_ON(1);
+		ret = 1;
+		goto out;
+	}
+
+	if (level == 0) {
+		if (ret == 1 && path->slots[0] >= btrfs_header_nritems(eb)) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret)
+				goto out;
+			eb = path->nodes[0];
+		}
+
+		btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
+	}
+
+	/* the last two parameters will only be used for level == 0 */
+	ret = add_all_parents(root, path, parents, eb, level, key.objectid,
+			      ref->wanted_disk_byte);
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
+/*
+ * resolve all indirect backrefs from the list
+ */
+static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
+				   struct list_head *head)
+{
+	int err;
+	int ret = 0;
+	struct __prelim_ref *ref;
+	struct __prelim_ref *ref_safe;
+	struct __prelim_ref *new_ref;
+	struct ulist *parents;
+	struct ulist_node *node;
+
+	parents = ulist_alloc(GFP_NOFS);
+	if (!parents)
+		return -ENOMEM;
+
+	/*
+	 * _safe allows us to insert directly after the current item without
+	 * iterating over the newly inserted items.
+	 * we're also allowed to re-assign ref during iteration.
+	 */
+	list_for_each_entry_safe(ref, ref_safe, head, list) {
+		if (ref->parent)	/* already direct */
+			continue;
+		if (ref->count == 0)
+			continue;
+		err = __resolve_indirect_ref(fs_info, ref, parents);
+		if (err) {
+			if (ret == 0)
+				ret = err;
+			continue;
+		}
+
+		/* we put the first parent into the ref at hand */
+		node = ulist_next(parents, NULL);
+		ref->parent = node ? node->val : 0;
+
+		/* additional parents require new refs to be added here */
+		while ((node = ulist_next(parents, node))) {
+			new_ref = kmalloc(sizeof(*new_ref), GFP_NOFS);
+			if (!new_ref) {
+				ret = -ENOMEM;
+				break;
+			}
+			memcpy(new_ref, ref, sizeof(*ref));
+			new_ref->parent = node->val;
+			list_add(&new_ref->list, &ref->list);
+		}
+		ulist_reinit(parents);
+	}
+
+	ulist_free(parents);
+	return ret;
+}
+
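The comment about list_for_each_entry_safe deserves a closer look: because the iterator saves the next pointer before the loop body runs, clones inserted directly behind the current entry are never revisited. A standalone userspace sketch of the same fan-out (singly linked list and all values invented for illustration):

/* userspace sketch, not kernel code: resolve one indirect ref into three
 * parents; the first parent reuses the node, the rest become clones that
 * the saved 'next' pointer skips, just like list_for_each_entry_safe() */
#include <stdio.h>
#include <stdlib.h>

struct ref { struct ref *next; long long parent; };

int main(void)
{
	long long parents[] = { 1000, 2000, 3000 };	/* invented */
	struct ref *head = calloc(1, sizeof(*head));	/* one indirect ref */
	struct ref *cur, *next;

	for (cur = head; cur; cur = next) {
		next = cur->next;		/* saved before any insertion */
		if (cur->parent)
			continue;		/* already direct */

		cur->parent = parents[0];	/* first parent: reuse node */
		for (int i = 1; i < 3; i++) {	/* the rest: insert clones */
			struct ref *clone = malloc(sizeof(*clone));

			clone->parent = parents[i];
			clone->next = cur->next;	/* after cur ... */
			cur->next = clone;		/* ... before 'next' */
		}
	}

	for (cur = head; cur; cur = cur->next)
		printf("ref -> parent %lld\n", cur->parent);
	return 0;
}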
+/*
+ * merge two lists of backrefs and adjust counts accordingly
+ *
+ * mode = 1: merge identical keys, if key is set
+ * mode = 2: merge identical parents
+ */
+static int __merge_refs(struct list_head *head, int mode)
+{
+	struct list_head *pos1;
+
+	list_for_each(pos1, head) {
+		struct list_head *n2;
+		struct list_head *pos2;
+		struct __prelim_ref *ref1;
+
+		ref1 = list_entry(pos1, struct __prelim_ref, list);
+
+		if (mode == 1 && ref1->key.type == 0)
+			continue;
+		for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
+		     pos2 = n2, n2 = pos2->next) {
+			struct __prelim_ref *ref2;
+
+			ref2 = list_entry(pos2, struct __prelim_ref, list);
+
+			if (mode == 1) {
+				if (memcmp(&ref1->key, &ref2->key,
+					   sizeof(ref1->key)) ||
+				    ref1->level != ref2->level ||
+				    ref1->root_id != ref2->root_id)
+					continue;
+				ref1->count += ref2->count;
+			} else {
+				if (ref1->parent != ref2->parent)
+					continue;
+				ref1->count += ref2->count;
+			}
+			list_del(&ref2->list);
+			kfree(ref2);
+		}
+
+	}
+	return 0;
+}
+
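A toy illustration of the two merge passes may help: pass 1 folds refs that name the same (root, key, level) so each indirect ref gets resolved only once; pass 2 runs after resolution and folds refs that ended up with the same parent block. A hedged userspace sketch over a plain array (struct layout and values invented):

/* userspace sketch, not kernel code: O(n^2) merge that sums counts of
 * equal entries and marks duplicates dead, mirroring __merge_refs() */
#include <stdio.h>

struct pref { long long root_id, parent; int level, count, dead; };

static void merge_by_parent(struct pref *refs, int n)
{
	for (int i = 0; i < n; i++) {
		if (refs[i].dead)
			continue;
		for (int j = i + 1; j < n; j++) {
			if (refs[j].dead || refs[j].parent != refs[i].parent)
				continue;
			refs[i].count += refs[j].count;	/* fold ... */
			refs[j].dead = 1;		/* ... and drop */
		}
	}
}

int main(void)
{
	struct pref refs[] = {
		{ .root_id = 5, .parent = 4096, .count = 1 },
		{ .root_id = 7, .parent = 4096, .count = 2 },
		{ .root_id = 5, .parent = 8192, .count = 1 },
	};

	merge_by_parent(refs, 3);
	for (int i = 0; i < 3; i++)
		if (!refs[i].dead)
			printf("parent %lld: count %d\n",
			       refs[i].parent, refs[i].count);
	return 0;
}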
+/*
+ * add all currently queued delayed refs from this head whose seq nr is
+ * smaller than or equal to seq to the list
+ */
+static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
+			      struct btrfs_key *info_key,
+			      struct list_head *prefs)
+{
+	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
+	struct rb_node *n = &head->node.rb_node;
+	int sgn;
+	int ret;
+
+	if (extent_op && extent_op->update_key)
+		btrfs_disk_key_to_cpu(info_key, &extent_op->key);
+
+	while ((n = rb_prev(n))) {
+		struct btrfs_delayed_ref_node *node;
+		node = rb_entry(n, struct btrfs_delayed_ref_node,
+				rb_node);
+		if (node->bytenr != head->node.bytenr)
+			break;
+		WARN_ON(node->is_head);
+
+		if (node->seq > seq)
+			continue;
+
+		switch (node->action) {
+		case BTRFS_ADD_DELAYED_EXTENT:
+		case BTRFS_UPDATE_DELAYED_HEAD:
+			WARN_ON(1);
+			continue;
+		case BTRFS_ADD_DELAYED_REF:
+			sgn = 1;
+			break;
+		case BTRFS_DROP_DELAYED_REF:
+			sgn = -1;
+			break;
+		default:
+			BUG_ON(1);
+		}
+		switch (node->type) {
+		case BTRFS_TREE_BLOCK_REF_KEY: {
+			struct btrfs_delayed_tree_ref *ref;
+
+			ref = btrfs_delayed_node_to_tree_ref(node);
+			ret = __add_prelim_ref(prefs, ref->root, info_key,
+					       ref->level + 1, 0, node->bytenr,
+					       node->ref_mod * sgn);
+			break;
+		}
+		case BTRFS_SHARED_BLOCK_REF_KEY: {
+			struct btrfs_delayed_tree_ref *ref;
+
+			ref = btrfs_delayed_node_to_tree_ref(node);
+			ret = __add_prelim_ref(prefs, ref->root, info_key,
+					       ref->level + 1, ref->parent,
+					       node->bytenr,
+					       node->ref_mod * sgn);
+			break;
+		}
+		case BTRFS_EXTENT_DATA_REF_KEY: {
+			struct btrfs_delayed_data_ref *ref;
+			struct btrfs_key key;
+
+			ref = btrfs_delayed_node_to_data_ref(node);
+
+			key.objectid = ref->objectid;
+			key.type = BTRFS_EXTENT_DATA_KEY;
+			key.offset = ref->offset;
+			ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
+					       node->bytenr,
+					       node->ref_mod * sgn);
+			break;
+		}
+		case BTRFS_SHARED_DATA_REF_KEY: {
+			struct btrfs_delayed_data_ref *ref;
+			struct btrfs_key key;
+
+			ref = btrfs_delayed_node_to_data_ref(node);
+
+			key.objectid = ref->objectid;
+			key.type = BTRFS_EXTENT_DATA_KEY;
+			key.offset = ref->offset;
+			ret = __add_prelim_ref(prefs, ref->root, &key, 0,
+					       ref->parent, node->bytenr,
+					       node->ref_mod * sgn);
+			break;
+		}
+		default:
+			WARN_ON(1);
+		}
+		BUG_ON(ret);
+	}
+
+	return 0;
+}
+
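The sgn handling is the crux here: every queued delayed ref contributes its ref_mod with a sign derived from its action, so adds and drops that have not hit disk yet still net out to the correct reference count. A small self-contained illustration (the action values and the tally below are invented):

/* userspace sketch, not kernel code: fold a queue of pending add/drop
 * operations into one signed count, as node->ref_mod * sgn does above */
#include <stdio.h>

enum action { ADD_REF, DROP_REF };

struct delayed { enum action action; int ref_mod; unsigned long long seq; };

int main(void)
{
	struct delayed queue[] = {
		{ ADD_REF,  2, 10 },	/* two refs added */
		{ DROP_REF, 1, 11 },	/* one dropped */
		{ ADD_REF,  1, 99 },	/* too new: beyond our seq */
	};
	unsigned long long seq = 50;	/* the point in time we care about */
	int count = 0;

	for (int i = 0; i < 3; i++) {
		int sgn;

		if (queue[i].seq > seq)
			continue;	/* ignore newer updates */
		sgn = queue[i].action == ADD_REF ? 1 : -1;
		count += queue[i].ref_mod * sgn;
	}
	printf("net reference count: %d\n", count);	/* prints 1 */
	return 0;
}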
+/*
+ * add all inline backrefs for bytenr to the list
+ */
+static int __add_inline_refs(struct btrfs_fs_info *fs_info,
+			     struct btrfs_path *path, u64 bytenr,
+			     struct btrfs_key *info_key, int *info_level,
+			     struct list_head *prefs)
+{
+	int ret;
+	int slot;
+	struct extent_buffer *leaf;
+	struct btrfs_key key;
+	unsigned long ptr;
+	unsigned long end;
+	struct btrfs_extent_item *ei;
+	u64 flags;
+	u64 item_size;
+
+	/*
+	 * enumerate all inline refs
+	 */
+	leaf = path->nodes[0];
+	slot = path->slots[0] - 1;
+
+	item_size = btrfs_item_size_nr(leaf, slot);
+	BUG_ON(item_size < sizeof(*ei));
+
+	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
+	flags = btrfs_extent_flags(leaf, ei);
+
+	ptr = (unsigned long)(ei + 1);
+	end = (unsigned long)ei + item_size;
+
+	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+		struct btrfs_tree_block_info *info;
+		struct btrfs_disk_key disk_key;
+
+		info = (struct btrfs_tree_block_info *)ptr;
+		*info_level = btrfs_tree_block_level(leaf, info);
+		btrfs_tree_block_key(leaf, info, &disk_key);
+		btrfs_disk_key_to_cpu(info_key, &disk_key);
+		ptr += sizeof(struct btrfs_tree_block_info);
+		BUG_ON(ptr > end);
+	} else {
+		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
+	}
+
+	while (ptr < end) {
+		struct btrfs_extent_inline_ref *iref;
+		u64 offset;
+		int type;
+
+		iref = (struct btrfs_extent_inline_ref *)ptr;
+		type = btrfs_extent_inline_ref_type(leaf, iref);
+		offset = btrfs_extent_inline_ref_offset(leaf, iref);
+
+		switch (type) {
+		case BTRFS_SHARED_BLOCK_REF_KEY:
+			ret = __add_prelim_ref(prefs, 0, info_key,
+					       *info_level + 1, offset,
+					       bytenr, 1);
+			break;
+		case BTRFS_SHARED_DATA_REF_KEY: {
+			struct btrfs_shared_data_ref *sdref;
+			int count;
+
+			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
+			count = btrfs_shared_data_ref_count(leaf, sdref);
+			ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
+					       bytenr, count);
+			break;
+		}
+		case BTRFS_TREE_BLOCK_REF_KEY:
+			ret = __add_prelim_ref(prefs, offset, info_key,
+					       *info_level + 1, 0, bytenr, 1);
+			break;
+		case BTRFS_EXTENT_DATA_REF_KEY: {
+			struct btrfs_extent_data_ref *dref;
+			int count;
+			u64 root;
+
+			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
+			count = btrfs_extent_data_ref_count(leaf, dref);
+			key.objectid = btrfs_extent_data_ref_objectid(leaf,
+								      dref);
+			key.type = BTRFS_EXTENT_DATA_KEY;
+			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
+			root = btrfs_extent_data_ref_root(leaf, dref);
+			ret = __add_prelim_ref(prefs, root, &key, 0, 0, bytenr,
+					       count);
+			break;
+		}
+		default:
+			WARN_ON(1);
+		}
+		BUG_ON(ret);
+		ptr += btrfs_extent_inline_ref_size(type);
+	}
+
+	return 0;
+}
+
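The walk works because inline refs are variable-size records packed back to back inside the extent item, each record announcing its own type; the cursor simply advances by the size that the type implies. A self-contained sketch of that layout idea (record types and sizes here are invented, not btrfs's on-disk format):

/* userspace sketch, not kernel code: walk variable-size records packed
 * in a byte buffer, advancing by a per-type size the way
 * ptr += btrfs_extent_inline_ref_size(type) does above */
#include <stdio.h>
#include <string.h>

enum { REF_SHORT = 1, REF_LONG = 2 };	/* invented record types */

static size_t ref_size(unsigned char type)
{
	return type == REF_SHORT ? 1 + 4 : 1 + 8;  /* type byte + payload */
}

int main(void)
{
	unsigned char buf[32];
	size_t end = 0, ptr;

	memset(buf, 0, sizeof(buf));
	buf[end] = REF_SHORT; end += ref_size(REF_SHORT);
	buf[end] = REF_LONG;  end += ref_size(REF_LONG);
	buf[end] = REF_SHORT; end += ref_size(REF_SHORT);

	for (ptr = 0; ptr < end; ptr += ref_size(buf[ptr]))
		printf("record of type %d at offset %zu\n", buf[ptr], ptr);
	return 0;
}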
+/*
+ * add all non-inline backrefs for bytenr to the list
+ */
+static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
+			    struct btrfs_path *path, u64 bytenr,
+			    struct btrfs_key *info_key, int info_level,
+			    struct list_head *prefs)
+{
+	struct btrfs_root *extent_root = fs_info->extent_root;
+	int ret;
+	int slot;
+	struct extent_buffer *leaf;
+	struct btrfs_key key;
+
+	while (1) {
+		ret = btrfs_next_item(extent_root, path);
+		if (ret < 0)
+			break;
+		if (ret) {
+			ret = 0;
+			break;
+		}
+
+		slot = path->slots[0];
+		leaf = path->nodes[0];
+		btrfs_item_key_to_cpu(leaf, &key, slot);
+
+		if (key.objectid != bytenr)
+			break;
+		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
+			continue;
+		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
+			break;
+
+		switch (key.type) {
+		case BTRFS_SHARED_BLOCK_REF_KEY:
+			ret = __add_prelim_ref(prefs, 0, info_key,
+					       info_level + 1, key.offset,
+					       bytenr, 1);
+			break;
+		case BTRFS_SHARED_DATA_REF_KEY: {
+			struct btrfs_shared_data_ref *sdref;
+			int count;
+
+			sdref = btrfs_item_ptr(leaf, slot,
+					       struct btrfs_shared_data_ref);
+			count = btrfs_shared_data_ref_count(leaf, sdref);
+			ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
+					       bytenr, count);
+			break;
+		}
+		case BTRFS_TREE_BLOCK_REF_KEY:
+			ret = __add_prelim_ref(prefs, key.offset, info_key,
+					       info_level + 1, 0, bytenr, 1);
+			break;
+		case BTRFS_EXTENT_DATA_REF_KEY: {
+			struct btrfs_extent_data_ref *dref;
+			int count;
+			u64 root;
+
+			dref = btrfs_item_ptr(leaf, slot,
+					      struct btrfs_extent_data_ref);
+			count = btrfs_extent_data_ref_count(leaf, dref);
+			key.objectid = btrfs_extent_data_ref_objectid(leaf,
+								      dref);
+			key.type = BTRFS_EXTENT_DATA_KEY;
+			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
+			root = btrfs_extent_data_ref_root(leaf, dref);
+			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
+					       bytenr, count);
+			break;
+		}
+		default:
+			WARN_ON(1);
+		}
+		BUG_ON(ret);
+	}
+
+	return ret;
+}
+
+/*
+ * this adds all existing backrefs (inline backrefs, keyed backrefs and
+ * delayed refs) for the given bytenr to the refs list, merges duplicates
+ * and resolves indirect refs to their parent bytenr.
+ * When roots are found, they're added to the roots list
+ *
+ * FIXME some caching might speed things up
+ */
+static int find_parent_nodes(struct btrfs_trans_handle *trans,
+			     struct btrfs_fs_info *fs_info, u64 bytenr,
+			     u64 seq, struct ulist *refs, struct ulist *roots)
+{
+	struct btrfs_key key;
+	struct btrfs_path *path;
+	struct btrfs_key info_key = { 0 };
+	struct btrfs_delayed_ref_root *delayed_refs = NULL;
+	struct btrfs_delayed_ref_head *head = NULL;
+	int info_level = 0;
+	int ret;
+	struct list_head prefs_delayed;
+	struct list_head prefs;
+	struct __prelim_ref *ref;
+
+	INIT_LIST_HEAD(&prefs);
+	INIT_LIST_HEAD(&prefs_delayed);
+
+	key.objectid = bytenr;
+	key.type = BTRFS_EXTENT_ITEM_KEY;
+	key.offset = (u64)-1;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	/*
+	 * grab both a lock on the path and a lock on the delayed ref head.
+	 * We need both to get a consistent picture of how the refs look
+	 * at a specified point in time
+	 */
+again:
+	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
+	if (ret < 0)
+		goto out;
+	BUG_ON(ret == 0);
+
+	/*
+	 * check whether there are queued updates for this ref
+	 * and lock the head
+	 */
+	delayed_refs = &trans->transaction->delayed_refs;
+	spin_lock(&delayed_refs->lock);
+	head = btrfs_find_delayed_ref_head(trans, bytenr);
+	if (head) {
+		if (!mutex_trylock(&head->mutex)) {
+			atomic_inc(&head->node.refs);
+			spin_unlock(&delayed_refs->lock);
+
+			btrfs_release_path(path);
+
+			/*
+			 * Mutex was contended, block until it's
+			 * released and try again
+			 */
+			mutex_lock(&head->mutex);
+			mutex_unlock(&head->mutex);
+			btrfs_put_delayed_ref(&head->node);
+			goto again;
+		}
+		ret = __add_delayed_refs(head, seq, &info_key, &prefs_delayed);
+		if (ret)
+			goto out;
+	}
+	spin_unlock(&delayed_refs->lock);
+
+	if (path->slots[0]) {
+		struct extent_buffer *leaf;
+		int slot;
+
+		leaf = path->nodes[0];
+		slot = path->slots[0] - 1;
+		btrfs_item_key_to_cpu(leaf, &key, slot);
+		if (key.objectid == bytenr &&
+		    key.type == BTRFS_EXTENT_ITEM_KEY) {
+			ret = __add_inline_refs(fs_info, path, bytenr,
+						&info_key, &info_level, &prefs);
+			if (ret)
+				goto out;
+			ret = __add_keyed_refs(fs_info, path, bytenr, &info_key,
+					       info_level, &prefs);
+			if (ret)
+				goto out;
+		}
+	}
+	btrfs_release_path(path);
+
+	/*
+	 * when adding the delayed refs above, the info_key might not have
+	 * been known yet. Go over the list and replace the missing keys
+	 */
+	list_for_each_entry(ref, &prefs_delayed, list) {
+		if ((ref->key.offset | ref->key.type | ref->key.objectid) == 0)
+			memcpy(&ref->key, &info_key, sizeof(ref->key));
+	}
+	list_splice_init(&prefs_delayed, &prefs);
+
+	ret = __merge_refs(&prefs, 1);
+	if (ret)
+		goto out;
+
+	ret = __resolve_indirect_refs(fs_info, &prefs);
+	if (ret)
+		goto out;
+
+	ret = __merge_refs(&prefs, 2);
+	if (ret)
+		goto out;
+
+	while (!list_empty(&prefs)) {
+		ref = list_first_entry(&prefs, struct __prelim_ref, list);
+		list_del(&ref->list);
+		if (ref->count < 0)
+			WARN_ON(1);
+		if (ref->count && ref->root_id && ref->parent == 0) {
+			/* no parent == root of tree */
+			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
+			BUG_ON(ret < 0);
+		}
+		if (ref->count && ref->parent) {
+			ret = ulist_add(refs, ref->parent, 0, GFP_NOFS);
+			BUG_ON(ret < 0);
+		}
+		kfree(ref);
+	}
+
+out:
+	if (head)
+		mutex_unlock(&head->mutex);
+	btrfs_free_path(path);
+	while (!list_empty(&prefs)) {
+		ref = list_first_entry(&prefs, struct __prelim_ref, list);
+		list_del(&ref->list);
+		kfree(ref);
+	}
+	while (!list_empty(&prefs_delayed)) {
+		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
+				       list);
+		list_del(&ref->list);
+		kfree(ref);
+	}
+
+	return ret;
+}
+
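The contended-mutex dance above (trylock, drop the other locks, block, retry) is a classic lock-ordering escape hatch: sleeping on head->mutex while still holding the path or the delayed_refs spinlock could deadlock, so everything is released and the whole search restarts. A hedged pthread rendition of the same shape (all names invented; the real code additionally manages refcounts on the head):

/* userspace sketch, not kernel code: try the inner lock; if contended,
 * drop the outer lock, wait for the holder to finish, then redo the
 * whole lookup from scratch */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t head_mutex = PTHREAD_MUTEX_INITIALIZER;

static void locked_work(void)
{
again:
	pthread_mutex_lock(&outer);		/* e.g. the search/spinlock */
	if (pthread_mutex_trylock(&head_mutex) != 0) {
		pthread_mutex_unlock(&outer);	/* never sleep holding it */
		pthread_mutex_lock(&head_mutex);   /* block until released */
		pthread_mutex_unlock(&head_mutex);
		goto again;			/* state may have changed */
	}
	printf("both locks held, consistent snapshot possible\n");
	pthread_mutex_unlock(&head_mutex);
	pthread_mutex_unlock(&outer);
}

int main(void)
{
	locked_work();
	return 0;
}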
+/*
+ * Finds all leaves with a reference to the specified combination of bytenr
+ * and offset. key_list_head will point to a list of corresponding keys
+ * (caller must free each list element). The leaves will be stored in the
+ * leafs ulist, which must be freed with ulist_free.
+ *
+ * returns 0 on success, <0 on error
+ */
+static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
+				struct btrfs_fs_info *fs_info, u64 bytenr,
+				u64 num_bytes, u64 seq, struct ulist **leafs)
+{
+	struct ulist *tmp;
+	int ret;
+
+	tmp = ulist_alloc(GFP_NOFS);
+	if (!tmp)
+		return -ENOMEM;
+	*leafs = ulist_alloc(GFP_NOFS);
+	if (!*leafs) {
+		ulist_free(tmp);
+		return -ENOMEM;
+	}
+
+	ret = find_parent_nodes(trans, fs_info, bytenr, seq, *leafs, tmp);
+	ulist_free(tmp);
+
+	if (ret < 0 && ret != -ENOENT) {
+		ulist_free(*leafs);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * walk all backrefs for a given extent to find all roots that reference this
+ * extent. Walking a backref means finding all extents that reference this
+ * extent and in turn walk the backrefs of those, too. Naturally this is a
+ * recursive process, but here it is implemented in an iterative fashion: We
+ * find all referencing extents for the extent in question and put them on a
+ * list. In turn, we find all referencing extents for those, further appending
+ * to the list. The way we iterate the list allows adding more elements after
+ * the current while iterating. The process stops when we reach the end of the
+ * list. Found roots are added to the roots list.
+ *
+ * returns 0 on success, < 0 on error.
+ */
+int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
+			 struct btrfs_fs_info *fs_info, u64 bytenr,
+			 u64 num_bytes, u64 seq, struct ulist **roots)
+{
+	struct ulist *tmp;
+	struct ulist_node *node = NULL;
+	int ret;
+
+	tmp = ulist_alloc(GFP_NOFS);
+	if (!tmp)
+		return -ENOMEM;
+	*roots = ulist_alloc(GFP_NOFS);
+	if (!*roots) {
+		ulist_free(tmp);
+		return -ENOMEM;
+	}
+
+	while (1) {
+		ret = find_parent_nodes(trans, fs_info, bytenr, seq,
+					tmp, *roots);
+		if (ret < 0 && ret != -ENOENT) {
+			ulist_free(tmp);
+			ulist_free(*roots);
+			return ret;
+		}
+		node = ulist_next(tmp, node);
+		if (!node)
+			break;
+		bytenr = node->val;
+	}
+
+	ulist_free(tmp);
+	return 0;
+}
+
+
 static int __inode_info(u64 inum, u64 ioff, u8 key_type,
 			struct btrfs_root *fs_root, struct btrfs_path *path,
 			struct btrfs_key *found_key)
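The comment above describes the key trick: the temporary ulist doubles as both worklist and visited set, and because ulist_add() only inserts values that are not yet present, iterating while appending terminates even when subtrees are shared. A userspace sketch of that pattern (the parent relation and the grow-only array are invented stand-ins for the ulist):

/* userspace sketch, not kernel code: iterate a grow-only, deduplicated
 * list while appending newly discovered parents, the pattern the tmp
 * ulist implements for btrfs_find_all_roots() */
#include <stdio.h>

#define MAX 16

static long long seen[MAX];
static int nr_seen;

static void add_unique(long long val)		/* like ulist_add() */
{
	for (int i = 0; i < nr_seen; i++)
		if (seen[i] == val)
			return;
	seen[nr_seen++] = val;
}

/* invented parent relation: small extents have two referencing extents */
static void find_parents(long long extent)
{
	if (extent < 4) {
		add_unique(extent * 2);
		add_unique(extent * 2 + 1);
	}
}

int main(void)
{
	add_unique(1);			/* start at the extent in question */
	for (int cur = 0; cur < nr_seen; cur++)	/* list may grow meanwhile */
		find_parents(seen[cur]);

	for (int i = 0; i < nr_seen; i++)
		printf("visited extent %lld\n", seen[i]);
	return 0;
}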
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index 92618837cb8f..d00dfa9ca934 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -20,6 +20,7 @@
 #define __BTRFS_BACKREF__
 
 #include "ioctl.h"
+#include "ulist.h"
 
 struct inode_fs_paths {
 	struct btrfs_path *btrfs_path;
@@ -54,6 +55,10 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
 
 int paths_from_inode(u64 inum, struct inode_fs_paths *ipath);
 
+int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
+			 struct btrfs_fs_info *fs_info, u64 bytenr,
+			 u64 num_bytes, u64 seq, struct ulist **roots);
+
 struct btrfs_data_container *init_data_container(u32 total_bytes);
 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
 				  struct btrfs_path *path);
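To round the picture off, here is a minimal, hypothetical caller of the new export (not part of this patch; error handling trimmed): it collects every root referencing an extent and walks the result with the same ulist iterator the patch uses internally.

/* hypothetical caller sketch, not part of this patch */
static int print_roots_of_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 u64 bytenr, u64 seq)
{
	struct ulist *roots = NULL;
	struct ulist_node *node = NULL;
	int ret;

	ret = btrfs_find_all_roots(trans, fs_info, bytenr, 0, seq, &roots);
	if (ret < 0)
		return ret;

	/* ulist_next(list, NULL) yields the first node, then iterates */
	while ((node = ulist_next(roots, node)))
		pr_debug("extent %llu is referenced from root %llu\n",
			 (unsigned long long)bytenr,
			 (unsigned long long)node->val);

	ulist_free(roots);
	return 0;
}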