Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--  fs/btrfs/extent-tree.c | 2614
1 file changed, 1764 insertions(+), 850 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 35af93355063..a42419c276e2 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -30,43 +30,33 @@
30#include "transaction.h" 30#include "transaction.h"
31#include "volumes.h" 31#include "volumes.h"
32#include "locking.h" 32#include "locking.h"
33#include "ref-cache.h"
34#include "free-space-cache.h" 33#include "free-space-cache.h"
35 34
36#define PENDING_EXTENT_INSERT 0
37#define PENDING_EXTENT_DELETE 1
38#define PENDING_BACKREF_UPDATE 2
39
40struct pending_extent_op {
41 int type;
42 u64 bytenr;
43 u64 num_bytes;
44 u64 parent;
45 u64 orig_parent;
46 u64 generation;
47 u64 orig_generation;
48 int level;
49 struct list_head list;
50 int del;
51};
52
53static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
54 struct btrfs_root *root, u64 parent,
55 u64 root_objectid, u64 ref_generation,
56 u64 owner, struct btrfs_key *ins,
57 int ref_mod);
58static int update_reserved_extents(struct btrfs_root *root, 35static int update_reserved_extents(struct btrfs_root *root,
59 u64 bytenr, u64 num, int reserve); 36 u64 bytenr, u64 num, int reserve);
60static int update_block_group(struct btrfs_trans_handle *trans, 37static int update_block_group(struct btrfs_trans_handle *trans,
61 struct btrfs_root *root, 38 struct btrfs_root *root,
62 u64 bytenr, u64 num_bytes, int alloc, 39 u64 bytenr, u64 num_bytes, int alloc,
63 int mark_free); 40 int mark_free);
64static noinline int __btrfs_free_extent(struct btrfs_trans_handle *trans, 41static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
65 struct btrfs_root *root, 42 struct btrfs_root *root,
66 u64 bytenr, u64 num_bytes, u64 parent, 43 u64 bytenr, u64 num_bytes, u64 parent,
67 u64 root_objectid, u64 ref_generation, 44 u64 root_objectid, u64 owner_objectid,
68 u64 owner_objectid, int pin, 45 u64 owner_offset, int refs_to_drop,
69 int ref_to_drop); 46 struct btrfs_delayed_extent_op *extra_op);
47static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
48 struct extent_buffer *leaf,
49 struct btrfs_extent_item *ei);
50static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
51 struct btrfs_root *root,
52 u64 parent, u64 root_objectid,
53 u64 flags, u64 owner, u64 offset,
54 struct btrfs_key *ins, int ref_mod);
55static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
56 struct btrfs_root *root,
57 u64 parent, u64 root_objectid,
58 u64 flags, struct btrfs_disk_key *key,
59 int level, struct btrfs_key *ins);
70 60
71static int do_chunk_alloc(struct btrfs_trans_handle *trans, 61static int do_chunk_alloc(struct btrfs_trans_handle *trans,
72 struct btrfs_root *extent_root, u64 alloc_bytes, 62 struct btrfs_root *extent_root, u64 alloc_bytes,
@@ -453,196 +443,973 @@ int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
453 * maintenance. This is actually the same as #2, but with a slightly 443 * maintenance. This is actually the same as #2, but with a slightly
454 * different use case. 444 * different use case.
455 * 445 *
446 * There are two kinds of back refs. Implicit back refs are optimized
447 * for pointers in non-shared tree blocks. For a given pointer in a block,
448 * back refs of this kind provide information about the block's owner tree
449 * and the pointer's key. This information allows us to find the block by
450 * b-tree searching. Full back refs are for pointers in tree blocks that
451 * are not referenced by their owner trees; the location of the tree block
452 * is recorded in the back refs. Full back refs are actually generic and
453 * could be used everywhere implicit back refs are used. Their major
454 * shortcoming is the overhead: every time a tree block gets COWed, the
455 * back ref entries for all pointers in it have to be updated.
456 *
457 * For a newly allocated tree block, we use implicit back refs for the
458 * pointers in it. This means most tree related operations only involve
459 * implicit back refs. For a tree block created in an old transaction, the
460 * only way to drop a reference to it is to COW it. So we can detect the
461 * event that a tree block loses its owner tree's reference and do the
462 * back refs conversion.
463 *
464 * When a tree block is COW'd through a tree, there are four cases:
465 *
466 * The reference count of the block is one and the tree is the block's
467 * owner tree. Nothing to do in this case.
468 *
469 * The reference count of the block is one and the tree is not the
470 * block's owner tree. In this case, full back refs are used for pointers
471 * in the block. Remove these full back refs and add implicit back refs
472 * for every pointer in the new block.
473 *
474 * The reference count of the block is greater than one and the tree is
475 * the block's owner tree. In this case, implicit back refs are used for
476 * pointers in the block. Add full back refs for every pointer in the
477 * block and increase the lower level extents' reference counts. The
478 * original implicit back refs are carried over to the new block.
479 *
480 * The reference count of the block is greater than one and the tree is
481 * not the block's owner tree. Add implicit back refs for every pointer in
482 * the new block and increase the lower level extents' reference counts.
483 *
484 * Back reference key composition:
485 *
486 * The key objectid corresponds to the first byte in the extent and
487 * the key type is used to differentiate between types of back refs.
488 * The meaning of the key offset differs between the different types
489 * of back refs.
490 *
456 * File extents can be referenced by: 491 * File extents can be referenced by:
457 * 492 *
458 * - multiple snapshots, subvolumes, or different generations in one subvol 493 * - multiple snapshots, subvolumes, or different generations in one subvol
459 * - different files inside a single subvolume 494 * - different files inside a single subvolume
460 * - different offsets inside a file (bookend extents in file.c) 495 * - different offsets inside a file (bookend extents in file.c)
461 * 496 *
462 * The extent ref structure has fields for: 497 * The extent ref structure for the implicit back refs has fields for:
463 * 498 *
464 * - Objectid of the subvolume root 499 * - Objectid of the subvolume root
465 * - Generation number of the tree holding the reference
466 * - objectid of the file holding the reference 500 * - objectid of the file holding the reference
467 * - number of references holding by parent node (alway 1 for tree blocks) 501 * - original offset in the file
468 * 502 * - how many bookend extents
469 * Btree leaf may hold multiple references to a file extent. In most cases,
470 * these references are from same file and the corresponding offsets inside
471 * the file are close together.
472 *
473 * When a file extent is allocated the fields are filled in:
474 * (root_key.objectid, trans->transid, inode objectid, 1)
475 * 503 *
476 * When a leaf is cow'd new references are added for every file extent found 504 * The key offset for the implicit back refs is hash of the first
477 * in the leaf. It looks similar to the create case, but trans->transid will 505 * three fields.
478 * be different when the block is cow'd.
479 * 506 *
480 * (root_key.objectid, trans->transid, inode objectid, 507 * The extent ref structure for the full back refs has a field for:
481 * number of references in the leaf)
482 * 508 *
483 * When a file extent is removed either during snapshot deletion or 509 * - number of pointers in the tree leaf
484 * file truncation, we find the corresponding back reference and check
485 * the following fields:
486 * 510 *
487 * (btrfs_header_owner(leaf), btrfs_header_generation(leaf), 511 * The key offset for the full back refs is the first byte of
488 * inode objectid) 512 * the tree leaf
489 * 513 *
490 * Btree extents can be referenced by: 514 * When a file extent is allocated, the implicit back refs are used.
491 * 515 * The fields are filled in:
492 * - Different subvolumes
493 * - Different generations of the same subvolume
494 *
495 * When a tree block is created, back references are inserted:
496 * 516 *
497 * (root->root_key.objectid, trans->transid, level, 1) 517 * (root_key.objectid, inode objectid, offset in file, 1)
498 * 518 *
499 * When a tree block is cow'd, new back references are added for all the 519 * When a file extent is removed during file truncation, we find the
500 * blocks it points to. If the tree block isn't in reference counted root, 520 * corresponding implicit back refs and check the following fields:
501 * the old back references are removed. These new back references are of
502 * the form (trans->transid will have increased since creation):
503 * 521 *
504 * (root->root_key.objectid, trans->transid, level, 1) 522 * (btrfs_header_owner(leaf), inode objectid, offset in file)
505 * 523 *
506 * When a backref is in deleting, the following fields are checked: 524 * Btree extents can be referenced by:
507 * 525 *
508 * if backref was for a tree root: 526 * - Different subvolumes
509 * (btrfs_header_owner(itself), btrfs_header_generation(itself), level)
510 * else
511 * (btrfs_header_owner(parent), btrfs_header_generation(parent), level)
512 * 527 *
513 * Back Reference Key composing: 528 * Both the implicit back refs and the full back refs for tree blocks
529 * consist only of a key. The key offset for the implicit back refs is
530 * the objectid of the block's owner tree. The key offset for the full
531 * back refs is the first byte of the parent block.
514 * 532 *
515 * The key objectid corresponds to the first byte in the extent, the key 533 * When implicit back refs are used, the lowest key and the level
516 * type is set to BTRFS_EXTENT_REF_KEY, and the key offset is the first 534 * of the tree block are required. This information is stored in
517 * byte of parent extent. If a extent is tree root, the key offset is set 535 * the tree block info structure.
518 * to the key objectid.
519 */ 536 */
520 537
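The four back ref key shapes the comment above describes, condensed into one place; the actual key setup for each case appears in lookup_tree_block_ref() and lookup_extent_data_ref() below.

/*
 * All back ref keys use key.objectid = first byte of the extent:
 *
 *   tree block, implicit:   (bytenr, TREE_BLOCK_REF_KEY,   owner root objectid)
 *   tree block, full:       (bytenr, SHARED_BLOCK_REF_KEY, parent block bytenr)
 *   data extent, implicit:  (bytenr, EXTENT_DATA_REF_KEY,
 *                            hash(root objectid, inode objectid, file offset))
 *   data extent, full:      (bytenr, SHARED_DATA_REF_KEY,  parent leaf bytenr)
 */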
521static noinline int lookup_extent_backref(struct btrfs_trans_handle *trans, 538#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
522 struct btrfs_root *root, 539static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
523 struct btrfs_path *path, 540 struct btrfs_root *root,
524 u64 bytenr, u64 parent, 541 struct btrfs_path *path,
525 u64 ref_root, u64 ref_generation, 542 u64 owner, u32 extra_size)
526 u64 owner_objectid, int del)
527{ 543{
544 struct btrfs_extent_item *item;
545 struct btrfs_extent_item_v0 *ei0;
546 struct btrfs_extent_ref_v0 *ref0;
547 struct btrfs_tree_block_info *bi;
548 struct extent_buffer *leaf;
528 struct btrfs_key key; 549 struct btrfs_key key;
529 struct btrfs_extent_ref *ref; 550 struct btrfs_key found_key;
551 u32 new_size = sizeof(*item);
552 u64 refs;
553 int ret;
554
555 leaf = path->nodes[0];
556 BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
557
558 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
559 ei0 = btrfs_item_ptr(leaf, path->slots[0],
560 struct btrfs_extent_item_v0);
561 refs = btrfs_extent_refs_v0(leaf, ei0);
562
563 if (owner == (u64)-1) {
564 while (1) {
565 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
566 ret = btrfs_next_leaf(root, path);
567 if (ret < 0)
568 return ret;
569 BUG_ON(ret > 0);
570 leaf = path->nodes[0];
571 }
572 btrfs_item_key_to_cpu(leaf, &found_key,
573 path->slots[0]);
574 BUG_ON(key.objectid != found_key.objectid);
575 if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
576 path->slots[0]++;
577 continue;
578 }
579 ref0 = btrfs_item_ptr(leaf, path->slots[0],
580 struct btrfs_extent_ref_v0);
581 owner = btrfs_ref_objectid_v0(leaf, ref0);
582 break;
583 }
584 }
585 btrfs_release_path(root, path);
586
587 if (owner < BTRFS_FIRST_FREE_OBJECTID)
588 new_size += sizeof(*bi);
589
590 new_size -= sizeof(*ei0);
591 ret = btrfs_search_slot(trans, root, &key, path,
592 new_size + extra_size, 1);
593 if (ret < 0)
594 return ret;
595 BUG_ON(ret);
596
597 ret = btrfs_extend_item(trans, root, path, new_size);
598 BUG_ON(ret);
599
600 leaf = path->nodes[0];
601 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
602 btrfs_set_extent_refs(leaf, item, refs);
603 /* FIXME: get real generation */
604 btrfs_set_extent_generation(leaf, item, 0);
605 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
606 btrfs_set_extent_flags(leaf, item,
607 BTRFS_EXTENT_FLAG_TREE_BLOCK |
608 BTRFS_BLOCK_FLAG_FULL_BACKREF);
609 bi = (struct btrfs_tree_block_info *)(item + 1);
610 /* FIXME: get first key of the block */
611 memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
612 btrfs_set_tree_block_level(leaf, bi, (int)owner);
613 } else {
614 btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
615 }
616 btrfs_mark_buffer_dirty(leaf);
617 return 0;
618}
619#endif
620
621static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
622{
623 u32 high_crc = ~(u32)0;
624 u32 low_crc = ~(u32)0;
625 __le64 lenum;
626
627 lenum = cpu_to_le64(root_objectid);
628 high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
629 lenum = cpu_to_le64(owner);
630 low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
631 lenum = cpu_to_le64(offset);
632 low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
633
634 return ((u64)high_crc << 31) ^ (u64)low_crc;
635}
636
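For readers who want to poke at the hashing above outside the kernel, a minimal stand-alone sketch follows. It assumes btrfs_crc32c() behaves as a plain seeded CRC-32C (reflected polynomial 0x82F63B78, no final inversion); crc32c_sw(), crc32c_le64() and the values in main() are illustrative, not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* bitwise CRC-32C: seed in, no final inversion (assumed btrfs_crc32c behaviour) */
static uint32_t crc32c_sw(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0x82F63B78U : 0);
	}
	return crc;
}

/* hash the little-endian representation of a u64, as cpu_to_le64() arranges */
static uint32_t crc32c_le64(uint32_t crc, uint64_t v)
{
	uint8_t b[8];
	for (int i = 0; i < 8; i++)
		b[i] = (uint8_t)(v >> (8 * i));
	return crc32c_sw(crc, b, sizeof(b));
}

static uint64_t hash_extent_data_ref(uint64_t root_objectid, uint64_t owner,
				     uint64_t offset)
{
	uint32_t high_crc = ~(uint32_t)0;
	uint32_t low_crc = ~(uint32_t)0;

	high_crc = crc32c_le64(high_crc, root_objectid);
	low_crc = crc32c_le64(low_crc, owner);
	low_crc = crc32c_le64(low_crc, offset);

	/* same 31-bit shift as the kernel code above */
	return ((uint64_t)high_crc << 31) ^ (uint64_t)low_crc;
}

int main(void)
{
	/* made-up example: fs tree (root 5), inode 257, file offset 0 */
	printf("%llu\n",
	       (unsigned long long)hash_extent_data_ref(5, 257, 0));
	return 0;
}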
637static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
638 struct btrfs_extent_data_ref *ref)
639{
640 return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
641 btrfs_extent_data_ref_objectid(leaf, ref),
642 btrfs_extent_data_ref_offset(leaf, ref));
643}
644
645static int match_extent_data_ref(struct extent_buffer *leaf,
646 struct btrfs_extent_data_ref *ref,
647 u64 root_objectid, u64 owner, u64 offset)
648{
649 if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
650 btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
651 btrfs_extent_data_ref_offset(leaf, ref) != offset)
652 return 0;
653 return 1;
654}
655
656static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
657 struct btrfs_root *root,
658 struct btrfs_path *path,
659 u64 bytenr, u64 parent,
660 u64 root_objectid,
661 u64 owner, u64 offset)
662{
663 struct btrfs_key key;
664 struct btrfs_extent_data_ref *ref;
530 struct extent_buffer *leaf; 665 struct extent_buffer *leaf;
531 u64 ref_objectid; 666 u32 nritems;
532 int ret; 667 int ret;
668 int recow;
669 int err = -ENOENT;
533 670
534 key.objectid = bytenr; 671 key.objectid = bytenr;
535 key.type = BTRFS_EXTENT_REF_KEY; 672 if (parent) {
536 key.offset = parent; 673 key.type = BTRFS_SHARED_DATA_REF_KEY;
674 key.offset = parent;
675 } else {
676 key.type = BTRFS_EXTENT_DATA_REF_KEY;
677 key.offset = hash_extent_data_ref(root_objectid,
678 owner, offset);
679 }
680again:
681 recow = 0;
682 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
683 if (ret < 0) {
684 err = ret;
685 goto fail;
686 }
537 687
538 ret = btrfs_search_slot(trans, root, &key, path, del ? -1 : 0, 1); 688 if (parent) {
539 if (ret < 0) 689 if (!ret)
540 goto out; 690 return 0;
541 if (ret > 0) { 691#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
542 ret = -ENOENT; 692 key.type = BTRFS_EXTENT_REF_V0_KEY;
543 goto out; 693 btrfs_release_path(root, path);
694 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
695 if (ret < 0) {
696 err = ret;
697 goto fail;
698 }
699 if (!ret)
700 return 0;
701#endif
702 goto fail;
544 } 703 }
545 704
546 leaf = path->nodes[0]; 705 leaf = path->nodes[0];
547 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref); 706 nritems = btrfs_header_nritems(leaf);
548 ref_objectid = btrfs_ref_objectid(leaf, ref); 707 while (1) {
549 if (btrfs_ref_root(leaf, ref) != ref_root || 708 if (path->slots[0] >= nritems) {
550 btrfs_ref_generation(leaf, ref) != ref_generation || 709 ret = btrfs_next_leaf(root, path);
551 (ref_objectid != owner_objectid && 710 if (ret < 0)
552 ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) { 711 err = ret;
553 ret = -EIO; 712 if (ret)
554 WARN_ON(1); 713 goto fail;
555 goto out; 714
715 leaf = path->nodes[0];
716 nritems = btrfs_header_nritems(leaf);
717 recow = 1;
718 }
719
720 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
721 if (key.objectid != bytenr ||
722 key.type != BTRFS_EXTENT_DATA_REF_KEY)
723 goto fail;
724
725 ref = btrfs_item_ptr(leaf, path->slots[0],
726 struct btrfs_extent_data_ref);
727
728 if (match_extent_data_ref(leaf, ref, root_objectid,
729 owner, offset)) {
730 if (recow) {
731 btrfs_release_path(root, path);
732 goto again;
733 }
734 err = 0;
735 break;
736 }
737 path->slots[0]++;
556 } 738 }
557 ret = 0; 739fail:
558out: 740 return err;
559 return ret;
560} 741}
561 742
562static noinline int insert_extent_backref(struct btrfs_trans_handle *trans, 743static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
563 struct btrfs_root *root, 744 struct btrfs_root *root,
564 struct btrfs_path *path, 745 struct btrfs_path *path,
565 u64 bytenr, u64 parent, 746 u64 bytenr, u64 parent,
566 u64 ref_root, u64 ref_generation, 747 u64 root_objectid, u64 owner,
567 u64 owner_objectid, 748 u64 offset, int refs_to_add)
568 int refs_to_add)
569{ 749{
570 struct btrfs_key key; 750 struct btrfs_key key;
571 struct extent_buffer *leaf; 751 struct extent_buffer *leaf;
572 struct btrfs_extent_ref *ref; 752 u32 size;
573 u32 num_refs; 753 u32 num_refs;
574 int ret; 754 int ret;
575 755
576 key.objectid = bytenr; 756 key.objectid = bytenr;
577 key.type = BTRFS_EXTENT_REF_KEY; 757 if (parent) {
578 key.offset = parent; 758 key.type = BTRFS_SHARED_DATA_REF_KEY;
759 key.offset = parent;
760 size = sizeof(struct btrfs_shared_data_ref);
761 } else {
762 key.type = BTRFS_EXTENT_DATA_REF_KEY;
763 key.offset = hash_extent_data_ref(root_objectid,
764 owner, offset);
765 size = sizeof(struct btrfs_extent_data_ref);
766 }
579 767
580 ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*ref)); 768 ret = btrfs_insert_empty_item(trans, root, path, &key, size);
581 if (ret == 0) { 769 if (ret && ret != -EEXIST)
582 leaf = path->nodes[0]; 770 goto fail;
583 ref = btrfs_item_ptr(leaf, path->slots[0], 771
584 struct btrfs_extent_ref); 772 leaf = path->nodes[0];
585 btrfs_set_ref_root(leaf, ref, ref_root); 773 if (parent) {
586 btrfs_set_ref_generation(leaf, ref, ref_generation); 774 struct btrfs_shared_data_ref *ref;
587 btrfs_set_ref_objectid(leaf, ref, owner_objectid);
588 btrfs_set_ref_num_refs(leaf, ref, refs_to_add);
589 } else if (ret == -EEXIST) {
590 u64 existing_owner;
591
592 BUG_ON(owner_objectid < BTRFS_FIRST_FREE_OBJECTID);
593 leaf = path->nodes[0];
594 ref = btrfs_item_ptr(leaf, path->slots[0], 775 ref = btrfs_item_ptr(leaf, path->slots[0],
595 struct btrfs_extent_ref); 776 struct btrfs_shared_data_ref);
596 if (btrfs_ref_root(leaf, ref) != ref_root || 777 if (ret == 0) {
597 btrfs_ref_generation(leaf, ref) != ref_generation) { 778 btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
598 ret = -EIO; 779 } else {
599 WARN_ON(1); 780 num_refs = btrfs_shared_data_ref_count(leaf, ref);
600 goto out; 781 num_refs += refs_to_add;
782 btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
601 } 783 }
784 } else {
785 struct btrfs_extent_data_ref *ref;
786 while (ret == -EEXIST) {
787 ref = btrfs_item_ptr(leaf, path->slots[0],
788 struct btrfs_extent_data_ref);
789 if (match_extent_data_ref(leaf, ref, root_objectid,
790 owner, offset))
791 break;
792 btrfs_release_path(root, path);
793 key.offset++;
794 ret = btrfs_insert_empty_item(trans, root, path, &key,
795 size);
796 if (ret && ret != -EEXIST)
797 goto fail;
602 798
603 num_refs = btrfs_ref_num_refs(leaf, ref); 799 leaf = path->nodes[0];
604 BUG_ON(num_refs == 0); 800 }
605 btrfs_set_ref_num_refs(leaf, ref, num_refs + refs_to_add); 801 ref = btrfs_item_ptr(leaf, path->slots[0],
606 802 struct btrfs_extent_data_ref);
607 existing_owner = btrfs_ref_objectid(leaf, ref); 803 if (ret == 0) {
608 if (existing_owner != owner_objectid && 804 btrfs_set_extent_data_ref_root(leaf, ref,
609 existing_owner != BTRFS_MULTIPLE_OBJECTIDS) { 805 root_objectid);
610 btrfs_set_ref_objectid(leaf, ref, 806 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
611 BTRFS_MULTIPLE_OBJECTIDS); 807 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
808 btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
809 } else {
810 num_refs = btrfs_extent_data_ref_count(leaf, ref);
811 num_refs += refs_to_add;
812 btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
612 } 813 }
613 ret = 0;
614 } else {
615 goto out;
616 } 814 }
617 btrfs_unlock_up_safe(path, 1); 815 btrfs_mark_buffer_dirty(leaf);
618 btrfs_mark_buffer_dirty(path->nodes[0]); 816 ret = 0;
619out: 817fail:
620 btrfs_release_path(root, path); 818 btrfs_release_path(root, path);
621 return ret; 819 return ret;
622} 820}
623 821
624static noinline int remove_extent_backref(struct btrfs_trans_handle *trans, 822static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
625 struct btrfs_root *root, 823 struct btrfs_root *root,
626 struct btrfs_path *path, 824 struct btrfs_path *path,
627 int refs_to_drop) 825 int refs_to_drop)
628{ 826{
827 struct btrfs_key key;
828 struct btrfs_extent_data_ref *ref1 = NULL;
829 struct btrfs_shared_data_ref *ref2 = NULL;
629 struct extent_buffer *leaf; 830 struct extent_buffer *leaf;
630 struct btrfs_extent_ref *ref; 831 u32 num_refs = 0;
631 u32 num_refs;
632 int ret = 0; 832 int ret = 0;
633 833
634 leaf = path->nodes[0]; 834 leaf = path->nodes[0];
635 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref); 835 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
636 num_refs = btrfs_ref_num_refs(leaf, ref); 836
837 if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
838 ref1 = btrfs_item_ptr(leaf, path->slots[0],
839 struct btrfs_extent_data_ref);
840 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
841 } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
842 ref2 = btrfs_item_ptr(leaf, path->slots[0],
843 struct btrfs_shared_data_ref);
844 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
845#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
846 } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
847 struct btrfs_extent_ref_v0 *ref0;
848 ref0 = btrfs_item_ptr(leaf, path->slots[0],
849 struct btrfs_extent_ref_v0);
850 num_refs = btrfs_ref_count_v0(leaf, ref0);
851#endif
852 } else {
853 BUG();
854 }
855
637 BUG_ON(num_refs < refs_to_drop); 856 BUG_ON(num_refs < refs_to_drop);
638 num_refs -= refs_to_drop; 857 num_refs -= refs_to_drop;
858
639 if (num_refs == 0) { 859 if (num_refs == 0) {
640 ret = btrfs_del_item(trans, root, path); 860 ret = btrfs_del_item(trans, root, path);
641 } else { 861 } else {
642 btrfs_set_ref_num_refs(leaf, ref, num_refs); 862 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
863 btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
864 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
865 btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
866#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
867 else {
868 struct btrfs_extent_ref_v0 *ref0;
869 ref0 = btrfs_item_ptr(leaf, path->slots[0],
870 struct btrfs_extent_ref_v0);
871 btrfs_set_ref_count_v0(leaf, ref0, num_refs);
872 }
873#endif
643 btrfs_mark_buffer_dirty(leaf); 874 btrfs_mark_buffer_dirty(leaf);
644 } 875 }
876 return ret;
877}
878
879static noinline u32 extent_data_ref_count(struct btrfs_root *root,
880 struct btrfs_path *path,
881 struct btrfs_extent_inline_ref *iref)
882{
883 struct btrfs_key key;
884 struct extent_buffer *leaf;
885 struct btrfs_extent_data_ref *ref1;
886 struct btrfs_shared_data_ref *ref2;
887 u32 num_refs = 0;
888
889 leaf = path->nodes[0];
890 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
891 if (iref) {
892 if (btrfs_extent_inline_ref_type(leaf, iref) ==
893 BTRFS_EXTENT_DATA_REF_KEY) {
894 ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
895 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
896 } else {
897 ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
898 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
899 }
900 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
901 ref1 = btrfs_item_ptr(leaf, path->slots[0],
902 struct btrfs_extent_data_ref);
903 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
904 } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
905 ref2 = btrfs_item_ptr(leaf, path->slots[0],
906 struct btrfs_shared_data_ref);
907 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
908#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
909 } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
910 struct btrfs_extent_ref_v0 *ref0;
911 ref0 = btrfs_item_ptr(leaf, path->slots[0],
912 struct btrfs_extent_ref_v0);
913 num_refs = btrfs_ref_count_v0(leaf, ref0);
914#endif
915 } else {
916 WARN_ON(1);
917 }
918 return num_refs;
919}
920
921static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
922 struct btrfs_root *root,
923 struct btrfs_path *path,
924 u64 bytenr, u64 parent,
925 u64 root_objectid)
926{
927 struct btrfs_key key;
928 int ret;
929
930 key.objectid = bytenr;
931 if (parent) {
932 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
933 key.offset = parent;
934 } else {
935 key.type = BTRFS_TREE_BLOCK_REF_KEY;
936 key.offset = root_objectid;
937 }
938
939 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
940 if (ret > 0)
941 ret = -ENOENT;
942#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
943 if (ret == -ENOENT && parent) {
944 btrfs_release_path(root, path);
945 key.type = BTRFS_EXTENT_REF_V0_KEY;
946 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
947 if (ret > 0)
948 ret = -ENOENT;
949 }
950#endif
951 return ret;
952}
953
954static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
955 struct btrfs_root *root,
956 struct btrfs_path *path,
957 u64 bytenr, u64 parent,
958 u64 root_objectid)
959{
960 struct btrfs_key key;
961 int ret;
962
963 key.objectid = bytenr;
964 if (parent) {
965 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
966 key.offset = parent;
967 } else {
968 key.type = BTRFS_TREE_BLOCK_REF_KEY;
969 key.offset = root_objectid;
970 }
971
972 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
973 btrfs_release_path(root, path);
974 return ret;
975}
976
977static inline int extent_ref_type(u64 parent, u64 owner)
978{
979 int type;
980 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
981 if (parent > 0)
982 type = BTRFS_SHARED_BLOCK_REF_KEY;
983 else
984 type = BTRFS_TREE_BLOCK_REF_KEY;
985 } else {
986 if (parent > 0)
987 type = BTRFS_SHARED_DATA_REF_KEY;
988 else
989 type = BTRFS_EXTENT_DATA_REF_KEY;
990 }
991 return type;
992}
993
994static int find_next_key(struct btrfs_path *path, struct btrfs_key *key)
995
996{
997 int level;
998 BUG_ON(!path->keep_locks);
999 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
1000 if (!path->nodes[level])
1001 break;
1002 btrfs_assert_tree_locked(path->nodes[level]);
1003 if (path->slots[level] + 1 >=
1004 btrfs_header_nritems(path->nodes[level]))
1005 continue;
1006 if (level == 0)
1007 btrfs_item_key_to_cpu(path->nodes[level], key,
1008 path->slots[level] + 1);
1009 else
1010 btrfs_node_key_to_cpu(path->nodes[level], key,
1011 path->slots[level] + 1);
1012 return 0;
1013 }
1014 return 1;
1015}
1016
1017/*
1018 * look for inline back ref. if back ref is found, *ref_ret is set
1019 * to the address of inline back ref, and 0 is returned.
1020 *
1021 * if back ref isn't found, *ref_ret is set to the address where it
1022 * should be inserted, and -ENOENT is returned.
1023 *
1024 * if insert is true and there are too many inline back refs, the path
1025 * points to the extent item, and -EAGAIN is returned.
1026 *
1027 * NOTE: inline back refs are ordered in the same way that back ref
1028 * items in the tree are ordered.
1029 */
1030static noinline_for_stack
1031int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1032 struct btrfs_root *root,
1033 struct btrfs_path *path,
1034 struct btrfs_extent_inline_ref **ref_ret,
1035 u64 bytenr, u64 num_bytes,
1036 u64 parent, u64 root_objectid,
1037 u64 owner, u64 offset, int insert)
1038{
1039 struct btrfs_key key;
1040 struct extent_buffer *leaf;
1041 struct btrfs_extent_item *ei;
1042 struct btrfs_extent_inline_ref *iref;
1043 u64 flags;
1044 u64 item_size;
1045 unsigned long ptr;
1046 unsigned long end;
1047 int extra_size;
1048 int type;
1049 int want;
1050 int ret;
1051 int err = 0;
1052
1053 key.objectid = bytenr;
1054 key.type = BTRFS_EXTENT_ITEM_KEY;
1055 key.offset = num_bytes;
1056
1057 want = extent_ref_type(parent, owner);
1058 if (insert) {
1059 extra_size = btrfs_extent_inline_ref_size(want);
1060 if (owner >= BTRFS_FIRST_FREE_OBJECTID)
1061 path->keep_locks = 1;
1062 } else
1063 extra_size = -1;
1064 ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1065 if (ret < 0) {
1066 err = ret;
1067 goto out;
1068 }
1069 BUG_ON(ret);
1070
1071 leaf = path->nodes[0];
1072 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1073#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1074 if (item_size < sizeof(*ei)) {
1075 if (!insert) {
1076 err = -ENOENT;
1077 goto out;
1078 }
1079 ret = convert_extent_item_v0(trans, root, path, owner,
1080 extra_size);
1081 if (ret < 0) {
1082 err = ret;
1083 goto out;
1084 }
1085 leaf = path->nodes[0];
1086 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1087 }
1088#endif
1089 BUG_ON(item_size < sizeof(*ei));
1090
1091 if (owner < BTRFS_FIRST_FREE_OBJECTID && insert &&
1092 item_size + extra_size >= BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1093 err = -EAGAIN;
1094 goto out;
1095 }
1096
1097 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1098 flags = btrfs_extent_flags(leaf, ei);
1099
1100 ptr = (unsigned long)(ei + 1);
1101 end = (unsigned long)ei + item_size;
1102
1103 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1104 ptr += sizeof(struct btrfs_tree_block_info);
1105 BUG_ON(ptr > end);
1106 } else {
1107 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1108 }
1109
1110 err = -ENOENT;
1111 while (1) {
1112 if (ptr >= end) {
1113 WARN_ON(ptr > end);
1114 break;
1115 }
1116 iref = (struct btrfs_extent_inline_ref *)ptr;
1117 type = btrfs_extent_inline_ref_type(leaf, iref);
1118 if (want < type)
1119 break;
1120 if (want > type) {
1121 ptr += btrfs_extent_inline_ref_size(type);
1122 continue;
1123 }
1124
1125 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1126 struct btrfs_extent_data_ref *dref;
1127 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1128 if (match_extent_data_ref(leaf, dref, root_objectid,
1129 owner, offset)) {
1130 err = 0;
1131 break;
1132 }
1133 if (hash_extent_data_ref_item(leaf, dref) <
1134 hash_extent_data_ref(root_objectid, owner, offset))
1135 break;
1136 } else {
1137 u64 ref_offset;
1138 ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1139 if (parent > 0) {
1140 if (parent == ref_offset) {
1141 err = 0;
1142 break;
1143 }
1144 if (ref_offset < parent)
1145 break;
1146 } else {
1147 if (root_objectid == ref_offset) {
1148 err = 0;
1149 break;
1150 }
1151 if (ref_offset < root_objectid)
1152 break;
1153 }
1154 }
1155 ptr += btrfs_extent_inline_ref_size(type);
1156 }
1157 if (err == -ENOENT && insert) {
1158 if (item_size + extra_size >=
1159 BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1160 err = -EAGAIN;
1161 goto out;
1162 }
1163 /*
1164 * To add a new inline back ref, we have to make sure
1165 * there is no corresponding back ref item.
1166 * For simplicity, we just do not add a new inline back
1167 * ref if there is any kind of item for this block.
1168 */
1169 if (owner >= BTRFS_FIRST_FREE_OBJECTID &&
1170 find_next_key(path, &key) == 0 && key.objectid == bytenr) {
1171 err = -EAGAIN;
1172 goto out;
1173 }
1174 }
1175 *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1176out:
1177 if (insert && owner >= BTRFS_FIRST_FREE_OBJECTID) {
1178 path->keep_locks = 0;
1179 btrfs_unlock_up_safe(path, 1);
1180 }
1181 return err;
1182}
1183
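For orientation while reading the walk above, a sketch of how an extent item with inline refs is laid out in a leaf (structure names are the ones used above; exact sizes come from the headers):

/*
 * item body for key (bytenr, EXTENT_ITEM_KEY, num_bytes):
 *
 *   struct btrfs_extent_item          refs, generation, flags
 *   struct btrfs_tree_block_info      only when BTRFS_EXTENT_FLAG_TREE_BLOCK is set
 *   inline ref 0: btrfs_extent_inline_ref { type, 64bit offset }
 *       EXTENT_DATA_REF_KEY:  a btrfs_extent_data_ref overlays &iref->offset
 *       SHARED_DATA_REF_KEY:  a btrfs_shared_data_ref follows the iref
 *       *_BLOCK_REF_KEY:      the offset field alone carries the back ref
 *   inline ref 1: ...                 ordered the same way as keyed ref items
 */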
1184/*
1185 * helper to add new inline back ref
1186 */
1187static noinline_for_stack
1188int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1189 struct btrfs_root *root,
1190 struct btrfs_path *path,
1191 struct btrfs_extent_inline_ref *iref,
1192 u64 parent, u64 root_objectid,
1193 u64 owner, u64 offset, int refs_to_add,
1194 struct btrfs_delayed_extent_op *extent_op)
1195{
1196 struct extent_buffer *leaf;
1197 struct btrfs_extent_item *ei;
1198 unsigned long ptr;
1199 unsigned long end;
1200 unsigned long item_offset;
1201 u64 refs;
1202 int size;
1203 int type;
1204 int ret;
1205
1206 leaf = path->nodes[0];
1207 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1208 item_offset = (unsigned long)iref - (unsigned long)ei;
1209
1210 type = extent_ref_type(parent, owner);
1211 size = btrfs_extent_inline_ref_size(type);
1212
1213 ret = btrfs_extend_item(trans, root, path, size);
1214 BUG_ON(ret);
1215
1216 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1217 refs = btrfs_extent_refs(leaf, ei);
1218 refs += refs_to_add;
1219 btrfs_set_extent_refs(leaf, ei, refs);
1220 if (extent_op)
1221 __run_delayed_extent_op(extent_op, leaf, ei);
1222
1223 ptr = (unsigned long)ei + item_offset;
1224 end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1225 if (ptr < end - size)
1226 memmove_extent_buffer(leaf, ptr + size, ptr,
1227 end - size - ptr);
1228
1229 iref = (struct btrfs_extent_inline_ref *)ptr;
1230 btrfs_set_extent_inline_ref_type(leaf, iref, type);
1231 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1232 struct btrfs_extent_data_ref *dref;
1233 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1234 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1235 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1236 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1237 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1238 } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1239 struct btrfs_shared_data_ref *sref;
1240 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1241 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1242 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1243 } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1244 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1245 } else {
1246 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1247 }
1248 btrfs_mark_buffer_dirty(leaf);
1249 return 0;
1250}
1251
1252static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1253 struct btrfs_root *root,
1254 struct btrfs_path *path,
1255 struct btrfs_extent_inline_ref **ref_ret,
1256 u64 bytenr, u64 num_bytes, u64 parent,
1257 u64 root_objectid, u64 owner, u64 offset)
1258{
1259 int ret;
1260
1261 ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1262 bytenr, num_bytes, parent,
1263 root_objectid, owner, offset, 0);
1264 if (ret != -ENOENT)
1265 return ret;
1266
645 btrfs_release_path(root, path); 1267 btrfs_release_path(root, path);
1268 *ref_ret = NULL;
1269
1270 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1271 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1272 root_objectid);
1273 } else {
1274 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1275 root_objectid, owner, offset);
1276 }
1277 return ret;
1278}
1279
1280/*
1281 * helper to update/remove inline back ref
1282 */
1283static noinline_for_stack
1284int update_inline_extent_backref(struct btrfs_trans_handle *trans,
1285 struct btrfs_root *root,
1286 struct btrfs_path *path,
1287 struct btrfs_extent_inline_ref *iref,
1288 int refs_to_mod,
1289 struct btrfs_delayed_extent_op *extent_op)
1290{
1291 struct extent_buffer *leaf;
1292 struct btrfs_extent_item *ei;
1293 struct btrfs_extent_data_ref *dref = NULL;
1294 struct btrfs_shared_data_ref *sref = NULL;
1295 unsigned long ptr;
1296 unsigned long end;
1297 u32 item_size;
1298 int size;
1299 int type;
1300 int ret;
1301 u64 refs;
1302
1303 leaf = path->nodes[0];
1304 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1305 refs = btrfs_extent_refs(leaf, ei);
1306 WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1307 refs += refs_to_mod;
1308 btrfs_set_extent_refs(leaf, ei, refs);
1309 if (extent_op)
1310 __run_delayed_extent_op(extent_op, leaf, ei);
1311
1312 type = btrfs_extent_inline_ref_type(leaf, iref);
1313
1314 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1315 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1316 refs = btrfs_extent_data_ref_count(leaf, dref);
1317 } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1318 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1319 refs = btrfs_shared_data_ref_count(leaf, sref);
1320 } else {
1321 refs = 1;
1322 BUG_ON(refs_to_mod != -1);
1323 }
1324
1325 BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1326 refs += refs_to_mod;
1327
1328 if (refs > 0) {
1329 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1330 btrfs_set_extent_data_ref_count(leaf, dref, refs);
1331 else
1332 btrfs_set_shared_data_ref_count(leaf, sref, refs);
1333 } else {
1334 size = btrfs_extent_inline_ref_size(type);
1335 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1336 ptr = (unsigned long)iref;
1337 end = (unsigned long)ei + item_size;
1338 if (ptr + size < end)
1339 memmove_extent_buffer(leaf, ptr, ptr + size,
1340 end - ptr - size);
1341 item_size -= size;
1342 ret = btrfs_truncate_item(trans, root, path, item_size, 1);
1343 BUG_ON(ret);
1344 }
1345 btrfs_mark_buffer_dirty(leaf);
1346 return 0;
1347}
1348
1349static noinline_for_stack
1350int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1351 struct btrfs_root *root,
1352 struct btrfs_path *path,
1353 u64 bytenr, u64 num_bytes, u64 parent,
1354 u64 root_objectid, u64 owner,
1355 u64 offset, int refs_to_add,
1356 struct btrfs_delayed_extent_op *extent_op)
1357{
1358 struct btrfs_extent_inline_ref *iref;
1359 int ret;
1360
1361 ret = lookup_inline_extent_backref(trans, root, path, &iref,
1362 bytenr, num_bytes, parent,
1363 root_objectid, owner, offset, 1);
1364 if (ret == 0) {
1365 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1366 ret = update_inline_extent_backref(trans, root, path, iref,
1367 refs_to_add, extent_op);
1368 } else if (ret == -ENOENT) {
1369 ret = setup_inline_extent_backref(trans, root, path, iref,
1370 parent, root_objectid,
1371 owner, offset, refs_to_add,
1372 extent_op);
1373 }
1374 return ret;
1375}
1376
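A quick map of how the three results of lookup_inline_extent_backref() are consumed; the -EAGAIN leg is handled by __btrfs_inc_extent_ref() further down, which falls back to a separate keyed back ref item.

/*
 * lookup_inline_extent_backref() result:
 *    0        inline ref already exists -> update_inline_extent_backref()
 *    -ENOENT  room to add one inline    -> setup_inline_extent_backref()
 *    -EAGAIN  extent item is too full   -> caller bumps the refcount on the
 *             extent item and inserts a keyed ref via insert_extent_backref()
 */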
1377static int insert_extent_backref(struct btrfs_trans_handle *trans,
1378 struct btrfs_root *root,
1379 struct btrfs_path *path,
1380 u64 bytenr, u64 parent, u64 root_objectid,
1381 u64 owner, u64 offset, int refs_to_add)
1382{
1383 int ret;
1384 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1385 BUG_ON(refs_to_add != 1);
1386 ret = insert_tree_block_ref(trans, root, path, bytenr,
1387 parent, root_objectid);
1388 } else {
1389 ret = insert_extent_data_ref(trans, root, path, bytenr,
1390 parent, root_objectid,
1391 owner, offset, refs_to_add);
1392 }
1393 return ret;
1394}
1395
1396static int remove_extent_backref(struct btrfs_trans_handle *trans,
1397 struct btrfs_root *root,
1398 struct btrfs_path *path,
1399 struct btrfs_extent_inline_ref *iref,
1400 int refs_to_drop, int is_data)
1401{
1402 int ret;
1403
1404 BUG_ON(!is_data && refs_to_drop != 1);
1405 if (iref) {
1406 ret = update_inline_extent_backref(trans, root, path, iref,
1407 -refs_to_drop, NULL);
1408 } else if (is_data) {
1409 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1410 } else {
1411 ret = btrfs_del_item(trans, root, path);
1412 }
646 return ret; 1413 return ret;
647} 1414}
648 1415
@@ -686,71 +1453,40 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
686#endif 1453#endif
687} 1454}
688 1455
689static int __btrfs_update_extent_ref(struct btrfs_trans_handle *trans, 1456int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
690 struct btrfs_root *root, u64 bytenr, 1457 struct btrfs_root *root,
691 u64 num_bytes, 1458 u64 bytenr, u64 num_bytes, u64 parent,
692 u64 orig_parent, u64 parent, 1459 u64 root_objectid, u64 owner, u64 offset)
693 u64 orig_root, u64 ref_root,
694 u64 orig_generation, u64 ref_generation,
695 u64 owner_objectid)
696{ 1460{
697 int ret; 1461 int ret;
698 int pin = owner_objectid < BTRFS_FIRST_FREE_OBJECTID; 1462 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1463 root_objectid == BTRFS_TREE_LOG_OBJECTID);
699 1464
700 ret = btrfs_update_delayed_ref(trans, bytenr, num_bytes, 1465 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
701 orig_parent, parent, orig_root, 1466 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
702 ref_root, orig_generation, 1467 parent, root_objectid, (int)owner,
703 ref_generation, owner_objectid, pin); 1468 BTRFS_ADD_DELAYED_REF, NULL);
704 BUG_ON(ret); 1469 } else {
1470 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
1471 parent, root_objectid, owner, offset,
1472 BTRFS_ADD_DELAYED_REF, NULL);
1473 }
705 return ret; 1474 return ret;
706} 1475}
707 1476
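A hedged sketch of how callers are expected to use the new btrfs_inc_extent_ref() signature, mirroring the owner/offset conventions documented in the big comment near the top; full_backref, leaf, disk_bytenr and the other locals are placeholders, and the real call sites elsewhere in btrfs are outside the part of the diff shown here.

	/* data extent referenced from a COWed leaf: owner is the inode,
	 * offset is the file offset the extent was originally allocated at,
	 * parent is only set when the leaf uses full back refs */
	ret = btrfs_inc_extent_ref(trans, root, disk_bytenr, disk_num_bytes,
				   full_backref ? leaf->start : 0,
				   root->root_key.objectid,
				   inode_objectid, file_offset);

	/* child tree block pointed to from a COWed node: owner is the
	 * child's level, offset is unused */
	ret = btrfs_inc_extent_ref(trans, root, child_bytenr, child_size,
				   full_backref ? node->start : 0,
				   root->root_key.objectid, child_level, 0);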
708int btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
709 struct btrfs_root *root, u64 bytenr,
710 u64 num_bytes, u64 orig_parent, u64 parent,
711 u64 ref_root, u64 ref_generation,
712 u64 owner_objectid)
713{
714 int ret;
715 if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
716 owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
717 return 0;
718
719 ret = __btrfs_update_extent_ref(trans, root, bytenr, num_bytes,
720 orig_parent, parent, ref_root,
721 ref_root, ref_generation,
722 ref_generation, owner_objectid);
723 return ret;
724}
725static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, 1477static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
726 struct btrfs_root *root, u64 bytenr, 1478 struct btrfs_root *root,
727 u64 num_bytes, 1479 u64 bytenr, u64 num_bytes,
728 u64 orig_parent, u64 parent, 1480 u64 parent, u64 root_objectid,
729 u64 orig_root, u64 ref_root, 1481 u64 owner, u64 offset, int refs_to_add,
730 u64 orig_generation, u64 ref_generation, 1482 struct btrfs_delayed_extent_op *extent_op)
731 u64 owner_objectid)
732{
733 int ret;
734
735 ret = btrfs_add_delayed_ref(trans, bytenr, num_bytes, parent, ref_root,
736 ref_generation, owner_objectid,
737 BTRFS_ADD_DELAYED_REF, 0);
738 BUG_ON(ret);
739 return ret;
740}
741
742static noinline_for_stack int add_extent_ref(struct btrfs_trans_handle *trans,
743 struct btrfs_root *root, u64 bytenr,
744 u64 num_bytes, u64 parent, u64 ref_root,
745 u64 ref_generation, u64 owner_objectid,
746 int refs_to_add)
747{ 1483{
748 struct btrfs_path *path; 1484 struct btrfs_path *path;
749 int ret; 1485 struct extent_buffer *leaf;
750 struct btrfs_key key;
751 struct extent_buffer *l;
752 struct btrfs_extent_item *item; 1486 struct btrfs_extent_item *item;
753 u32 refs; 1487 u64 refs;
1488 int ret;
1489 int err = 0;
754 1490
755 path = btrfs_alloc_path(); 1491 path = btrfs_alloc_path();
756 if (!path) 1492 if (!path)
@@ -758,43 +1494,27 @@ static noinline_for_stack int add_extent_ref(struct btrfs_trans_handle *trans,
758 1494
759 path->reada = 1; 1495 path->reada = 1;
760 path->leave_spinning = 1; 1496 path->leave_spinning = 1;
761 key.objectid = bytenr; 1497 /* this will setup the path even if it fails to insert the back ref */
762 key.type = BTRFS_EXTENT_ITEM_KEY; 1498 ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
763 key.offset = num_bytes; 1499 path, bytenr, num_bytes, parent,
764 1500 root_objectid, owner, offset,
765 /* first find the extent item and update its reference count */ 1501 refs_to_add, extent_op);
766 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, 1502 if (ret == 0)
767 path, 0, 1); 1503 goto out;
768 if (ret < 0) {
769 btrfs_set_path_blocking(path);
770 return ret;
771 }
772
773 if (ret > 0) {
774 WARN_ON(1);
775 btrfs_free_path(path);
776 return -EIO;
777 }
778 l = path->nodes[0];
779 1504
780 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 1505 if (ret != -EAGAIN) {
781 if (key.objectid != bytenr) { 1506 err = ret;
782 btrfs_print_leaf(root->fs_info->extent_root, path->nodes[0]); 1507 goto out;
783 printk(KERN_ERR "btrfs wanted %llu found %llu\n",
784 (unsigned long long)bytenr,
785 (unsigned long long)key.objectid);
786 BUG();
787 } 1508 }
788 BUG_ON(key.type != BTRFS_EXTENT_ITEM_KEY);
789 1509
790 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item); 1510 leaf = path->nodes[0];
791 1511 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
792 refs = btrfs_extent_refs(l, item); 1512 refs = btrfs_extent_refs(leaf, item);
793 btrfs_set_extent_refs(l, item, refs + refs_to_add); 1513 btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
794 btrfs_unlock_up_safe(path, 1); 1514 if (extent_op)
795 1515 __run_delayed_extent_op(extent_op, leaf, item);
796 btrfs_mark_buffer_dirty(path->nodes[0]);
797 1516
1517 btrfs_mark_buffer_dirty(leaf);
798 btrfs_release_path(root->fs_info->extent_root, path); 1518 btrfs_release_path(root->fs_info->extent_root, path);
799 1519
800 path->reada = 1; 1520 path->reada = 1;
@@ -802,56 +1522,197 @@ static noinline_for_stack int add_extent_ref(struct btrfs_trans_handle *trans,
802 1522
803 /* now insert the actual backref */ 1523 /* now insert the actual backref */
804 ret = insert_extent_backref(trans, root->fs_info->extent_root, 1524 ret = insert_extent_backref(trans, root->fs_info->extent_root,
805 path, bytenr, parent, 1525 path, bytenr, parent, root_objectid,
806 ref_root, ref_generation, 1526 owner, offset, refs_to_add);
807 owner_objectid, refs_to_add);
808 BUG_ON(ret); 1527 BUG_ON(ret);
1528out:
809 btrfs_free_path(path); 1529 btrfs_free_path(path);
810 return 0; 1530 return err;
811} 1531}
812 1532
813int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, 1533static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
814 struct btrfs_root *root, 1534 struct btrfs_root *root,
815 u64 bytenr, u64 num_bytes, u64 parent, 1535 struct btrfs_delayed_ref_node *node,
816 u64 ref_root, u64 ref_generation, 1536 struct btrfs_delayed_extent_op *extent_op,
817 u64 owner_objectid) 1537 int insert_reserved)
818{ 1538{
819 int ret; 1539 int ret = 0;
820 if (ref_root == BTRFS_TREE_LOG_OBJECTID && 1540 struct btrfs_delayed_data_ref *ref;
821 owner_objectid < BTRFS_FIRST_FREE_OBJECTID) 1541 struct btrfs_key ins;
822 return 0; 1542 u64 parent = 0;
1543 u64 ref_root = 0;
1544 u64 flags = 0;
1545
1546 ins.objectid = node->bytenr;
1547 ins.offset = node->num_bytes;
1548 ins.type = BTRFS_EXTENT_ITEM_KEY;
1549
1550 ref = btrfs_delayed_node_to_data_ref(node);
1551 if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1552 parent = ref->parent;
1553 else
1554 ref_root = ref->root;
823 1555
824 ret = __btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0, parent, 1556 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
825 0, ref_root, 0, ref_generation, 1557 if (extent_op) {
826 owner_objectid); 1558 BUG_ON(extent_op->update_key);
1559 flags |= extent_op->flags_to_set;
1560 }
1561 ret = alloc_reserved_file_extent(trans, root,
1562 parent, ref_root, flags,
1563 ref->objectid, ref->offset,
1564 &ins, node->ref_mod);
1565 update_reserved_extents(root, ins.objectid, ins.offset, 0);
1566 } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1567 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1568 node->num_bytes, parent,
1569 ref_root, ref->objectid,
1570 ref->offset, node->ref_mod,
1571 extent_op);
1572 } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1573 ret = __btrfs_free_extent(trans, root, node->bytenr,
1574 node->num_bytes, parent,
1575 ref_root, ref->objectid,
1576 ref->offset, node->ref_mod,
1577 extent_op);
1578 } else {
1579 BUG();
1580 }
827 return ret; 1581 return ret;
828} 1582}
829 1583
830static int drop_delayed_ref(struct btrfs_trans_handle *trans, 1584static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
831 struct btrfs_root *root, 1585 struct extent_buffer *leaf,
832 struct btrfs_delayed_ref_node *node) 1586 struct btrfs_extent_item *ei)
1587{
1588 u64 flags = btrfs_extent_flags(leaf, ei);
1589 if (extent_op->update_flags) {
1590 flags |= extent_op->flags_to_set;
1591 btrfs_set_extent_flags(leaf, ei, flags);
1592 }
1593
1594 if (extent_op->update_key) {
1595 struct btrfs_tree_block_info *bi;
1596 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
1597 bi = (struct btrfs_tree_block_info *)(ei + 1);
1598 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
1599 }
1600}
1601
1602static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
1603 struct btrfs_root *root,
1604 struct btrfs_delayed_ref_node *node,
1605 struct btrfs_delayed_extent_op *extent_op)
1606{
1607 struct btrfs_key key;
1608 struct btrfs_path *path;
1609 struct btrfs_extent_item *ei;
1610 struct extent_buffer *leaf;
1611 u32 item_size;
1612 int ret;
1613 int err = 0;
1614
1615 path = btrfs_alloc_path();
1616 if (!path)
1617 return -ENOMEM;
1618
1619 key.objectid = node->bytenr;
1620 key.type = BTRFS_EXTENT_ITEM_KEY;
1621 key.offset = node->num_bytes;
1622
1623 path->reada = 1;
1624 path->leave_spinning = 1;
1625 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
1626 path, 0, 1);
1627 if (ret < 0) {
1628 err = ret;
1629 goto out;
1630 }
1631 if (ret > 0) {
1632 err = -EIO;
1633 goto out;
1634 }
1635
1636 leaf = path->nodes[0];
1637 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1638#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1639 if (item_size < sizeof(*ei)) {
1640 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
1641 path, (u64)-1, 0);
1642 if (ret < 0) {
1643 err = ret;
1644 goto out;
1645 }
1646 leaf = path->nodes[0];
1647 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1648 }
1649#endif
1650 BUG_ON(item_size < sizeof(*ei));
1651 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1652 __run_delayed_extent_op(extent_op, leaf, ei);
1653
1654 btrfs_mark_buffer_dirty(leaf);
1655out:
1656 btrfs_free_path(path);
1657 return err;
1658}
1659
1660static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
1661 struct btrfs_root *root,
1662 struct btrfs_delayed_ref_node *node,
1663 struct btrfs_delayed_extent_op *extent_op,
1664 int insert_reserved)
833{ 1665{
834 int ret = 0; 1666 int ret = 0;
835 struct btrfs_delayed_ref *ref = btrfs_delayed_node_to_ref(node); 1667 struct btrfs_delayed_tree_ref *ref;
1668 struct btrfs_key ins;
1669 u64 parent = 0;
1670 u64 ref_root = 0;
836 1671
837 BUG_ON(node->ref_mod == 0); 1672 ins.objectid = node->bytenr;
838 ret = __btrfs_free_extent(trans, root, node->bytenr, node->num_bytes, 1673 ins.offset = node->num_bytes;
839 node->parent, ref->root, ref->generation, 1674 ins.type = BTRFS_EXTENT_ITEM_KEY;
840 ref->owner_objectid, ref->pin, node->ref_mod);
841 1675
1676 ref = btrfs_delayed_node_to_tree_ref(node);
1677 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1678 parent = ref->parent;
1679 else
1680 ref_root = ref->root;
1681
1682 BUG_ON(node->ref_mod != 1);
1683 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1684 BUG_ON(!extent_op || !extent_op->update_flags ||
1685 !extent_op->update_key);
1686 ret = alloc_reserved_tree_block(trans, root,
1687 parent, ref_root,
1688 extent_op->flags_to_set,
1689 &extent_op->key,
1690 ref->level, &ins);
1691 update_reserved_extents(root, ins.objectid, ins.offset, 0);
1692 } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1693 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1694 node->num_bytes, parent, ref_root,
1695 ref->level, 0, 1, extent_op);
1696 } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1697 ret = __btrfs_free_extent(trans, root, node->bytenr,
1698 node->num_bytes, parent, ref_root,
1699 ref->level, 0, 1, extent_op);
1700 } else {
1701 BUG();
1702 }
842 return ret; 1703 return ret;
843} 1704}
844 1705
1706
845/* helper function to actually process a single delayed ref entry */ 1707/* helper function to actually process a single delayed ref entry */
846static noinline int run_one_delayed_ref(struct btrfs_trans_handle *trans, 1708static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
847 struct btrfs_root *root, 1709 struct btrfs_root *root,
848 struct btrfs_delayed_ref_node *node, 1710 struct btrfs_delayed_ref_node *node,
849 int insert_reserved) 1711 struct btrfs_delayed_extent_op *extent_op,
1712 int insert_reserved)
850{ 1713{
851 int ret; 1714 int ret;
852 struct btrfs_delayed_ref *ref; 1715 if (btrfs_delayed_ref_is_head(node)) {
853
854 if (node->parent == (u64)-1) {
855 struct btrfs_delayed_ref_head *head; 1716 struct btrfs_delayed_ref_head *head;
856 /* 1717 /*
857 * we've hit the end of the chain and we were supposed 1718 * we've hit the end of the chain and we were supposed
@@ -859,44 +1720,35 @@ static noinline int run_one_delayed_ref(struct btrfs_trans_handle *trans,
859 * deleted before we ever needed to insert it, so all 1720 * deleted before we ever needed to insert it, so all
860 * we have to do is clean up the accounting 1721 * we have to do is clean up the accounting
861 */ 1722 */
1723 BUG_ON(extent_op);
1724 head = btrfs_delayed_node_to_head(node);
862 if (insert_reserved) { 1725 if (insert_reserved) {
1726 if (head->is_data) {
1727 ret = btrfs_del_csums(trans, root,
1728 node->bytenr,
1729 node->num_bytes);
1730 BUG_ON(ret);
1731 }
1732 btrfs_update_pinned_extents(root, node->bytenr,
1733 node->num_bytes, 1);
863 update_reserved_extents(root, node->bytenr, 1734 update_reserved_extents(root, node->bytenr,
864 node->num_bytes, 0); 1735 node->num_bytes, 0);
865 } 1736 }
866 head = btrfs_delayed_node_to_head(node);
867 mutex_unlock(&head->mutex); 1737 mutex_unlock(&head->mutex);
868 return 0; 1738 return 0;
869 } 1739 }
870 1740
871 ref = btrfs_delayed_node_to_ref(node); 1741 if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
872 if (ref->action == BTRFS_ADD_DELAYED_REF) { 1742 node->type == BTRFS_SHARED_BLOCK_REF_KEY)
873 if (insert_reserved) { 1743 ret = run_delayed_tree_ref(trans, root, node, extent_op,
874 struct btrfs_key ins; 1744 insert_reserved);
875 1745 else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
876 ins.objectid = node->bytenr; 1746 node->type == BTRFS_SHARED_DATA_REF_KEY)
877 ins.offset = node->num_bytes; 1747 ret = run_delayed_data_ref(trans, root, node, extent_op,
878 ins.type = BTRFS_EXTENT_ITEM_KEY; 1748 insert_reserved);
879 1749 else
880 /* record the full extent allocation */ 1750 BUG();
881 ret = __btrfs_alloc_reserved_extent(trans, root, 1751 return ret;
882 node->parent, ref->root,
883 ref->generation, ref->owner_objectid,
884 &ins, node->ref_mod);
885 update_reserved_extents(root, node->bytenr,
886 node->num_bytes, 0);
887 } else {
888 /* just add one backref */
889 ret = add_extent_ref(trans, root, node->bytenr,
890 node->num_bytes,
891 node->parent, ref->root, ref->generation,
892 ref->owner_objectid, node->ref_mod);
893 }
894 BUG_ON(ret);
895 } else if (ref->action == BTRFS_DROP_DELAYED_REF) {
896 WARN_ON(insert_reserved);
897 ret = drop_delayed_ref(trans, root, node);
898 }
899 return 0;
900} 1752}
901 1753
902static noinline struct btrfs_delayed_ref_node * 1754static noinline struct btrfs_delayed_ref_node *
@@ -919,7 +1771,7 @@ again:
919 rb_node); 1771 rb_node);
920 if (ref->bytenr != head->node.bytenr) 1772 if (ref->bytenr != head->node.bytenr)
921 break; 1773 break;
922 if (btrfs_delayed_node_to_ref(ref)->action == action) 1774 if (ref->action == action)
923 return ref; 1775 return ref;
924 node = rb_prev(node); 1776 node = rb_prev(node);
925 } 1777 }
@@ -937,6 +1789,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
937 struct btrfs_delayed_ref_root *delayed_refs; 1789 struct btrfs_delayed_ref_root *delayed_refs;
938 struct btrfs_delayed_ref_node *ref; 1790 struct btrfs_delayed_ref_node *ref;
939 struct btrfs_delayed_ref_head *locked_ref = NULL; 1791 struct btrfs_delayed_ref_head *locked_ref = NULL;
1792 struct btrfs_delayed_extent_op *extent_op;
940 int ret; 1793 int ret;
941 int count = 0; 1794 int count = 0;
942 int must_insert_reserved = 0; 1795 int must_insert_reserved = 0;
@@ -975,6 +1828,9 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
975 must_insert_reserved = locked_ref->must_insert_reserved; 1828 must_insert_reserved = locked_ref->must_insert_reserved;
976 locked_ref->must_insert_reserved = 0; 1829 locked_ref->must_insert_reserved = 0;
977 1830
1831 extent_op = locked_ref->extent_op;
1832 locked_ref->extent_op = NULL;
1833
978 /* 1834 /*
979 * locked_ref is the head node, so we have to go one 1835 * locked_ref is the head node, so we have to go one
980 * node back for any delayed ref updates 1836 * node back for any delayed ref updates
@@ -986,6 +1842,25 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
986 * so that any accounting fixes can happen 1842 * so that any accounting fixes can happen
987 */ 1843 */
988 ref = &locked_ref->node; 1844 ref = &locked_ref->node;
1845
1846 if (extent_op && must_insert_reserved) {
1847 kfree(extent_op);
1848 extent_op = NULL;
1849 }
1850
1851 if (extent_op) {
1852 spin_unlock(&delayed_refs->lock);
1853
1854 ret = run_delayed_extent_op(trans, root,
1855 ref, extent_op);
1856 BUG_ON(ret);
1857 kfree(extent_op);
1858
1859 cond_resched();
1860 spin_lock(&delayed_refs->lock);
1861 continue;
1862 }
1863
989 list_del_init(&locked_ref->cluster); 1864 list_del_init(&locked_ref->cluster);
990 locked_ref = NULL; 1865 locked_ref = NULL;
991 } 1866 }
@@ -993,14 +1868,17 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
993 ref->in_tree = 0; 1868 ref->in_tree = 0;
994 rb_erase(&ref->rb_node, &delayed_refs->root); 1869 rb_erase(&ref->rb_node, &delayed_refs->root);
995 delayed_refs->num_entries--; 1870 delayed_refs->num_entries--;
1871
996 spin_unlock(&delayed_refs->lock); 1872 spin_unlock(&delayed_refs->lock);
997 1873
998 ret = run_one_delayed_ref(trans, root, ref, 1874 ret = run_one_delayed_ref(trans, root, ref, extent_op,
999 must_insert_reserved); 1875 must_insert_reserved);
1000 BUG_ON(ret); 1876 BUG_ON(ret);
1001 btrfs_put_delayed_ref(ref);
1002 1877
1878 btrfs_put_delayed_ref(ref);
1879 kfree(extent_op);
1003 count++; 1880 count++;
1881
1004 cond_resched(); 1882 cond_resched();
1005 spin_lock(&delayed_refs->lock); 1883 spin_lock(&delayed_refs->lock);
1006 } 1884 }
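In run_clustered_refs the head's extent_op is detached while the delayed-refs spinlock is held; it is freed immediately when must_insert_reserved is set, otherwise the lock is dropped, run_delayed_extent_op is called, and the op is freed before the loop continues. The sketch below is a userspace analogue of that detach-under-lock, process-unlocked, free-when-done hand-off, assuming pthreads; pending_op and ref_head are made-up names, not kernel structures.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pending_op {                     /* stand-in for a deferred update */
        unsigned long flags_to_set;
};

struct ref_head {                       /* stand-in for the locked ref head */
        pthread_mutex_t lock;
        struct pending_op *op;
};

static void process_head(struct ref_head *head)
{
        struct pending_op *op;

        pthread_mutex_lock(&head->lock);
        op = head->op;                  /* take ownership under the lock */
        head->op = NULL;
        pthread_mutex_unlock(&head->lock);

        if (op) {
                /* the deferred work runs without the lock held */
                printf("applying flags 0x%lx\n", op->flags_to_set);
                free(op);               /* whoever detached it frees it */
        }
}

int main(void)
{
        struct ref_head head = { .op = NULL };

        pthread_mutex_init(&head.lock, NULL);
        head.op = malloc(sizeof(*head.op));
        if (!head.op)
                return 1;
        head.op->flags_to_set = 0x1;
        process_head(&head);
        pthread_mutex_destroy(&head.lock);
        return 0;
}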
@@ -1095,25 +1973,112 @@ out:
1095 return 0; 1973 return 0;
1096} 1974}
1097 1975
1098int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, 1976int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
1099 struct btrfs_root *root, u64 objectid, u64 bytenr) 1977 struct btrfs_root *root,
1978 u64 bytenr, u64 num_bytes, u64 flags,
1979 int is_data)
1980{
1981 struct btrfs_delayed_extent_op *extent_op;
1982 int ret;
1983
1984 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
1985 if (!extent_op)
1986 return -ENOMEM;
1987
1988 extent_op->flags_to_set = flags;
1989 extent_op->update_flags = 1;
1990 extent_op->update_key = 0;
1991 extent_op->is_data = is_data ? 1 : 0;
1992
1993 ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
1994 if (ret)
1995 kfree(extent_op);
1996 return ret;
1997}
1998
1999static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2000 struct btrfs_root *root,
2001 struct btrfs_path *path,
2002 u64 objectid, u64 offset, u64 bytenr)
2003{
2004 struct btrfs_delayed_ref_head *head;
2005 struct btrfs_delayed_ref_node *ref;
2006 struct btrfs_delayed_data_ref *data_ref;
2007 struct btrfs_delayed_ref_root *delayed_refs;
2008 struct rb_node *node;
2009 int ret = 0;
2010
2011 ret = -ENOENT;
2012 delayed_refs = &trans->transaction->delayed_refs;
2013 spin_lock(&delayed_refs->lock);
2014 head = btrfs_find_delayed_ref_head(trans, bytenr);
2015 if (!head)
2016 goto out;
2017
2018 if (!mutex_trylock(&head->mutex)) {
2019 atomic_inc(&head->node.refs);
2020 spin_unlock(&delayed_refs->lock);
2021
2022 btrfs_release_path(root->fs_info->extent_root, path);
2023
2024 mutex_lock(&head->mutex);
2025 mutex_unlock(&head->mutex);
2026 btrfs_put_delayed_ref(&head->node);
2027 return -EAGAIN;
2028 }
2029
2030 node = rb_prev(&head->node.rb_node);
2031 if (!node)
2032 goto out_unlock;
2033
2034 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2035
2036 if (ref->bytenr != bytenr)
2037 goto out_unlock;
2038
2039 ret = 1;
2040 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2041 goto out_unlock;
2042
2043 data_ref = btrfs_delayed_node_to_data_ref(ref);
2044
2045 node = rb_prev(node);
2046 if (node) {
2047 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2048 if (ref->bytenr == bytenr)
2049 goto out_unlock;
2050 }
2051
2052 if (data_ref->root != root->root_key.objectid ||
2053 data_ref->objectid != objectid || data_ref->offset != offset)
2054 goto out_unlock;
2055
2056 ret = 0;
2057out_unlock:
2058 mutex_unlock(&head->mutex);
2059out:
2060 spin_unlock(&delayed_refs->lock);
2061 return ret;
2062}
2063
2064static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2065 struct btrfs_root *root,
2066 struct btrfs_path *path,
2067 u64 objectid, u64 offset, u64 bytenr)
1100{ 2068{
1101 struct btrfs_root *extent_root = root->fs_info->extent_root; 2069 struct btrfs_root *extent_root = root->fs_info->extent_root;
1102 struct btrfs_path *path;
1103 struct extent_buffer *leaf; 2070 struct extent_buffer *leaf;
1104 struct btrfs_extent_ref *ref_item; 2071 struct btrfs_extent_data_ref *ref;
2072 struct btrfs_extent_inline_ref *iref;
2073 struct btrfs_extent_item *ei;
1105 struct btrfs_key key; 2074 struct btrfs_key key;
1106 struct btrfs_key found_key; 2075 u32 item_size;
1107 u64 ref_root;
1108 u64 last_snapshot;
1109 u32 nritems;
1110 int ret; 2076 int ret;
1111 2077
1112 key.objectid = bytenr; 2078 key.objectid = bytenr;
1113 key.offset = (u64)-1; 2079 key.offset = (u64)-1;
1114 key.type = BTRFS_EXTENT_ITEM_KEY; 2080 key.type = BTRFS_EXTENT_ITEM_KEY;
1115 2081
1116 path = btrfs_alloc_path();
1117 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); 2082 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
1118 if (ret < 0) 2083 if (ret < 0)
1119 goto out; 2084 goto out;
@@ -1125,55 +2090,83 @@ int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
1125 2090
1126 path->slots[0]--; 2091 path->slots[0]--;
1127 leaf = path->nodes[0]; 2092 leaf = path->nodes[0];
1128 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 2093 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1129 2094
1130 if (found_key.objectid != bytenr || 2095 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
1131 found_key.type != BTRFS_EXTENT_ITEM_KEY)
1132 goto out; 2096 goto out;
1133 2097
1134 last_snapshot = btrfs_root_last_snapshot(&root->root_item); 2098 ret = 1;
1135 while (1) { 2099 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1136 leaf = path->nodes[0]; 2100#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1137 nritems = btrfs_header_nritems(leaf); 2101 if (item_size < sizeof(*ei)) {
1138 if (path->slots[0] >= nritems) { 2102 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
1139 ret = btrfs_next_leaf(extent_root, path); 2103 goto out;
1140 if (ret < 0) 2104 }
1141 goto out; 2105#endif
1142 if (ret == 0) 2106 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1143 continue;
1144 break;
1145 }
1146 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1147 if (found_key.objectid != bytenr)
1148 break;
1149 2107
1150 if (found_key.type != BTRFS_EXTENT_REF_KEY) { 2108 if (item_size != sizeof(*ei) +
1151 path->slots[0]++; 2109 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
1152 continue; 2110 goto out;
1153 }
1154 2111
1155 ref_item = btrfs_item_ptr(leaf, path->slots[0], 2112 if (btrfs_extent_generation(leaf, ei) <=
1156 struct btrfs_extent_ref); 2113 btrfs_root_last_snapshot(&root->root_item))
1157 ref_root = btrfs_ref_root(leaf, ref_item); 2114 goto out;
1158 if ((ref_root != root->root_key.objectid && 2115
1159 ref_root != BTRFS_TREE_LOG_OBJECTID) || 2116 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
1160 objectid != btrfs_ref_objectid(leaf, ref_item)) { 2117 if (btrfs_extent_inline_ref_type(leaf, iref) !=
1161 ret = 1; 2118 BTRFS_EXTENT_DATA_REF_KEY)
1162 goto out; 2119 goto out;
1163 } 2120
1164 if (btrfs_ref_generation(leaf, ref_item) <= last_snapshot) { 2121 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
1165 ret = 1; 2122 if (btrfs_extent_refs(leaf, ei) !=
2123 btrfs_extent_data_ref_count(leaf, ref) ||
2124 btrfs_extent_data_ref_root(leaf, ref) !=
2125 root->root_key.objectid ||
2126 btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2127 btrfs_extent_data_ref_offset(leaf, ref) != offset)
2128 goto out;
2129
2130 ret = 0;
2131out:
2132 return ret;
2133}
2134
2135int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2136 struct btrfs_root *root,
2137 u64 objectid, u64 offset, u64 bytenr)
2138{
2139 struct btrfs_path *path;
2140 int ret;
2141 int ret2;
2142
2143 path = btrfs_alloc_path();
2144 if (!path)
2145 return -ENOENT;
2146
2147 do {
2148 ret = check_committed_ref(trans, root, path, objectid,
2149 offset, bytenr);
2150 if (ret && ret != -ENOENT)
1166 goto out; 2151 goto out;
1167 }
1168 2152
1169 path->slots[0]++; 2153 ret2 = check_delayed_ref(trans, root, path, objectid,
2154 offset, bytenr);
2155 } while (ret2 == -EAGAIN);
2156
2157 if (ret2 && ret2 != -ENOENT) {
2158 ret = ret2;
2159 goto out;
1170 } 2160 }
1171 ret = 0; 2161
2162 if (ret != -ENOENT || ret2 != -ENOENT)
2163 ret = 0;
1172out: 2164out:
1173 btrfs_free_path(path); 2165 btrfs_free_path(path);
1174 return ret; 2166 return ret;
1175} 2167}
1176 2168
2169#if 0
1177int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2170int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1178 struct extent_buffer *buf, u32 nr_extents) 2171 struct extent_buffer *buf, u32 nr_extents)
1179{ 2172{
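btrfs_cross_ref_exist is now split into check_committed_ref, which inspects the on-disk extent item and its single inline data ref, and check_delayed_ref, which peeks at the pending delayed refs; when the head mutex cannot be taken with mutex_trylock the delayed check releases the path and returns -EAGAIN, and the caller retries both checks. Below is a minimal sketch of just that trylock-and-retry shape (not the exact return-value combination), using pthreads and hypothetical helper names.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t head_mutex = PTHREAD_MUTEX_INITIALIZER;

/* hypothetical: -ENOENT means nothing pending, -EAGAIN means contended */
static int check_delayed(void)
{
        if (pthread_mutex_trylock(&head_mutex) != 0)
                return -EAGAIN;         /* caller drops its state and retries */
        /* ... inspect pending references here ... */
        pthread_mutex_unlock(&head_mutex);
        return -ENOENT;
}

/* hypothetical: -ENOENT means no other reference found on disk */
static int check_committed(void)
{
        return -ENOENT;
}

static int cross_ref_check(void)
{
        int ret, ret2;

        do {
                ret = check_committed();
                if (ret && ret != -ENOENT)
                        return ret;     /* hard error from the on-disk check */
                ret2 = check_delayed(); /* may fail to take the head mutex */
        } while (ret2 == -EAGAIN);      /* contended: retry both checks */

        return ret2;
}

int main(void)
{
        printf("cross_ref_check() = %d\n", cross_ref_check());
        return 0;
}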
@@ -1291,62 +2284,44 @@ static int refsort_cmp(const void *a_void, const void *b_void)
1291 return 1; 2284 return 1;
1292 return 0; 2285 return 0;
1293} 2286}
2287#endif
1294 2288
1295 2289static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
1296noinline int btrfs_inc_ref(struct btrfs_trans_handle *trans,
1297 struct btrfs_root *root, 2290 struct btrfs_root *root,
1298 struct extent_buffer *orig_buf, 2291 struct extent_buffer *buf,
1299 struct extent_buffer *buf, u32 *nr_extents) 2292 int full_backref, int inc)
1300{ 2293{
1301 u64 bytenr; 2294 u64 bytenr;
2295 u64 num_bytes;
2296 u64 parent;
1302 u64 ref_root; 2297 u64 ref_root;
1303 u64 orig_root;
1304 u64 ref_generation;
1305 u64 orig_generation;
1306 struct refsort *sorted;
1307 u32 nritems; 2298 u32 nritems;
1308 u32 nr_file_extents = 0;
1309 struct btrfs_key key; 2299 struct btrfs_key key;
1310 struct btrfs_file_extent_item *fi; 2300 struct btrfs_file_extent_item *fi;
1311 int i; 2301 int i;
1312 int level; 2302 int level;
1313 int ret = 0; 2303 int ret = 0;
1314 int faili = 0;
1315 int refi = 0;
1316 int slot;
1317 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *, 2304 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
1318 u64, u64, u64, u64, u64, u64, u64, u64, u64); 2305 u64, u64, u64, u64, u64, u64);
1319 2306
1320 ref_root = btrfs_header_owner(buf); 2307 ref_root = btrfs_header_owner(buf);
1321 ref_generation = btrfs_header_generation(buf);
1322 orig_root = btrfs_header_owner(orig_buf);
1323 orig_generation = btrfs_header_generation(orig_buf);
1324
1325 nritems = btrfs_header_nritems(buf); 2308 nritems = btrfs_header_nritems(buf);
1326 level = btrfs_header_level(buf); 2309 level = btrfs_header_level(buf);
1327 2310
1328 sorted = kmalloc(sizeof(struct refsort) * nritems, GFP_NOFS); 2311 if (!root->ref_cows && level == 0)
1329 BUG_ON(!sorted); 2312 return 0;
1330 2313
1331 if (root->ref_cows) { 2314 if (inc)
1332 process_func = __btrfs_inc_extent_ref; 2315 process_func = btrfs_inc_extent_ref;
1333 } else { 2316 else
1334 if (level == 0 && 2317 process_func = btrfs_free_extent;
1335 root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) 2318
1336 goto out; 2319 if (full_backref)
1337 if (level != 0 && 2320 parent = buf->start;
1338 root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) 2321 else
1339 goto out; 2322 parent = 0;
1340 process_func = __btrfs_update_extent_ref;
1341 }
1342 2323
1343 /*
1344 * we make two passes through the items. In the first pass we
1345 * only record the byte number and slot. Then we sort based on
1346 * byte number and do the actual work based on the sorted results
1347 */
1348 for (i = 0; i < nritems; i++) { 2324 for (i = 0; i < nritems; i++) {
1349 cond_resched();
1350 if (level == 0) { 2325 if (level == 0) {
1351 btrfs_item_key_to_cpu(buf, &key, i); 2326 btrfs_item_key_to_cpu(buf, &key, i);
1352 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) 2327 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
@@ -1360,151 +2335,38 @@ noinline int btrfs_inc_ref(struct btrfs_trans_handle *trans,
1360 if (bytenr == 0) 2335 if (bytenr == 0)
1361 continue; 2336 continue;
1362 2337
1363 nr_file_extents++; 2338 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
1364 sorted[refi].bytenr = bytenr; 2339 key.offset -= btrfs_file_extent_offset(buf, fi);
1365 sorted[refi].slot = i; 2340 ret = process_func(trans, root, bytenr, num_bytes,
1366 refi++; 2341 parent, ref_root, key.objectid,
1367 } else { 2342 key.offset);
1368 bytenr = btrfs_node_blockptr(buf, i); 2343 if (ret)
1369 sorted[refi].bytenr = bytenr;
1370 sorted[refi].slot = i;
1371 refi++;
1372 }
1373 }
1374 /*
1375 * if refi == 0, we didn't actually put anything into the sorted
1376 * array and we're done
1377 */
1378 if (refi == 0)
1379 goto out;
1380
1381 sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
1382
1383 for (i = 0; i < refi; i++) {
1384 cond_resched();
1385 slot = sorted[i].slot;
1386 bytenr = sorted[i].bytenr;
1387
1388 if (level == 0) {
1389 btrfs_item_key_to_cpu(buf, &key, slot);
1390 fi = btrfs_item_ptr(buf, slot,
1391 struct btrfs_file_extent_item);
1392
1393 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1394 if (bytenr == 0)
1395 continue;
1396
1397 ret = process_func(trans, root, bytenr,
1398 btrfs_file_extent_disk_num_bytes(buf, fi),
1399 orig_buf->start, buf->start,
1400 orig_root, ref_root,
1401 orig_generation, ref_generation,
1402 key.objectid);
1403
1404 if (ret) {
1405 faili = slot;
1406 WARN_ON(1);
1407 goto fail; 2344 goto fail;
1408 }
1409 } else { 2345 } else {
1410 ret = process_func(trans, root, bytenr, buf->len, 2346 bytenr = btrfs_node_blockptr(buf, i);
1411 orig_buf->start, buf->start, 2347 num_bytes = btrfs_level_size(root, level - 1);
1412 orig_root, ref_root, 2348 ret = process_func(trans, root, bytenr, num_bytes,
1413 orig_generation, ref_generation, 2349 parent, ref_root, level - 1, 0);
1414 level - 1); 2350 if (ret)
1415 if (ret) {
1416 faili = slot;
1417 WARN_ON(1);
1418 goto fail; 2351 goto fail;
1419 }
1420 } 2352 }
1421 } 2353 }
1422out:
1423 kfree(sorted);
1424 if (nr_extents) {
1425 if (level == 0)
1426 *nr_extents = nr_file_extents;
1427 else
1428 *nr_extents = nritems;
1429 }
1430 return 0; 2354 return 0;
1431fail: 2355fail:
1432 kfree(sorted); 2356 BUG();
1433 WARN_ON(1);
1434 return ret; 2357 return ret;
1435} 2358}
1436 2359
1437int btrfs_update_ref(struct btrfs_trans_handle *trans, 2360int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1438 struct btrfs_root *root, struct extent_buffer *orig_buf, 2361 struct extent_buffer *buf, int full_backref)
1439 struct extent_buffer *buf, int start_slot, int nr)
1440
1441{ 2362{
1442 u64 bytenr; 2363 return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
1443 u64 ref_root; 2364}
1444 u64 orig_root;
1445 u64 ref_generation;
1446 u64 orig_generation;
1447 struct btrfs_key key;
1448 struct btrfs_file_extent_item *fi;
1449 int i;
1450 int ret;
1451 int slot;
1452 int level;
1453
1454 BUG_ON(start_slot < 0);
1455 BUG_ON(start_slot + nr > btrfs_header_nritems(buf));
1456
1457 ref_root = btrfs_header_owner(buf);
1458 ref_generation = btrfs_header_generation(buf);
1459 orig_root = btrfs_header_owner(orig_buf);
1460 orig_generation = btrfs_header_generation(orig_buf);
1461 level = btrfs_header_level(buf);
1462
1463 if (!root->ref_cows) {
1464 if (level == 0 &&
1465 root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
1466 return 0;
1467 if (level != 0 &&
1468 root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
1469 return 0;
1470 }
1471 2365
1472 for (i = 0, slot = start_slot; i < nr; i++, slot++) { 2366int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1473 cond_resched(); 2367 struct extent_buffer *buf, int full_backref)
1474 if (level == 0) { 2368{
1475 btrfs_item_key_to_cpu(buf, &key, slot); 2369 return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
1476 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1477 continue;
1478 fi = btrfs_item_ptr(buf, slot,
1479 struct btrfs_file_extent_item);
1480 if (btrfs_file_extent_type(buf, fi) ==
1481 BTRFS_FILE_EXTENT_INLINE)
1482 continue;
1483 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1484 if (bytenr == 0)
1485 continue;
1486 ret = __btrfs_update_extent_ref(trans, root, bytenr,
1487 btrfs_file_extent_disk_num_bytes(buf, fi),
1488 orig_buf->start, buf->start,
1489 orig_root, ref_root, orig_generation,
1490 ref_generation, key.objectid);
1491 if (ret)
1492 goto fail;
1493 } else {
1494 bytenr = btrfs_node_blockptr(buf, slot);
1495 ret = __btrfs_update_extent_ref(trans, root, bytenr,
1496 buf->len, orig_buf->start,
1497 buf->start, orig_root, ref_root,
1498 orig_generation, ref_generation,
1499 level - 1);
1500 if (ret)
1501 goto fail;
1502 }
1503 }
1504 return 0;
1505fail:
1506 WARN_ON(1);
1507 return -1;
1508} 2370}
1509 2371
1510static int write_one_cache_group(struct btrfs_trans_handle *trans, 2372static int write_one_cache_group(struct btrfs_trans_handle *trans,
@@ -2007,6 +2869,24 @@ static int update_block_group(struct btrfs_trans_handle *trans,
2007 u64 old_val; 2869 u64 old_val;
2008 u64 byte_in_group; 2870 u64 byte_in_group;
2009 2871
2872 /* block accounting for super block */
2873 spin_lock(&info->delalloc_lock);
2874 old_val = btrfs_super_bytes_used(&info->super_copy);
2875 if (alloc)
2876 old_val += num_bytes;
2877 else
2878 old_val -= num_bytes;
2879 btrfs_set_super_bytes_used(&info->super_copy, old_val);
2880
2881 /* block accounting for root item */
2882 old_val = btrfs_root_used(&root->root_item);
2883 if (alloc)
2884 old_val += num_bytes;
2885 else
2886 old_val -= num_bytes;
2887 btrfs_set_root_used(&root->root_item, old_val);
2888 spin_unlock(&info->delalloc_lock);
2889
2010 while (total) { 2890 while (total) {
2011 cache = btrfs_lookup_block_group(info, bytenr); 2891 cache = btrfs_lookup_block_group(info, bytenr);
2012 if (!cache) 2892 if (!cache)
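The bytes-used accounting for the superblock copy and for the root item, previously scattered in the allocation and free paths, now sits at the top of update_block_group, adjusted in either direction under fs_info->delalloc_lock. A tiny sketch of that one-place, lock-protected accounting, with a plain pthread mutex standing in for the kernel spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t acct_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long super_bytes_used;
static unsigned long long root_bytes_used;

/* one place adjusts both counters, in either direction */
static void account_bytes(unsigned long long num_bytes, int alloc)
{
        pthread_mutex_lock(&acct_lock);
        if (alloc) {
                super_bytes_used += num_bytes;
                root_bytes_used += num_bytes;
        } else {
                super_bytes_used -= num_bytes;
                root_bytes_used -= num_bytes;
        }
        pthread_mutex_unlock(&acct_lock);
}

int main(void)
{
        account_bytes(4096, 1);
        account_bytes(4096, 0);
        printf("super=%llu root=%llu\n", super_bytes_used, root_bytes_used);
        return 0;
}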
@@ -2216,8 +3096,6 @@ static int pin_down_bytes(struct btrfs_trans_handle *trans,
2216 u64 header_owner = btrfs_header_owner(buf); 3096 u64 header_owner = btrfs_header_owner(buf);
2217 u64 header_transid = btrfs_header_generation(buf); 3097 u64 header_transid = btrfs_header_generation(buf);
2218 if (header_owner != BTRFS_TREE_LOG_OBJECTID && 3098 if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
2219 header_owner != BTRFS_TREE_RELOC_OBJECTID &&
2220 header_owner != BTRFS_DATA_RELOC_TREE_OBJECTID &&
2221 header_transid == trans->transid && 3099 header_transid == trans->transid &&
2222 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) { 3100 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
2223 *must_clean = buf; 3101 *must_clean = buf;
@@ -2235,63 +3113,77 @@ pinit:
2235 return 0; 3113 return 0;
2236} 3114}
2237 3115
2238/* 3116
2239 * remove an extent from the root, returns 0 on success 3117static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
2240 */ 3118 struct btrfs_root *root,
2241static int __free_extent(struct btrfs_trans_handle *trans, 3119 u64 bytenr, u64 num_bytes, u64 parent,
2242 struct btrfs_root *root, 3120 u64 root_objectid, u64 owner_objectid,
2243 u64 bytenr, u64 num_bytes, u64 parent, 3121 u64 owner_offset, int refs_to_drop,
2244 u64 root_objectid, u64 ref_generation, 3122 struct btrfs_delayed_extent_op *extent_op)
2245 u64 owner_objectid, int pin, int mark_free,
2246 int refs_to_drop)
2247{ 3123{
2248 struct btrfs_path *path;
2249 struct btrfs_key key; 3124 struct btrfs_key key;
3125 struct btrfs_path *path;
2250 struct btrfs_fs_info *info = root->fs_info; 3126 struct btrfs_fs_info *info = root->fs_info;
2251 struct btrfs_root *extent_root = info->extent_root; 3127 struct btrfs_root *extent_root = info->extent_root;
2252 struct extent_buffer *leaf; 3128 struct extent_buffer *leaf;
3129 struct btrfs_extent_item *ei;
3130 struct btrfs_extent_inline_ref *iref;
2253 int ret; 3131 int ret;
3132 int is_data;
2254 int extent_slot = 0; 3133 int extent_slot = 0;
2255 int found_extent = 0; 3134 int found_extent = 0;
2256 int num_to_del = 1; 3135 int num_to_del = 1;
2257 struct btrfs_extent_item *ei; 3136 u32 item_size;
2258 u32 refs; 3137 u64 refs;
2259 3138
2260 key.objectid = bytenr;
2261 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
2262 key.offset = num_bytes;
2263 path = btrfs_alloc_path(); 3139 path = btrfs_alloc_path();
2264 if (!path) 3140 if (!path)
2265 return -ENOMEM; 3141 return -ENOMEM;
2266 3142
2267 path->reada = 1; 3143 path->reada = 1;
2268 path->leave_spinning = 1; 3144 path->leave_spinning = 1;
2269 ret = lookup_extent_backref(trans, extent_root, path, 3145
2270 bytenr, parent, root_objectid, 3146 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
2271 ref_generation, owner_objectid, 1); 3147 BUG_ON(!is_data && refs_to_drop != 1);
3148
3149 ret = lookup_extent_backref(trans, extent_root, path, &iref,
3150 bytenr, num_bytes, parent,
3151 root_objectid, owner_objectid,
3152 owner_offset);
2272 if (ret == 0) { 3153 if (ret == 0) {
2273 struct btrfs_key found_key;
2274 extent_slot = path->slots[0]; 3154 extent_slot = path->slots[0];
2275 while (extent_slot > 0) { 3155 while (extent_slot >= 0) {
2276 extent_slot--; 3156 btrfs_item_key_to_cpu(path->nodes[0], &key,
2277 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2278 extent_slot); 3157 extent_slot);
2279 if (found_key.objectid != bytenr) 3158 if (key.objectid != bytenr)
2280 break; 3159 break;
2281 if (found_key.type == BTRFS_EXTENT_ITEM_KEY && 3160 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
2282 found_key.offset == num_bytes) { 3161 key.offset == num_bytes) {
2283 found_extent = 1; 3162 found_extent = 1;
2284 break; 3163 break;
2285 } 3164 }
2286 if (path->slots[0] - extent_slot > 5) 3165 if (path->slots[0] - extent_slot > 5)
2287 break; 3166 break;
3167 extent_slot--;
2288 } 3168 }
3169#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3170 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
3171 if (found_extent && item_size < sizeof(*ei))
3172 found_extent = 0;
3173#endif
2289 if (!found_extent) { 3174 if (!found_extent) {
3175 BUG_ON(iref);
2290 ret = remove_extent_backref(trans, extent_root, path, 3176 ret = remove_extent_backref(trans, extent_root, path,
2291 refs_to_drop); 3177 NULL, refs_to_drop,
3178 is_data);
2292 BUG_ON(ret); 3179 BUG_ON(ret);
2293 btrfs_release_path(extent_root, path); 3180 btrfs_release_path(extent_root, path);
2294 path->leave_spinning = 1; 3181 path->leave_spinning = 1;
3182
3183 key.objectid = bytenr;
3184 key.type = BTRFS_EXTENT_ITEM_KEY;
3185 key.offset = num_bytes;
3186
2295 ret = btrfs_search_slot(trans, extent_root, 3187 ret = btrfs_search_slot(trans, extent_root,
2296 &key, path, -1, 1); 3188 &key, path, -1, 1);
2297 if (ret) { 3189 if (ret) {
@@ -2307,82 +3199,98 @@ static int __free_extent(struct btrfs_trans_handle *trans,
2307 btrfs_print_leaf(extent_root, path->nodes[0]); 3199 btrfs_print_leaf(extent_root, path->nodes[0]);
2308 WARN_ON(1); 3200 WARN_ON(1);
2309 printk(KERN_ERR "btrfs unable to find ref byte nr %llu " 3201 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
2310 "parent %llu root %llu gen %llu owner %llu\n", 3202 "parent %llu root %llu owner %llu offset %llu\n",
2311 (unsigned long long)bytenr, 3203 (unsigned long long)bytenr,
2312 (unsigned long long)parent, 3204 (unsigned long long)parent,
2313 (unsigned long long)root_objectid, 3205 (unsigned long long)root_objectid,
2314 (unsigned long long)ref_generation, 3206 (unsigned long long)owner_objectid,
2315 (unsigned long long)owner_objectid); 3207 (unsigned long long)owner_offset);
2316 } 3208 }
2317 3209
2318 leaf = path->nodes[0]; 3210 leaf = path->nodes[0];
3211 item_size = btrfs_item_size_nr(leaf, extent_slot);
3212#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3213 if (item_size < sizeof(*ei)) {
3214 BUG_ON(found_extent || extent_slot != path->slots[0]);
3215 ret = convert_extent_item_v0(trans, extent_root, path,
3216 owner_objectid, 0);
3217 BUG_ON(ret < 0);
3218
3219 btrfs_release_path(extent_root, path);
3220 path->leave_spinning = 1;
3221
3222 key.objectid = bytenr;
3223 key.type = BTRFS_EXTENT_ITEM_KEY;
3224 key.offset = num_bytes;
3225
3226 ret = btrfs_search_slot(trans, extent_root, &key, path,
3227 -1, 1);
3228 if (ret) {
3229 printk(KERN_ERR "umm, got %d back from search"
3230 ", was looking for %llu\n", ret,
3231 (unsigned long long)bytenr);
3232 btrfs_print_leaf(extent_root, path->nodes[0]);
3233 }
3234 BUG_ON(ret);
3235 extent_slot = path->slots[0];
3236 leaf = path->nodes[0];
3237 item_size = btrfs_item_size_nr(leaf, extent_slot);
3238 }
3239#endif
3240 BUG_ON(item_size < sizeof(*ei));
2319 ei = btrfs_item_ptr(leaf, extent_slot, 3241 ei = btrfs_item_ptr(leaf, extent_slot,
2320 struct btrfs_extent_item); 3242 struct btrfs_extent_item);
2321 refs = btrfs_extent_refs(leaf, ei); 3243 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
2322 3244 struct btrfs_tree_block_info *bi;
2323 /* 3245 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
2324 * we're not allowed to delete the extent item if there 3246 bi = (struct btrfs_tree_block_info *)(ei + 1);
2325 * are other delayed ref updates pending 3247 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
2326 */ 3248 }
2327 3249
3250 refs = btrfs_extent_refs(leaf, ei);
2328 BUG_ON(refs < refs_to_drop); 3251 BUG_ON(refs < refs_to_drop);
2329 refs -= refs_to_drop; 3252 refs -= refs_to_drop;
2330 btrfs_set_extent_refs(leaf, ei, refs);
2331 btrfs_mark_buffer_dirty(leaf);
2332 3253
2333 if (refs == 0 && found_extent && 3254 if (refs > 0) {
2334 path->slots[0] == extent_slot + 1) { 3255 if (extent_op)
2335 struct btrfs_extent_ref *ref; 3256 __run_delayed_extent_op(extent_op, leaf, ei);
2336 ref = btrfs_item_ptr(leaf, path->slots[0], 3257 /*
2337 struct btrfs_extent_ref); 3258 * In the case of inline back ref, reference count will
2338 BUG_ON(btrfs_ref_num_refs(leaf, ref) != refs_to_drop); 3259 * be updated by remove_extent_backref
2339 /* if the back ref and the extent are next to each other
2340 * they get deleted below in one shot
2341 */ 3260 */
2342 path->slots[0] = extent_slot; 3261 if (iref) {
2343 num_to_del = 2; 3262 BUG_ON(!found_extent);
2344 } else if (found_extent) { 3263 } else {
2345 /* otherwise delete the extent back ref */ 3264 btrfs_set_extent_refs(leaf, ei, refs);
2346 ret = remove_extent_backref(trans, extent_root, path, 3265 btrfs_mark_buffer_dirty(leaf);
2347 refs_to_drop); 3266 }
2348 BUG_ON(ret); 3267 if (found_extent) {
2349 /* if refs are 0, we need to setup the path for deletion */ 3268 ret = remove_extent_backref(trans, extent_root, path,
2350 if (refs == 0) { 3269 iref, refs_to_drop,
2351 btrfs_release_path(extent_root, path); 3270 is_data);
2352 path->leave_spinning = 1;
2353 ret = btrfs_search_slot(trans, extent_root, &key, path,
2354 -1, 1);
2355 BUG_ON(ret); 3271 BUG_ON(ret);
2356 } 3272 }
2357 } 3273 } else {
2358 3274 int mark_free = 0;
2359 if (refs == 0) {
2360 u64 super_used;
2361 u64 root_used;
2362 struct extent_buffer *must_clean = NULL; 3275 struct extent_buffer *must_clean = NULL;
2363 3276
2364 if (pin) { 3277 if (found_extent) {
2365 ret = pin_down_bytes(trans, root, path, 3278 BUG_ON(is_data && refs_to_drop !=
2366 bytenr, num_bytes, 3279 extent_data_ref_count(root, path, iref));
2367 owner_objectid >= BTRFS_FIRST_FREE_OBJECTID, 3280 if (iref) {
2368 &must_clean); 3281 BUG_ON(path->slots[0] != extent_slot);
2369 if (ret > 0) 3282 } else {
2370 mark_free = 1; 3283 BUG_ON(path->slots[0] != extent_slot + 1);
2371 BUG_ON(ret < 0); 3284 path->slots[0] = extent_slot;
3285 num_to_del = 2;
3286 }
2372 } 3287 }
2373 3288
2374 /* block accounting for super block */ 3289 ret = pin_down_bytes(trans, root, path, bytenr,
2375 spin_lock(&info->delalloc_lock); 3290 num_bytes, is_data, &must_clean);
2376 super_used = btrfs_super_bytes_used(&info->super_copy); 3291 if (ret > 0)
2377 btrfs_set_super_bytes_used(&info->super_copy, 3292 mark_free = 1;
2378 super_used - num_bytes); 3293 BUG_ON(ret < 0);
2379
2380 /* block accounting for root item */
2381 root_used = btrfs_root_used(&root->root_item);
2382 btrfs_set_root_used(&root->root_item,
2383 root_used - num_bytes);
2384 spin_unlock(&info->delalloc_lock);
2385
2386 /* 3294 /*
2387 * it is going to be very rare for someone to be waiting 3295 * it is going to be very rare for someone to be waiting
2388 * on the block we're freeing. del_items might need to 3296 * on the block we're freeing. del_items might need to
@@ -2403,7 +3311,7 @@ static int __free_extent(struct btrfs_trans_handle *trans,
2403 free_extent_buffer(must_clean); 3311 free_extent_buffer(must_clean);
2404 } 3312 }
2405 3313
2406 if (owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) { 3314 if (is_data) {
2407 ret = btrfs_del_csums(trans, root, bytenr, num_bytes); 3315 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
2408 BUG_ON(ret); 3316 BUG_ON(ret);
2409 } else { 3317 } else {
@@ -2421,34 +3329,6 @@ static int __free_extent(struct btrfs_trans_handle *trans,
2421} 3329}
2422 3330
2423/* 3331/*
2424 * remove an extent from the root, returns 0 on success
2425 */
2426static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
2427 struct btrfs_root *root,
2428 u64 bytenr, u64 num_bytes, u64 parent,
2429 u64 root_objectid, u64 ref_generation,
2430 u64 owner_objectid, int pin,
2431 int refs_to_drop)
2432{
2433 WARN_ON(num_bytes < root->sectorsize);
2434
2435 /*
2436 * if metadata always pin
2437 * if data pin when any transaction has committed this
2438 */
2439 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID ||
2440 ref_generation != trans->transid)
2441 pin = 1;
2442
2443 if (ref_generation != trans->transid)
2444 pin = 1;
2445
2446 return __free_extent(trans, root, bytenr, num_bytes, parent,
2447 root_objectid, ref_generation,
2448 owner_objectid, pin, pin == 0, refs_to_drop);
2449}
2450
2451/*
2452 * when we free an extent, it is possible (and likely) that we free the last 3332 * when we free an extent, it is possible (and likely) that we free the last
2453 * delayed ref for that extent as well. This searches the delayed ref tree for 3333 * delayed ref for that extent as well. This searches the delayed ref tree for
2454 * a given extent, and if there are no other delayed refs to be processed, it 3334 * a given extent, and if there are no other delayed refs to be processed, it
@@ -2479,6 +3359,13 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
2479 if (ref->bytenr == bytenr) 3359 if (ref->bytenr == bytenr)
2480 goto out; 3360 goto out;
2481 3361
3362 if (head->extent_op) {
3363 if (!head->must_insert_reserved)
3364 goto out;
3365 kfree(head->extent_op);
3366 head->extent_op = NULL;
3367 }
3368
2482 /* 3369 /*
2483 * waiting for the lock here would deadlock. If someone else has it 3370 * waiting for the lock here would deadlock. If someone else has it
2484 * locked they are already in the process of dropping it anyway 3371 * locked they are already in the process of dropping it anyway
@@ -2507,7 +3394,8 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
2507 spin_unlock(&delayed_refs->lock); 3394 spin_unlock(&delayed_refs->lock);
2508 3395
2509 ret = run_one_delayed_ref(trans, root->fs_info->tree_root, 3396 ret = run_one_delayed_ref(trans, root->fs_info->tree_root,
2510 &head->node, head->must_insert_reserved); 3397 &head->node, head->extent_op,
3398 head->must_insert_reserved);
2511 BUG_ON(ret); 3399 BUG_ON(ret);
2512 btrfs_put_delayed_ref(&head->node); 3400 btrfs_put_delayed_ref(&head->node);
2513 return 0; 3401 return 0;
@@ -2519,32 +3407,32 @@ out:
2519int btrfs_free_extent(struct btrfs_trans_handle *trans, 3407int btrfs_free_extent(struct btrfs_trans_handle *trans,
2520 struct btrfs_root *root, 3408 struct btrfs_root *root,
2521 u64 bytenr, u64 num_bytes, u64 parent, 3409 u64 bytenr, u64 num_bytes, u64 parent,
2522 u64 root_objectid, u64 ref_generation, 3410 u64 root_objectid, u64 owner, u64 offset)
2523 u64 owner_objectid, int pin)
2524{ 3411{
2525 int ret; 3412 int ret;
2526 3413
2527 /* 3414 /*
2528 * tree log blocks never actually go into the extent allocation 3415 * tree log blocks never actually go into the extent allocation
2529 * tree, just update pinning info and exit early. 3416 * tree, just update pinning info and exit early.
2530 *
2531 * data extents referenced by the tree log do need to have
2532 * their reference counts bumped.
2533 */ 3417 */
2534 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID && 3418 if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
2535 owner_objectid < BTRFS_FIRST_FREE_OBJECTID) { 3419 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
2536 /* unlocks the pinned mutex */ 3420 /* unlocks the pinned mutex */
2537 btrfs_update_pinned_extents(root, bytenr, num_bytes, 1); 3421 btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
2538 update_reserved_extents(root, bytenr, num_bytes, 0); 3422 update_reserved_extents(root, bytenr, num_bytes, 0);
2539 ret = 0; 3423 ret = 0;
2540 } else { 3424 } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2541 ret = btrfs_add_delayed_ref(trans, bytenr, num_bytes, parent, 3425 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
2542 root_objectid, ref_generation, 3426 parent, root_objectid, (int)owner,
2543 owner_objectid, 3427 BTRFS_DROP_DELAYED_REF, NULL);
2544 BTRFS_DROP_DELAYED_REF, 1);
2545 BUG_ON(ret); 3428 BUG_ON(ret);
2546 ret = check_ref_cleanup(trans, root, bytenr); 3429 ret = check_ref_cleanup(trans, root, bytenr);
2547 BUG_ON(ret); 3430 BUG_ON(ret);
3431 } else {
3432 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
3433 parent, root_objectid, owner,
3434 offset, BTRFS_DROP_DELAYED_REF, NULL);
3435 BUG_ON(ret);
2548 } 3436 }
2549 return ret; 3437 return ret;
2550} 3438}
@@ -2969,99 +3857,147 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
2969 return ret; 3857 return ret;
2970} 3858}
2971 3859
2972static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans, 3860static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
2973 struct btrfs_root *root, u64 parent, 3861 struct btrfs_root *root,
2974 u64 root_objectid, u64 ref_generation, 3862 u64 parent, u64 root_objectid,
2975 u64 owner, struct btrfs_key *ins, 3863 u64 flags, u64 owner, u64 offset,
2976 int ref_mod) 3864 struct btrfs_key *ins, int ref_mod)
2977{ 3865{
2978 int ret; 3866 int ret;
2979 u64 super_used; 3867 struct btrfs_fs_info *fs_info = root->fs_info;
2980 u64 root_used;
2981 u64 num_bytes = ins->offset;
2982 u32 sizes[2];
2983 struct btrfs_fs_info *info = root->fs_info;
2984 struct btrfs_root *extent_root = info->extent_root;
2985 struct btrfs_extent_item *extent_item; 3868 struct btrfs_extent_item *extent_item;
2986 struct btrfs_extent_ref *ref; 3869 struct btrfs_extent_inline_ref *iref;
2987 struct btrfs_path *path; 3870 struct btrfs_path *path;
2988 struct btrfs_key keys[2]; 3871 struct extent_buffer *leaf;
2989 3872 int type;
2990 if (parent == 0) 3873 u32 size;
2991 parent = ins->objectid;
2992
2993 /* block accounting for super block */
2994 spin_lock(&info->delalloc_lock);
2995 super_used = btrfs_super_bytes_used(&info->super_copy);
2996 btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
2997 3874
2998 /* block accounting for root item */ 3875 if (parent > 0)
2999 root_used = btrfs_root_used(&root->root_item); 3876 type = BTRFS_SHARED_DATA_REF_KEY;
3000 btrfs_set_root_used(&root->root_item, root_used + num_bytes); 3877 else
3001 spin_unlock(&info->delalloc_lock); 3878 type = BTRFS_EXTENT_DATA_REF_KEY;
3002 3879
3003 memcpy(&keys[0], ins, sizeof(*ins)); 3880 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
3004 keys[1].objectid = ins->objectid;
3005 keys[1].type = BTRFS_EXTENT_REF_KEY;
3006 keys[1].offset = parent;
3007 sizes[0] = sizeof(*extent_item);
3008 sizes[1] = sizeof(*ref);
3009 3881
3010 path = btrfs_alloc_path(); 3882 path = btrfs_alloc_path();
3011 BUG_ON(!path); 3883 BUG_ON(!path);
3012 3884
3013 path->leave_spinning = 1; 3885 path->leave_spinning = 1;
3014 ret = btrfs_insert_empty_items(trans, extent_root, path, keys, 3886 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
3015 sizes, 2); 3887 ins, size);
3016 BUG_ON(ret); 3888 BUG_ON(ret);
3017 3889
3018 extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 3890 leaf = path->nodes[0];
3891 extent_item = btrfs_item_ptr(leaf, path->slots[0],
3019 struct btrfs_extent_item); 3892 struct btrfs_extent_item);
3020 btrfs_set_extent_refs(path->nodes[0], extent_item, ref_mod); 3893 btrfs_set_extent_refs(leaf, extent_item, ref_mod);
3021 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, 3894 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
3022 struct btrfs_extent_ref); 3895 btrfs_set_extent_flags(leaf, extent_item,
3023 3896 flags | BTRFS_EXTENT_FLAG_DATA);
3024 btrfs_set_ref_root(path->nodes[0], ref, root_objectid); 3897
3025 btrfs_set_ref_generation(path->nodes[0], ref, ref_generation); 3898 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
3026 btrfs_set_ref_objectid(path->nodes[0], ref, owner); 3899 btrfs_set_extent_inline_ref_type(leaf, iref, type);
3027 btrfs_set_ref_num_refs(path->nodes[0], ref, ref_mod); 3900 if (parent > 0) {
3901 struct btrfs_shared_data_ref *ref;
3902 ref = (struct btrfs_shared_data_ref *)(iref + 1);
3903 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
3904 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
3905 } else {
3906 struct btrfs_extent_data_ref *ref;
3907 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3908 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
3909 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
3910 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
3911 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
3912 }
3028 3913
3029 btrfs_mark_buffer_dirty(path->nodes[0]); 3914 btrfs_mark_buffer_dirty(path->nodes[0]);
3030
3031 trans->alloc_exclude_start = 0;
3032 trans->alloc_exclude_nr = 0;
3033 btrfs_free_path(path); 3915 btrfs_free_path(path);
3034 3916
3035 if (ret) 3917 ret = update_block_group(trans, root, ins->objectid, ins->offset,
3036 goto out; 3918 1, 0);
3037
3038 ret = update_block_group(trans, root, ins->objectid,
3039 ins->offset, 1, 0);
3040 if (ret) { 3919 if (ret) {
3041 printk(KERN_ERR "btrfs update block group failed for %llu " 3920 printk(KERN_ERR "btrfs update block group failed for %llu "
3042 "%llu\n", (unsigned long long)ins->objectid, 3921 "%llu\n", (unsigned long long)ins->objectid,
3043 (unsigned long long)ins->offset); 3922 (unsigned long long)ins->offset);
3044 BUG(); 3923 BUG();
3045 } 3924 }
3046out:
3047 return ret; 3925 return ret;
3048} 3926}
3049 3927
3050int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans, 3928static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
3051 struct btrfs_root *root, u64 parent, 3929 struct btrfs_root *root,
3052 u64 root_objectid, u64 ref_generation, 3930 u64 parent, u64 root_objectid,
3053 u64 owner, struct btrfs_key *ins) 3931 u64 flags, struct btrfs_disk_key *key,
3932 int level, struct btrfs_key *ins)
3054{ 3933{
3055 int ret; 3934 int ret;
3935 struct btrfs_fs_info *fs_info = root->fs_info;
3936 struct btrfs_extent_item *extent_item;
3937 struct btrfs_tree_block_info *block_info;
3938 struct btrfs_extent_inline_ref *iref;
3939 struct btrfs_path *path;
3940 struct extent_buffer *leaf;
3941 u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
3056 3942
3057 if (root_objectid == BTRFS_TREE_LOG_OBJECTID) 3943 path = btrfs_alloc_path();
3058 return 0; 3944 BUG_ON(!path);
3059 3945
3060 ret = btrfs_add_delayed_ref(trans, ins->objectid, 3946 path->leave_spinning = 1;
3061 ins->offset, parent, root_objectid, 3947 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
3062 ref_generation, owner, 3948 ins, size);
3063 BTRFS_ADD_DELAYED_EXTENT, 0);
3064 BUG_ON(ret); 3949 BUG_ON(ret);
3950
3951 leaf = path->nodes[0];
3952 extent_item = btrfs_item_ptr(leaf, path->slots[0],
3953 struct btrfs_extent_item);
3954 btrfs_set_extent_refs(leaf, extent_item, 1);
3955 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
3956 btrfs_set_extent_flags(leaf, extent_item,
3957 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
3958 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
3959
3960 btrfs_set_tree_block_key(leaf, block_info, key);
3961 btrfs_set_tree_block_level(leaf, block_info, level);
3962
3963 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
3964 if (parent > 0) {
3965 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
3966 btrfs_set_extent_inline_ref_type(leaf, iref,
3967 BTRFS_SHARED_BLOCK_REF_KEY);
3968 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
3969 } else {
3970 btrfs_set_extent_inline_ref_type(leaf, iref,
3971 BTRFS_TREE_BLOCK_REF_KEY);
3972 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
3973 }
3974
3975 btrfs_mark_buffer_dirty(leaf);
3976 btrfs_free_path(path);
3977
3978 ret = update_block_group(trans, root, ins->objectid, ins->offset,
3979 1, 0);
3980 if (ret) {
3981 printk(KERN_ERR "btrfs update block group failed for %llu "
3982 "%llu\n", (unsigned long long)ins->objectid,
3983 (unsigned long long)ins->offset);
3984 BUG();
3985 }
3986 return ret;
3987}
3988
3989int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
3990 struct btrfs_root *root,
3991 u64 root_objectid, u64 owner,
3992 u64 offset, struct btrfs_key *ins)
3993{
3994 int ret;
3995
3996 BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
3997
3998 ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
3999 0, root_objectid, owner, offset,
4000 BTRFS_ADD_DELAYED_EXTENT, NULL);
3065 return ret; 4001 return ret;
3066} 4002}
3067 4003
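alloc_reserved_file_extent and alloc_reserved_tree_block write the new-format extent item as one variable-size record: the item size is computed up front (extent item, plus tree block info for metadata, plus one inline back ref whose size depends on its type), and the inline ref is filled in directly after the header. The sketch below illustrates only that header-plus-typed-trailer packing in ordinary C; the struct names are hypothetical, and the real code writes the fields through btrfs' endian-safe leaf accessors rather than memcpy.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* hypothetical analogues of a fixed header followed by a typed inline ref */
struct extent_hdr {
        uint64_t refs;
        uint64_t flags;
};

struct keyed_ref {              /* analogue of a keyed (non-shared) data ref */
        uint64_t root;
        uint64_t owner;
        uint64_t offset;
        uint32_t count;
};

int main(void)
{
        /* the item size is computed before insertion, as in the patch */
        size_t size = sizeof(struct extent_hdr) + sizeof(struct keyed_ref);
        unsigned char *item = calloc(1, size);
        struct extent_hdr hdr = { .refs = 1, .flags = 0x1 };
        struct keyed_ref ref = { .root = 5, .owner = 257, .offset = 0, .count = 1 };
        struct keyed_ref back;

        if (!item)
                return 1;

        /* the inline ref lives immediately after the header */
        memcpy(item, &hdr, sizeof(hdr));
        memcpy(item + sizeof(hdr), &ref, sizeof(ref));

        /* a reader finds the ref by stepping past the header */
        memcpy(&back, item + sizeof(hdr), sizeof(back));
        printf("refs=%llu root=%llu owner=%llu\n",
               (unsigned long long)hdr.refs,
               (unsigned long long)back.root,
               (unsigned long long)back.owner);

        free(item);
        return 0;
}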
@@ -3070,10 +4006,10 @@ int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
3070 * an extent has been allocated and makes sure to clear the free 4006 * an extent has been allocated and makes sure to clear the free
3071 * space cache bits as well 4007 * space cache bits as well
3072 */ 4008 */
3073int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans, 4009int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
3074 struct btrfs_root *root, u64 parent, 4010 struct btrfs_root *root,
3075 u64 root_objectid, u64 ref_generation, 4011 u64 root_objectid, u64 owner, u64 offset,
3076 u64 owner, struct btrfs_key *ins) 4012 struct btrfs_key *ins)
3077{ 4013{
3078 int ret; 4014 int ret;
3079 struct btrfs_block_group_cache *block_group; 4015 struct btrfs_block_group_cache *block_group;
@@ -3087,8 +4023,8 @@ int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
3087 ins->offset); 4023 ins->offset);
3088 BUG_ON(ret); 4024 BUG_ON(ret);
3089 btrfs_put_block_group(block_group); 4025 btrfs_put_block_group(block_group);
3090 ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid, 4026 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
3091 ref_generation, owner, ins, 1); 4027 0, owner, offset, ins, 1);
3092 return ret; 4028 return ret;
3093} 4029}
3094 4030
@@ -3099,26 +4035,48 @@ int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
3099 * 4035 *
3100 * returns 0 if everything worked, non-zero otherwise. 4036 * returns 0 if everything worked, non-zero otherwise.
3101 */ 4037 */
3102int btrfs_alloc_extent(struct btrfs_trans_handle *trans, 4038static int alloc_tree_block(struct btrfs_trans_handle *trans,
3103 struct btrfs_root *root, 4039 struct btrfs_root *root,
3104 u64 num_bytes, u64 parent, u64 min_alloc_size, 4040 u64 num_bytes, u64 parent, u64 root_objectid,
3105 u64 root_objectid, u64 ref_generation, 4041 struct btrfs_disk_key *key, int level,
3106 u64 owner_objectid, u64 empty_size, u64 hint_byte, 4042 u64 empty_size, u64 hint_byte, u64 search_end,
3107 u64 search_end, struct btrfs_key *ins, u64 data) 4043 struct btrfs_key *ins)
3108{ 4044{
3109 int ret; 4045 int ret;
3110 ret = __btrfs_reserve_extent(trans, root, num_bytes, 4046 u64 flags = 0;
3111 min_alloc_size, empty_size, hint_byte, 4047
3112 search_end, ins, data); 4048 ret = __btrfs_reserve_extent(trans, root, num_bytes, num_bytes,
4049 empty_size, hint_byte, search_end,
4050 ins, 0);
3113 BUG_ON(ret); 4051 BUG_ON(ret);
4052
4053 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
4054 if (parent == 0)
4055 parent = ins->objectid;
4056 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
4057 } else
4058 BUG_ON(parent > 0);
4059
4060 update_reserved_extents(root, ins->objectid, ins->offset, 1);
3114 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) { 4061 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
3115 ret = btrfs_add_delayed_ref(trans, ins->objectid, 4062 struct btrfs_delayed_extent_op *extent_op;
3116 ins->offset, parent, root_objectid, 4063 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
3117 ref_generation, owner_objectid, 4064 BUG_ON(!extent_op);
3118 BTRFS_ADD_DELAYED_EXTENT, 0); 4065 if (key)
4066 memcpy(&extent_op->key, key, sizeof(extent_op->key));
4067 else
4068 memset(&extent_op->key, 0, sizeof(extent_op->key));
4069 extent_op->flags_to_set = flags;
4070 extent_op->update_key = 1;
4071 extent_op->update_flags = 1;
4072 extent_op->is_data = 0;
4073
4074 ret = btrfs_add_delayed_tree_ref(trans, ins->objectid,
4075 ins->offset, parent, root_objectid,
4076 level, BTRFS_ADD_DELAYED_EXTENT,
4077 extent_op);
3119 BUG_ON(ret); 4078 BUG_ON(ret);
3120 } 4079 }
3121 update_reserved_extents(root, ins->objectid, ins->offset, 1);
3122 return ret; 4080 return ret;
3123} 4081}
3124 4082
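alloc_tree_block no longer inserts the extent item itself: it reserves the space, then queues a delayed tree ref carrying a btrfs_delayed_extent_op with the block's key and flags, so alloc_reserved_tree_block writes the item when the delayed refs run. btrfs_set_disk_extent_flags earlier in this patch uses the same idiom and frees the op only when queueing fails, since on success the queue owns it. A small userspace sketch of that allocate, fill, enqueue-or-free pattern; every name in it is hypothetical.

#include <stdlib.h>
#include <string.h>

struct extent_op {                      /* hypothetical deferred-update record */
        unsigned long flags_to_set;
        int update_key;
        int update_flags;
};

/* hypothetical queue stub: a real consumer would run the op later and then
 * free it; freeing it right here just keeps the example leak-free */
static int queue_delayed_op(struct extent_op *op)
{
        free(op);
        return 0;
}

static int set_disk_flags(unsigned long flags)
{
        struct extent_op *op = malloc(sizeof(*op));
        int ret;

        if (!op)
                return -1;              /* -ENOMEM in the kernel version */

        memset(op, 0, sizeof(*op));
        op->flags_to_set = flags;
        op->update_flags = 1;
        op->update_key = 0;

        ret = queue_delayed_op(op);     /* on success the queue owns op */
        if (ret)
                free(op);               /* only the failure path frees it here */
        return ret;
}

int main(void)
{
        return set_disk_flags(0x2);
}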
@@ -3157,21 +4115,17 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
3157 * returns the tree buffer or NULL. 4115 * returns the tree buffer or NULL.
3158 */ 4116 */
3159struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, 4117struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
3160 struct btrfs_root *root, 4118 struct btrfs_root *root, u32 blocksize,
3161 u32 blocksize, u64 parent, 4119 u64 parent, u64 root_objectid,
3162 u64 root_objectid, 4120 struct btrfs_disk_key *key, int level,
3163 u64 ref_generation, 4121 u64 hint, u64 empty_size)
3164 int level,
3165 u64 hint,
3166 u64 empty_size)
3167{ 4122{
3168 struct btrfs_key ins; 4123 struct btrfs_key ins;
3169 int ret; 4124 int ret;
3170 struct extent_buffer *buf; 4125 struct extent_buffer *buf;
3171 4126
3172 ret = btrfs_alloc_extent(trans, root, blocksize, parent, blocksize, 4127 ret = alloc_tree_block(trans, root, blocksize, parent, root_objectid,
3173 root_objectid, ref_generation, level, 4128 key, level, empty_size, hint, (u64)-1, &ins);
3174 empty_size, hint, (u64)-1, &ins, 0);
3175 if (ret) { 4129 if (ret) {
3176 BUG_ON(ret > 0); 4130 BUG_ON(ret > 0);
3177 return ERR_PTR(ret); 4131 return ERR_PTR(ret);
@@ -3185,32 +4139,19 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
3185int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans, 4139int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
3186 struct btrfs_root *root, struct extent_buffer *leaf) 4140 struct btrfs_root *root, struct extent_buffer *leaf)
3187{ 4141{
3188 u64 leaf_owner; 4142 u64 disk_bytenr;
3189 u64 leaf_generation; 4143 u64 num_bytes;
3190 struct refsort *sorted;
3191 struct btrfs_key key; 4144 struct btrfs_key key;
3192 struct btrfs_file_extent_item *fi; 4145 struct btrfs_file_extent_item *fi;
4146 u32 nritems;
3193 int i; 4147 int i;
3194 int nritems;
3195 int ret; 4148 int ret;
3196 int refi = 0;
3197 int slot;
3198 4149
3199 BUG_ON(!btrfs_is_leaf(leaf)); 4150 BUG_ON(!btrfs_is_leaf(leaf));
3200 nritems = btrfs_header_nritems(leaf); 4151 nritems = btrfs_header_nritems(leaf);
3201 leaf_owner = btrfs_header_owner(leaf);
3202 leaf_generation = btrfs_header_generation(leaf);
3203 4152
3204 sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
3205 /* we do this loop twice. The first time we build a list
3206 * of the extents we have a reference on, then we sort the list
3207 * by bytenr. The second time around we actually do the
3208 * extent freeing.
3209 */
3210 for (i = 0; i < nritems; i++) { 4153 for (i = 0; i < nritems; i++) {
3211 u64 disk_bytenr;
3212 cond_resched(); 4154 cond_resched();
3213
3214 btrfs_item_key_to_cpu(leaf, &key, i); 4155 btrfs_item_key_to_cpu(leaf, &key, i);
3215 4156
3216 /* only extents have references, skip everything else */ 4157 /* only extents have references, skip everything else */
@@ -3230,45 +4171,16 @@ int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
3230 if (disk_bytenr == 0) 4171 if (disk_bytenr == 0)
3231 continue; 4172 continue;
3232 4173
3233 sorted[refi].bytenr = disk_bytenr; 4174 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
3234 sorted[refi].slot = i; 4175 ret = btrfs_free_extent(trans, root, disk_bytenr, num_bytes,
3235 refi++; 4176 leaf->start, 0, key.objectid, 0);
3236 }
3237
3238 if (refi == 0)
3239 goto out;
3240
3241 sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
3242
3243 for (i = 0; i < refi; i++) {
3244 u64 disk_bytenr;
3245
3246 disk_bytenr = sorted[i].bytenr;
3247 slot = sorted[i].slot;
3248
3249 cond_resched();
3250
3251 btrfs_item_key_to_cpu(leaf, &key, slot);
3252 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3253 continue;
3254
3255 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
3256
3257 ret = btrfs_free_extent(trans, root, disk_bytenr,
3258 btrfs_file_extent_disk_num_bytes(leaf, fi),
3259 leaf->start, leaf_owner, leaf_generation,
3260 key.objectid, 0);
3261 BUG_ON(ret); 4177 BUG_ON(ret);
3262
3263 atomic_inc(&root->fs_info->throttle_gen);
3264 wake_up(&root->fs_info->transaction_throttle);
3265 cond_resched();
3266 } 4178 }
3267out:
3268 kfree(sorted);
3269 return 0; 4179 return 0;
3270} 4180}
3271 4181
4182#if 0
4183
3272static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans, 4184static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
3273 struct btrfs_root *root, 4185 struct btrfs_root *root,
3274 struct btrfs_leaf_ref *ref) 4186 struct btrfs_leaf_ref *ref)
@@ -3311,13 +4223,14 @@ static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
3311 return 0; 4223 return 0;
3312} 4224}
3313 4225
4226
3314static int drop_snap_lookup_refcount(struct btrfs_trans_handle *trans, 4227static int drop_snap_lookup_refcount(struct btrfs_trans_handle *trans,
3315 struct btrfs_root *root, u64 start, 4228 struct btrfs_root *root, u64 start,
3316 u64 len, u32 *refs) 4229 u64 len, u32 *refs)
3317{ 4230{
3318 int ret; 4231 int ret;
3319 4232
3320 ret = btrfs_lookup_extent_ref(trans, root, start, len, refs); 4233 ret = btrfs_lookup_extent_refs(trans, root, start, len, refs);
3321 BUG_ON(ret); 4234 BUG_ON(ret);
3322 4235
3323#if 0 /* some debugging code in case we see problems here */ 4236#if 0 /* some debugging code in case we see problems here */
@@ -3352,6 +4265,7 @@ static int drop_snap_lookup_refcount(struct btrfs_trans_handle *trans,
3352 return ret; 4265 return ret;
3353} 4266}
3354 4267
4268
3355/* 4269/*
3356 * this is used while deleting old snapshots, and it drops the refs 4270 * this is used while deleting old snapshots, and it drops the refs
3357 * on a whole subtree starting from a level 1 node. 4271 * on a whole subtree starting from a level 1 node.
@@ -3645,32 +4559,36 @@ out:
3645 cond_resched(); 4559 cond_resched();
3646 return 0; 4560 return 0;
3647} 4561}
4562#endif
3648 4563
3649/* 4564/*
3650 * helper function for drop_subtree, this function is similar to 4565 * helper function for drop_subtree, this function is similar to
3651 * walk_down_tree. The main difference is that it checks reference 4566 * walk_down_tree. The main difference is that it checks reference
3652 * counts while tree blocks are locked. 4567 * counts while tree blocks are locked.
3653 */ 4568 */
3654static noinline int walk_down_subtree(struct btrfs_trans_handle *trans, 4569static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
3655 struct btrfs_root *root, 4570 struct btrfs_root *root,
3656 struct btrfs_path *path, int *level) 4571 struct btrfs_path *path, int *level)
3657{ 4572{
3658 struct extent_buffer *next; 4573 struct extent_buffer *next;
3659 struct extent_buffer *cur; 4574 struct extent_buffer *cur;
3660 struct extent_buffer *parent; 4575 struct extent_buffer *parent;
3661 u64 bytenr; 4576 u64 bytenr;
3662 u64 ptr_gen; 4577 u64 ptr_gen;
4578 u64 refs;
4579 u64 flags;
3663 u32 blocksize; 4580 u32 blocksize;
3664 u32 refs;
3665 int ret; 4581 int ret;
3666 4582
3667 cur = path->nodes[*level]; 4583 cur = path->nodes[*level];
3668 ret = btrfs_lookup_extent_ref(trans, root, cur->start, cur->len, 4584 ret = btrfs_lookup_extent_info(trans, root, cur->start, cur->len,
3669 &refs); 4585 &refs, &flags);
3670 BUG_ON(ret); 4586 BUG_ON(ret);
3671 if (refs > 1) 4587 if (refs > 1)
3672 goto out; 4588 goto out;
3673 4589
4590 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
4591
3674 while (*level >= 0) { 4592 while (*level >= 0) {
3675 cur = path->nodes[*level]; 4593 cur = path->nodes[*level];
3676 if (*level == 0) { 4594 if (*level == 0) {
@@ -3692,16 +4610,15 @@ static noinline int walk_down_subtree(struct btrfs_trans_handle *trans,
3692 btrfs_tree_lock(next); 4610 btrfs_tree_lock(next);
3693 btrfs_set_lock_blocking(next); 4611 btrfs_set_lock_blocking(next);
3694 4612
3695 ret = btrfs_lookup_extent_ref(trans, root, bytenr, blocksize, 4613 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
3696 &refs); 4614 &refs, &flags);
3697 BUG_ON(ret); 4615 BUG_ON(ret);
3698 if (refs > 1) { 4616 if (refs > 1) {
3699 parent = path->nodes[*level]; 4617 parent = path->nodes[*level];
3700 ret = btrfs_free_extent(trans, root, bytenr, 4618 ret = btrfs_free_extent(trans, root, bytenr,
3701 blocksize, parent->start, 4619 blocksize, parent->start,
3702 btrfs_header_owner(parent), 4620 btrfs_header_owner(parent),
3703 btrfs_header_generation(parent), 4621 *level - 1, 0);
3704 *level - 1, 1);
3705 BUG_ON(ret); 4622 BUG_ON(ret);
3706 path->slots[*level]++; 4623 path->slots[*level]++;
3707 btrfs_tree_unlock(next); 4624 btrfs_tree_unlock(next);
@@ -3709,6 +4626,8 @@ static noinline int walk_down_subtree(struct btrfs_trans_handle *trans,
3709 continue; 4626 continue;
3710 } 4627 }
3711 4628
4629 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
4630
3712 *level = btrfs_header_level(next); 4631 *level = btrfs_header_level(next);
3713 path->nodes[*level] = next; 4632 path->nodes[*level] = next;
3714 path->slots[*level] = 0; 4633 path->slots[*level] = 0;
@@ -3716,13 +4635,15 @@ static noinline int walk_down_subtree(struct btrfs_trans_handle *trans,
3716 cond_resched(); 4635 cond_resched();
3717 } 4636 }
3718out: 4637out:
3719 parent = path->nodes[*level + 1]; 4638 if (path->nodes[*level] == root->node)
4639 parent = path->nodes[*level];
4640 else
4641 parent = path->nodes[*level + 1];
3720 bytenr = path->nodes[*level]->start; 4642 bytenr = path->nodes[*level]->start;
3721 blocksize = path->nodes[*level]->len; 4643 blocksize = path->nodes[*level]->len;
3722 4644
3723 ret = btrfs_free_extent(trans, root, bytenr, blocksize, 4645 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent->start,
3724 parent->start, btrfs_header_owner(parent), 4646 btrfs_header_owner(parent), *level, 0);
3725 btrfs_header_generation(parent), *level, 1);
3726 BUG_ON(ret); 4647 BUG_ON(ret);
3727 4648
3728 if (path->locks[*level]) { 4649 if (path->locks[*level]) {
@@ -3746,8 +4667,6 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
3746 struct btrfs_path *path, 4667 struct btrfs_path *path,
3747 int *level, int max_level) 4668 int *level, int max_level)
3748{ 4669{
3749 u64 root_owner;
3750 u64 root_gen;
3751 struct btrfs_root_item *root_item = &root->root_item; 4670 struct btrfs_root_item *root_item = &root->root_item;
3752 int i; 4671 int i;
3753 int slot; 4672 int slot;
@@ -3755,24 +4674,22 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
3755 4674
3756 for (i = *level; i < max_level && path->nodes[i]; i++) { 4675 for (i = *level; i < max_level && path->nodes[i]; i++) {
3757 slot = path->slots[i]; 4676 slot = path->slots[i];
3758 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) { 4677 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
3759 struct extent_buffer *node;
3760 struct btrfs_disk_key disk_key;
3761
3762 /* 4678 /*
3763 * there is more work to do in this level. 4679 * there is more work to do in this level.
3764 * Update the drop_progress marker to reflect 4680 * Update the drop_progress marker to reflect
3765 * the work we've done so far, and then bump 4681 * the work we've done so far, and then bump
3766 * the slot number 4682 * the slot number
3767 */ 4683 */
3768 node = path->nodes[i];
3769 path->slots[i]++; 4684 path->slots[i]++;
3770 *level = i;
3771 WARN_ON(*level == 0); 4685 WARN_ON(*level == 0);
3772 btrfs_node_key(node, &disk_key, path->slots[i]); 4686 if (max_level == BTRFS_MAX_LEVEL) {
3773 memcpy(&root_item->drop_progress, 4687 btrfs_node_key(path->nodes[i],
3774 &disk_key, sizeof(disk_key)); 4688 &root_item->drop_progress,
3775 root_item->drop_level = i; 4689 path->slots[i]);
4690 root_item->drop_level = i;
4691 }
4692 *level = i;
3776 return 0; 4693 return 0;
3777 } else { 4694 } else {
3778 struct extent_buffer *parent; 4695 struct extent_buffer *parent;
@@ -3786,22 +4703,20 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
3786 else 4703 else
3787 parent = path->nodes[*level + 1]; 4704 parent = path->nodes[*level + 1];
3788 4705
3789 root_owner = btrfs_header_owner(parent); 4706 clean_tree_block(trans, root, path->nodes[i]);
3790 root_gen = btrfs_header_generation(parent);
3791
3792 clean_tree_block(trans, root, path->nodes[*level]);
3793 ret = btrfs_free_extent(trans, root, 4707 ret = btrfs_free_extent(trans, root,
3794 path->nodes[*level]->start, 4708 path->nodes[i]->start,
3795 path->nodes[*level]->len, 4709 path->nodes[i]->len,
3796 parent->start, root_owner, 4710 parent->start,
3797 root_gen, *level, 1); 4711 btrfs_header_owner(parent),
4712 *level, 0);
3798 BUG_ON(ret); 4713 BUG_ON(ret);
3799 if (path->locks[*level]) { 4714 if (path->locks[*level]) {
3800 btrfs_tree_unlock(path->nodes[*level]); 4715 btrfs_tree_unlock(path->nodes[i]);
3801 path->locks[*level] = 0; 4716 path->locks[i] = 0;
3802 } 4717 }
3803 free_extent_buffer(path->nodes[*level]); 4718 free_extent_buffer(path->nodes[i]);
3804 path->nodes[*level] = NULL; 4719 path->nodes[i] = NULL;
3805 *level = i + 1; 4720 *level = i + 1;
3806 } 4721 }
3807 } 4722 }
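The exhausted-node branch now indexes the path consistently with the loop variable i rather than *level; the two used to be kept equal only implicitly, and using i makes the unlock and release of the finished node explicit before the walk moves to i + 1. The cleanup step, modeled stand-alone with hypothetical callbacks in place of btrfs_tree_unlock() and free_extent_buffer():

    #include <stddef.h>

    /* Stand-alone model of the per-level cleanup: unlock and release the
     * exhausted node at index i, clear its path slot, then resume one
     * level up. */
    struct model_path {
            void *nodes[8];
            int locks[8];
    };

    static void finish_level(struct model_path *p, int i, int *level,
                             void (*unlock)(void *), void (*put)(void *))
    {
            if (p->locks[i]) {
                    unlock(p->nodes[i]);
                    p->locks[i] = 0;
            }
            put(p->nodes[i]);
            p->nodes[i] = NULL;
            *level = i + 1;
    }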
@@ -3820,21 +4735,18 @@ int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
3820 int wret; 4735 int wret;
3821 int level; 4736 int level;
3822 struct btrfs_path *path; 4737 struct btrfs_path *path;
3823 int i;
3824 int orig_level;
3825 int update_count; 4738 int update_count;
3826 struct btrfs_root_item *root_item = &root->root_item; 4739 struct btrfs_root_item *root_item = &root->root_item;
3827 4740
3828 WARN_ON(!mutex_is_locked(&root->fs_info->drop_mutex));
3829 path = btrfs_alloc_path(); 4741 path = btrfs_alloc_path();
3830 BUG_ON(!path); 4742 BUG_ON(!path);
3831 4743
3832 level = btrfs_header_level(root->node); 4744 level = btrfs_header_level(root->node);
3833 orig_level = level;
3834 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { 4745 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
3835 path->nodes[level] = root->node; 4746 path->nodes[level] = btrfs_lock_root_node(root);
3836 extent_buffer_get(root->node); 4747 btrfs_set_lock_blocking(path->nodes[level]);
3837 path->slots[level] = 0; 4748 path->slots[level] = 0;
4749 path->locks[level] = 1;
3838 } else { 4750 } else {
3839 struct btrfs_key key; 4751 struct btrfs_key key;
3840 struct btrfs_disk_key found_key; 4752 struct btrfs_disk_key found_key;
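When a snapshot drop starts from scratch, the root node is now taken with btrfs_lock_root_node(), switched to blocking mode, and the lock is recorded in path->locks[level], instead of merely grabbing an extra reference on root->node. A stand-alone sketch of that setup pattern, using a pthread mutex as a stand-in for the tree lock; all names here are hypothetical:

    #include <pthread.h>

    struct model_node {
            pthread_mutex_t lock;
            int blocking;
    };

    struct model_walk {
            struct model_node *nodes[8];
            int slots[8];
            int locks[8];
    };

    /* Modeled setup for a fresh drop: take the root locked, mark the lock
     * blocking, and record it in the path so later code can release it. */
    static void start_from_root(struct model_walk *w, struct model_node *root,
                                int level)
    {
            pthread_mutex_lock(&root->lock);
            root->blocking = 1;             /* cf. btrfs_set_lock_blocking() */
            w->nodes[level] = root;
            w->slots[level] = 0;
            w->locks[level] = 1;
    }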
@@ -3856,12 +4768,7 @@ int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
3856 * unlock our path, this is safe because only this 4768 * unlock our path, this is safe because only this
3857 * function is allowed to delete this snapshot 4769 * function is allowed to delete this snapshot
3858 */ 4770 */
3859 for (i = 0; i < BTRFS_MAX_LEVEL; i++) { 4771 btrfs_unlock_up_safe(path, 0);
3860 if (path->nodes[i] && path->locks[i]) {
3861 path->locks[i] = 0;
3862 btrfs_tree_unlock(path->nodes[i]);
3863 }
3864 }
3865 } 4772 }
3866 while (1) { 4773 while (1) {
3867 unsigned long update; 4774 unsigned long update;
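The open-coded loop that walked every level of the path and released held tree locks is replaced by a single call to btrfs_unlock_up_safe(path, 0). Below is a user-space model of what the removed loop did (drop each recorded lock while keeping the buffer references); the real helper lives in the locking code and may differ in detail.

    #define MODEL_MAX_LEVEL 8

    struct model_path {
            void *nodes[MODEL_MAX_LEVEL];
            int locks[MODEL_MAX_LEVEL];
    };

    /* Model of the removed loop: from 'from' upward, drop every lock the
     * path records while leaving the buffer references alone. */
    static void model_unlock_up_safe(struct model_path *p, int from,
                                     void (*unlock)(void *))
    {
            int i;

            for (i = from; i < MODEL_MAX_LEVEL; i++) {
                    if (p->nodes[i] && p->locks[i]) {
                            unlock(p->nodes[i]);
                            p->locks[i] = 0;
                    }
            }
    }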
@@ -3882,8 +4789,6 @@ int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
3882 ret = -EAGAIN; 4789 ret = -EAGAIN;
3883 break; 4790 break;
3884 } 4791 }
3885 atomic_inc(&root->fs_info->throttle_gen);
3886 wake_up(&root->fs_info->transaction_throttle);
3887 for (update_count = 0; update_count < 16; update_count++) { 4792 for (update_count = 0; update_count < 16; update_count++) {
3888 update = trans->delayed_ref_updates; 4793 update = trans->delayed_ref_updates;
3889 trans->delayed_ref_updates = 0; 4794 trans->delayed_ref_updates = 0;
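The throttle_gen bump and transaction_throttle wakeup are gone; instead the loop drains delayed reference updates accumulated on the transaction, up to 16 rounds, before the walk continues. The body that actually runs the refs falls outside this excerpt, so the stand-alone model below only shows the batching shape, with a hypothetical callback in its place.

    /* Stand-alone model of the batching shape: grab the pending count,
     * reset it, run one batch via a hypothetical callback, and stop after
     * 16 rounds or when nothing new accumulated. */
    struct model_trans {
            unsigned long delayed_ref_updates;
    };

    static void drain_delayed_refs(struct model_trans *trans,
                                   void (*run_refs)(struct model_trans *,
                                                    unsigned long))
    {
            int round;

            for (round = 0; round < 16; round++) {
                    unsigned long update = trans->delayed_ref_updates;

                    trans->delayed_ref_updates = 0;
                    if (!update)
                            break;
                    run_refs(trans, update);
            }
    }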
@@ -3893,12 +4798,6 @@ int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
3893 break; 4798 break;
3894 } 4799 }
3895 } 4800 }
3896 for (i = 0; i <= orig_level; i++) {
3897 if (path->nodes[i]) {
3898 free_extent_buffer(path->nodes[i]);
3899 path->nodes[i] = NULL;
3900 }
3901 }
3902out: 4801out:
3903 btrfs_free_path(path); 4802 btrfs_free_path(path);
3904 return ret; 4803 return ret;
@@ -3931,7 +4830,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
3931 path->slots[level] = 0; 4830 path->slots[level] = 0;
3932 4831
3933 while (1) { 4832 while (1) {
3934 wret = walk_down_subtree(trans, root, path, &level); 4833 wret = walk_down_tree(trans, root, path, &level);
3935 if (wret < 0) 4834 if (wret < 0)
3936 ret = wret; 4835 ret = wret;
3937 if (wret != 0) 4836 if (wret != 0)
@@ -3948,6 +4847,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
3948 return ret; 4847 return ret;
3949} 4848}
3950 4849
4850#if 0
3951static unsigned long calc_ra(unsigned long start, unsigned long last, 4851static unsigned long calc_ra(unsigned long start, unsigned long last,
3952 unsigned long nr) 4852 unsigned long nr)
3953{ 4853{
@@ -5429,6 +6329,7 @@ out:
5429 kfree(ref_path); 6329 kfree(ref_path);
5430 return ret; 6330 return ret;
5431} 6331}
6332#endif
5432 6333
5433static u64 update_block_group_flags(struct btrfs_root *root, u64 flags) 6334static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
5434{ 6335{
@@ -5477,7 +6378,8 @@ static int __alloc_chunk_for_shrink(struct btrfs_root *root,
5477 u64 calc; 6378 u64 calc;
5478 6379
5479 spin_lock(&shrink_block_group->lock); 6380 spin_lock(&shrink_block_group->lock);
5480 if (btrfs_block_group_used(&shrink_block_group->item) > 0) { 6381 if (btrfs_block_group_used(&shrink_block_group->item) +
6382 shrink_block_group->reserved > 0) {
5481 spin_unlock(&shrink_block_group->lock); 6383 spin_unlock(&shrink_block_group->lock);
5482 6384
5483 trans = btrfs_start_transaction(root, 1); 6385 trans = btrfs_start_transaction(root, 1);
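The shrink helper now counts a block group as still occupied when it has outstanding reservations, not only when used bytes are recorded in the item, so a relocation target chunk is allocated even if the remaining space is merely reserved by in-flight allocations. The predicate, as a stand-alone sketch:

    #include <stdbool.h>
    #include <stdint.h>

    /* Modeled predicate from the hunk: the group still needs a relocation
     * target if it has either used bytes or outstanding reservations. */
    static bool group_needs_target(uint64_t used, uint64_t reserved)
    {
            return used + reserved > 0;
    }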
@@ -5502,6 +6404,17 @@ static int __alloc_chunk_for_shrink(struct btrfs_root *root,
5502 return 0; 6404 return 0;
5503} 6405}
5504 6406
6407
6408int btrfs_prepare_block_group_relocation(struct btrfs_root *root,
6409 struct btrfs_block_group_cache *group)
6410
6411{
6412 __alloc_chunk_for_shrink(root, group, 1);
6413 set_block_group_readonly(group);
6414 return 0;
6415}
6416
6417#if 0
5505static int __insert_orphan_inode(struct btrfs_trans_handle *trans, 6418static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
5506 struct btrfs_root *root, 6419 struct btrfs_root *root,
5507 u64 objectid, u64 size) 6420 u64 objectid, u64 size)
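The new btrfs_prepare_block_group_relocation() bundles the two steps a relocation caller needs: make sure there is a chunk to migrate into, then flip the group read-only so no further allocations land in it. A hypothetical caller-side sketch of that two-step pattern; the callbacks stand in for __alloc_chunk_for_shrink() and set_block_group_readonly(), and nothing here is taken from the patch beyond the ordering.

    /* Hypothetical sketch, not kernel code: ensure a target chunk exists,
     * then stop new allocations in the source group. */
    struct model_group {
            int readonly;
    };

    static int model_prepare_relocation(struct model_group *g,
                                        int (*alloc_target)(struct model_group *),
                                        void (*set_readonly)(struct model_group *))
    {
            alloc_target(g);     /* cf. __alloc_chunk_for_shrink(root, group, 1) */
            set_readonly(g);     /* cf. set_block_group_readonly(group) */
            return 0;
    }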
@@ -5781,6 +6694,7 @@ out:
5781 btrfs_free_path(path); 6694 btrfs_free_path(path);
5782 return ret; 6695 return ret;
5783} 6696}
6697#endif
5784 6698
5785static int find_first_block_group(struct btrfs_root *root, 6699static int find_first_block_group(struct btrfs_root *root,
5786 struct btrfs_path *path, struct btrfs_key *key) 6700 struct btrfs_path *path, struct btrfs_key *key)