author		Josef Bacik <josef@redhat.com>	2009-09-11 16:11:20 -0400
committer	Chris Mason <chris.mason@oracle.com>	2009-09-21 19:23:49 -0400
commit		f61408b81cd040a594dc0b65171230c4d5cc917d (patch)
tree		0fe8757007b75ecbdb369b9f2d9750696d822092 /fs/btrfs/extent-tree.c
parent		f019f4264ae8c0169332592bcee419ee90e7c827 (diff)
Btrfs: remove dead code
This patch removes a bunch of dead code from the snapshot removal stuff. It
was confusing me when doing the metadata ENOSPC stuff so I killed it.
Signed-off-by: Josef Bacik <jbacik@redhat.com>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--	fs/btrfs/extent-tree.c	706
1 file changed, 0 insertions, 706 deletions
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 0f41da2c2f08..93e376ada28b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4462,430 +4462,6 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
 	return buf;
 }
 
-#if 0
-int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
-			struct btrfs_root *root, struct extent_buffer *leaf)
-{
-	u64 disk_bytenr;
-	u64 num_bytes;
-	struct btrfs_key key;
-	struct btrfs_file_extent_item *fi;
-	u32 nritems;
-	int i;
-	int ret;
-
-	BUG_ON(!btrfs_is_leaf(leaf));
-	nritems = btrfs_header_nritems(leaf);
-
-	for (i = 0; i < nritems; i++) {
-		cond_resched();
-		btrfs_item_key_to_cpu(leaf, &key, i);
-
-		/* only extents have references, skip everything else */
-		if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
-			continue;
-
-		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
-
-		/* inline extents live in the btree, they don't have refs */
-		if (btrfs_file_extent_type(leaf, fi) ==
-		    BTRFS_FILE_EXTENT_INLINE)
-			continue;
-
-		disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
-
-		/* holes don't have refs */
-		if (disk_bytenr == 0)
-			continue;
-
-		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
-		ret = btrfs_free_extent(trans, root, disk_bytenr, num_bytes,
-					leaf->start, 0, key.objectid, 0);
-		BUG_ON(ret);
-	}
-	return 0;
-}
-
-static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
-					struct btrfs_root *root,
-					struct btrfs_leaf_ref *ref)
-{
-	int i;
-	int ret;
-	struct btrfs_extent_info *info;
-	struct refsort *sorted;
-
-	if (ref->nritems == 0)
-		return 0;
-
-	sorted = kmalloc(sizeof(*sorted) * ref->nritems, GFP_NOFS);
-	for (i = 0; i < ref->nritems; i++) {
-		sorted[i].bytenr = ref->extents[i].bytenr;
-		sorted[i].slot = i;
-	}
-	sort(sorted, ref->nritems, sizeof(struct refsort), refsort_cmp, NULL);
-
-	/*
-	 * the items in the ref were sorted when the ref was inserted
-	 * into the ref cache, so this is already in order
-	 */
-	for (i = 0; i < ref->nritems; i++) {
-		info = ref->extents + sorted[i].slot;
-		ret = btrfs_free_extent(trans, root, info->bytenr,
-					info->num_bytes, ref->bytenr,
-					ref->owner, ref->generation,
-					info->objectid, 0);
-
-		atomic_inc(&root->fs_info->throttle_gen);
-		wake_up(&root->fs_info->transaction_throttle);
-		cond_resched();
-
-		BUG_ON(ret);
-		info++;
-	}
-
-	kfree(sorted);
-	return 0;
-}
-
-
-static int drop_snap_lookup_refcount(struct btrfs_trans_handle *trans,
-				     struct btrfs_root *root, u64 start,
-				     u64 len, u32 *refs)
-{
-	int ret;
-
-	ret = btrfs_lookup_extent_refs(trans, root, start, len, refs);
-	BUG_ON(ret);
-
-#if 0 /* some debugging code in case we see problems here */
-	/* if the refs count is one, it won't get increased again. But
-	 * if the ref count is > 1, someone may be decreasing it at
-	 * the same time we are.
-	 */
-	if (*refs != 1) {
-		struct extent_buffer *eb = NULL;
-		eb = btrfs_find_create_tree_block(root, start, len);
-		if (eb)
-			btrfs_tree_lock(eb);
-
-		mutex_lock(&root->fs_info->alloc_mutex);
-		ret = lookup_extent_ref(NULL, root, start, len, refs);
-		BUG_ON(ret);
-		mutex_unlock(&root->fs_info->alloc_mutex);
-
-		if (eb) {
-			btrfs_tree_unlock(eb);
-			free_extent_buffer(eb);
-		}
-		if (*refs == 1) {
-			printk(KERN_ERR "btrfs block %llu went down to one "
-			       "during drop_snap\n", (unsigned long long)start);
-		}
-
-	}
-#endif
-
-	cond_resched();
-	return ret;
-}
-
-
-/*
- * this is used while deleting old snapshots, and it drops the refs
- * on a whole subtree starting from a level 1 node.
- *
- * The idea is to sort all the leaf pointers, and then drop the
- * ref on all the leaves in order. Most of the time the leaves
- * will have ref cache entries, so no leaf IOs will be required to
- * find the extents they have references on.
- *
- * For each leaf, any references it has are also dropped in order
- *
- * This ends up dropping the references in something close to optimal
- * order for reading and modifying the extent allocation tree.
- */
-static noinline int drop_level_one_refs(struct btrfs_trans_handle *trans,
-					struct btrfs_root *root,
-					struct btrfs_path *path)
-{
-	u64 bytenr;
-	u64 root_owner;
-	u64 root_gen;
-	struct extent_buffer *eb = path->nodes[1];
-	struct extent_buffer *leaf;
-	struct btrfs_leaf_ref *ref;
-	struct refsort *sorted = NULL;
-	int nritems = btrfs_header_nritems(eb);
-	int ret;
-	int i;
-	int refi = 0;
-	int slot = path->slots[1];
-	u32 blocksize = btrfs_level_size(root, 0);
-	u32 refs;
-
-	if (nritems == 0)
-		goto out;
-
-	root_owner = btrfs_header_owner(eb);
-	root_gen = btrfs_header_generation(eb);
-	sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
-
-	/*
-	 * step one, sort all the leaf pointers so we don't scribble
-	 * randomly into the extent allocation tree
-	 */
-	for (i = slot; i < nritems; i++) {
-		sorted[refi].bytenr = btrfs_node_blockptr(eb, i);
-		sorted[refi].slot = i;
-		refi++;
-	}
-
-	/*
-	 * nritems won't be zero, but if we're picking up drop_snapshot
-	 * after a crash, slot might be > 0, so double check things
-	 * just in case.
-	 */
-	if (refi == 0)
-		goto out;
-
-	sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
-
-	/*
-	 * the first loop frees everything the leaves point to
-	 */
-	for (i = 0; i < refi; i++) {
-		u64 ptr_gen;
-
-		bytenr = sorted[i].bytenr;
-
-		/*
-		 * check the reference count on this leaf. If it is > 1
-		 * we just decrement it below and don't update any
-		 * of the refs the leaf points to.
-		 */
-		ret = drop_snap_lookup_refcount(trans, root, bytenr,
-						blocksize, &refs);
-		BUG_ON(ret);
-		if (refs != 1)
-			continue;
-
-		ptr_gen = btrfs_node_ptr_generation(eb, sorted[i].slot);
-
-		/*
-		 * the leaf only had one reference, which means the
-		 * only thing pointing to this leaf is the snapshot
-		 * we're deleting. It isn't possible for the reference
-		 * count to increase again later
-		 *
-		 * The reference cache is checked for the leaf,
-		 * and if found we'll be able to drop any refs held by
-		 * the leaf without needing to read it in.
-		 */
-		ref = btrfs_lookup_leaf_ref(root, bytenr);
-		if (ref && ref->generation != ptr_gen) {
-			btrfs_free_leaf_ref(root, ref);
-			ref = NULL;
-		}
-		if (ref) {
-			ret = cache_drop_leaf_ref(trans, root, ref);
-			BUG_ON(ret);
-			btrfs_remove_leaf_ref(root, ref);
-			btrfs_free_leaf_ref(root, ref);
-		} else {
-			/*
-			 * the leaf wasn't in the reference cache, so
-			 * we have to read it.
-			 */
-			leaf = read_tree_block(root, bytenr, blocksize,
-					       ptr_gen);
-			ret = btrfs_drop_leaf_ref(trans, root, leaf);
-			BUG_ON(ret);
-			free_extent_buffer(leaf);
-		}
-		atomic_inc(&root->fs_info->throttle_gen);
-		wake_up(&root->fs_info->transaction_throttle);
-		cond_resched();
-	}
-
-	/*
-	 * run through the loop again to free the refs on the leaves.
-	 * This is faster than doing it in the loop above because
-	 * the leaves are likely to be clustered together. We end up
-	 * working in nice chunks on the extent allocation tree.
-	 */
-	for (i = 0; i < refi; i++) {
-		bytenr = sorted[i].bytenr;
-		ret = btrfs_free_extent(trans, root, bytenr,
-					blocksize, eb->start,
-					root_owner, root_gen, 0, 1);
-		BUG_ON(ret);
-
-		atomic_inc(&root->fs_info->throttle_gen);
-		wake_up(&root->fs_info->transaction_throttle);
-		cond_resched();
-	}
-out:
-	kfree(sorted);
-
-	/*
-	 * update the path to show we've processed the entire level 1
-	 * node. This will get saved into the root's drop_snapshot_progress
-	 * field so these drops are not repeated again if this transaction
-	 * commits.
-	 */
-	path->slots[1] = nritems;
-	return 0;
-}
-
-/*
- * helper function for drop_snapshot, this walks down the tree dropping ref
- * counts as it goes.
- */
-static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
-				   struct btrfs_root *root,
-				   struct btrfs_path *path, int *level)
-{
-	u64 root_owner;
-	u64 root_gen;
-	u64 bytenr;
-	u64 ptr_gen;
-	struct extent_buffer *next;
-	struct extent_buffer *cur;
-	struct extent_buffer *parent;
-	u32 blocksize;
-	int ret;
-	u32 refs;
-
-	WARN_ON(*level < 0);
-	WARN_ON(*level >= BTRFS_MAX_LEVEL);
-	ret = drop_snap_lookup_refcount(trans, root, path->nodes[*level]->start,
-					path->nodes[*level]->len, &refs);
-	BUG_ON(ret);
-	if (refs > 1)
-		goto out;
-
-	/*
-	 * walk down to the last node level and free all the leaves
-	 */
-	while (*level >= 0) {
-		WARN_ON(*level < 0);
-		WARN_ON(*level >= BTRFS_MAX_LEVEL);
-		cur = path->nodes[*level];
-
-		if (btrfs_header_level(cur) != *level)
-			WARN_ON(1);
-
-		if (path->slots[*level] >=
-		    btrfs_header_nritems(cur))
-			break;
-
-		/* the new code goes down to level 1 and does all the
-		 * leaves pointed to that node in bulk. So, this check
-		 * for level 0 will always be false.
-		 *
-		 * But, the disk format allows the drop_snapshot_progress
-		 * field in the root to leave things in a state where
-		 * a leaf will need cleaning up here. If someone crashes
-		 * with the old code and then boots with the new code,
-		 * we might find a leaf here.
-		 */
-		if (*level == 0) {
-			ret = btrfs_drop_leaf_ref(trans, root, cur);
-			BUG_ON(ret);
-			break;
-		}
-
-		/*
-		 * once we get to level one, process the whole node
-		 * at once, including everything below it.
-		 */
-		if (*level == 1) {
-			ret = drop_level_one_refs(trans, root, path);
-			BUG_ON(ret);
-			break;
-		}
-
-		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
-		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
-		blocksize = btrfs_level_size(root, *level - 1);
-
-		ret = drop_snap_lookup_refcount(trans, root, bytenr,
-						blocksize, &refs);
-		BUG_ON(ret);
-
-		/*
-		 * if there is more than one reference, we don't need
-		 * to read that node to drop any references it has. We
-		 * just drop the ref we hold on that node and move on to the
-		 * next slot in this level.
-		 */
-		if (refs != 1) {
-			parent = path->nodes[*level];
-			root_owner = btrfs_header_owner(parent);
-			root_gen = btrfs_header_generation(parent);
-			path->slots[*level]++;
-
-			ret = btrfs_free_extent(trans, root, bytenr,
-						blocksize, parent->start,
-						root_owner, root_gen,
-						*level - 1, 1);
-			BUG_ON(ret);
-
-			atomic_inc(&root->fs_info->throttle_gen);
-			wake_up(&root->fs_info->transaction_throttle);
-			cond_resched();
-
-			continue;
-		}
-
-		/*
-		 * we need to keep freeing things in the next level down.
-		 * read the block and loop around to process it
-		 */
-		next = read_tree_block(root, bytenr, blocksize, ptr_gen);
-		WARN_ON(*level <= 0);
-		if (path->nodes[*level-1])
-			free_extent_buffer(path->nodes[*level-1]);
-		path->nodes[*level-1] = next;
-		*level = btrfs_header_level(next);
-		path->slots[*level] = 0;
-		cond_resched();
-	}
-out:
-	WARN_ON(*level < 0);
-	WARN_ON(*level >= BTRFS_MAX_LEVEL);
-
-	if (path->nodes[*level] == root->node) {
-		parent = path->nodes[*level];
-		bytenr = path->nodes[*level]->start;
-	} else {
-		parent = path->nodes[*level + 1];
-		bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
-	}
-
-	blocksize = btrfs_level_size(root, *level);
-	root_owner = btrfs_header_owner(parent);
-	root_gen = btrfs_header_generation(parent);
-
-	/*
-	 * cleanup and free the reference on the last node
-	 * we processed
-	 */
-	ret = btrfs_free_extent(trans, root, bytenr, blocksize,
-				parent->start, root_owner, root_gen,
-				*level, 1);
-	free_extent_buffer(path->nodes[*level]);
-	path->nodes[*level] = NULL;
-
-	*level += 1;
-	BUG_ON(ret);
-
-	cond_resched();
-	return 0;
-}
-#endif
-
 struct walk_control {
 	u64 refs[BTRFS_MAX_LEVEL];
 	u64 flags[BTRFS_MAX_LEVEL];
@@ -7129,288 +6705,6 @@ int btrfs_prepare_block_group_relocation(struct btrfs_root *root,
 	return 0;
 }
 
-#if 0
-static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
-				 struct btrfs_root *root,
-				 u64 objectid, u64 size)
-{
-	struct btrfs_path *path;
-	struct btrfs_inode_item *item;
-	struct extent_buffer *leaf;
-	int ret;
-
-	path = btrfs_alloc_path();
-	if (!path)
-		return -ENOMEM;
-
-	path->leave_spinning = 1;
-	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
-	if (ret)
-		goto out;
-
-	leaf = path->nodes[0];
-	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
-	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
-	btrfs_set_inode_generation(leaf, item, 1);
-	btrfs_set_inode_size(leaf, item, size);
-	btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
-	btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS);
-	btrfs_mark_buffer_dirty(leaf);
-	btrfs_release_path(root, path);
-out:
-	btrfs_free_path(path);
-	return ret;
-}
-
-static noinline struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
-					struct btrfs_block_group_cache *group)
-{
-	struct inode *inode = NULL;
-	struct btrfs_trans_handle *trans;
-	struct btrfs_root *root;
-	struct btrfs_key root_key;
-	u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
-	int err = 0;
-
-	root_key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
-	root_key.type = BTRFS_ROOT_ITEM_KEY;
-	root_key.offset = (u64)-1;
-	root = btrfs_read_fs_root_no_name(fs_info, &root_key);
-	if (IS_ERR(root))
-		return ERR_CAST(root);
-
-	trans = btrfs_start_transaction(root, 1);
-	BUG_ON(!trans);
-
-	err = btrfs_find_free_objectid(trans, root, objectid, &objectid);
-	if (err)
-		goto out;
-
-	err = __insert_orphan_inode(trans, root, objectid, group->key.offset);
-	BUG_ON(err);
-
-	err = btrfs_insert_file_extent(trans, root, objectid, 0, 0, 0,
-				       group->key.offset, 0, group->key.offset,
-				       0, 0, 0);
-	BUG_ON(err);
-
-	inode = btrfs_iget_locked(root->fs_info->sb, objectid, root);
-	if (inode->i_state & I_NEW) {
-		BTRFS_I(inode)->root = root;
-		BTRFS_I(inode)->location.objectid = objectid;
-		BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
-		BTRFS_I(inode)->location.offset = 0;
-		btrfs_read_locked_inode(inode);
-		unlock_new_inode(inode);
-		BUG_ON(is_bad_inode(inode));
-	} else {
-		BUG_ON(1);
-	}
-	BTRFS_I(inode)->index_cnt = group->key.objectid;
-
-	err = btrfs_orphan_add(trans, inode);
-out:
-	btrfs_end_transaction(trans, root);
-	if (err) {
-		if (inode)
-			iput(inode);
-		inode = ERR_PTR(err);
-	}
-	return inode;
-}
-
-int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
-{
-
-	struct btrfs_ordered_sum *sums;
-	struct btrfs_sector_sum *sector_sum;
-	struct btrfs_ordered_extent *ordered;
-	struct btrfs_root *root = BTRFS_I(inode)->root;
-	struct list_head list;
-	size_t offset;
-	int ret;
-	u64 disk_bytenr;
-
-	INIT_LIST_HEAD(&list);
-
-	ordered = btrfs_lookup_ordered_extent(inode, file_pos);
-	BUG_ON(ordered->file_offset != file_pos || ordered->len != len);
-
-	disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
-	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
-				       disk_bytenr + len - 1, &list);
-
-	while (!list_empty(&list)) {
-		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
-		list_del_init(&sums->list);
-
-		sector_sum = sums->sums;
-		sums->bytenr = ordered->start;
-
-		offset = 0;
-		while (offset < sums->len) {
-			sector_sum->bytenr += ordered->start - disk_bytenr;
-			sector_sum++;
-			offset += root->sectorsize;
-		}
-
-		btrfs_add_ordered_sum(inode, ordered, sums);
-	}
-	btrfs_put_ordered_extent(ordered);
-	return 0;
-}
-
-int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start)
-{
-	struct btrfs_trans_handle *trans;
-	struct btrfs_path *path;
-	struct btrfs_fs_info *info = root->fs_info;
-	struct extent_buffer *leaf;
-	struct inode *reloc_inode;
-	struct btrfs_block_group_cache *block_group;
-	struct btrfs_key key;
-	u64 skipped;
-	u64 cur_byte;
-	u64 total_found;
-	u32 nritems;
-	int ret;
-	int progress;
-	int pass = 0;
-
-	root = root->fs_info->extent_root;
-
-	block_group = btrfs_lookup_block_group(info, group_start);
-	BUG_ON(!block_group);
-
-	printk(KERN_INFO "btrfs relocating block group %llu flags %llu\n",
-	       (unsigned long long)block_group->key.objectid,
-	       (unsigned long long)block_group->flags);
-
-	path = btrfs_alloc_path();
-	BUG_ON(!path);
-
-	reloc_inode = create_reloc_inode(info, block_group);
-	BUG_ON(IS_ERR(reloc_inode));
-
-	__alloc_chunk_for_shrink(root, block_group, 1);
-	set_block_group_readonly(block_group);
-
-	btrfs_start_delalloc_inodes(info->tree_root);
-	btrfs_wait_ordered_extents(info->tree_root, 0);
-again:
-	skipped = 0;
-	total_found = 0;
-	progress = 0;
-	key.objectid = block_group->key.objectid;
-	key.offset = 0;
-	key.type = 0;
-	cur_byte = key.objectid;
-
-	trans = btrfs_start_transaction(info->tree_root, 1);
-	btrfs_commit_transaction(trans, info->tree_root);
-
-	mutex_lock(&root->fs_info->cleaner_mutex);
-	btrfs_clean_old_snapshots(info->tree_root);
-	btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1);
-	mutex_unlock(&root->fs_info->cleaner_mutex);
-
-	trans = btrfs_start_transaction(info->tree_root, 1);
-	btrfs_commit_transaction(trans, info->tree_root);
-
-	while (1) {
-		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
-		if (ret < 0)
-			goto out;
-next:
-		leaf = path->nodes[0];
-		nritems = btrfs_header_nritems(leaf);
-		if (path->slots[0] >= nritems) {
-			ret = btrfs_next_leaf(root, path);
-			if (ret < 0)
-				goto out;
-			if (ret == 1) {
-				ret = 0;
-				break;
-			}
-			leaf = path->nodes[0];
-			nritems = btrfs_header_nritems(leaf);
-		}
-
-		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-
-		if (key.objectid >= block_group->key.objectid +
-		    block_group->key.offset)
-			break;
-
-		if (progress && need_resched()) {
-			btrfs_release_path(root, path);
-			cond_resched();
-			progress = 0;
-			continue;
-		}
-		progress = 1;
-
-		if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY ||
-		    key.objectid + key.offset <= cur_byte) {
-			path->slots[0]++;
-			goto next;
-		}
-
-		total_found++;
-		cur_byte = key.objectid + key.offset;
-		btrfs_release_path(root, path);
-
-		__alloc_chunk_for_shrink(root, block_group, 0);
-		ret = relocate_one_extent(root, path, &key, block_group,
-					  reloc_inode, pass);
-		BUG_ON(ret < 0);
-		if (ret > 0)
-			skipped++;
-
-		key.objectid = cur_byte;
-		key.type = 0;
-		key.offset = 0;
-	}
-
-	btrfs_release_path(root, path);
-
-	if (pass == 0) {
-		btrfs_wait_ordered_range(reloc_inode, 0, (u64)-1);
-		invalidate_mapping_pages(reloc_inode->i_mapping, 0, -1);
-	}
-
-	if (total_found > 0) {
-		printk(KERN_INFO "btrfs found %llu extents in pass %d\n",
-		       (unsigned long long)total_found, pass);
-		pass++;
-		if (total_found == skipped && pass > 2) {
-			iput(reloc_inode);
-			reloc_inode = create_reloc_inode(info, block_group);
-			pass = 0;
-		}
-		goto again;
-	}
-
-	/* delete reloc_inode */
-	iput(reloc_inode);
-
-	/* unpin extents in this range */
-	trans = btrfs_start_transaction(info->tree_root, 1);
-	btrfs_commit_transaction(trans, info->tree_root);
-
-	spin_lock(&block_group->lock);
-	WARN_ON(block_group->pinned > 0);
-	WARN_ON(block_group->reserved > 0);
-	WARN_ON(btrfs_block_group_used(&block_group->item) > 0);
-	spin_unlock(&block_group->lock);
-	btrfs_put_block_group(block_group);
-	ret = 0;
-out:
-	btrfs_free_path(path);
-	return ret;
-}
-#endif
-
 /*
  * checks to see if its even possible to relocate this block group.
  *