aboutsummaryrefslogtreecommitdiffstats
path: root/fs/btrfs/file-item.c
diff options
context:
space:
mode:
authorLiu Bo <bo.li.liu@oracle.com>2013-02-04 08:12:18 -0500
committerJosef Bacik <jbacik@fusionio.com>2013-02-20 12:59:37 -0500
commit2f697dc6a648d3a16f512fe7a53281d55cce1570 (patch)
tree832da66ca86e982a5ae2cbd9a757cf6455059515 /fs/btrfs/file-item.c
parentde78b51a2852bddccd6535e9e12de65f92787a1e (diff)
Btrfs: extend the checksum item as much as possible
For writes, we also reserve some space for COW blocks while updating the checksum tree, and we calculate the number of blocks by checking whether the number of bytes outstanding that are going to need csums requires one more block for a csum. When we add these checksums into the checksum tree, we use the ordered sums list. Every ordered sum contains csums for each sector, and we'll first try to look up an existing csum item: a) if we don't yet have a proper csum item, then we need to insert one; b) if we find one but the csum item is not big enough, then we need to extend it. The point is that we'll unlock the whole path and then insert or extend, so others can hack in and update the tree. Each insert or extend needs to update the tree with COW on, and we may need to insert/extend many times. That means what we've reserved for updating the checksum tree is NOT enough indeed. The case is even more serious with several write threads running at the same time: it can end up eating our reserved space quickly and start eating the global reserve pool instead. I haven't yet come up with a way to calculate the worst case for updating csums, but extending the checksum item as much as possible has been helpful in my tests. The idea behind it is that it can reduce the number of times we insert/extend, so it saves us precious reserved space. Signed-off-by: Liu Bo <bo.li.liu@oracle.com> Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Diffstat (limited to 'fs/btrfs/file-item.c')
-rw-r--r--fs/btrfs/file-item.c67
1 file changed, 46 insertions, 21 deletions
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 94aa53b38721..ec160202be3e 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -684,6 +684,24 @@ out:
684 return ret; 684 return ret;
685} 685}
686 686
687static u64 btrfs_sector_sum_left(struct btrfs_ordered_sum *sums,
688 struct btrfs_sector_sum *sector_sum,
689 u64 total_bytes, u64 sectorsize)
690{
691 u64 tmp = sectorsize;
692 u64 next_sector = sector_sum->bytenr;
693 struct btrfs_sector_sum *next = sector_sum + 1;
694
695 while ((tmp + total_bytes) < sums->len) {
696 if (next_sector + sectorsize != next->bytenr)
697 break;
698 tmp += sectorsize;
699 next_sector = next->bytenr;
700 next++;
701 }
702 return tmp;
703}
704
687int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, 705int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
688 struct btrfs_root *root, 706 struct btrfs_root *root,
689 struct btrfs_ordered_sum *sums) 707 struct btrfs_ordered_sum *sums)
@@ -789,20 +807,32 @@ again:
789 goto insert; 807 goto insert;
790 } 808 }
791 809
792 if (csum_offset >= btrfs_item_size_nr(leaf, path->slots[0]) / 810 if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
793 csum_size) { 811 csum_size) {
794 u32 diff = (csum_offset + 1) * csum_size; 812 int extend_nr;
813 u64 tmp;
814 u32 diff;
815 u32 free_space;
795 816
796 /* 817 if (btrfs_leaf_free_space(root, leaf) <
797 * is the item big enough already? we dropped our lock 818 sizeof(struct btrfs_item) + csum_size * 2)
798 * before and need to recheck 819 goto insert;
799 */ 820
800 if (diff < btrfs_item_size_nr(leaf, path->slots[0])) 821 free_space = btrfs_leaf_free_space(root, leaf) -
801 goto csum; 822 sizeof(struct btrfs_item) - csum_size;
823 tmp = btrfs_sector_sum_left(sums, sector_sum, total_bytes,
824 root->sectorsize);
825 tmp >>= root->fs_info->sb->s_blocksize_bits;
826 WARN_ON(tmp < 1);
827
828 extend_nr = max_t(int, 1, (int)tmp);
829 diff = (csum_offset + extend_nr) * csum_size;
830 diff = min(diff, MAX_CSUM_ITEMS(root, csum_size) * csum_size);
802 831
803 diff = diff - btrfs_item_size_nr(leaf, path->slots[0]); 832 diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
804 if (diff != csum_size) 833 diff = min(free_space, diff);
805 goto insert; 834 diff /= csum_size;
835 diff *= csum_size;
806 836
807 btrfs_extend_item(trans, root, path, diff); 837 btrfs_extend_item(trans, root, path, diff);
808 goto csum; 838 goto csum;
@@ -812,19 +842,14 @@ insert:
812 btrfs_release_path(path); 842 btrfs_release_path(path);
813 csum_offset = 0; 843 csum_offset = 0;
814 if (found_next) { 844 if (found_next) {
815 u64 tmp = total_bytes + root->sectorsize; 845 u64 tmp;
816 u64 next_sector = sector_sum->bytenr;
817 struct btrfs_sector_sum *next = sector_sum + 1;
818 846
819 while (tmp < sums->len) { 847 tmp = btrfs_sector_sum_left(sums, sector_sum, total_bytes,
820 if (next_sector + root->sectorsize != next->bytenr) 848 root->sectorsize);
821 break;
822 tmp += root->sectorsize;
823 next_sector = next->bytenr;
824 next++;
825 }
826 tmp = min(tmp, next_offset - file_key.offset);
827 tmp >>= root->fs_info->sb->s_blocksize_bits; 849 tmp >>= root->fs_info->sb->s_blocksize_bits;
850 tmp = min(tmp, (next_offset - file_key.offset) >>
851 root->fs_info->sb->s_blocksize_bits);
852
828 tmp = max((u64)1, tmp); 853 tmp = max((u64)1, tmp);
829 tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size)); 854 tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size));
830 ins_size = csum_size * tmp; 855 ins_size = csum_size * tmp;