author     Chris Mason <chris.mason@oracle.com>  2008-04-03 16:29:03 -0400
committer  Chris Mason <chris.mason@oracle.com>  2008-09-25 11:04:01 -0400
commit     611f0e00a27fe0e5a571194a12443ecdc99a43ef (patch)
tree       d030bb63cca2aa1cb38c205bfe9fcef29cecb175 /fs
parent     8790d502e4401a4a3a4175b83a3a47e8d595c771 (diff)
Btrfs: Add support for duplicate blocks on a single spindle
Signed-off-by: Chris Mason <chris.mason@oracle.com>
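The new BTRFS_BLOCK_GROUP_DUP profile keeps two copies of each chunk on the same device, so metadata gets redundancy even on a single spindle. With this commit, filesystems reporting more than zero devices default to RAID0|RAID1 for data and RAID1|DUP for metadata and system chunks (see the disk-io.c hunk below). As a rough illustration only (a standalone sketch, not kernel code; the helper name `copies()` and the demo macros are assumptions made for this example, though the bit values mirror the defines added in ctree.h below):

```c
/*
 * Standalone sketch of how the block group profile bits combine.
 * DUP = two copies of a chunk on one device, RAID1 = one copy on
 * each of two devices.  Not part of the patch.
 */
#include <stdio.h>

#define DEMO_BLOCK_GROUP_METADATA (1ULL << 2)
#define DEMO_BLOCK_GROUP_RAID0    (1ULL << 3)
#define DEMO_BLOCK_GROUP_RAID1    (1ULL << 4)
#define DEMO_BLOCK_GROUP_DUP      (1ULL << 5)   /* new in this commit */

/* illustrative helper: how many copies of a block a profile implies */
static int copies(unsigned long long flags)
{
        if (flags & (DEMO_BLOCK_GROUP_RAID1 | DEMO_BLOCK_GROUP_DUP))
                return 2;
        return 1;
}

int main(void)
{
        unsigned long long metadata = DEMO_BLOCK_GROUP_METADATA |
                                      DEMO_BLOCK_GROUP_DUP;

        printf("metadata copies: %d\n", copies(metadata)); /* prints 2 */
        return 0;
}
```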
Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/ctree.h        |  1
-rw-r--r--  fs/btrfs/disk-io.c      |  8
-rw-r--r--  fs/btrfs/extent-tree.c  |  5
-rw-r--r--  fs/btrfs/volumes.c      | 32
4 files changed, 37 insertions, 9 deletions
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 0a207861472e..72deae63ec28 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -419,6 +419,7 @@ struct btrfs_csum_item {
 #define BTRFS_BLOCK_GROUP_METADATA (1 << 2)
 #define BTRFS_BLOCK_GROUP_RAID0 (1 << 3)
 #define BTRFS_BLOCK_GROUP_RAID1 (1 << 4)
+#define BTRFS_BLOCK_GROUP_DUP (1 << 5)
 
 
 struct btrfs_block_group_item {
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index ff75ad586767..42522232fde4 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -913,9 +913,11 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 
 	fs_info->generation = btrfs_super_generation(disk_super) + 1;
 	if (btrfs_super_num_devices(disk_super) > 0) {
-		fs_info->data_alloc_profile = BTRFS_BLOCK_GROUP_RAID0;
-		fs_info->metadata_alloc_profile = BTRFS_BLOCK_GROUP_RAID1;
-		fs_info->system_alloc_profile = BTRFS_BLOCK_GROUP_RAID0;
+		fs_info->data_alloc_profile = BTRFS_BLOCK_GROUP_RAID0 |
+					      BTRFS_BLOCK_GROUP_RAID1;
+		fs_info->metadata_alloc_profile = BTRFS_BLOCK_GROUP_RAID1 |
+						  BTRFS_BLOCK_GROUP_DUP;
+		fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
 	}
 	mutex_unlock(&fs_info->fs_mutex);
 	return tree_root;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 4ab98d8b73fa..1885ec4280c8 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -231,7 +231,7 @@ again:
 	if (start + num > total_fs_bytes)
 		goto new_group;
 	if (!block_group_bits(cache, data)) {
-		printk("block group bits don't match %Lu %Lu\n", cache->flags, data);
+		printk("block group bits don't match %Lu %d\n", cache->flags, data);
 	}
 	*start_ret = start;
 	return 0;
@@ -1048,7 +1048,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 {
 	u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
-				   BTRFS_BLOCK_GROUP_RAID1);
+				   BTRFS_BLOCK_GROUP_RAID1 |
+				   BTRFS_BLOCK_GROUP_DUP);
 	if (extra_flags) {
 		if (flags & BTRFS_BLOCK_GROUP_DATA)
 			fs_info->avail_data_alloc_bits |= extra_flags;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index bc3c0b97588e..b9294e3c05f0 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -627,6 +627,7 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 	struct extent_map *em;
 	u64 physical;
 	u64 calc_size = 1024 * 1024 * 1024;
+	u64 min_free = calc_size;
 	u64 avail;
 	u64 max_avail = 0;
 	int num_stripes = 1;
@@ -641,6 +642,8 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 
 	if (type & (BTRFS_BLOCK_GROUP_RAID0))
 		num_stripes = btrfs_super_num_devices(&info->super_copy);
+	if (type & (BTRFS_BLOCK_GROUP_DUP))
+		num_stripes = 2;
 	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
 		num_stripes = min_t(u64, 2,
 				    btrfs_super_num_devices(&info->super_copy));
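A DUP chunk always uses two stripes regardless of how many devices are present. A minimal sketch of the stripe-count logic in the hunk above, assuming simplified demo macros in place of the kernel defines (the checks cascade in the same order, so a later match overrides an earlier one):

```c
/* Sketch only: stripes implied by each profile; not kernel code. */
#define DEMO_RAID0 (1ULL << 3)
#define DEMO_RAID1 (1ULL << 4)
#define DEMO_DUP   (1ULL << 5)

static int demo_num_stripes(unsigned long long type, int num_devices)
{
        int num_stripes = 1;

        if (type & DEMO_RAID0)
                num_stripes = num_devices;      /* one stripe per device */
        if (type & DEMO_DUP)
                num_stripes = 2;                /* two copies, one device */
        if (type & DEMO_RAID1)
                num_stripes = num_devices < 2 ? num_devices : 2;
        return num_stripes;
}
```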
@@ -649,16 +652,23 @@ again:
 	INIT_LIST_HEAD(&private_devs);
 	cur = dev_list->next;
 	index = 0;
+
+	if (type & BTRFS_BLOCK_GROUP_DUP)
+		min_free = calc_size * 2;
+
 	/* build a private list of devices we will allocate from */
 	while(index < num_stripes) {
 		device = list_entry(cur, struct btrfs_device, dev_list);
+
 		avail = device->total_bytes - device->bytes_used;
 		cur = cur->next;
 		if (avail > max_avail)
 			max_avail = avail;
-		if (avail >= calc_size) {
+		if (avail >= min_free) {
 			list_move_tail(&device->dev_list, &private_devs);
 			index++;
+			if (type & BTRFS_BLOCK_GROUP_DUP)
+				index++;
 		}
 		if (cur == dev_list)
 			break;
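Because both copies of a DUP chunk land on one spindle, a candidate device must have room for two stripes, and claiming it fills two stripe slots at once. A minimal sketch of that selection rule under simplified, made-up types (`struct demo_device` and `pick_devices` are not the kernel's structures or helpers):

```c
/* Sketch of the device-selection rule above; illustration only. */
struct demo_device {
        unsigned long long total_bytes;
        unsigned long long bytes_used;
};

static int pick_devices(struct demo_device *devs, int ndevs,
                        unsigned long long calc_size, int is_dup,
                        int num_stripes)
{
        unsigned long long min_free = is_dup ? calc_size * 2 : calc_size;
        int index = 0;

        for (int i = 0; i < ndevs && index < num_stripes; i++) {
                unsigned long long avail =
                        devs[i].total_bytes - devs[i].bytes_used;

                if (avail >= min_free) {
                        index++;                /* first stripe */
                        if (is_dup)
                                index++;        /* second copy, same device */
                }
        }
        return index == num_stripes;            /* enough room found? */
}
```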
@@ -689,17 +699,22 @@ again:
 
 	stripes = &chunk->stripe;
 
-	if (type & BTRFS_BLOCK_GROUP_RAID1)
+	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
 		*num_bytes = calc_size;
 	else
 		*num_bytes = calc_size * num_stripes;
 
 	index = 0;
+printk("new chunk type %Lu start %Lu size %Lu\n", type, key.objectid, *num_bytes);
 	while(index < num_stripes) {
 		BUG_ON(list_empty(&private_devs));
 		cur = private_devs.next;
 		device = list_entry(cur, struct btrfs_device, dev_list);
-		list_move_tail(&device->dev_list, dev_list);
+
+		/* loop over this device again if we're doing a dup group */
+		if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
+		    (index == num_stripes - 1))
+			list_move_tail(&device->dev_list, dev_list);
 
 		ret = btrfs_alloc_dev_extent(trans, device,
 					     key.objectid,
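For a DUP group the allocation loop only returns the device to the global list after the last stripe, so both dev extents are carved out of the same device. A one-line sketch of the resulting stripe-to-device mapping (the helper name is hypothetical, introduced only for this illustration):

```c
/* Sketch only: which device index a stripe lands on after allocation. */
static int demo_stripe_device(int stripe_index, int is_dup)
{
        return is_dup ? 0 : stripe_index;   /* DUP: both stripes on device 0 */
}
```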
@@ -839,6 +854,14 @@ int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 			}
 			*total_devs = 1;
 		}
+	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
+		if (rw == WRITE) {
+			*total_devs = map->num_stripes;
+			stripe_index = dev_nr;
+		} else {
+			stripe_index = 0;
+			*total_devs = 1;
+		}
 	} else {
 		/*
 		 * after this do_div call, stripe_nr is the number of stripes
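In btrfs_map_block, a DUP mapping fans writes out to every copy (the caller issues one bio per stripe via dev_nr) while reads are served from the first copy only. A self-contained sketch of that decision, using placeholder types rather than the kernel's (`enum demo_rw` and `dup_map` are assumptions for this example):

```c
/* Illustration only: how the DUP branch above picks stripes. */
enum demo_rw { DEMO_READ, DEMO_WRITE };

static void dup_map(enum demo_rw rw, int num_stripes, int dev_nr,
                    int *stripe_index, int *total_devs)
{
        if (rw == DEMO_WRITE) {
                *total_devs = num_stripes;      /* one bio per copy */
                *stripe_index = dev_nr;         /* caller iterates dev_nr */
        } else {
                *stripe_index = 0;              /* read the first copy */
                *total_devs = 1;
        }
}
```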
@@ -851,7 +874,8 @@ int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 	*phys = map->stripes[stripe_index].physical + stripe_offset +
 		stripe_nr * map->stripe_len;
 
-	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1)) {
+	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
+			 BTRFS_BLOCK_GROUP_DUP)) {
 		/* we limit the length of each bio to what fits in a stripe */
 		*length = min_t(u64, em->len - offset,
 				map->stripe_len - stripe_offset);