author     Yan Zheng <zheng.yan@oracle.com>      2008-11-17 21:11:30 -0500
committer  Chris Mason <chris.mason@oracle.com>  2008-11-17 21:11:30 -0500
commit     2b82032c34ec40515d3c45c36cd1961f37977de8
tree       fbdfe7b13dd51983dfca4aeb75983b37ee186ff9 /fs/btrfs/extent-tree.c
parent     c146afad2c7fea6a366d4945c1bab9b03880f526
Btrfs: Seed device support
A seed device is a special btrfs filesystem with the
SEEDING super flag set; it can only be mounted in
read-only mode. Seed devices allow people to create a
new btrfs filesystem on top of them. The new FS contains
the same contents as the seed device, but it can be
mounted in read-write mode.
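
As a minimal illustration of the mount rule above (plain C, not kernel code: the struct and helper below are simplified stand-ins, and the flag is modeled on BTRFS_SUPER_FLAG_SEEDING):

/* Sketch only: a filesystem whose superblock carries the seeding flag
 * may be mounted read-only, never read-write. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SUPER_FLAG_SEEDING (1ULL << 32)	/* models BTRFS_SUPER_FLAG_SEEDING */

struct super_flags_stub {
	uint64_t flags;			/* simplified stand-in for btrfs_super_block flags */
};

static bool seed_mount_allowed(const struct super_flags_stub *sb, bool rw_mount)
{
	/* seed device: read-only mounts only */
	if ((sb->flags & SUPER_FLAG_SEEDING) && rw_mount)
		return false;
	return true;
}

int main(void)
{
	struct super_flags_stub sb = { .flags = SUPER_FLAG_SEEDING };

	printf("rw mount allowed: %d\n", seed_mount_allowed(&sb, true));  /* 0 */
	printf("ro mount allowed: %d\n", seed_mount_allowed(&sb, false)); /* 1 */
	return 0;
}
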
This patch does the following:

1) Split the code in btrfs_alloc_chunk into two parts. The first
   part makes the newly allocated chunk usable but does not perform
   any operation that modifies the chunk tree. The second part does
   the chunk tree modifications. This division is for the bootstrap
   step of adding storage to the seed device (a sketch of the split
   follows the sign-off below).

2) Update the device management code to handle seed devices. The
   basic idea is: for an FS grown from seed devices, its seed devices
   are put into a list. Seed devices are opened on demand at mount
   time. If any seed device is missing or has been changed, the btrfs
   kernel module will refuse to mount the FS (a second sketch follows
   below).

3) Make btrfs_find_block_group not return NULL when all block groups
   are read-only.
Signed-off-by: Yan Zheng <zheng.yan@oracle.com>
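
A rough sketch of the two-phase chunk allocation described in 1); the helper names and the chunk_stub type are illustrative placeholders, not the patch's actual functions:

/* Two-phase chunk allocation sketch: phase one only builds in-memory
 * state and makes the chunk usable; phase two records it in the chunk
 * tree. Keeping the phases apart is what lets the seed bootstrap
 * allocate a chunk before the new chunk tree items can be written. */
#include <stdint.h>
#include <stdio.h>

struct chunk_stub {
	uint64_t start;
	uint64_t num_bytes;
	int in_chunk_tree;	/* set once phase two has run */
};

/* phase one: reserve space on the devices and set up the mapping */
static int chunk_alloc_prepare(struct chunk_stub *c, uint64_t start,
			       uint64_t num_bytes)
{
	c->start = start;
	c->num_bytes = num_bytes;
	c->in_chunk_tree = 0;
	return 0;
}

/* phase two: insert the chunk (and dev extent) items into the chunk tree */
static int chunk_alloc_finish(struct chunk_stub *c)
{
	c->in_chunk_tree = 1;
	return 0;
}

int main(void)
{
	struct chunk_stub c;

	chunk_alloc_prepare(&c, 0, 8 * 1024 * 1024);	/* chunk is usable here */
	chunk_alloc_finish(&c);				/* chunk tree updated */
	printf("chunk at %llu, %llu bytes, in tree: %d\n",
	       (unsigned long long)c.start,
	       (unsigned long long)c.num_bytes, c.in_chunk_tree);
	return 0;
}
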
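And a sketch of the seed-device bookkeeping described in 2); the types and the check are simplified stand-ins, not the kernel structures:

/* The FS remembers every seed it was grown from; at mount time each
 * recorded seed must be found unchanged, otherwise the mount fails. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct seed_dev {
	uint64_t devid;
	uint8_t uuid[16];
	struct seed_dev *next;	/* singly linked list of seeds */
};

/* true only when every recorded seed appears, unchanged, among the
 * devices actually found while opening the filesystem */
bool seed_devices_ok(const struct seed_dev *recorded,
		     const struct seed_dev *found, size_t nfound)
{
	for (const struct seed_dev *s = recorded; s; s = s->next) {
		bool match = false;

		for (size_t i = 0; i < nfound; i++) {
			if (found[i].devid == s->devid &&
			    memcmp(found[i].uuid, s->uuid, sizeof(s->uuid)) == 0) {
				match = true;
				break;
			}
		}
		if (!match)
			return false;	/* missing or changed seed: refuse the mount */
	}
	return true;
}
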
Diffstat (limited to 'fs/btrfs/extent-tree.c')
 fs/btrfs/extent-tree.c | 31 ++++++++++++++-----------------
 1 file changed, 14 insertions(+), 17 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index af2de30dbeac..197422c1dc4b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -355,7 +355,7 @@ __btrfs_find_block_group(struct btrfs_root *root,
 	if (search_start) {
 		struct btrfs_block_group_cache *shint;
 		shint = btrfs_lookup_first_block_group(info, search_start);
-		if (shint && block_group_bits(shint, data) && !shint->ro) {
+		if (shint && block_group_bits(shint, data)) {
 			spin_lock(&shint->lock);
 			used = btrfs_block_group_used(&shint->item);
 			if (used + shint->pinned + shint->reserved <
@@ -366,7 +366,7 @@ __btrfs_find_block_group(struct btrfs_root *root,
 			spin_unlock(&shint->lock);
 		}
 	}
-	if (hint && !hint->ro && block_group_bits(hint, data)) {
+	if (hint && block_group_bits(hint, data)) {
 		spin_lock(&hint->lock);
 		used = btrfs_block_group_used(&hint->item);
 		if (used + hint->pinned + hint->reserved <
@@ -392,7 +392,7 @@ again:
 		last = cache->key.objectid + cache->key.offset;
 		used = btrfs_block_group_used(&cache->item);
 
-		if (!cache->ro && block_group_bits(cache, data)) {
+		if (block_group_bits(cache, data)) {
 			free_check = div_factor(cache->key.offset, factor);
 			if (used + cache->pinned + cache->reserved <
 			    free_check) {
@@ -1843,9 +1843,9 @@ static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
 	spin_unlock(&cache->space_info->lock);
 }
 
-static u64 reduce_alloc_profile(struct btrfs_root *root, u64 flags)
+u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
 {
-	u64 num_devices = root->fs_info->fs_devices->num_devices;
+	u64 num_devices = root->fs_info->fs_devices->rw_devices;
 
 	if (num_devices == 1)
 		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
@@ -1877,13 +1877,11 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 {
 	struct btrfs_space_info *space_info;
 	u64 thresh;
-	u64 start;
-	u64 num_bytes;
 	int ret = 0;
 
 	mutex_lock(&extent_root->fs_info->chunk_mutex);
 
-	flags = reduce_alloc_profile(extent_root, flags);
+	flags = btrfs_reduce_alloc_profile(extent_root, flags);
 
 	space_info = __find_space_info(extent_root->fs_info, flags);
 	if (!space_info) {
@@ -1913,16 +1911,11 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 	}
 	spin_unlock(&space_info->lock);
 
-	ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes, flags);
+	ret = btrfs_alloc_chunk(trans, extent_root, flags);
 	if (ret) {
 		printk("space info full %Lu\n", flags);
 		space_info->full = 1;
-		goto out;
 	}
-
-	ret = btrfs_make_block_group(trans, extent_root, 0, flags,
-		     BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes);
-	BUG_ON(ret);
 out:
 	mutex_unlock(&extent_root->fs_info->chunk_mutex);
 	return ret;
@@ -3040,7 +3033,7 @@ static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
 		data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
 	}
 again:
-	data = reduce_alloc_profile(root, data);
+	data = btrfs_reduce_alloc_profile(root, data);
 	/*
 	 * the only place that sets empty_size is btrfs_realloc_node, which
 	 * is not called recursively on allocations
@@ -5136,7 +5129,8 @@ static int noinline relocate_one_path(struct btrfs_trans_handle *trans,
 		else
 			btrfs_node_key_to_cpu(eb, &keys[level], 0);
 	}
-	if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
+	if (nodes[0] &&
+	    ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
 		eb = path->nodes[0];
 		ret = replace_extents_in_leaf(trans, reloc_root, eb,
 					      group, reloc_inode);
@@ -5377,7 +5371,7 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
 	u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
 		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
 
-	num_devices = root->fs_info->fs_devices->num_devices;
+	num_devices = root->fs_info->fs_devices->rw_devices;
 	if (num_devices == 1) {
 		stripped |= BTRFS_BLOCK_GROUP_DUP;
 		stripped = flags & ~stripped;
@@ -5801,6 +5795,8 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		BUG_ON(ret);
 
 		set_avail_alloc_bits(root->fs_info, cache->flags);
+		if (btrfs_chunk_readonly(root, cache->key.objectid))
+			set_block_group_readonly(cache);
 	}
 	ret = 0;
 error:
@@ -5889,6 +5885,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	block_group->space_info->total_bytes -= block_group->key.offset;
 	block_group->space_info->bytes_readonly -= block_group->key.offset;
 	spin_unlock(&block_group->space_info->lock);
+	block_group->space_info->full = 0;
 
 	/*
 	memset(shrink_block_group, 0, sizeof(*shrink_block_group));