author		Chris Mason <chris.mason@oracle.com>	2012-03-28 20:32:46 -0400
committer	Chris Mason <chris.mason@oracle.com>	2012-03-28 20:32:46 -0400
commit		1c691b330a19a1344df89bcb0f4cacd99e8b289a (patch)
tree		b3143a9875a773d33b9b8f60e98c7e5fae003b6a /fs
parent		1d4284bd6e8d7dd1d5521a6747bdb6dc1caf0225 (diff)
parent		213e64da90d14537cd63f7090d6c4d1fcc75d9f8 (diff)
Merge branch 'for-chris' of git://github.com/idryomov/btrfs-unstable into for-linus
Diffstat (limited to 'fs')
-rw-r--r--	fs/btrfs/backref.c	  7
-rw-r--r--	fs/btrfs/ctree.h	 33
-rw-r--r--	fs/btrfs/extent-tree.c	163
-rw-r--r--	fs/btrfs/volumes.c	 93
4 files changed, 157 insertions, 139 deletions
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 0436c12da8c2..4c79547f4a0c 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1342,12 +1342,6 @@ int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
 				inode_to_path, ipath);
 }
 
-/*
- * allocates space to return multiple file system paths for an inode.
- * total_bytes to allocate are passed, note that space usable for actual path
- * information will be total_bytes - sizeof(struct inode_fs_paths).
- * the returned pointer must be freed with free_ipath() in the end.
- */
 struct btrfs_data_container *init_data_container(u32 total_bytes)
 {
 	struct btrfs_data_container *data;
@@ -1403,5 +1397,6 @@ struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
 
 void free_ipath(struct inode_fs_paths *ipath)
 {
+	kfree(ipath->fspath);
 	kfree(ipath);
 }
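
Note (reviewer sketch, not part of the patch): the added kfree(ipath->fspath) plugs a leak, since init_ipath() allocates the fspath data container via init_data_container() while free_ipath() previously released only the wrapper. A minimal sketch of the intended pairing; the caller variables (fs_root, path, inum) are hypothetical, and ERR_PTR()-style error reporting from init_ipath() is assumed from the surrounding btrfs code:

	struct btrfs_path *path = btrfs_alloc_path();
	struct inode_fs_paths *ipath;
	int ret;

	ipath = init_ipath(4096, fs_root, path);	/* allocates ipath and ipath->fspath */
	if (IS_ERR(ipath))
		return PTR_ERR(ipath);

	ret = paths_from_inode(inum, ipath);		/* fills ipath->fspath */

	free_ipath(ipath);				/* now frees fspath as well as ipath */
	btrfs_free_path(path);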
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index ed2d196f7a84..5b8ef8eb3521 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -851,6 +851,21 @@ struct btrfs_csum_item {
  */
 #define BTRFS_AVAIL_ALLOC_BIT_SINGLE	(1ULL << 48)
 
+#define BTRFS_EXTENDED_PROFILE_MASK	(BTRFS_BLOCK_GROUP_PROFILE_MASK | \
+					 BTRFS_AVAIL_ALLOC_BIT_SINGLE)
+
+static inline u64 chunk_to_extended(u64 flags)
+{
+	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0)
+		flags |= BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+
+	return flags;
+}
+static inline u64 extended_to_chunk(u64 flags)
+{
+	return flags & ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+}
+
 struct btrfs_block_group_item {
 	__le64 used;
 	__le64 chunk_objectid;
@@ -2723,24 +2738,6 @@ static inline void free_fs_info(struct btrfs_fs_info *fs_info)
 	kfree(fs_info->super_for_commit);
 	kfree(fs_info);
 }
-/**
- * profile_is_valid - tests whether a given profile is valid and reduced
- * @flags: profile to validate
- * @extended: if true @flags is treated as an extended profile
- */
-static inline int profile_is_valid(u64 flags, int extended)
-{
-	u64 mask = ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
-
-	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
-	if (extended)
-		mask &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
-
-	if (flags & mask)
-		return 0;
-	/* true if zero or exactly one bit set */
-	return (flags & (~flags + 1)) == flags;
-}
 
 /* root-item.c */
 int btrfs_find_root_ref(struct btrfs_root *tree_root,
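
Note (reviewer sketch, not part of the patch): chunk_to_extended()/extended_to_chunk() formalize the convention that the chunk format encodes the "single" profile as 0, while the extended format spells it out as BTRFS_AVAIL_ALLOC_BIT_SINGLE. A small illustration of the round trip, using only the macros and helpers added in this header:

	u64 chunk, ext;

	chunk = 0;				/* "single" in chunk format */
	ext = chunk_to_extended(chunk);		/* == BTRFS_AVAIL_ALLOC_BIT_SINGLE */
	chunk = extended_to_chunk(ext);		/* back to 0 */

	ext = chunk_to_extended(BTRFS_BLOCK_GROUP_RAID1);
						/* unchanged: a real profile bit is already set */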
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 8b304e3537c4..8fe517bd8521 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3138,11 +3138,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
 
 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 {
-	u64 extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;
-
-	/* chunk -> extended profile */
-	if (extra_flags == 0)
-		extra_flags = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+	u64 extra_flags = chunk_to_extended(flags) &
+				BTRFS_EXTENDED_PROFILE_MASK;
 
 	if (flags & BTRFS_BLOCK_GROUP_DATA)
 		fs_info->avail_data_alloc_bits |= extra_flags;
@@ -3153,6 +3150,35 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 }
 
 /*
+ * returns target flags in extended format or 0 if restripe for this
+ * chunk_type is not in progress
+ */
+static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
+{
+	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
+	u64 target = 0;
+
+	BUG_ON(!mutex_is_locked(&fs_info->volume_mutex) &&
+	       !spin_is_locked(&fs_info->balance_lock));
+
+	if (!bctl)
+		return 0;
+
+	if (flags & BTRFS_BLOCK_GROUP_DATA &&
+	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
+	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
+		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
+	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
+		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
+	}
+
+	return target;
+}
+
+/*
  * @flags: available profiles in extended format (see ctree.h)
  *
  * Returns reduced profile in chunk format. If profile changing is in
@@ -3168,31 +3194,19 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
 	 */
 	u64 num_devices = root->fs_info->fs_devices->rw_devices +
 		root->fs_info->fs_devices->missing_devices;
+	u64 target;
 
-	/* pick restriper's target profile if it's available */
+	/*
+	 * see if restripe for this chunk_type is in progress, if so
+	 * try to reduce to the target profile
+	 */
 	spin_lock(&root->fs_info->balance_lock);
-	if (root->fs_info->balance_ctl) {
-		struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
-		u64 tgt = 0;
-
-		if ((flags & BTRFS_BLOCK_GROUP_DATA) &&
-		    (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
-		    (flags & bctl->data.target)) {
-			tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
-		} else if ((flags & BTRFS_BLOCK_GROUP_SYSTEM) &&
-			   (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
-			   (flags & bctl->sys.target)) {
-			tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
-		} else if ((flags & BTRFS_BLOCK_GROUP_METADATA) &&
-			   (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
-			   (flags & bctl->meta.target)) {
-			tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
-		}
-
-		if (tgt) {
+	target = get_restripe_target(root->fs_info, flags);
+	if (target) {
+		/* pick target profile only if it's already available */
+		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
 			spin_unlock(&root->fs_info->balance_lock);
-			flags = tgt;
-			goto out;
+			return extended_to_chunk(target);
 		}
 	}
 	spin_unlock(&root->fs_info->balance_lock);
@@ -3220,10 +3234,7 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
 		flags &= ~BTRFS_BLOCK_GROUP_RAID0;
 	}
 
-out:
-	/* extended -> chunk profile */
-	flags &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
-	return flags;
+	return extended_to_chunk(flags);
 }
 
 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
@@ -3445,8 +3456,6 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 	int wait_for_alloc = 0;
 	int ret = 0;
 
-	BUG_ON(!profile_is_valid(flags, 0));
-
 	space_info = __find_space_info(extent_root->fs_info, flags);
 	if (!space_info) {
 		ret = update_space_info(extent_root->fs_info, flags,
@@ -5300,22 +5309,29 @@ wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
 	return 0;
 }
 
-static int get_block_group_index(struct btrfs_block_group_cache *cache)
+static int __get_block_group_index(u64 flags)
 {
 	int index;
-	if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
+
+	if (flags & BTRFS_BLOCK_GROUP_RAID10)
 		index = 0;
-	else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
+	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
 		index = 1;
-	else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
+	else if (flags & BTRFS_BLOCK_GROUP_DUP)
 		index = 2;
-	else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
+	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
 		index = 3;
 	else
 		index = 4;
+
 	return index;
 }
 
+static int get_block_group_index(struct btrfs_block_group_cache *cache)
+{
+	return __get_block_group_index(cache->flags);
+}
+
 enum btrfs_loop_type {
 	LOOP_CACHING_NOWAIT = 0,
 	LOOP_CACHING_WAIT = 1,
@@ -7011,31 +7027,15 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
 {
 	u64 num_devices;
-	u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
-		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
-
-	if (root->fs_info->balance_ctl) {
-		struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
-		u64 tgt = 0;
-
-		/* pick restriper's target profile and return */
-		if (flags & BTRFS_BLOCK_GROUP_DATA &&
-		    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
-			tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
-		} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
-			   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
-			tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
-		} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
-			   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
-			tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
-		}
+	u64 stripped;
 
-		if (tgt) {
-			/* extended -> chunk profile */
-			tgt &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
-			return tgt;
-		}
-	}
+	/*
+	 * if restripe for this chunk_type is on pick target profile and
+	 * return, otherwise do the usual balance
+	 */
+	stripped = get_restripe_target(root->fs_info, flags);
+	if (stripped)
+		return extended_to_chunk(stripped);
 
 	/*
 	 * we add in the count of missing devices because we want
@@ -7045,6 +7045,9 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
 	num_devices = root->fs_info->fs_devices->rw_devices +
 		root->fs_info->fs_devices->missing_devices;
 
+	stripped = BTRFS_BLOCK_GROUP_RAID0 |
+		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
+
 	if (num_devices == 1) {
 		stripped |= BTRFS_BLOCK_GROUP_DUP;
 		stripped = flags & ~stripped;
@@ -7057,7 +7060,6 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
 		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
 			     BTRFS_BLOCK_GROUP_RAID10))
 			return stripped | BTRFS_BLOCK_GROUP_DUP;
-		return flags;
 	} else {
 		/* they already had raid on here, just return */
 		if (flags & stripped)
@@ -7070,9 +7072,9 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
 		if (flags & BTRFS_BLOCK_GROUP_DUP)
 			return stripped | BTRFS_BLOCK_GROUP_RAID1;
 
-		/* turn single device chunks into raid0 */
-		return stripped | BTRFS_BLOCK_GROUP_RAID0;
+		/* this is drive concat, leave it alone */
 	}
+
 	return flags;
 }
 
@@ -7253,6 +7255,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
 	u64 min_free;
 	u64 dev_min = 1;
 	u64 dev_nr = 0;
+	u64 target;
 	int index;
 	int full = 0;
 	int ret = 0;
@@ -7293,13 +7296,11 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
 	/*
 	 * ok we don't have enough space, but maybe we have free space on our
 	 * devices to allocate new chunks for relocation, so loop through our
-	 * alloc devices and guess if we have enough space. However, if we
-	 * were marked as full, then we know there aren't enough chunks, and we
-	 * can just return.
+	 * alloc devices and guess if we have enough space. if this block
+	 * group is going to be restriped, run checks against the target
+	 * profile instead of the current one.
 	 */
 	ret = -1;
-	if (full)
-		goto out;
 
 	/*
 	 * index:
@@ -7309,7 +7310,20 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
 	 * 3: raid0
 	 * 4: single
 	 */
-	index = get_block_group_index(block_group);
+	target = get_restripe_target(root->fs_info, block_group->flags);
+	if (target) {
+		index = __get_block_group_index(extended_to_chunk(target));
+	} else {
+		/*
+		 * this is just a balance, so if we were marked as full
+		 * we know there is no space for a new chunk
+		 */
+		if (full)
+			goto out;
+
+		index = get_block_group_index(block_group);
+	}
+
 	if (index == 0) {
 		dev_min = 4;
 		/* Divide by 2 */
@@ -7720,11 +7734,8 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 
 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 {
-	u64 extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;
-
-	/* chunk -> extended profile */
-	if (extra_flags == 0)
-		extra_flags = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+	u64 extra_flags = chunk_to_extended(flags) &
+				BTRFS_EXTENDED_PROFILE_MASK;
 
 	if (flags & BTRFS_BLOCK_GROUP_DATA)
 		fs_info->avail_data_alloc_bits &= ~extra_flags;
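
Note (reviewer sketch, not part of the patch): get_restripe_target() centralizes the "is a convert requested for this chunk type" test and asserts, via the BUG_ON, that callers hold either volume_mutex or balance_lock. A condensed sketch of how btrfs_reduce_alloc_profile() above consumes it; fs_info and flags stand in for the caller's locals:

	u64 target;

	spin_lock(&fs_info->balance_lock);
	target = get_restripe_target(fs_info, flags);
	if (target && (flags & target & BTRFS_EXTENDED_PROFILE_MASK)) {
		/* the requested target profile is already available */
		spin_unlock(&fs_info->balance_lock);
		return extended_to_chunk(target);
	}
	spin_unlock(&fs_info->balance_lock);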
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index d64cd6cbdbb6..68a1754fe367 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2282,15 +2282,13 @@ static void unset_balance_control(struct btrfs_fs_info *fs_info)
  * Balance filters. Return 1 if chunk should be filtered out
  * (should not be balanced).
  */
-static int chunk_profiles_filter(u64 chunk_profile,
+static int chunk_profiles_filter(u64 chunk_type,
 				 struct btrfs_balance_args *bargs)
 {
-	chunk_profile &= BTRFS_BLOCK_GROUP_PROFILE_MASK;
+	chunk_type = chunk_to_extended(chunk_type) &
+				BTRFS_EXTENDED_PROFILE_MASK;
 
-	if (chunk_profile == 0)
-		chunk_profile = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
-
-	if (bargs->profiles & chunk_profile)
+	if (bargs->profiles & chunk_type)
 		return 0;
 
 	return 1;
@@ -2397,18 +2395,16 @@ static int chunk_vrange_filter(struct extent_buffer *leaf,
 	return 1;
 }
 
-static int chunk_soft_convert_filter(u64 chunk_profile,
+static int chunk_soft_convert_filter(u64 chunk_type,
 				     struct btrfs_balance_args *bargs)
 {
 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
 		return 0;
 
-	chunk_profile &= BTRFS_BLOCK_GROUP_PROFILE_MASK;
-
-	if (chunk_profile == 0)
-		chunk_profile = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+	chunk_type = chunk_to_extended(chunk_type) &
+				BTRFS_EXTENDED_PROFILE_MASK;
 
-	if (bargs->target & chunk_profile)
+	if (bargs->target == chunk_type)
 		return 1;
 
 	return 0;
@@ -2634,6 +2630,30 @@ error:
 	return ret;
 }
 
+/**
+ * alloc_profile_is_valid - see if a given profile is valid and reduced
+ * @flags: profile to validate
+ * @extended: if true @flags is treated as an extended profile
+ */
+static int alloc_profile_is_valid(u64 flags, int extended)
+{
+	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
+		     BTRFS_BLOCK_GROUP_PROFILE_MASK);
+
+	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
+
+	/* 1) check that all other bits are zeroed */
+	if (flags & ~mask)
+		return 0;
+
+	/* 2) see if profile is reduced */
+	if (flags == 0)
+		return !extended; /* "0" is valid for usual profiles */
+
+	/* true if exactly one bit set */
+	return (flags & (flags - 1)) == 0;
+}
+
 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
 {
 	/* cancel requested || normal exit path */
@@ -2662,6 +2682,7 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 {
 	struct btrfs_fs_info *fs_info = bctl->fs_info;
 	u64 allowed;
+	int mixed = 0;
 	int ret;
 
 	if (btrfs_fs_closing(fs_info) ||
@@ -2671,13 +2692,16 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 		goto out;
 	}
 
+	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
+	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
+		mixed = 1;
+
 	/*
 	 * In case of mixed groups both data and meta should be picked,
 	 * and identical options should be given for both of them.
 	 */
-	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
-	if ((allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
-	    (bctl->flags & (BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA))) {
+	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
+	if (mixed && (bctl->flags & allowed)) {
 		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
 		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
 		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
@@ -2688,14 +2712,6 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 		}
 	}
 
-	/*
-	 * Profile changing sanity checks. Skip them if a simple
-	 * balance is requested.
-	 */
-	if (!((bctl->data.flags | bctl->sys.flags | bctl->meta.flags) &
-	      BTRFS_BALANCE_ARGS_CONVERT))
-		goto do_balance;
-
 	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
 	if (fs_info->fs_devices->num_devices == 1)
 		allowed |= BTRFS_BLOCK_GROUP_DUP;
@@ -2705,24 +2721,27 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 	allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
 		    BTRFS_BLOCK_GROUP_RAID10);
 
-	if (!profile_is_valid(bctl->data.target, 1) ||
-	    bctl->data.target & ~allowed) {
+	if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+	    (!alloc_profile_is_valid(bctl->data.target, 1) ||
+	     (bctl->data.target & ~allowed))) {
 		printk(KERN_ERR "btrfs: unable to start balance with target "
 		       "data profile %llu\n",
 		       (unsigned long long)bctl->data.target);
 		ret = -EINVAL;
 		goto out;
 	}
-	if (!profile_is_valid(bctl->meta.target, 1) ||
-	    bctl->meta.target & ~allowed) {
+	if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+	    (!alloc_profile_is_valid(bctl->meta.target, 1) ||
+	     (bctl->meta.target & ~allowed))) {
 		printk(KERN_ERR "btrfs: unable to start balance with target "
 		       "metadata profile %llu\n",
 		       (unsigned long long)bctl->meta.target);
 		ret = -EINVAL;
 		goto out;
 	}
-	if (!profile_is_valid(bctl->sys.target, 1) ||
-	    bctl->sys.target & ~allowed) {
+	if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+	    (!alloc_profile_is_valid(bctl->sys.target, 1) ||
+	     (bctl->sys.target & ~allowed))) {
 		printk(KERN_ERR "btrfs: unable to start balance with target "
 		       "system profile %llu\n",
 		       (unsigned long long)bctl->sys.target);
@@ -2730,7 +2749,9 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 		goto out;
 	}
 
-	if (bctl->data.target & BTRFS_BLOCK_GROUP_DUP) {
+	/* allow dup'ed data chunks only in mixed mode */
+	if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+	    (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
 		printk(KERN_ERR "btrfs: dup for data is not allowed\n");
 		ret = -EINVAL;
 		goto out;
@@ -2756,7 +2777,6 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 		}
 	}
 
-do_balance:
 	ret = insert_balance_item(fs_info->tree_root, bctl);
 	if (ret && ret != -EEXIST)
 		goto out;
@@ -2999,7 +3019,7 @@ again:
 	key.offset = (u64)-1;
 	key.type = BTRFS_DEV_EXTENT_KEY;
 
-	while (1) {
+	do {
 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 		if (ret < 0)
 			goto done;
@@ -3041,8 +3061,7 @@ again:
 			goto done;
 		if (ret == -ENOSPC)
 			failed++;
-		key.offset -= 1;
-	}
+	} while (key.offset-- > 0);
 
 	if (failed && !retried) {
 		failed = 0;
@@ -3160,11 +3179,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 	int i;
 	int j;
 
-	if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
-	    (type & BTRFS_BLOCK_GROUP_DUP)) {
-		WARN_ON(1);
-		type &= ~BTRFS_BLOCK_GROUP_DUP;
-	}
+	BUG_ON(!alloc_profile_is_valid(type, 0));
 
 	if (list_empty(&fs_devices->alloc_list))
 		return -ENOSPC;
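
Note (reviewer sketch, not part of the patch): alloc_profile_is_valid() replaces profile_is_valid() and makes the extended/chunk distinction explicit: in chunk format 0 means "single" and is a valid reduced profile, in extended format it is not. Illustrative expectations, written as assertions for this note only; they mirror the function body above:

	/* chunk format: "0" means single and is valid */
	BUG_ON(!alloc_profile_is_valid(0, 0));
	/* extended format: single must be spelled out */
	BUG_ON(alloc_profile_is_valid(0, 1));
	BUG_ON(!alloc_profile_is_valid(BTRFS_AVAIL_ALLOC_BIT_SINGLE, 1));
	/* not reduced: more than one profile bit set */
	BUG_ON(alloc_profile_is_valid(BTRFS_BLOCK_GROUP_RAID1 |
				      BTRFS_BLOCK_GROUP_RAID0, 0));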