Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/ctree.h              3
-rw-r--r--  fs/btrfs/extent-tree.c      126
-rw-r--r--  fs/btrfs/extent_io.c         51
-rw-r--r--  fs/btrfs/free-space-cache.c   2
-rw-r--r--  fs/btrfs/inode.c              2
-rw-r--r--  fs/btrfs/ioctl.c              2
-rw-r--r--  fs/btrfs/scrub.c              5
-rw-r--r--  fs/btrfs/super.c              6
-rw-r--r--  fs/btrfs/volumes.c            2
9 files changed, 120 insertions(+), 79 deletions(-)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 04a5dfcee5a1..50634abef9b4 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2369,6 +2369,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root,
 int btrfs_block_rsv_refill(struct btrfs_root *root,
                            struct btrfs_block_rsv *block_rsv,
                            u64 min_reserved);
+int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
+                                   struct btrfs_block_rsv *block_rsv,
+                                   u64 min_reserved);
 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
                             struct btrfs_block_rsv *dst_rsv,
                             u64 num_bytes);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 930ae8949737..2ad813674d77 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3888,9 +3888,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root,
         return ret;
 }
 
-int btrfs_block_rsv_refill(struct btrfs_root *root,
-                           struct btrfs_block_rsv *block_rsv,
-                           u64 min_reserved)
+static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
+                                           struct btrfs_block_rsv *block_rsv,
+                                           u64 min_reserved, int flush)
 {
         u64 num_bytes = 0;
         int ret = -ENOSPC;
@@ -3909,7 +3909,7 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
         if (!ret)
                 return 0;
 
-        ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1);
+        ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
         if (!ret) {
                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
                 return 0;
@@ -3918,6 +3918,20 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
         return ret;
 }
 
+int btrfs_block_rsv_refill(struct btrfs_root *root,
+                           struct btrfs_block_rsv *block_rsv,
+                           u64 min_reserved)
+{
+        return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
+}
+
+int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
+                                   struct btrfs_block_rsv *block_rsv,
+                                   u64 min_reserved)
+{
+        return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
+}
+
 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
                             struct btrfs_block_rsv *dst_rsv,
                             u64 num_bytes)
@@ -5093,11 +5107,11 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
         struct btrfs_root *root = orig_root->fs_info->extent_root;
         struct btrfs_free_cluster *last_ptr = NULL;
         struct btrfs_block_group_cache *block_group = NULL;
+        struct btrfs_block_group_cache *used_block_group;
         int empty_cluster = 2 * 1024 * 1024;
         int allowed_chunk_alloc = 0;
         int done_chunk_alloc = 0;
         struct btrfs_space_info *space_info;
-        int last_ptr_loop = 0;
         int loop = 0;
         int index = 0;
         int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
@@ -5159,6 +5173,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 ideal_cache:
         block_group = btrfs_lookup_block_group(root->fs_info,
                                                search_start);
+        used_block_group = block_group;
         /*
          * we don't want to use the block group if it doesn't match our
          * allocation bits, or if its not cached.
@@ -5196,6 +5211,7 @@ search:
                 u64 offset;
                 int cached;
 
+                used_block_group = block_group;
                 btrfs_get_block_group(block_group);
                 search_start = block_group->key.objectid;
 
@@ -5265,84 +5281,73 @@ alloc:
                 spin_lock(&block_group->free_space_ctl->tree_lock);
                 if (cached &&
                     block_group->free_space_ctl->free_space <
-                    num_bytes + empty_size) {
+                    num_bytes + empty_cluster + empty_size) {
                         spin_unlock(&block_group->free_space_ctl->tree_lock);
                         goto loop;
                 }
                 spin_unlock(&block_group->free_space_ctl->tree_lock);
 
                 /*
-                 * Ok we want to try and use the cluster allocator, so lets look
-                 * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
-                 * have tried the cluster allocator plenty of times at this
-                 * point and not have found anything, so we are likely way too
-                 * fragmented for the clustering stuff to find anything, so lets
-                 * just skip it and let the allocator find whatever block it can
-                 * find
+                 * Ok we want to try and use the cluster allocator, so
+                 * lets look there
                  */
-                if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
+                if (last_ptr) {
                         /*
                          * the refill lock keeps out other
                          * people trying to start a new cluster
                          */
                         spin_lock(&last_ptr->refill_lock);
-                        if (last_ptr->block_group &&
-                            (last_ptr->block_group->ro ||
-                            !block_group_bits(last_ptr->block_group, data))) {
-                                offset = 0;
+                        used_block_group = last_ptr->block_group;
+                        if (used_block_group != block_group &&
+                            (!used_block_group ||
+                             used_block_group->ro ||
+                             !block_group_bits(used_block_group, data))) {
+                                used_block_group = block_group;
                                 goto refill_cluster;
                         }
 
-                        offset = btrfs_alloc_from_cluster(block_group, last_ptr,
-                                                          num_bytes, search_start);
+                        if (used_block_group != block_group)
+                                btrfs_get_block_group(used_block_group);
+
+                        offset = btrfs_alloc_from_cluster(used_block_group,
+                          last_ptr, num_bytes, used_block_group->key.objectid);
                         if (offset) {
                                 /* we have a block, we're done */
                                 spin_unlock(&last_ptr->refill_lock);
                                 goto checks;
                         }
 
-                        spin_lock(&last_ptr->lock);
-                        /*
-                         * whoops, this cluster doesn't actually point to
-                         * this block group.  Get a ref on the block
-                         * group is does point to and try again
-                         */
-                        if (!last_ptr_loop && last_ptr->block_group &&
-                            last_ptr->block_group != block_group &&
-                            index <=
-                                get_block_group_index(last_ptr->block_group)) {
-
-                                btrfs_put_block_group(block_group);
-                                block_group = last_ptr->block_group;
-                                btrfs_get_block_group(block_group);
-                                spin_unlock(&last_ptr->lock);
-                                spin_unlock(&last_ptr->refill_lock);
-
-                                last_ptr_loop = 1;
-                                search_start = block_group->key.objectid;
-                                /*
-                                 * we know this block group is properly
-                                 * in the list because
-                                 * btrfs_remove_block_group, drops the
-                                 * cluster before it removes the block
-                                 * group from the list
-                                 */
-                                goto have_block_group;
+                        WARN_ON(last_ptr->block_group != used_block_group);
+                        if (used_block_group != block_group) {
+                                btrfs_put_block_group(used_block_group);
+                                used_block_group = block_group;
                         }
-                        spin_unlock(&last_ptr->lock);
 refill_cluster:
+                        BUG_ON(used_block_group != block_group);
+                        /* If we are on LOOP_NO_EMPTY_SIZE, we can't
+                         * set up a new clusters, so lets just skip it
+                         * and let the allocator find whatever block
+                         * it can find.  If we reach this point, we
+                         * will have tried the cluster allocator
+                         * plenty of times and not have found
+                         * anything, so we are likely way too
+                         * fragmented for the clustering stuff to find
+                         * anything.  */
+                        if (loop >= LOOP_NO_EMPTY_SIZE) {
+                                spin_unlock(&last_ptr->refill_lock);
+                                goto unclustered_alloc;
+                        }
+
                         /*
                          * this cluster didn't work out, free it and
                          * start over
                          */
                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
 
-                        last_ptr_loop = 0;
-
                         /* allocate a cluster in this block group */
                         ret = btrfs_find_space_cluster(trans, root,
                                                block_group, last_ptr,
-                                               offset, num_bytes,
+                                               search_start, num_bytes,
                                                empty_cluster + empty_size);
                         if (ret == 0) {
                                 /*
@@ -5378,6 +5383,7 @@ refill_cluster:
                         goto loop;
                 }
 
+unclustered_alloc:
                 offset = btrfs_find_space_for_alloc(block_group, search_start,
                                                     num_bytes, empty_size);
                 /*
@@ -5404,14 +5410,14 @@ checks:
                 search_start = stripe_align(root, offset);
                 /* move on to the next group */
                 if (search_start + num_bytes >= search_end) {
-                        btrfs_add_free_space(block_group, offset, num_bytes);
+                        btrfs_add_free_space(used_block_group, offset, num_bytes);
                         goto loop;
                 }
 
                 /* move on to the next group */
                 if (search_start + num_bytes >
-                    block_group->key.objectid + block_group->key.offset) {
-                        btrfs_add_free_space(block_group, offset, num_bytes);
+                    used_block_group->key.objectid + used_block_group->key.offset) {
+                        btrfs_add_free_space(used_block_group, offset, num_bytes);
                         goto loop;
                 }
 
@@ -5419,14 +5425,14 @@ checks:
                 ins->offset = num_bytes;
 
                 if (offset < search_start)
-                        btrfs_add_free_space(block_group, offset,
+                        btrfs_add_free_space(used_block_group, offset,
                                              search_start - offset);
                 BUG_ON(offset > search_start);
 
-                ret = btrfs_update_reserved_bytes(block_group, num_bytes,
+                ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
                                                   alloc_type);
                 if (ret == -EAGAIN) {
-                        btrfs_add_free_space(block_group, offset, num_bytes);
+                        btrfs_add_free_space(used_block_group, offset, num_bytes);
                         goto loop;
                 }
 
@@ -5435,15 +5441,19 @@ checks:
                 ins->offset = num_bytes;
 
                 if (offset < search_start)
-                        btrfs_add_free_space(block_group, offset,
+                        btrfs_add_free_space(used_block_group, offset,
                                              search_start - offset);
                 BUG_ON(offset > search_start);
+                if (used_block_group != block_group)
+                        btrfs_put_block_group(used_block_group);
                 btrfs_put_block_group(block_group);
                 break;
 loop:
                 failed_cluster_refill = false;
                 failed_alloc = false;
                 BUG_ON(index != get_block_group_index(block_group));
+                if (used_block_group != block_group)
+                        btrfs_put_block_group(used_block_group);
                 btrfs_put_block_group(block_group);
         }
         up_read(&space_info->groups_sem);
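
The used_block_group changes above let the allocator satisfy a request from the block group the cluster actually points at, which may differ from the group the search loop currently holds; the cost is that every exit path (checks, loop, the final break) must pair a conditional put with the conditional get. A toy userspace model of that pairing discipline, assuming a plain integer refcount (struct and helper names are illustrative, not the btrfs API):

    #include <assert.h>
    #include <stdio.h>

    /* toy refcounted object standing in for btrfs_block_group_cache */
    struct group { int refs; };

    static void get_group(struct group *g) { g->refs++; }
    static void put_group(struct group *g) { g->refs--; }

    /* borrow cluster_owner when it differs from the group already held;
     * the put on every exit path mirrors the conditional get exactly */
    static void alloc_step(struct group *held, struct group *cluster_owner)
    {
            struct group *used = cluster_owner ? cluster_owner : held;

            if (used != held)
                    get_group(used); /* extra ref only for the borrowed one */

            /* ... try to allocate from `used` here ... */

            if (used != held)
                    put_group(used); /* paired with the conditional get */
    }

    int main(void)
    {
            struct group a = { .refs = 1 }, b = { .refs = 1 };

            alloc_step(&a, &b);   /* borrows b for the duration */
            alloc_step(&a, NULL); /* falls back to a, no extra ref taken */
            assert(a.refs == 1 && b.refs == 1);
            printf("refcounts balanced\n");
            return 0;
    }
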
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 9472d3de5e52..49f3c9dc09f4 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -935,8 +935,10 @@ again:
         node = tree_search(tree, start);
         if (!node) {
                 prealloc = alloc_extent_state_atomic(prealloc);
-                if (!prealloc)
-                        return -ENOMEM;
+                if (!prealloc) {
+                        err = -ENOMEM;
+                        goto out;
+                }
                 err = insert_state(tree, prealloc, start, end, &bits);
                 prealloc = NULL;
                 BUG_ON(err == -EEXIST);
@@ -992,8 +994,10 @@ hit_next:
          */
         if (state->start < start) {
                 prealloc = alloc_extent_state_atomic(prealloc);
-                if (!prealloc)
-                        return -ENOMEM;
+                if (!prealloc) {
+                        err = -ENOMEM;
+                        goto out;
+                }
                 err = split_state(tree, state, prealloc, start);
                 BUG_ON(err == -EEXIST);
                 prealloc = NULL;
@@ -1024,8 +1028,10 @@ hit_next:
                 this_end = last_start - 1;
 
                 prealloc = alloc_extent_state_atomic(prealloc);
-                if (!prealloc)
-                        return -ENOMEM;
+                if (!prealloc) {
+                        err = -ENOMEM;
+                        goto out;
+                }
 
                 /*
                  * Avoid to free 'prealloc' if it can be merged with
@@ -1051,8 +1057,10 @@ hit_next:
          */
         if (state->start <= end && state->end > end) {
                 prealloc = alloc_extent_state_atomic(prealloc);
-                if (!prealloc)
-                        return -ENOMEM;
+                if (!prealloc) {
+                        err = -ENOMEM;
+                        goto out;
+                }
 
                 err = split_state(tree, state, prealloc, end + 1);
                 BUG_ON(err == -EEXIST);
@@ -2287,14 +2295,20 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                 if (!uptodate) {
                         int failed_mirror;
                         failed_mirror = (int)(unsigned long)bio->bi_bdev;
-                        if (tree->ops && tree->ops->readpage_io_failed_hook)
-                                ret = tree->ops->readpage_io_failed_hook(
-                                                bio, page, start, end,
-                                                failed_mirror, state);
-                        else
-                                ret = bio_readpage_error(bio, page, start, end,
-                                                         failed_mirror, NULL);
+                        /*
+                         * The generic bio_readpage_error handles errors the
+                         * following way: If possible, new read requests are
+                         * created and submitted and will end up in
+                         * end_bio_extent_readpage as well (if we're lucky, not
+                         * in the !uptodate case). In that case it returns 0 and
+                         * we just go on with the next page in our bio. If it
+                         * can't handle the error it will return -EIO and we
+                         * remain responsible for that page.
+                         */
+                        ret = bio_readpage_error(bio, page, start, end,
+                                                 failed_mirror, NULL);
                         if (ret == 0) {
+error_handled:
                                 uptodate =
                                         test_bit(BIO_UPTODATE, &bio->bi_flags);
                                 if (err)
@@ -2302,6 +2316,13 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                                 uncache_state(&cached);
                                 continue;
                         }
+                        if (tree->ops && tree->ops->readpage_io_failed_hook) {
+                                ret = tree->ops->readpage_io_failed_hook(
+                                                        bio, page, start, end,
+                                                        failed_mirror, state);
+                                if (ret == 0)
+                                        goto error_handled;
+                        }
                 }
 
                 if (uptodate) {
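
The reordering in end_bio_extent_readpage makes the generic bio_readpage_error the first line of defense and demotes the per-tree readpage_io_failed_hook to a fallback, with the error_handled label letting the fallback rejoin the success bookkeeping. A compressed userspace sketch of that control flow, where generic_retry() and failed_hook() stand in for the two handlers (names and the fake page loop are illustrative, not the btrfs code):

    #include <stdio.h>

    /* both handlers return 0 when they took responsibility for the page */
    static int generic_retry(int page) { return page == 1 ? 0 : -5; /* -EIO */ }
    static int failed_hook(int page)   { return page == 2 ? 0 : -5; }

    static void end_io(int npages)
    {
            for (int page = 0; page < npages; page++) {
                    int uptodate = (page == 0);

                    if (!uptodate) {
                            /* generic repair first, hook only as a fallback */
                            if (generic_retry(page) == 0)
                                    goto error_handled;
                            if (failed_hook(page) == 0)
                                    goto error_handled;
                            printf("page %d: unrecoverable\n", page);
                            continue;
    error_handled:
                            printf("page %d: handler resubmitted it\n", page);
                            continue;
                    }
                    printf("page %d: ok\n", page);
            }
    }

    int main(void)
    {
            end_io(4);
            return 0;
    }
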
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 6e5b7e463698..ec23d43d0c35 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1470,6 +1470,7 @@ static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
 {
         info->offset = offset_to_bitmap(ctl, offset);
         info->bytes = 0;
+        INIT_LIST_HEAD(&info->list);
         link_free_space(ctl, info);
         ctl->total_bitmaps++;
 
@@ -2319,6 +2320,7 @@ again:
 
                 if (!found) {
                         start = i;
+                        cluster->max_size = 0;
                         found = true;
                 }
 
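
The one-line INIT_LIST_HEAD fix above matters because a bitmap entry's embedded list node was being linked in with indeterminate next/prev pointers, and any later list_del on it would write through garbage. A stripped-down userspace model of the failure mode being prevented (a minimal re-implementation for illustration only; the kernel's real list_head lives in include/linux/list.h):

    #include <stdio.h>

    /* minimal doubly linked list node, same shape as the kernel's list_head */
    struct list_head { struct list_head *next, *prev; };

    #define INIT_LIST_HEAD(h) do { (h)->next = (h); (h)->prev = (h); } while (0)

    static void list_del(struct list_head *e)
    {
            /* dereferences e->next and e->prev: garbage here means corruption */
            e->next->prev = e->prev;
            e->prev->next = e->next;
            INIT_LIST_HEAD(e);
    }

    struct free_space_info {
            long offset, bytes;
            struct list_head list;  /* must be initialized before any list_del */
    };

    int main(void)
    {
            struct free_space_info info = { .offset = 0, .bytes = 0 };

            INIT_LIST_HEAD(&info.list); /* the one-line fix from the hunk above */
            list_del(&info.list);       /* safe: the node points at itself */
            printf("no corruption\n");
            return 0;
    }
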
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 526dd51a1966..2c984f7d4c2a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3490,7 +3490,7 @@ void btrfs_evict_inode(struct inode *inode)
          * doing the truncate.
          */
         while (1) {
-                ret = btrfs_block_rsv_refill(root, rsv, min_size);
+                ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size);
 
                 /*
                  * Try and steal from the global reserve since we will
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index a90e749ed6d2..72d461656f60 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1278,7 +1278,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
                 }
                 ret = btrfs_grow_device(trans, device, new_size);
                 btrfs_commit_transaction(trans, root);
-        } else {
+        } else if (new_size < old_size) {
                 ret = btrfs_shrink_device(device, new_size);
         }
 
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index fab420db5121..c27bcb67f330 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -256,6 +256,11 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
         btrfs_release_path(swarn->path);
 
         ipath = init_ipath(4096, local_root, swarn->path);
+        if (IS_ERR(ipath)) {
+                ret = PTR_ERR(ipath);
+                ipath = NULL;
+                goto err;
+        }
         ret = paths_from_inode(inum, ipath);
 
         if (ret < 0)
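
The scrub fix guards against init_ipath() failing: like many kernel allocators it reports failure through an ERR_PTR-encoded pointer rather than NULL, so a plain NULL test would sail past the error and paths_from_inode() would dereference a near-top-of-address-space value. A userspace imitation of the idiom (the kernel's real macros live in include/linux/err.h; init_ipath_sketch() is a made-up stand-in):

    #include <stdio.h>
    #include <errno.h>

    /* userspace imitation of the kernel's ERR_PTR/IS_ERR/PTR_ERR trio:
     * small negative errnos are stashed in the top of the address space */
    #define MAX_ERRNO 4095
    #define ERR_PTR(err)  ((void *)(long)(err))
    #define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
    #define PTR_ERR(ptr)  ((long)(ptr))

    static void *init_ipath_sketch(int fail)
    {
            static int storage;
            return fail ? ERR_PTR(-ENOMEM) : &storage;
    }

    int main(void)
    {
            void *ipath = init_ipath_sketch(1);

            if (IS_ERR(ipath)) {    /* same shape as the hunk above */
                    long ret = PTR_ERR(ipath);
                    ipath = NULL;   /* so later cleanup can skip it safely */
                    printf("init_ipath failed: %ld\n", ret);
                    return 1;
            }
            return 0;
    }
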
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 17ee7fc5e64e..e28ad4baf483 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1057,7 +1057,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
         int i = 0, nr_devices;
         int ret;
 
-        nr_devices = fs_info->fs_devices->rw_devices;
+        nr_devices = fs_info->fs_devices->open_devices;
         BUG_ON(!nr_devices);
 
         devices_info = kmalloc(sizeof(*devices_info) * nr_devices,
@@ -1079,8 +1079,8 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
         else
                 min_stripe_size = BTRFS_STRIPE_LEN;
 
-        list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
-                if (!device->in_fs_metadata)
+        list_for_each_entry(device, &fs_devices->devices, dev_list) {
+                if (!device->in_fs_metadata || !device->bdev)
                         continue;
 
                 avail_space = device->total_bytes - device->bytes_used;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index c37433d3cd82..0a8c8f8304b1 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1611,7 +1611,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
         if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
                 return -EINVAL;
 
-        bdev = blkdev_get_by_path(device_path, FMODE_EXCL,
+        bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
                                   root->fs_info->bdev_holder);
         if (IS_ERR(bdev))
                 return PTR_ERR(bdev);