Diffstat (limited to 'fs/btrfs/ctree.c')
-rw-r--r--  fs/btrfs/ctree.c  76
1 file changed, 50 insertions, 26 deletions
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index d92462fe66c8..5a6c39b44c84 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -968,6 +968,48 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
 	return 0;
 }
 
+static struct extent_buffer *alloc_tree_block_no_bg_flush(
+					  struct btrfs_trans_handle *trans,
+					  struct btrfs_root *root,
+					  u64 parent_start,
+					  const struct btrfs_disk_key *disk_key,
+					  int level,
+					  u64 hint,
+					  u64 empty_size)
+{
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct extent_buffer *ret;
+
+	/*
+	 * If we are COWing a node/leaf from the extent, chunk, device or free
+	 * space trees, make sure that we do not finish block group creation of
+	 * pending block groups. We do this to avoid a deadlock.
+	 * COWing can result in allocation of a new chunk, and flushing pending
+	 * block groups (btrfs_create_pending_block_groups()) can be triggered
+	 * when finishing allocation of a new chunk. Creation of a pending block
+	 * group modifies the extent, chunk, device and free space trees,
+	 * therefore we could deadlock with ourselves since we are holding a
+	 * lock on an extent buffer that btrfs_create_pending_block_groups() may
+	 * try to COW later.
+	 * For similar reasons, we also need to delay flushing pending block
+	 * groups when splitting a leaf or node, from one of those trees, since
+	 * we are holding a write lock on it and its parent or when inserting a
+	 * new root node for one of those trees.
+	 */
+	if (root == fs_info->extent_root ||
+	    root == fs_info->chunk_root ||
+	    root == fs_info->dev_root ||
+	    root == fs_info->free_space_root)
+		trans->can_flush_pending_bgs = false;
+
+	ret = btrfs_alloc_tree_block(trans, root, parent_start,
+				     root->root_key.objectid, disk_key, level,
+				     hint, empty_size);
+	trans->can_flush_pending_bgs = true;
+
+	return ret;
+}
+
 /*
  * does the dirty work in cow of a single block. The parent block (if
  * supplied) is updated to point to the new cow copy. The new buffer is marked
@@ -1015,26 +1057,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
 		parent_start = parent->start;
 
-	/*
-	 * If we are COWing a node/leaf from the extent, chunk or device trees,
-	 * make sure that we do not finish block group creation of pending block
-	 * groups. We do this to avoid a deadlock.
-	 * COWing can result in allocation of a new chunk, and flushing pending
-	 * block groups (btrfs_create_pending_block_groups()) can be triggered
-	 * when finishing allocation of a new chunk. Creation of a pending block
-	 * group modifies the extent, chunk and device trees, therefore we could
-	 * deadlock with ourselves since we are holding a lock on an extent
-	 * buffer that btrfs_create_pending_block_groups() may try to COW later.
-	 */
-	if (root == fs_info->extent_root ||
-	    root == fs_info->chunk_root ||
-	    root == fs_info->dev_root)
-		trans->can_flush_pending_bgs = false;
-
-	cow = btrfs_alloc_tree_block(trans, root, parent_start,
-			root->root_key.objectid, &disk_key, level,
-			search_start, empty_size);
-	trans->can_flush_pending_bgs = true;
+	cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key,
+					   level, search_start, empty_size);
 	if (IS_ERR(cow))
 		return PTR_ERR(cow);
 
@@ -3343,8 +3367,8 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 	else
 		btrfs_node_key(lower, &lower_key, 0);
 
-	c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
-				   &lower_key, level, root->node->start, 0);
+	c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level,
+					 root->node->start, 0);
 	if (IS_ERR(c))
 		return PTR_ERR(c);
 
@@ -3473,8 +3497,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
 	mid = (c_nritems + 1) / 2;
 	btrfs_node_key(c, &disk_key, mid);
 
-	split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
-			&disk_key, level, c->start, 0);
+	split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level,
+					     c->start, 0);
 	if (IS_ERR(split))
 		return PTR_ERR(split);
 
@@ -4258,8 +4282,8 @@ again:
 	else
 		btrfs_item_key(l, &disk_key, mid);
 
-	right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
-			&disk_key, 0, l->start, 0);
+	right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0,
+					     l->start, 0);
 	if (IS_ERR(right))
 		return PTR_ERR(right);
 
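
The pattern this patch centralizes is simple: temporarily clear trans->can_flush_pending_bgs around the tree block allocation, so the allocation cannot recurse into btrfs_create_pending_block_groups() while the caller holds locks on the very trees that routine would modify. The standalone sketch below models that idea outside the kernel; the names (struct transaction, alloc_block, is_self_referencing_root, alloc_block_no_bg_flush) are hypothetical stand-ins for illustration, not btrfs APIs.

/*
 * Minimal userspace sketch of the "suppress flushing while allocating"
 * pattern used by alloc_tree_block_no_bg_flush(). All names below are
 * hypothetical illustrations, not btrfs interfaces.
 */
#include <stdbool.h>
#include <stdio.h>

struct transaction {
        bool can_flush_pending_bgs;     /* mirrors trans->can_flush_pending_bgs */
};

enum root_id { EXTENT_ROOT, CHUNK_ROOT, DEV_ROOT, FREE_SPACE_ROOT, FS_ROOT };

/*
 * Stand-in for btrfs_alloc_tree_block(): in the real code, allocating may
 * flush pending block groups only when the transaction allows it.
 */
static int alloc_block(struct transaction *trans, enum root_id root)
{
        if (trans->can_flush_pending_bgs)
                printf("allocating for root %d (may flush pending bgs)\n", root);
        else
                printf("allocating for root %d (flush suppressed)\n", root);
        return 0;
}

/* Roots that btrfs_create_pending_block_groups() itself modifies. */
static bool is_self_referencing_root(enum root_id root)
{
        return root == EXTENT_ROOT || root == CHUNK_ROOT ||
               root == DEV_ROOT || root == FREE_SPACE_ROOT;
}

/* The helper pattern: clear the flag, allocate, then re-enable flushing. */
static int alloc_block_no_bg_flush(struct transaction *trans, enum root_id root)
{
        int ret;

        if (is_self_referencing_root(root))
                trans->can_flush_pending_bgs = false;

        ret = alloc_block(trans, root);
        trans->can_flush_pending_bgs = true;

        return ret;
}

int main(void)
{
        struct transaction trans = { .can_flush_pending_bgs = true };

        alloc_block_no_bg_flush(&trans, EXTENT_ROOT);   /* flush suppressed */
        alloc_block_no_bg_flush(&trans, FS_ROOT);       /* flush allowed */
        return 0;
}

Centralizing the suppress/restore pair in one helper is why the four call sites touched by the diff (COW of a block, new root insertion, node split and leaf split) can all be reduced to a single call without each having to repeat the flag handling correctly.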