author | Josef Bacik <jbacik@redhat.com> | 2008-10-01 19:11:18 -0400
committer | Chris Mason <chris.mason@oracle.com> | 2008-10-01 19:11:18 -0400
commit | cf749823857230017c86504bfdc70524f929ba96 (patch)
tree | bea09bb1df8954b1ed1629ea4269bc596ad83ef6 /fs/btrfs/extent-tree.c
parent | 83afeac42c5680b0b70d64fb8c4724cf05483fc2 (diff)
Btrfs: fix deadlock between alloc_mutex/chunk_mutex
This fixes a deadlock that happens between the alloc_mutex and chunk_mutex.
Process A comes in, decides to do a do_chunk_alloc, which takes the
chunk_mutex, and is holding the alloc_mutex because the only way you get to
do_chunk_alloc is by holding the alloc_mutex. btrfs_alloc_chunk does its thing
and goes to insert a new item, which results in a cow of the block.
We get into del_pending_extents from there, where if we need to be rescheduled
we drop the alloc_mutex and schedule. At this point process B comes in to do
an allocation and gets the alloc_mutex, and because process A has not yet
completed its chunk allocation, process B thinks it's a good time to do a chunk
allocation as well, and hangs on the chunk_mutex.
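In other words it is a circular wait: A holds the chunk_mutex and wants the
alloc_mutex back, while B holds the alloc_mutex and wants the chunk_mutex. A
minimal userspace sketch of that interleaving, using pthreads rather than the
kernel mutex API (only the two mutex names come from this commit; the thread
functions, comments, and scheduling hint are purely illustrative):

/* Build with: cc -pthread deadlock_sketch.c -- illustrative only, not btrfs code. */
#include <pthread.h>
#include <sched.h>

static pthread_mutex_t alloc_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *process_a(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&alloc_mutex);   /* allocator entry point */
	pthread_mutex_lock(&chunk_mutex);   /* do_chunk_alloc()      */

	/* del_pending_extents() wants to reschedule: drop alloc_mutex ... */
	pthread_mutex_unlock(&alloc_mutex);
	sched_yield();                      /* process B runs here */

	/* ... and re-take it while still holding chunk_mutex: blocks on B. */
	pthread_mutex_lock(&alloc_mutex);

	pthread_mutex_unlock(&chunk_mutex);
	pthread_mutex_unlock(&alloc_mutex);
	return NULL;
}

static void *process_b(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&alloc_mutex);   /* new allocation comes in    */
	pthread_mutex_lock(&chunk_mutex);   /* blocks on A: circular wait */

	pthread_mutex_unlock(&chunk_mutex);
	pthread_mutex_unlock(&alloc_mutex);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, process_a, NULL);
	pthread_create(&b, NULL, process_b, NULL);
	/* With the interleaving described above, neither join ever returns. */
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

The hang only triggers with this particular interleaving, which is why it shows
up as an occasional deadlock rather than a reliable failure.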
Process A wakes up and tries to take the alloc_mutex back and cannot. The way
to fix this is to do a mutex_trylock() on the chunk_mutex. If it returns 0 we
didn't get the lock, and if this was just a "hey, it may be a good time to
allocate a chunk" attempt then we simply exit. If we are trying to force an
allocation then we reschedule and keep trying to acquire the chunk_mutex. If,
once we acquire it, the space is already full, we can just exit; otherwise we
continue with the chunk allocation. Thank you,
Signed-off-by: Josef Bacik <jbacik@redhat.com>
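The actual kernel change is in the diff below; for comparison, the same
trylock/back-off idea expressed against the userspace sketch above would look
roughly like this (note that pthread_mutex_trylock() returns 0 on success, the
opposite convention of the kernel's mutex_trylock(); the force flag and return
codes here are illustrative, not part of the patch):

/*
 * Back-off variant of taking chunk_mutex while alloc_mutex is held
 * (userspace sketch only -- see the diff below for the real code).
 * Returns 0 if we skipped, 1 if we got the lock without waiting,
 * 2 if we had to wait (caller should recheck whether the space is full).
 */
static int chunk_mutex_trylock_backoff(int force)
{
	int waited = 0;

	while (pthread_mutex_trylock(&chunk_mutex) != 0) {
		if (!force)
			return 0;              /* opportunistic caller: just give up */
		pthread_mutex_unlock(&alloc_mutex);
		sched_yield();                 /* let the current holder finish */
		pthread_mutex_lock(&alloc_mutex);
		waited = 1;
	}
	return waited ? 2 : 1;
}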
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r-- | fs/btrfs/extent-tree.c | 15
1 file changed, 13 insertions, 2 deletions
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 677d5e774fad..db37b867e4f1 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1505,7 +1505,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 	u64 thresh;
 	u64 start;
 	u64 num_bytes;
-	int ret = 0;
+	int ret = 0, waited = 0;
 
 	flags = reduce_alloc_profile(extent_root, flags);
 
@@ -1530,7 +1530,18 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 	    space_info->bytes_reserved + alloc_bytes) < thresh)
 		goto out;
 
-	mutex_lock(&extent_root->fs_info->chunk_mutex);
+	while (!mutex_trylock(&extent_root->fs_info->chunk_mutex)) {
+		if (!force)
+			goto out;
+		mutex_unlock(&extent_root->fs_info->alloc_mutex);
+		cond_resched();
+		mutex_lock(&extent_root->fs_info->alloc_mutex);
+		waited = 1;
+	}
+
+	if (waited && space_info->full)
+		goto out_unlock;
+
 	ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes, flags);
 	if (ret == -ENOSPC) {
 		printk("space info full %Lu\n", flags);