about · summary · refs · log · tree · commit · diff · stats
path: root/fs/btrfs/extent-tree.c
diff options
context:
space:
mode:
authorJosef Bacik <jbacik@fusionio.com>2012-10-22 15:52:28 -0400
committerJosef Bacik <jbacik@fusionio.com>2012-12-11 13:31:36 -0500
commit7b398f8e58c415738e397645c926253c428cf002 (patch)
tree4fb8237c474e2e86b4275b196993f7fd757df09e /fs/btrfs/extent-tree.c
parent32adf0901371c8b9d258dba7811f3067d1d2ea5c (diff)
Btrfs: fill the global reserve when unpinning space
Dave gave me an image of a very full file system that would abort the transaction because it ran out of space while committing the transaction. This is because we would think there was plenty of room to create a snapshot even though the global reserve was not full. This happens because we calculate the global reserve size before we unpin any space, so after we unpin the space we allow reservations to occur even though we haven't reserved all of the space for our global reserve. Fix this by adding to the global reserve while unpinning in order to make sure we always have enough space to do our work. With this patch we no longer end up with an aborted transaction, we return ENOSPC properly to the person trying to create the snapshot. Thanks, Signed-off-by: Josef Bacik <jbacik@fusionio.com> Signed-off-by: Chris Mason <chris.mason@fusionio.com>
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--fs/btrfs/extent-tree.c29
1 file changed, 24 insertions(+), 5 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 2136adda2a0..b495cb4b9b2 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4949,9 +4949,13 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
 {
 	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_group_cache *cache = NULL;
+	struct btrfs_space_info *space_info;
+	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
 	u64 len;
+	bool readonly;
 
 	while (start <= end) {
+		readonly = false;
 		if (!cache ||
 		    start >= cache->key.objectid + cache->key.offset) {
 			if (cache)
@@ -4969,15 +4973,30 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
 		}
 
 		start += len;
+		space_info = cache->space_info;
 
-		spin_lock(&cache->space_info->lock);
+		spin_lock(&space_info->lock);
 		spin_lock(&cache->lock);
 		cache->pinned -= len;
-		cache->space_info->bytes_pinned -= len;
-		if (cache->ro)
-			cache->space_info->bytes_readonly += len;
+		space_info->bytes_pinned -= len;
+		if (cache->ro) {
+			space_info->bytes_readonly += len;
+			readonly = true;
+		}
 		spin_unlock(&cache->lock);
-		spin_unlock(&cache->space_info->lock);
+		if (!readonly && global_rsv->space_info == space_info) {
+			spin_lock(&global_rsv->lock);
+			if (!global_rsv->full) {
+				len = min(len, global_rsv->size -
+					  global_rsv->reserved);
+				global_rsv->reserved += len;
+				space_info->bytes_may_use += len;
+				if (global_rsv->reserved >= global_rsv->size)
+					global_rsv->full = 1;
+			}
+			spin_unlock(&global_rsv->lock);
+		}
+		spin_unlock(&space_info->lock);
 	}
 
 	if (cache)