author		Josef Bacik <josef@redhat.com>		2011-04-11 20:20:11 -0400
committer	Chris Mason <chris.mason@oracle.com>	2011-04-16 07:10:56 -0400
commit		6d74119f1a3efad9dc7f79a16c201242324b731f (patch)
tree		faf541835e82e9d56c7474c04b2d785821fe61d3 /fs
parent		0d399205edf3a4c290e76ebb36e541593af4a1b4 (diff)
Btrfs: avoid taking the chunk_mutex in do_chunk_alloc
Every time we try to allocate disk space we check whether we can preemptively
allocate a chunk, but in the common case we don't allocate anything, so there
is no sense in taking the chunk_mutex at all.  Instead, if we are allocating a
chunk, mark it in the space_info so we don't get two people trying to allocate
at the same time.  Thanks,

Signed-off-by: Josef Bacik <josef@redhat.com>
Reviewed-by: Liu Bo <liubo2009@cn.fujitsu.com>
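The shape of this change is a general pattern: do the cheap "should we
allocate at all?" checks under a spinlock, let exactly one thread claim an
in-flight flag, and only then take the expensive mutex, with latecomers
retrying once acquiring the mutex proves the first allocation has finished.
Below is a minimal user-space sketch of that pattern in C, assuming pthreads;
the pool/pool_grow/should_grow names and byte counts are hypothetical
stand-ins for illustration, not btrfs code, and lock initialization is
omitted.

/*
 * Minimal sketch of the locking pattern above, assuming pthreads.
 * All names here are illustrative; none of this is btrfs code.
 */
#include <pthread.h>
#include <stdbool.h>

struct pool {
	pthread_spinlock_t lock;	/* cheap lock for common-case checks */
	pthread_mutex_t grow_mutex;	/* held across the entire slow grow */
	bool full;			/* pool cannot grow any further */
	bool growing;			/* a grow is already in flight */
	long free_bytes;
};

static bool should_grow(struct pool *p, long want)
{
	return p->free_bytes < want;
}

/* Returns 1 if we grew the pool, 0 if growing was unnecessary. */
static int pool_grow(struct pool *p, long want)
{
	bool wait_for_grow;

again:
	pthread_spin_lock(&p->lock);
	if (p->full || !should_grow(p, want)) {
		/* Common case: bail out without ever touching the mutex. */
		pthread_spin_unlock(&p->lock);
		return 0;
	}
	wait_for_grow = p->growing;
	if (!wait_for_grow)
		p->growing = true;	/* we are the designated grower */
	pthread_spin_unlock(&p->lock);

	pthread_mutex_lock(&p->grow_mutex);
	if (wait_for_grow) {
		/*
		 * Someone else was growing.  The mutex is held for the whole
		 * grow, so acquiring it means they are done; recheck from
		 * the top instead of blindly growing again.
		 */
		pthread_mutex_unlock(&p->grow_mutex);
		goto again;
	}

	p->free_bytes += 1024 * 1024;	/* stand-in for the real allocation */

	pthread_spin_lock(&p->lock);
	p->growing = false;
	pthread_spin_unlock(&p->lock);
	pthread_mutex_unlock(&p->grow_mutex);
	return 1;
}

The retry mirrors the wait_for_alloc/goto again dance the patch adds to
do_chunk_alloc: because chunk_mutex is held for the entirety of an
allocation, acquiring it is proof the concurrent allocation completed, so
rechecking the cheap conditions is sufficient.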
Diffstat (limited to 'fs')
-rw-r--r--	fs/btrfs/ctree.h	 4
-rw-r--r--	fs/btrfs/extent-tree.c	30
2 files changed, 28 insertions(+), 6 deletions(-)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 0d00a07b5b29..2e61fe1b6b8c 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -740,8 +740,10 @@ struct btrfs_space_info {
 	 */
 	unsigned long reservation_progress;
 
-	int full;		/* indicates that we cannot allocate any more
+	int full:1;		/* indicates that we cannot allocate any more
 				   chunks for this space */
+	int chunk_alloc:1;	/* set if we are allocating a chunk */
+
 	int force_alloc;	/* set if we need to force a chunk alloc for
 				   this space */
 
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 26479484180d..31f33ba56fe8 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3039,6 +3039,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
 	found->bytes_may_use = 0;
 	found->full = 0;
 	found->force_alloc = CHUNK_ALLOC_NO_FORCE;
+	found->chunk_alloc = 0;
 	*space_info = found;
 	list_add_rcu(&found->list, &info->space_info);
 	atomic_set(&found->caching_threads, 0);
@@ -3318,10 +3319,9 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 {
 	struct btrfs_space_info *space_info;
 	struct btrfs_fs_info *fs_info = extent_root->fs_info;
+	int wait_for_alloc = 0;
 	int ret = 0;
 
-	mutex_lock(&fs_info->chunk_mutex);
-
 	flags = btrfs_reduce_alloc_profile(extent_root, flags);
 
 	space_info = __find_space_info(extent_root->fs_info, flags);
@@ -3332,21 +3332,40 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 	}
 	BUG_ON(!space_info);
 
+again:
 	spin_lock(&space_info->lock);
 	if (space_info->force_alloc)
 		force = space_info->force_alloc;
 	if (space_info->full) {
 		spin_unlock(&space_info->lock);
-		goto out;
+		return 0;
 	}
 
 	if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
 		spin_unlock(&space_info->lock);
-		goto out;
+		return 0;
+	} else if (space_info->chunk_alloc) {
+		wait_for_alloc = 1;
+	} else {
+		space_info->chunk_alloc = 1;
 	}
 
 	spin_unlock(&space_info->lock);
 
+	mutex_lock(&fs_info->chunk_mutex);
+
+	/*
+	 * The chunk_mutex is held throughout the entirety of a chunk
+	 * allocation, so once we've acquired the chunk_mutex we know that the
+	 * other guy is done and we need to recheck and see if we should
+	 * allocate.
+	 */
+	if (wait_for_alloc) {
+		mutex_unlock(&fs_info->chunk_mutex);
+		wait_for_alloc = 0;
+		goto again;
+	}
+
 	/*
 	 * If we have mixed data/metadata chunks we want to make sure we keep
 	 * allocating mixed chunks instead of individual chunks.
@@ -3372,9 +3391,10 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 		space_info->full = 1;
 	else
 		ret = 1;
+
 	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
+	space_info->chunk_alloc = 0;
 	spin_unlock(&space_info->lock);
-out:
 	mutex_unlock(&extent_root->fs_info->chunk_mutex);
 	return ret;
 }