author		Josef Bacik <jbacik@fb.com>	2014-11-26 11:52:54 -0500
committer	Chris Mason <clm@fb.com>	2014-12-02 21:35:10 -0500
commit		cb83b7b81698a4abe531e0ba18b9e288b06947ce (patch)
tree		d7d72b454b610e65b51509834603baaf7e0101f3 /fs
parent		8dbcd10f6978ca3ccee2f43288d16b7b9da2fb2b (diff)
Btrfs: make get_caching_control unconditionally return the ctl
This check was written back when we didn't create a caching control for the
fast free space cache loading path. However, we started doing that a long time
ago, and there is still a small window of time during which we could be caching
the block group the fast way. So if there is a caching_ctl on the block group
at all, just return it; the callers all wait properly for what they want. Thanks,
Signed-off-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Chris Mason <clm@fb.com>
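For context, a minimal sketch of what get_caching_control() looks like after this change, reconstructed from the first hunk below. The tail of the function (taking a reference on the ctl before returning) is not shown in the hunk and is an assumption about the surrounding code of this era, not a verbatim copy of the kernel source:

/*
 * Sketch of get_caching_control() after this patch. The tail that takes
 * a reference on the ctl is assumed, not shown in the diff below.
 */
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	/*
	 * No more BTRFS_CACHE_STARTED check: a caching_ctl can exist even
	 * while the block group is being loaded the fast way, so return it
	 * whenever it is present and let the callers wait on it.
	 */
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);	/* assumed: take a ref before returning */
	spin_unlock(&cache->lock);
	return ctl;
}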
Diffstat (limited to 'fs')
-rw-r--r--	fs/btrfs/extent-tree.c	10
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 17b052ae4653..222d6aea4a8a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -315,12 +315,6 @@ get_caching_control(struct btrfs_block_group_cache *cache)
 	struct btrfs_caching_control *ctl;
 
 	spin_lock(&cache->lock);
-	if (cache->cached != BTRFS_CACHE_STARTED) {
-		spin_unlock(&cache->lock);
-		return NULL;
-	}
-
-	/* We're loading it the fast way, so we don't have a caching_ctl. */
 	if (!cache->caching_ctl) {
 		spin_unlock(&cache->lock);
 		return NULL;
@@ -594,6 +588,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 	spin_unlock(&cache->lock);
 
 	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
+		mutex_lock(&caching_ctl->mutex);
 		ret = load_free_space_cache(fs_info, cache);
 
 		spin_lock(&cache->lock);
@@ -601,6 +596,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 			cache->caching_ctl = NULL;
 			cache->cached = BTRFS_CACHE_FINISHED;
 			cache->last_byte_to_unpin = (u64)-1;
+			caching_ctl->progress = (u64)-1;
 		} else {
 			if (load_cache_only) {
 				cache->caching_ctl = NULL;
@@ -611,6 +607,8 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 			}
 		}
 		spin_unlock(&cache->lock);
+		mutex_unlock(&caching_ctl->mutex);
+
 		wake_up(&caching_ctl->wait);
 		if (ret == 1) {
 			put_caching_control(caching_ctl);
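Taken together, the three cache_block_group() hunks leave the fast-load path looking roughly like this. This is a sketch assembled from the hunks above, not the verbatim function: "..." marks elided context, and the first "if (ret == 1)" guard is reconstructed from context rather than shown in the diff:

	/*
	 * Fast free-space-cache load in cache_block_group() as patched
	 * (assembled from the hunks above; "..." marks elided context).
	 */
	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
		mutex_lock(&caching_ctl->mutex);	/* added: hold the ctl mutex across the load */
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {		/* reconstructed: cache loaded fully */
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
			caching_ctl->progress = (u64)-1;	/* added: ctl now reports completion */
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				/* ... */
			}
			/* ... */
		}
		spin_unlock(&cache->lock);
		mutex_unlock(&caching_ctl->mutex);	/* added */

		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			/* ... */
		}
	}

The design intent, as far as the hunks show it: now that get_caching_control() can hand out a ctl during a fast load, holding caching_ctl->mutex across load_free_space_cache() and setting caching_ctl->progress on success let anyone who grabbed that ctl serialize against the fast load and observe its completion, rather than racing with it.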