author		Josef Bacik <jbacik@fusionio.com>	2013-08-05 11:15:21 -0400
committer	Chris Mason <chris.mason@fusionio.com>	2013-09-01 08:04:47 -0400
commit		36cce922875563a1e2a4b6a53fbe1147f652a51e
tree		fad46576765b99267f7906de13343886f8a5a361 /fs/btrfs/extent-tree.c
parent		0f0fe8f710f29dbd4b2c915fc1c36962e4957b3b
Btrfs: handle errors when doing slow caching
Alex Lyakas reported a bug where wait_block_group_cache_progress() would wait
forever if a drive failed. This happens because we simply bail out when there is
an error while trying to cache a block group and never wake up anybody who may be
waiting. So this introduces a new enum value for the error cache state and makes
everybody bail out if we have an error. Alex tested and verified that this patch
fixed his problem.

This fixes bz 59431. Thanks,

Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
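The new error state has to be an additional value of the block-group caching enum;
since the diffstat below is limited to extent-tree.c, that part of the change is not
shown on this page. A minimal sketch of what the addition presumably looks like (the
enum lives in fs/btrfs/ctree.h in kernels of this era; the neighboring values are
assumed context, not taken from this diff):

	/* Sketch only: block-group caching states with the new error state
	 * appended. Everything except BTRFS_CACHE_ERROR is assumed context
	 * and is not part of the diff shown below.
	 */
	enum btrfs_caching_type {
		BTRFS_CACHE_NO		= 0,
		BTRFS_CACHE_STARTED	= 1,
		BTRFS_CACHE_FAST	= 2,
		BTRFS_CACHE_FINISHED	= 3,
		BTRFS_CACHE_ERROR	= 4,	/* slow caching hit an error */
	};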
Diffstat (limited to 'fs/btrfs/extent-tree.c')
 fs/btrfs/extent-tree.c | 32 +++++++++++++++++++++++---------
 1 file changed, 23 insertions(+), 9 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index e868c35f760c..a073f3ece43a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -113,7 +113,8 @@ static noinline int
 block_group_cache_done(struct btrfs_block_group_cache *cache)
 {
 	smp_mb();
-	return cache->cached == BTRFS_CACHE_FINISHED;
+	return cache->cached == BTRFS_CACHE_FINISHED ||
+		cache->cached == BTRFS_CACHE_ERROR;
 }
 
 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
@@ -389,7 +390,7 @@ static noinline void caching_thread(struct btrfs_work *work)
 	u64 total_found = 0;
 	u64 last = 0;
 	u32 nritems;
-	int ret = 0;
+	int ret = -ENOMEM;
 
 	caching_ctl = container_of(work, struct btrfs_caching_control, work);
 	block_group = caching_ctl->block_group;
@@ -517,6 +518,12 @@ err:
 
 	mutex_unlock(&caching_ctl->mutex);
 out:
+	if (ret) {
+		spin_lock(&block_group->lock);
+		block_group->caching_ctl = NULL;
+		block_group->cached = BTRFS_CACHE_ERROR;
+		spin_unlock(&block_group->lock);
+	}
 	wake_up(&caching_ctl->wait);
 
 	put_caching_control(caching_ctl);
@@ -6035,8 +6042,11 @@ static u64 stripe_align(struct btrfs_root *root,
  * for our min num_bytes. Another option is to have it go ahead
  * and look in the rbtree for a free extent of a given size, but this
  * is a good start.
+ *
+ * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
+ * any of the information in this block group.
  */
-static noinline int
+static noinline void
 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
 				u64 num_bytes)
 {
@@ -6044,28 +6054,29 @@ wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
 
 	caching_ctl = get_caching_control(cache);
 	if (!caching_ctl)
-		return 0;
+		return;
 
 	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
 		   (cache->free_space_ctl->free_space >= num_bytes));
 
 	put_caching_control(caching_ctl);
-	return 0;
 }
 
 static noinline int
 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
 {
 	struct btrfs_caching_control *caching_ctl;
+	int ret = 0;
 
 	caching_ctl = get_caching_control(cache);
 	if (!caching_ctl)
-		return 0;
+		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
 
 	wait_event(caching_ctl->wait, block_group_cache_done(cache));
-
+	if (cache->cached == BTRFS_CACHE_ERROR)
+		ret = -EIO;
 	put_caching_control(caching_ctl);
-	return 0;
+	return ret;
 }
 
 int __get_raid_index(u64 flags)
@@ -6248,6 +6259,8 @@ have_block_group:
 			ret = 0;
 		}
 
+		if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
+			goto loop;
 		if (unlikely(block_group->ro))
 			goto loop;
 
@@ -8230,7 +8243,8 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
 		 * We haven't cached this block group, which means we could
 		 * possibly have excluded extents on this block group.
 		 */
-		if (block_group->cached == BTRFS_CACHE_NO)
+		if (block_group->cached == BTRFS_CACHE_NO ||
+		    block_group->cached == BTRFS_CACHE_ERROR)
 			free_excluded_extents(info->extent_root, block_group);
 
 		btrfs_remove_free_space_cache(block_group);
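
For context, the new comment above wait_block_group_cache_progress() asks callers
to re-check the cache state themselves now that the function returns void. A
hypothetical caller sketch of that pattern follows; use_block_group() is a made-up
name and this is not code from the patch:

	/* Hypothetical caller sketch, not part of this patch: after waiting
	 * for slow caching to make progress, bail out if the caching thread
	 * recorded an error instead of trusting the block group's free-space
	 * information.
	 */
	static int use_block_group(struct btrfs_block_group_cache *cache,
				   u64 num_bytes)
	{
		wait_block_group_cache_progress(cache, num_bytes);

		if (unlikely(cache->cached == BTRFS_CACHE_ERROR))
			return -EIO;	/* skip this block group */

		/* ... safe to consult cache->free_space_ctl here ... */
		return 0;
	}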