author    Linus Torvalds <torvalds@linux-foundation.org>  2015-04-26 20:40:30 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-04-26 20:40:30 -0400
commit    f583381f506dc53b9d3e98cf9d6577323eb3fcd2 (patch)
tree      5a53895acd1d3c6adab886910399780dd95d65e3 /fs
parent    59953fba87e5e535657403cc6439d24187929559 (diff)
parent    a3bdccc4e683f0ac69230707ed3fa20e7cf73a79 (diff)
Merge branch 'for-linus-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs fixes from Chris Mason:
 "Filipe hit two problems in my block group cache patches.  We
  finalized the fixes last week and ran through more tests"

* 'for-linus-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  Btrfs: prevent list corruption during free space cache processing
  Btrfs: fix inode cache writeout
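[Editor's note] The first fix reworks the error paths in __btrfs_write_out_cache: failures that occur while ctl->tree_lock and cache_writeout_mutex are still held now jump to a dedicated out_nospc_locked label, which empties the pending bitmap list under the lock before releasing the locks in reverse order. Below is a minimal, self-contained sketch of that locked-error-label pattern, not the btrfs code itself; all names (demo_lock, demo_mutex, demo_write_step, out_locked) are illustrative stand-ins, and pthread primitives stand in for the kernel spinlock/mutex.

    /* Sketch of unwinding a list under its locks via an error label. */
    #include <pthread.h>
    #include <stdio.h>

    struct node { struct node *next; };

    static pthread_spinlock_t demo_lock;   /* plays the role of ctl->tree_lock */
    static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void cleanup_list(struct node **head)
    {
            /* Detach every entry while the protecting lock is still held,
             * so no concurrent path can see a half-torn-down list. */
            while (*head)
                    *head = (*head)->next;
    }

    static int demo_write_step(int fail) { return fail ? -1 : 0; }

    static int demo_write_out(struct node **pending, int fail)
    {
            int ret;

            pthread_mutex_lock(&demo_mutex);
            pthread_spin_lock(&demo_lock);

            ret = demo_write_step(fail);
            if (ret)
                    goto out_locked;        /* analogous to out_nospc_locked */

            pthread_spin_unlock(&demo_lock);
            pthread_mutex_unlock(&demo_mutex);
            return 0;

    out_locked:
            cleanup_list(pending);          /* list emptied under the lock */
            pthread_spin_unlock(&demo_lock);
            pthread_mutex_unlock(&demo_mutex);
            return ret;
    }

    int main(void)
    {
            struct node n = { .next = NULL }, *list = &n;

            pthread_spin_init(&demo_lock, PTHREAD_PROCESS_PRIVATE);
            printf("error path returned %d\n", demo_write_out(&list, 1));
            return 0;
    }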
Diffstat (limited to 'fs')
-rw-r--r--	fs/btrfs/free-space-cache.c	42
1 file changed, 25 insertions(+), 17 deletions(-)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 253cb74b0e27..81fa75a8e1f3 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1119,10 +1119,7 @@ static int flush_dirty_cache(struct inode *inode)
 }
 
 static void noinline_for_stack
-cleanup_write_cache_enospc(struct inode *inode,
-			   struct btrfs_io_ctl *io_ctl,
-			   struct extent_state **cached_state,
-			   struct list_head *bitmap_list)
+cleanup_bitmap_list(struct list_head *bitmap_list)
 {
 	struct list_head *pos, *n;
 
@@ -1131,6 +1128,14 @@ cleanup_write_cache_enospc(struct inode *inode,
 			list_entry(pos, struct btrfs_free_space, list);
 		list_del_init(&entry->list);
 	}
+}
+
+static void noinline_for_stack
+cleanup_write_cache_enospc(struct inode *inode,
+			   struct btrfs_io_ctl *io_ctl,
+			   struct extent_state **cached_state,
+			   struct list_head *bitmap_list)
+{
 	io_ctl_drop_pages(io_ctl);
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
 			     i_size_read(inode) - 1, cached_state,
@@ -1149,7 +1154,8 @@ int btrfs_wait_cache_io(struct btrfs_root *root,
 	if (!inode)
 		return 0;
 
-	root = root->fs_info->tree_root;
+	if (block_group)
+		root = root->fs_info->tree_root;
 
 	/* Flush the dirty pages in the cache file. */
 	ret = flush_dirty_cache(inode);
@@ -1265,11 +1271,8 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	ret = write_cache_extent_entries(io_ctl, ctl,
 					 block_group, &entries, &bitmaps,
 					 &bitmap_list);
-	spin_unlock(&ctl->tree_lock);
-	if (ret) {
-		mutex_unlock(&ctl->cache_writeout_mutex);
-		goto out_nospc;
-	}
+	if (ret)
+		goto out_nospc_locked;
 
 	/*
 	 * Some spaces that are freed in the current transaction are pinned,
@@ -1280,17 +1283,14 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	 * the dirty list and redo it. No locking needed
 	 */
 	ret = write_pinned_extent_entries(root, block_group, io_ctl, &entries);
-	if (ret) {
-		mutex_unlock(&ctl->cache_writeout_mutex);
-		goto out_nospc;
-	}
+	if (ret)
+		goto out_nospc_locked;
 
 	/*
 	 * At last, we write out all the bitmaps and keep cache_writeout_mutex
 	 * locked while doing it because a concurrent trim can be manipulating
 	 * or freeing the bitmap.
 	 */
-	spin_lock(&ctl->tree_lock);
 	ret = write_bitmap_entries(io_ctl, &bitmap_list);
 	spin_unlock(&ctl->tree_lock);
 	mutex_unlock(&ctl->cache_writeout_mutex);
@@ -1343,6 +1343,11 @@ out:
 	iput(inode);
 	return ret;
 
+out_nospc_locked:
+	cleanup_bitmap_list(&bitmap_list);
+	spin_unlock(&ctl->tree_lock);
+	mutex_unlock(&ctl->cache_writeout_mutex);
+
 out_nospc:
 	cleanup_write_cache_enospc(inode, io_ctl, &cached_state, &bitmap_list);
 
@@ -3463,9 +3468,12 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
 	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
 		return 0;
 
+	memset(&io_ctl, 0, sizeof(io_ctl));
 	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl,
-				      trans, path, 0) ||
-		btrfs_wait_cache_io(root, trans, NULL, &io_ctl, path, 0);
+				      trans, path, 0);
+	if (!ret)
+		ret = btrfs_wait_cache_io(root, trans, NULL, &io_ctl, path, 0);
+
 	if (ret) {
 		btrfs_delalloc_release_metadata(inode, inode->i_size);
 #ifdef DEBUG
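
[Editor's note] The last hunk, part of the inode cache writeout fix, zeroes io_ctl before use and splits the old boolean-OR of the write and wait calls into two sequential calls. With the OR form, any failure collapsed to the value 1 (losing the real error code) and a write failure short-circuited past the wait; the new form waits only when the write succeeded and propagates the genuine return value. A minimal sketch of that sequential error-propagation pattern follows; it is not btrfs code, and demo_write()/demo_wait()/write_then_wait() are illustrative stand-ins assuming the usual 0-or-negative-errno convention.

    #include <errno.h>
    #include <stdio.h>

    static int demo_write(int fail) { return fail ? -ENOSPC : 0; }
    static int demo_wait(void)      { return 0; }

    static int write_then_wait(int fail)
    {
            int ret;

            ret = demo_write(fail);     /* 0 on success, negative errno on error */
            if (!ret)
                    ret = demo_wait();  /* wait only if the write was started */

            return ret;                 /* the real error code is preserved */
    }

    int main(void)
    {
            /* The old "ret = write() || wait();" shape would print 1 here;
             * the sequential form reports -ENOSPC. */
            printf("%d\n", write_then_wait(1));
            return 0;
    }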