Diffstat (limited to 'fs/btrfs/free-space-cache.c')
 fs/btrfs/free-space-cache.c | 131 ++++++++++++++++++++++++++++++-----------
 1 file changed, 104 insertions(+), 27 deletions(-)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 47c2adb3ddf3..6886ae063483 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -170,13 +170,13 @@ static int __create_free_space_inode(struct btrfs_root *root,
 	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
 	key.offset = offset;
 	key.type = 0;
-
 	ret = btrfs_insert_empty_item(trans, root, path, &key,
 				      sizeof(struct btrfs_free_space_header));
 	if (ret < 0) {
 		btrfs_release_path(path);
 		return ret;
 	}
+
 	leaf = path->nodes[0];
 	header = btrfs_item_ptr(leaf, path->slots[0],
 				struct btrfs_free_space_header);
@@ -296,6 +296,7 @@ static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
 	io_ctl->num_pages = num_pages;
 	io_ctl->root = root;
 	io_ctl->check_crcs = check_crcs;
+	io_ctl->inode = inode;
 
 	return 0;
 }
@@ -303,6 +304,7 @@ static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
 static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
 {
 	kfree(io_ctl->pages);
+	io_ctl->pages = NULL;
 }
 
 static void io_ctl_unmap_page(struct btrfs_io_ctl *io_ctl)
@@ -1092,6 +1094,61 @@ cleanup_write_cache_enospc(struct inode *inode,
 			 GFP_NOFS);
 }
 
+int btrfs_wait_cache_io(struct btrfs_root *root,
+			struct btrfs_trans_handle *trans,
+			struct btrfs_block_group_cache *block_group,
+			struct btrfs_io_ctl *io_ctl,
+			struct btrfs_path *path, u64 offset)
+{
+	int ret;
+	struct inode *inode = io_ctl->inode;
+
+	root = root->fs_info->tree_root;
+
+	/* Flush the dirty pages in the cache file. */
+	ret = flush_dirty_cache(inode);
+	if (ret)
+		goto out;
+
+	/* Update the cache item to tell everyone this cache file is valid. */
+	ret = update_cache_item(trans, root, inode, path, offset,
+				io_ctl->entries, io_ctl->bitmaps);
+out:
+	io_ctl_free(io_ctl);
+	if (ret) {
+		invalidate_inode_pages2(inode->i_mapping);
+		BTRFS_I(inode)->generation = 0;
+		if (block_group) {
+#ifdef DEBUG
+			btrfs_err(root->fs_info,
+				"failed to write free space cache for block group %llu",
+				block_group->key.objectid);
+#endif
+		}
+	}
+	btrfs_update_inode(trans, root, inode);
+
+	if (block_group) {
+		spin_lock(&block_group->lock);
+
+		/*
+		 * only mark this as written if we didn't get put back on
+		 * the dirty list while waiting for IO.
+		 */
+		if (!ret && list_empty(&block_group->dirty_list))
+			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
+		else if (ret)
+			block_group->disk_cache_state = BTRFS_DC_ERROR;
+
+		spin_unlock(&block_group->lock);
+		io_ctl->inode = NULL;
+		iput(inode);
+	}
+
+	return ret;
+
+}
+
 /**
  * __btrfs_write_out_cache - write out cached info to an inode
  * @root - the root the inode belongs to
@@ -1108,20 +1165,22 @@ cleanup_write_cache_enospc(struct inode *inode,
 static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 				   struct btrfs_free_space_ctl *ctl,
 				   struct btrfs_block_group_cache *block_group,
+				   struct btrfs_io_ctl *io_ctl,
 				   struct btrfs_trans_handle *trans,
 				   struct btrfs_path *path, u64 offset)
 {
 	struct extent_state *cached_state = NULL;
-	struct btrfs_io_ctl io_ctl;
 	LIST_HEAD(bitmap_list);
 	int entries = 0;
 	int bitmaps = 0;
 	int ret;
+	int must_iput = 0;
 
 	if (!i_size_read(inode))
 		return -1;
 
-	ret = io_ctl_init(&io_ctl, inode, root, 1);
+	WARN_ON(io_ctl->pages);
+	ret = io_ctl_init(io_ctl, inode, root, 1);
 	if (ret)
 		return -1;
 
@@ -1134,22 +1193,23 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 			up_write(&block_group->data_rwsem);
 			BTRFS_I(inode)->generation = 0;
 			ret = 0;
+			must_iput = 1;
 			goto out;
 		}
 		spin_unlock(&block_group->lock);
 	}
 
 	/* Lock all pages first so we can lock the extent safely. */
-	io_ctl_prepare_pages(&io_ctl, inode, 0);
+	io_ctl_prepare_pages(io_ctl, inode, 0);
 
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
 			 0, &cached_state);
 
-	io_ctl_set_generation(&io_ctl, trans->transid);
+	io_ctl_set_generation(io_ctl, trans->transid);
 
 	mutex_lock(&ctl->cache_writeout_mutex);
 	/* Write out the extent entries in the free space cache */
-	ret = write_cache_extent_entries(&io_ctl, ctl,
+	ret = write_cache_extent_entries(io_ctl, ctl,
 					 block_group, &entries, &bitmaps,
 					 &bitmap_list);
 	if (ret) {
@@ -1162,7 +1222,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	 * they will be added into free space cache after the transaction is
 	 * committed, we shouldn't lose them.
 	 */
-	ret = write_pinned_extent_entries(root, block_group, &io_ctl, &entries);
+	ret = write_pinned_extent_entries(root, block_group, io_ctl, &entries);
 	if (ret) {
 		mutex_unlock(&ctl->cache_writeout_mutex);
 		goto out_nospc;
@@ -1173,16 +1233,16 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	 * locked while doing it because a concurrent trim can be manipulating
 	 * or freeing the bitmap.
 	 */
-	ret = write_bitmap_entries(&io_ctl, &bitmap_list);
+	ret = write_bitmap_entries(io_ctl, &bitmap_list);
 	mutex_unlock(&ctl->cache_writeout_mutex);
 	if (ret)
 		goto out_nospc;
 
 	/* Zero out the rest of the pages just to make sure */
-	io_ctl_zero_remaining_pages(&io_ctl);
+	io_ctl_zero_remaining_pages(io_ctl);
 
 	/* Everything is written out, now we dirty the pages in the file. */
-	ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
+	ret = btrfs_dirty_pages(root, inode, io_ctl->pages, io_ctl->num_pages,
 				0, i_size_read(inode), &cached_state);
 	if (ret)
 		goto out_nospc;
@@ -1193,30 +1253,39 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	 * Release the pages and unlock the extent, we will flush
 	 * them out later
 	 */
-	io_ctl_drop_pages(&io_ctl);
+	io_ctl_drop_pages(io_ctl);
 
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
 			i_size_read(inode) - 1, &cached_state, GFP_NOFS);
 
-	/* Flush the dirty pages in the cache file. */
-	ret = flush_dirty_cache(inode);
+	/*
+	 * at this point the pages are under IO and we're happy,
+	 * The caller is responsible for waiting on them and updating the
+	 * the cache and the inode
+	 */
+	io_ctl->entries = entries;
+	io_ctl->bitmaps = bitmaps;
+
+	ret = btrfs_fdatawrite_range(inode, 0, (u64)-1);
 	if (ret)
 		goto out;
 
-	/* Update the cache item to tell everyone this cache file is valid. */
-	ret = update_cache_item(trans, root, inode, path, offset,
-				entries, bitmaps);
+	return 0;
+
 out:
-	io_ctl_free(&io_ctl);
+	io_ctl->inode = NULL;
+	io_ctl_free(io_ctl);
 	if (ret) {
 		invalidate_inode_pages2(inode->i_mapping);
 		BTRFS_I(inode)->generation = 0;
 	}
 	btrfs_update_inode(trans, root, inode);
+	if (must_iput)
+		iput(inode);
 	return ret;
 
 out_nospc:
-	cleanup_write_cache_enospc(inode, &io_ctl, &cached_state, &bitmap_list);
+	cleanup_write_cache_enospc(inode, io_ctl, &cached_state, &bitmap_list);
 
 	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
 		up_write(&block_group->data_rwsem);
@@ -1232,7 +1301,6 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct inode *inode;
 	int ret = 0;
-	enum btrfs_disk_cache_state dcs = BTRFS_DC_WRITTEN;
 
 	root = root->fs_info->tree_root;
 
@@ -1253,22 +1321,28 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 	if (IS_ERR(inode))
 		return 0;
 
-	ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
+	ret = __btrfs_write_out_cache(root, inode, ctl, block_group,
+				      &block_group->io_ctl, trans,
 				      path, block_group->key.objectid);
 	if (ret) {
-		dcs = BTRFS_DC_ERROR;
-		ret = 0;
 #ifdef DEBUG
 		btrfs_err(root->fs_info,
 			"failed to write free space cache for block group %llu",
 			block_group->key.objectid);
 #endif
+		spin_lock(&block_group->lock);
+		block_group->disk_cache_state = BTRFS_DC_ERROR;
+		spin_unlock(&block_group->lock);
+
+		block_group->io_ctl.inode = NULL;
+		iput(inode);
 	}
 
-	spin_lock(&block_group->lock);
-	block_group->disk_cache_state = dcs;
-	spin_unlock(&block_group->lock);
-	iput(inode);
+	/*
+	 * if ret == 0 the caller is expected to call btrfs_wait_cache_io
+	 * to wait for IO and put the inode
+	 */
+
 	return ret;
 }
 
@@ -3331,11 +3405,14 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
 {
 	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
 	int ret;
+	struct btrfs_io_ctl io_ctl;
 
 	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
 		return 0;
 
-	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
+	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl,
+				      trans, path, 0) ||
+		btrfs_wait_cache_io(root, trans, NULL, &io_ctl, path, 0);
 	if (ret) {
 		btrfs_delalloc_release_metadata(inode, inode->i_size);
 #ifdef DEBUG
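
The hunks above split cache writeout into two stages: __btrfs_write_out_cache now only builds the cache pages and starts writeback, while the new btrfs_wait_cache_io waits for that IO and updates the cache item (and, when given a block group, marks its disk_cache_state and puts the inode). Below is a minimal caller sketch of that pattern, modeled on the btrfs_write_out_ino_cache hunk above; it is not part of the patch, it assumes the internal context of fs/btrfs/free-space-cache.c (where __btrfs_write_out_cache is static), and the wrapper name is hypothetical.

/*
 * Illustrative sketch only -- not part of the patch. Shows the two-stage
 * writeout the patch introduces: start IO, optionally do other work, then
 * wait for it and commit the cache item.
 */
static int example_write_and_wait(struct btrfs_root *root,
				  struct btrfs_trans_handle *trans,
				  struct inode *inode,
				  struct btrfs_free_space_ctl *ctl,
				  struct btrfs_path *path)
{
	struct btrfs_io_ctl io_ctl;	/* on-stack io_ctl, as in the ino-cache hunk */
	int ret;

	/* Stage 1: write the free space entries and kick off writeback. */
	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl,
				      trans, path, 0);
	if (ret)
		return ret;

	/* Other work can overlap with the cache IO here. */

	/* Stage 2: wait for the IO and update the cache item to mark the
	 * cache file valid.
	 */
	return btrfs_wait_cache_io(root, trans, NULL, &io_ctl, path, 0);
}

The block-group path works the same way, except that btrfs_write_out_cache stashes the io_ctl in block_group->io_ctl and the later btrfs_wait_cache_io call (with the block group passed in) is also responsible for the iput and for setting BTRFS_DC_WRITTEN or BTRFS_DC_ERROR.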