-rw-r--r--	fs/btrfs/free-space-cache.c	| 358
1 file changed, 204 insertions(+), 154 deletions(-)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 2ce89bfc8815..fcbdcef6ca28 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -33,9 +33,9 @@
 static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
 
-struct inode *lookup_free_space_inode(struct btrfs_root *root,
-				      struct btrfs_block_group_cache
-				      *block_group, struct btrfs_path *path)
+static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
+					       struct btrfs_path *path,
+					       u64 offset)
 {
	struct btrfs_key key;
	struct btrfs_key location;
@@ -45,15 +45,8 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
	struct inode *inode = NULL;
	int ret;
 
-	spin_lock(&block_group->lock);
-	if (block_group->inode)
-		inode = igrab(block_group->inode);
-	spin_unlock(&block_group->lock);
-	if (inode)
-		return inode;
-
	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
-	key.offset = block_group->key.objectid;
+	key.offset = offset;
	key.type = 0;
 
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
@@ -83,6 +76,27 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
 
	inode->i_mapping->flags &= ~__GFP_FS;
 
+	return inode;
+}
+
+struct inode *lookup_free_space_inode(struct btrfs_root *root,
+				      struct btrfs_block_group_cache
+				      *block_group, struct btrfs_path *path)
+{
+	struct inode *inode = NULL;
+
+	spin_lock(&block_group->lock);
+	if (block_group->inode)
+		inode = igrab(block_group->inode);
+	spin_unlock(&block_group->lock);
+	if (inode)
+		return inode;
+
+	inode = __lookup_free_space_inode(root, path,
+					  block_group->key.objectid);
+	if (IS_ERR(inode))
+		return inode;
+
	spin_lock(&block_group->lock);
	if (!root->fs_info->closing) {
		block_group->inode = igrab(inode);
@@ -93,24 +107,18 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
	return inode;
 }
 
-int create_free_space_inode(struct btrfs_root *root,
-			    struct btrfs_trans_handle *trans,
-			    struct btrfs_block_group_cache *block_group,
-			    struct btrfs_path *path)
+int __create_free_space_inode(struct btrfs_root *root,
+			      struct btrfs_trans_handle *trans,
+			      struct btrfs_path *path, u64 ino, u64 offset)
 {
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
-	u64 objectid;
	int ret;
 
-	ret = btrfs_find_free_objectid(root, &objectid);
-	if (ret < 0)
-		return ret;
-
-	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
+	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;
 
@@ -130,13 +138,12 @@ int create_free_space_inode(struct btrfs_root *root,
				  BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
-	btrfs_set_inode_block_group(leaf, inode_item,
-				    block_group->key.objectid);
+	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root, path);
 
	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
-	key.offset = block_group->key.objectid;
+	key.offset = offset;
	key.type = 0;
 
	ret = btrfs_insert_empty_item(trans, root, path, &key,
@@ -156,6 +163,22 @@ int create_free_space_inode(struct btrfs_root *root,
	return 0;
 }
 
+int create_free_space_inode(struct btrfs_root *root,
+			    struct btrfs_trans_handle *trans,
+			    struct btrfs_block_group_cache *block_group,
+			    struct btrfs_path *path)
+{
+	int ret;
+	u64 ino;
+
+	ret = btrfs_find_free_objectid(root, &ino);
+	if (ret < 0)
+		return ret;
+
+	return __create_free_space_inode(root, trans, path, ino,
+					 block_group->key.objectid);
+}
+
 int btrfs_truncate_free_space_cache(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_path *path,
@@ -208,16 +231,13 @@ static int readahead_cache(struct inode *inode)
	return 0;
 }
 
-int load_free_space_cache(struct btrfs_fs_info *fs_info,
-			  struct btrfs_block_group_cache *block_group)
+int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+			    struct btrfs_free_space_ctl *ctl,
+			    struct btrfs_path *path, u64 offset)
 {
-	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
-	struct btrfs_root *root = fs_info->tree_root;
-	struct inode *inode;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct page *page;
-	struct btrfs_path *path;
	u32 *checksums = NULL, *crc;
	char *disk_crcs = NULL;
	struct btrfs_key key;
@@ -225,76 +245,47 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
-	u64 used = btrfs_block_group_used(&block_group->item);
	u32 cur_crc = ~(u32)0;
	pgoff_t index = 0;
	unsigned long first_page_offset;
	int num_checksums;
-	int ret = 0;
-
-	/*
-	 * If we're unmounting then just return, since this does a search on the
-	 * normal root and not the commit root and we could deadlock.
-	 */
-	smp_mb();
-	if (fs_info->closing)
-		return 0;
-
-	/*
-	 * If this block group has been marked to be cleared for one reason or
-	 * another then we can't trust the on disk cache, so just return.
-	 */
-	spin_lock(&block_group->lock);
-	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
-		spin_unlock(&block_group->lock);
-		return 0;
-	}
-	spin_unlock(&block_group->lock);
+	int ret = 0, ret2;
 
	INIT_LIST_HEAD(&bitmaps);
 
-	path = btrfs_alloc_path();
-	if (!path)
-		return 0;
-
-	inode = lookup_free_space_inode(root, block_group, path);
-	if (IS_ERR(inode)) {
-		btrfs_free_path(path);
-		return 0;
-	}
-
	/* Nothing in the space cache, goodbye */
-	if (!i_size_read(inode)) {
-		btrfs_free_path(path);
+	if (!i_size_read(inode))
		goto out;
-	}
 
	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
-	key.offset = block_group->key.objectid;
+	key.offset = offset;
	key.type = 0;
 
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
-	if (ret) {
-		btrfs_free_path(path);
+	if (ret < 0)
+		goto out;
+	else if (ret > 0) {
+		btrfs_release_path(root, path);
+		ret = 0;
		goto out;
	}
 
+	ret = -1;
+
	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
-	btrfs_free_path(path);
+	btrfs_release_path(root, path);
 
	if (BTRFS_I(inode)->generation != generation) {
		printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
-		       " not match free space cache generation (%llu) for "
-		       "block group %llu\n",
+		       " not match free space cache generation (%llu)\n",
		       (unsigned long long)BTRFS_I(inode)->generation,
-		       (unsigned long long)generation,
-		       (unsigned long long)block_group->key.objectid);
-		goto free_cache;
+		       (unsigned long long)generation);
+		goto out;
	}
 
	if (!num_entries)
@@ -311,10 +302,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
		goto out;
 
	ret = readahead_cache(inode);
-	if (ret) {
-		ret = 0;
+	if (ret)
		goto out;
-	}
 
	while (1) {
		struct btrfs_free_space_entry *entry;
@@ -333,10 +322,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
		}
 
		page = grab_cache_page(inode->i_mapping, index);
-		if (!page) {
-			ret = 0;
+		if (!page)
			goto free_cache;
-		}
 
		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
@@ -345,9 +332,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
				unlock_page(page);
				page_cache_release(page);
				printk(KERN_ERR "btrfs: error reading free "
-				       "space cache: %llu\n",
-				       (unsigned long long)
-				       block_group->key.objectid);
+				       "space cache\n");
				goto free_cache;
			}
		}
@@ -360,13 +345,10 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
			gen = addr + (sizeof(u32) * num_checksums);
			if (*gen != BTRFS_I(inode)->generation) {
				printk(KERN_ERR "btrfs: space cache generation"
-				       " (%llu) does not match inode (%llu) "
-				       "for block group %llu\n",
+				       " (%llu) does not match inode (%llu)\n",
				       (unsigned long long)*gen,
				       (unsigned long long)
-				       BTRFS_I(inode)->generation,
-				       (unsigned long long)
-				       block_group->key.objectid);
+				       BTRFS_I(inode)->generation);
				kunmap(page);
				unlock_page(page);
				page_cache_release(page);
@@ -382,9 +364,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
					  PAGE_CACHE_SIZE - start_offset);
		btrfs_csum_final(cur_crc, (char *)&cur_crc);
		if (cur_crc != *crc) {
-			printk(KERN_ERR "btrfs: crc mismatch for page %lu in "
-			       "block group %llu\n", index,
-			       (unsigned long long)block_group->key.objectid);
+			printk(KERN_ERR "btrfs: crc mismatch for page %lu\n",
+			       index);
			kunmap(page);
			unlock_page(page);
			page_cache_release(page);
@@ -432,7 +413,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
				goto free_cache;
			}
			spin_lock(&ctl->tree_lock);
-			ret = link_free_space(ctl, e);
+			ret2 = link_free_space(ctl, e);
			ctl->total_bitmaps++;
			ctl->op->recalc_thresholds(ctl);
			spin_unlock(&ctl->tree_lock);
@@ -471,42 +452,96 @@ next:
		index++;
	}
 
-	spin_lock(&ctl->tree_lock);
-	if (ctl->free_space != (block_group->key.offset - used -
-				block_group->bytes_super)) {
-		spin_unlock(&ctl->tree_lock);
-		printk(KERN_ERR "block group %llu has an wrong amount of free "
-		       "space\n", block_group->key.objectid);
-		ret = 0;
-		goto free_cache;
-	}
-	spin_unlock(&ctl->tree_lock);
-
	ret = 1;
 out:
	kfree(checksums);
	kfree(disk_crcs);
-	iput(inode);
	return ret;
-
 free_cache:
-	/* This cache is bogus, make sure it gets cleared */
-	spin_lock(&block_group->lock);
-	block_group->disk_cache_state = BTRFS_DC_CLEAR;
-	spin_unlock(&block_group->lock);
-	btrfs_remove_free_space_cache(block_group);
+	__btrfs_remove_free_space_cache(ctl);
	goto out;
 }
 
-int btrfs_write_out_cache(struct btrfs_root *root,
-			  struct btrfs_trans_handle *trans,
-			  struct btrfs_block_group_cache *block_group,
-			  struct btrfs_path *path)
+int load_free_space_cache(struct btrfs_fs_info *fs_info,
+			  struct btrfs_block_group_cache *block_group)
 {
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+	struct btrfs_root *root = fs_info->tree_root;
+	struct inode *inode;
+	struct btrfs_path *path;
+	int ret;
+	bool matched;
+	u64 used = btrfs_block_group_used(&block_group->item);
+
+	/*
+	 * If we're unmounting then just return, since this does a search on the
+	 * normal root and not the commit root and we could deadlock.
+	 */
+	smp_mb();
+	if (fs_info->closing)
+		return 0;
+
+	/*
+	 * If this block group has been marked to be cleared for one reason or
+	 * another then we can't trust the on disk cache, so just return.
+	 */
+	spin_lock(&block_group->lock);
+	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
+		spin_unlock(&block_group->lock);
+		return 0;
+	}
+	spin_unlock(&block_group->lock);
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return 0;
+
+	inode = lookup_free_space_inode(root, block_group, path);
+	if (IS_ERR(inode)) {
+		btrfs_free_path(path);
+		return 0;
+	}
+
+	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
+				      path, block_group->key.objectid);
+	btrfs_free_path(path);
+	if (ret <= 0)
+		goto out;
+
+	spin_lock(&ctl->tree_lock);
+	matched = (ctl->free_space == (block_group->key.offset - used -
+				       block_group->bytes_super));
+	spin_unlock(&ctl->tree_lock);
+
+	if (!matched) {
+		__btrfs_remove_free_space_cache(ctl);
+		printk(KERN_ERR "block group %llu has an wrong amount of free "
+		       "space\n", block_group->key.objectid);
+		ret = -1;
+	}
+out:
+	if (ret < 0) {
+		/* This cache is bogus, make sure it gets cleared */
+		spin_lock(&block_group->lock);
+		block_group->disk_cache_state = BTRFS_DC_CLEAR;
+		spin_unlock(&block_group->lock);
+
+		printk(KERN_ERR "btrfs: failed to load free space cache "
+		       "for block group %llu\n", block_group->key.objectid);
+	}
+
+	iput(inode);
+	return ret;
+}
+
+int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
+			    struct btrfs_free_space_ctl *ctl,
+			    struct btrfs_block_group_cache *block_group,
+			    struct btrfs_trans_handle *trans,
+			    struct btrfs_path *path, u64 offset)
+{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
-	struct inode *inode;
	struct rb_node *node;
	struct list_head *pos, *n;
	struct page **pages;
@@ -523,35 +558,18 @@ int btrfs_write_out_cache(struct btrfs_root *root,
	int index = 0, num_pages = 0;
	int entries = 0;
	int bitmaps = 0;
-	int ret = 0;
+	int ret = -1;
	bool next_page = false;
	bool out_of_space = false;
 
-	root = root->fs_info->tree_root;
-
	INIT_LIST_HEAD(&bitmap_list);
 
-	spin_lock(&block_group->lock);
-	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
-		spin_unlock(&block_group->lock);
-		return 0;
-	}
-	spin_unlock(&block_group->lock);
-
-	inode = lookup_free_space_inode(root, block_group, path);
-	if (IS_ERR(inode))
-		return 0;
-
-	if (!i_size_read(inode)) {
-		iput(inode);
-		return 0;
-	}
-
	node = rb_first(&ctl->free_space_offset);
-	if (!node) {
-		iput(inode);
+	if (!node)
		return 0;
-	}
+
+	if (!i_size_read(inode))
+		return -1;
 
	num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
		PAGE_CACHE_SHIFT;
@@ -561,16 +579,13 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 
	/* We need a checksum per page. */
	crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
-	if (!crc) {
-		iput(inode);
-		return 0;
-	}
+	if (!crc)
+		return -1;
 
	pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
	if (!pages) {
		kfree(crc);
-		iput(inode);
-		return 0;
+		return -1;
	}
 
	/* Since the first page has all of our checksums and our generation we
@@ -580,7 +595,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
	first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
 
	/* Get the cluster for this block_group if it exists */
-	if (!list_empty(&block_group->cluster_list))
+	if (block_group && !list_empty(&block_group->cluster_list))
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
@@ -622,7 +637,8 @@ int btrfs_write_out_cache(struct btrfs_root *root,
	 * When searching for pinned extents, we need to start at our start
	 * offset.
	 */
-	start = block_group->key.objectid;
+	if (block_group)
+		start = block_group->key.objectid;
 
	/* Write out the extent entries */
	do {
@@ -680,8 +696,9 @@ int btrfs_write_out_cache(struct btrfs_root *root,
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space
	 */
-	while (!next_page && (start < block_group->key.objectid +
-	       block_group->key.offset)) {
+	while (block_group && !next_page &&
+	       (start < block_group->key.objectid +
+		block_group->key.offset)) {
		ret = find_first_extent_bit(unpin, start, &start, &end,
					    EXTENT_DIRTY);
		if (ret) {
@@ -799,12 +816,12 @@ int btrfs_write_out_cache(struct btrfs_root *root,
	filemap_write_and_wait(inode->i_mapping);
 
	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
-	key.offset = block_group->key.objectid;
+	key.offset = offset;
	key.type = 0;
 
	ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
	if (ret < 0) {
-		ret = 0;
+		ret = -1;
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
@@ -817,8 +834,8 @@ int btrfs_write_out_cache(struct btrfs_root *root,
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
-		    found_key.offset != block_group->key.objectid) {
-			ret = 0;
+		    found_key.offset != offset) {
+			ret = -1;
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
					 EXTENT_DIRTY | EXTENT_DELALLOC |
					 EXTENT_DO_ACCOUNTING, 0, 0, NULL,
@@ -838,16 +855,49 @@ int btrfs_write_out_cache(struct btrfs_root *root,
	ret = 1;
 
 out_free:
-	if (ret == 0) {
+	if (ret != 1) {
		invalidate_inode_pages2_range(inode->i_mapping, 0, index);
-		spin_lock(&block_group->lock);
-		block_group->disk_cache_state = BTRFS_DC_ERROR;
-		spin_unlock(&block_group->lock);
		BTRFS_I(inode)->generation = 0;
	}
	kfree(checksums);
	kfree(pages);
	btrfs_update_inode(trans, root, inode);
+	return ret;
+}
+
+int btrfs_write_out_cache(struct btrfs_root *root,
+			  struct btrfs_trans_handle *trans,
+			  struct btrfs_block_group_cache *block_group,
+			  struct btrfs_path *path)
+{
+	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+	struct inode *inode;
+	int ret = 0;
+
+	root = root->fs_info->tree_root;
+
+	spin_lock(&block_group->lock);
+	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
+		spin_unlock(&block_group->lock);
+		return 0;
+	}
+	spin_unlock(&block_group->lock);
+
+	inode = lookup_free_space_inode(root, block_group, path);
+	if (IS_ERR(inode))
+		return 0;
+
+	ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
+				      path, block_group->key.objectid);
+	if (ret < 0) {
+		spin_lock(&block_group->lock);
+		block_group->disk_cache_state = BTRFS_DC_ERROR;
+		spin_unlock(&block_group->lock);
+
+		printk(KERN_ERR "btrfs: failed to write free space cace "
+		       "for block group %llu\n", block_group->key.objectid);
+	}
+
	iput(inode);
	return ret;
 }
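
Note on the resulting interface: the patch splits each cache operation into a thin per-block-group wrapper and a generic __-prefixed helper that only needs a root, a cache inode, a btrfs_free_space_ctl and a key offset (block_group may even be NULL inside __btrfs_write_out_cache). A minimal sketch of a hypothetical caller of the generic loader is shown below; it is not part of this patch, the function name is illustrative, and it only uses helpers visible in the diff above.

/*
 * Hypothetical example (not in this patch): load a free space cache
 * keyed by "offset" into "ctl", given an already looked-up cache inode.
 * __load_free_space_cache() returns 1 on success, 0 when there is
 * nothing usable to load, and < 0 on error.
 */
static int example_load_cache(struct btrfs_root *root, struct inode *inode,
			      struct btrfs_free_space_ctl *ctl, u64 offset)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	ret = __load_free_space_cache(root, inode, ctl, path, offset);
	btrfs_free_path(path);
	return ret;
}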
