Diffstat (limited to 'fs/btrfs/free-space-cache.c')
-rw-r--r--	fs/btrfs/free-space-cache.c	231
1 file changed, 182 insertions, 49 deletions
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 70d45795d758..9f985a429877 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -98,7 +98,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
 		return inode;
 
 	spin_lock(&block_group->lock);
-	if (!root->fs_info->closing) {
+	if (!btrfs_fs_closing(root->fs_info)) {
 		block_group->inode = igrab(inode);
 		block_group->iref = 1;
 	}
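
Note on the new helper: btrfs_fs_closing() replaces open-coded reads of fs_info->closing throughout this file, and the load_free_space_cache/load_free_ino_cache hunks below also drop the smp_mb() that used to precede those reads, so the barrier has presumably moved into the helper. A minimal sketch of what such an accessor would look like (the real definition lives in ctree.h, not in this diff, so treat this as an assumption):

	/* sketch only; assumed to centralize the barrier the call sites dropped */
	static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
	{
		smp_mb();
		return fs_info->closing;
	}
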
@@ -250,7 +250,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 	pgoff_t index = 0;
 	unsigned long first_page_offset;
 	int num_checksums;
-	int ret = 0, ret2;
+	int ret = 0;
 
 	INIT_LIST_HEAD(&bitmaps);
 
@@ -402,7 +402,14 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 			spin_lock(&ctl->tree_lock);
 			ret = link_free_space(ctl, e);
 			spin_unlock(&ctl->tree_lock);
-			BUG_ON(ret);
+			if (ret) {
+				printk(KERN_ERR "Duplicate entries in "
+				       "free space cache, dumping\n");
+				kunmap(page);
+				unlock_page(page);
+				page_cache_release(page);
+				goto free_cache;
+			}
 		} else {
 			e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
 			if (!e->bitmap) {
@@ -414,10 +421,18 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 				goto free_cache;
 			}
 			spin_lock(&ctl->tree_lock);
-			ret2 = link_free_space(ctl, e);
+			ret = link_free_space(ctl, e);
 			ctl->total_bitmaps++;
 			ctl->op->recalc_thresholds(ctl);
 			spin_unlock(&ctl->tree_lock);
+			if (ret) {
+				printk(KERN_ERR "Duplicate entries in "
+				       "free space cache, dumping\n");
+				kunmap(page);
+				unlock_page(page);
+				page_cache_release(page);
+				goto free_cache;
+			}
 			list_add_tail(&e->list, &bitmaps);
 		}
 
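
Reviewer note: both duplicate-entry paths above have to drop their page state by hand before bailing out, since the normal loop path only does that after an entry has been consumed. The ordering is the usual pagecache pattern (assuming the page came from grab_cache_page(), which is not shown in this hunk):

	addr = kmap(page);		/* map the page to parse entries */
	/* ... on error: ... */
	kunmap(page);			/* undo the kmap first */
	unlock_page(page);		/* then drop the page lock */
	page_cache_release(page);	/* then drop the lookup reference */
	goto free_cache;		/* free_cache dumps the half-built tree */
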
@@ -478,8 +493,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
 	 * If we're unmounting then just return, since this does a search on the
 	 * normal root and not the commit root and we could deadlock.
 	 */
-	smp_mb();
-	if (fs_info->closing)
+	if (btrfs_fs_closing(fs_info))
 		return 0;
 
 	/*
@@ -575,10 +589,25 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 
 	num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
 		PAGE_CACHE_SHIFT;
+
+	/* Since the first page has all of our checksums and our generation we
+	 * need to calculate the offset into the page that we can start writing
+	 * our entries.
+	 */
+	first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
+
 	filemap_write_and_wait(inode->i_mapping);
 	btrfs_wait_ordered_range(inode, inode->i_size &
 				 ~(root->sectorsize - 1), (u64)-1);
 
+	/* make sure we don't overflow that first page */
+	if (first_page_offset + sizeof(struct btrfs_free_space_entry) >= PAGE_CACHE_SIZE) {
+		/* this is really the same as running out of space, where we also return 0 */
+		printk(KERN_CRIT "Btrfs: free space cache was too big for the crc page\n");
+		ret = 0;
+		goto out_update;
+	}
+
 	/* We need a checksum per page. */
 	crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
 	if (!crc)
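
Rough arithmetic behind the new guard, under assumed values (PAGE_CACHE_SIZE of 4096 is architecture dependent, and the 17-byte size assumes the on-disk btrfs_free_space_entry packs u64 offset, u64 bytes, u8 type):

	/* illustration only:
	 *   first_page_offset = sizeof(u32) * num_pages + sizeof(u64)
	 *                     = 4 * num_pages + 8
	 * the guard first fires when 4 * num_pages + 8 + 17 >= 4096:
	 *   num_pages = 1017:  4 * 1017 + 25 = 4093  -> still fits
	 *   num_pages = 1018:  4 * 1018 + 25 = 4097  -> guard fires
	 * i.e. a cache file of roughly 4MB, at which point we bail with
	 * ret = 0 exactly as if the cache had run out of space.
	 */
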
@@ -590,12 +619,6 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 		return -1;
 	}
 
-	/* Since the first page has all of our checksums and our generation we
-	 * need to calculate the offset into the page that we can start writing
-	 * our entries.
-	 */
-	first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
-
 	/* Get the cluster for this block_group if it exists */
 	if (block_group && !list_empty(&block_group->cluster_list))
 		cluster = list_entry(block_group->cluster_list.next,
@@ -857,12 +880,14 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	ret = 1;
 
 out_free:
+	kfree(checksums);
+	kfree(pages);
+
+out_update:
 	if (ret != 1) {
 		invalidate_inode_pages2_range(inode->i_mapping, 0, index);
 		BTRFS_I(inode)->generation = 0;
 	}
-	kfree(checksums);
-	kfree(pages);
 	btrfs_update_inode(trans, root, inode);
 	return ret;
 }
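
The label split above follows the standard kernel unwind ordering: out_update is reachable from the new early bail before checksums and pages have been allocated, so the frees must stay under out_free and fall through. In outline (names all from the function above):

	goto out_update;	/* early failure: nothing allocated yet */
	/* ... */
out_free:
	kfree(checksums);	/* only reached once both buffers exist */
	kfree(pages);
out_update:
	if (ret != 1) {		/* shared tail: invalidate pages, reset generation */
		/* ... */
	}
	btrfs_update_inode(trans, root, inode);
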
@@ -963,10 +988,16 @@ static int tree_insert_offset(struct rb_root *root, u64 offset,
 			 * logically.
 			 */
 			if (bitmap) {
-				WARN_ON(info->bitmap);
+				if (info->bitmap) {
+					WARN_ON_ONCE(1);
+					return -EEXIST;
+				}
 				p = &(*p)->rb_right;
 			} else {
-				WARN_ON(!info->bitmap);
+				if (!info->bitmap) {
+					WARN_ON_ONCE(1);
+					return -EEXIST;
+				}
 				p = &(*p)->rb_left;
 			}
 		}
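
This is the change that makes the earlier __load_free_space_cache hunks work: tree_insert_offset() now fails with -EEXIST instead of just warning, and link_free_space() presumably forwards that result, which is what the new "if (ret)" checks on load catch. A hedged sketch of the propagation path (the real link_free_space() body is not part of this diff):

	/* sketch, assuming link_free_space() simply forwards the insert result */
	static int link_free_space(struct btrfs_free_space_ctl *ctl,
				   struct btrfs_free_space *info)
	{
		int ret;

		ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
					 &info->offset_index, info->bitmap != NULL);
		if (ret)
			return ret;	/* -EEXIST reaches __load_free_space_cache */

		ctl->free_space += info->bytes;
		ctl->free_extents++;
		return 0;
	}
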
@@ -1386,6 +1417,23 @@ again:
 	return 0;
 }
 
+static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
+			       struct btrfs_free_space *info, u64 offset,
+			       u64 bytes)
+{
+	u64 bytes_to_set = 0;
+	u64 end;
+
+	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
+
+	bytes_to_set = min(end - offset, bytes);
+
+	bitmap_set_bits(ctl, info, offset, bytes_to_set);
+
+	return bytes_to_set;
+
+}
+
 static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
 		      struct btrfs_free_space *info)
 {
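
The new helper centralizes the clamping that insert_into_bitmap() used to open-code (see the -1444,19 hunk below): it sets at most the bits that fit in this bitmap and reports how much it consumed, so the caller can advance and retry in the next bitmap. A worked example under assumed values (ctl->unit == 4096 and BITS_PER_BITMAP == 32768, so one bitmap covers 128MB):

	/* illustration only */
	u64 info_offset = 0;				/* bitmap covers [0, 128MB) */
	u64 end = info_offset + (u64)32768 * 4096;	/* 128MB */
	u64 offset = end - (4ULL << 20);		/* free range starts 4MB before end */
	u64 bytes = 10ULL << 20;			/* and is 10MB long */
	u64 set = min(end - offset, bytes);		/* 4MB: clamped to this bitmap */
	/* caller then does bytes -= set, offset += set, and its 'again:' loop
	 * places the remaining 6MB in the next (possibly new) bitmap */
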
@@ -1422,12 +1470,18 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
 	return true;
 }
 
+static struct btrfs_free_space_op free_space_op = {
+	.recalc_thresholds	= recalculate_thresholds,
+	.use_bitmap		= use_bitmap,
+};
+
 static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
 			      struct btrfs_free_space *info)
 {
 	struct btrfs_free_space *bitmap_info;
+	struct btrfs_block_group_cache *block_group = NULL;
 	int added = 0;
-	u64 bytes, offset, end;
+	u64 bytes, offset, bytes_added;
 	int ret;
 
 	bytes = info->bytes;
@@ -1436,7 +1490,49 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
 	if (!ctl->op->use_bitmap(ctl, info))
 		return 0;
 
+	if (ctl->op == &free_space_op)
+		block_group = ctl->private;
 again:
+	/*
+	 * Since we link bitmaps right into the cluster we need to see if we
+	 * have a cluster here, and if so and it has our bitmap we need to add
+	 * the free space to that bitmap.
+	 */
+	if (block_group && !list_empty(&block_group->cluster_list)) {
+		struct btrfs_free_cluster *cluster;
+		struct rb_node *node;
+		struct btrfs_free_space *entry;
+
+		cluster = list_entry(block_group->cluster_list.next,
+				     struct btrfs_free_cluster,
+				     block_group_list);
+		spin_lock(&cluster->lock);
+		node = rb_first(&cluster->root);
+		if (!node) {
+			spin_unlock(&cluster->lock);
+			goto no_cluster_bitmap;
+		}
+
+		entry = rb_entry(node, struct btrfs_free_space, offset_index);
+		if (!entry->bitmap) {
+			spin_unlock(&cluster->lock);
+			goto no_cluster_bitmap;
+		}
+
+		if (entry->offset == offset_to_bitmap(ctl, offset)) {
+			bytes_added = add_bytes_to_bitmap(ctl, entry,
+							  offset, bytes);
+			bytes -= bytes_added;
+			offset += bytes_added;
+		}
+		spin_unlock(&cluster->lock);
+		if (!bytes) {
+			ret = 1;
+			goto out;
+		}
+	}
+
+no_cluster_bitmap:
 	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
 					 1, 0);
 	if (!bitmap_info) {
@@ -1444,19 +1540,10 @@ again:
 		goto new_bitmap;
 	}
 
-	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
-
-	if (offset >= bitmap_info->offset && offset + bytes > end) {
-		bitmap_set_bits(ctl, bitmap_info, offset, end - offset);
-		bytes -= end - offset;
-		offset = end;
-		added = 0;
-	} else if (offset >= bitmap_info->offset && offset + bytes <= end) {
-		bitmap_set_bits(ctl, bitmap_info, offset, bytes);
-		bytes = 0;
-	} else {
-		BUG();
-	}
+	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
+	bytes -= bytes_added;
+	offset += bytes_added;
+	added = 0;
 
 	if (!bytes) {
 		ret = 1;
@@ -1735,11 +1822,6 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
 	       "\n", count);
 }
 
-static struct btrfs_free_space_op free_space_op = {
-	.recalc_thresholds	= recalculate_thresholds,
-	.use_bitmap		= use_bitmap,
-};
-
 void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
@@ -2111,9 +2193,11 @@ again:
 /*
  * This searches the block group for just extents to fill the cluster with.
  */
-static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
-				   struct btrfs_free_cluster *cluster,
-				   u64 offset, u64 bytes, u64 min_bytes)
+static noinline int
+setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
+			struct btrfs_free_cluster *cluster,
+			struct list_head *bitmaps, u64 offset, u64 bytes,
+			u64 min_bytes)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct btrfs_free_space *first = NULL;
@@ -2135,6 +2219,8 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
 	 * extent entry.
 	 */
 	while (entry->bitmap) {
+		if (list_empty(&entry->list))
+			list_add_tail(&entry->list, bitmaps);
 		node = rb_next(&entry->offset_index);
 		if (!node)
 			return -ENOSPC;
@@ -2154,8 +2240,12 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
 			return -ENOSPC;
 		entry = rb_entry(node, struct btrfs_free_space, offset_index);
 
-		if (entry->bitmap)
+		if (entry->bitmap) {
+			if (list_empty(&entry->list))
+				list_add_tail(&entry->list, bitmaps);
 			continue;
+		}
+
 		/*
 		 * we haven't filled the empty size and the window is
 		 * very large. reset and try again
@@ -2207,9 +2297,11 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
  * This specifically looks for bitmaps that may work in the cluster, we assume
  * that we have already failed to find extents that will work.
  */
-static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
-				struct btrfs_free_cluster *cluster,
-				u64 offset, u64 bytes, u64 min_bytes)
+static noinline int
+setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
+		     struct btrfs_free_cluster *cluster,
+		     struct list_head *bitmaps, u64 offset, u64 bytes,
+		     u64 min_bytes)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct btrfs_free_space *entry;
@@ -2219,10 +2311,39 @@ static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
 	if (ctl->total_bitmaps == 0)
 		return -ENOSPC;
 
+	/*
+	 * First check our cached list of bitmaps and see if there is an entry
+	 * here that will work.
+	 */
+	list_for_each_entry(entry, bitmaps, list) {
+		if (entry->bytes < min_bytes)
+			continue;
+		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
+					   bytes, min_bytes);
+		if (!ret)
+			return 0;
+	}
+
+	/*
+	 * If we do have entries on our list and we are here then we didn't find
+	 * anything, so go ahead and get the next entry after the last entry in
+	 * this list and start the search from there.
+	 */
+	if (!list_empty(bitmaps)) {
+		entry = list_entry(bitmaps->prev, struct btrfs_free_space,
+				   list);
+		node = rb_next(&entry->offset_index);
+		if (!node)
+			return -ENOSPC;
+		entry = rb_entry(node, struct btrfs_free_space, offset_index);
+		goto search;
+	}
+
 	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
 	if (!entry)
 		return -ENOSPC;
 
+search:
 	node = &entry->offset_index;
 	do {
 		entry = rb_entry(node, struct btrfs_free_space, offset_index);
@@ -2253,6 +2374,8 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 		     u64 offset, u64 bytes, u64 empty_size)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+	struct list_head bitmaps;
+	struct btrfs_free_space *entry, *tmp;
 	u64 min_bytes;
 	int ret;
 
@@ -2291,11 +2414,16 @@
 		goto out;
 	}
 
-	ret = setup_cluster_no_bitmap(block_group, cluster, offset, bytes,
-				      min_bytes);
+	INIT_LIST_HEAD(&bitmaps);
+	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
+				      bytes, min_bytes);
 	if (ret)
-		ret = setup_cluster_bitmap(block_group, cluster, offset,
-					   bytes, min_bytes);
+		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
+					   offset, bytes, min_bytes);
+
+	/* Clear our temporary list */
+	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
+		list_del_init(&entry->list);
 
 	if (!ret) {
 		atomic_inc(&block_group->count);
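
Putting the cluster hunks together: the temporary list lets setup_cluster_no_bitmap() remember every bitmap entry it had to skip, so setup_cluster_bitmap() can try those first instead of re-walking the tree from scratch, and list_del_init() at the end leaves each entry's list head empty so the list_empty(&entry->list) checks stay valid on the next attempt. A condensed sketch of the lifecycle (bodies elided, names from the hunks above):

	LIST_HEAD(bitmaps);		/* lives on btrfs_find_space_cluster()'s stack */

	/* pass 1: extents only; bitmaps seen along the way get parked */
	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps,
				      offset, bytes, min_bytes);
	/* pass 2: fall back to bitmaps, trying the parked ones first */
	if (ret)
		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
					   offset, bytes, min_bytes);

	/* list_del_init(), not list_del(): entries must stay re-parkable */
	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
		list_del_init(&entry->list);
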
@@ -2481,7 +2609,7 @@ struct inode *lookup_free_ino_inode(struct btrfs_root *root,
 		return inode;
 
 	spin_lock(&root->cache_lock);
-	if (!root->fs_info->closing)
+	if (!btrfs_fs_closing(root->fs_info))
 		root->cache_inode = igrab(inode);
 	spin_unlock(&root->cache_lock);
 
@@ -2504,12 +2632,14 @@ int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
 	int ret = 0;
 	u64 root_gen = btrfs_root_generation(&root->root_item);
 
+	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+		return 0;
+
 	/*
 	 * If we're unmounting then just return, since this does a search on the
 	 * normal root and not the commit root and we could deadlock.
 	 */
-	smp_mb();
-	if (fs_info->closing)
+	if (btrfs_fs_closing(fs_info))
 		return 0;
 
 	path = btrfs_alloc_path();
@@ -2543,6 +2673,9 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
 	struct inode *inode;
 	int ret;
 
+	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+		return 0;
+
 	inode = lookup_free_ino_inode(root, path);
 	if (IS_ERR(inode))
 		return 0;
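
The two INODE_MAP_CACHE checks gate the free-ino cache behind a mount option. btrfs_test_opt() is the stock mount-option accessor from ctree.h; at this point in history it presumably reduces to a flag test along these lines (a sketch, not part of this diff):

	/* assumed shape of the accessor these hunks rely on */
	#define btrfs_test_opt(root, opt)	((root)->fs_info->mount_opt & \
						 BTRFS_MOUNT_##opt)
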