Diffstat (limited to 'fs/btrfs/free-space-cache.c')
-rw-r--r--  fs/btrfs/free-space-cache.c  80
1 file changed, 36 insertions, 44 deletions
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 7a15fcfb3e1f..6e5b7e463698 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -351,6 +351,11 @@ static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
 		}
 	}
 
+	for (i = 0; i < io_ctl->num_pages; i++) {
+		clear_page_dirty_for_io(io_ctl->pages[i]);
+		set_page_extent_mapped(io_ctl->pages[i]);
+	}
+
 	return 0;
 }
 
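The hunk above adds a single pass over all of the io_ctl pages right after they are acquired, clearing the dirty bit for write-out and marking each page extent mapped, so later stages can assume a uniform starting state. Below is a minimal userspace sketch of that "prepare everything in one pass" shape; the struct names and fields are simplified stand-ins, not the kernel's struct page or io_ctl API.

/*
 * Simplified stand-in types: prepare every buffered page in one pass
 * right after they are all acquired.
 */
#include <stdbool.h>
#include <stdio.h>

struct sketch_page {
	bool dirty;
	bool extent_mapped;
};

struct sketch_io_ctl {
	struct sketch_page *pages;
	int num_pages;
};

static void sketch_prepare_pages(struct sketch_io_ctl *io)
{
	for (int i = 0; i < io->num_pages; i++) {
		io->pages[i].dirty = false;        /* ~ clear_page_dirty_for_io() */
		io->pages[i].extent_mapped = true; /* ~ set_page_extent_mapped() */
	}
}

int main(void)
{
	struct sketch_page pages[4] = { { .dirty = true } };
	struct sketch_io_ctl io = { .pages = pages, .num_pages = 4 };

	sketch_prepare_pages(&io);
	printf("page 0: dirty=%d mapped=%d\n",
	       pages[0].dirty, pages[0].extent_mapped);
	return 0;
}
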
@@ -537,6 +542,13 @@ static int io_ctl_read_entry(struct io_ctl *io_ctl,
 			     struct btrfs_free_space *entry, u8 *type)
 {
 	struct btrfs_free_space_entry *e;
+	int ret;
+
+	if (!io_ctl->cur) {
+		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
+		if (ret)
+			return ret;
+	}
 
 	e = io_ctl->cur;
 	entry->offset = le64_to_cpu(e->offset);
@@ -550,10 +562,7 @@ static int io_ctl_read_entry(struct io_ctl *io_ctl,
 
 	io_ctl_unmap_page(io_ctl);
 
-	if (io_ctl->index >= io_ctl->num_pages)
-		return 0;
-
-	return io_ctl_check_crc(io_ctl, io_ctl->index);
+	return 0;
 }
 
 static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
@@ -561,9 +570,6 @@ static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
 {
 	int ret;
 
-	if (io_ctl->cur && io_ctl->cur != io_ctl->orig)
-		io_ctl_unmap_page(io_ctl);
-
 	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
 	if (ret)
 		return ret;
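The three hunks above move the checksum verification from "after the previous page is finished" to "before the first read from a page": io_ctl_read_entry now checks the crc lazily when io_ctl->cur is NULL, and io_ctl_read_bitmap no longer has to unmap a partially consumed page before checking. Below is a simplified sketch of that check-on-first-use pattern; the names (reader, load_and_verify) are illustrative, not the btrfs io_ctl API, and the crc check itself is faked.

#include <stdint.h>
#include <string.h>

struct reader {
	const uint8_t *cur;	/* NULL until the current block is mapped */
	unsigned int index;	/* which block we are on */
	const uint8_t *blocks;	/* backing buffer, one block per index */
	unsigned int block_size;
};

/* stand-in for io_ctl_check_crc() + io_ctl_map_page(); a real version
 * would recompute and compare the block's crc before exposing it */
static int load_and_verify(struct reader *r)
{
	r->cur = r->blocks + (size_t)r->index * r->block_size;
	return 0;
}

static int read_entry(struct reader *r, uint64_t *out)
{
	int ret;

	/* only verify when we step onto a block we have not mapped yet */
	if (!r->cur) {
		ret = load_and_verify(r);
		if (ret)
			return ret;
	}

	memcpy(out, r->cur, sizeof(*out));
	r->cur += sizeof(*out);
	return 0;
}
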
@@ -699,6 +705,8 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 		num_entries--;
 	}
 
+	io_ctl_unmap_page(&io_ctl);
+
 	/*
 	 * We add the bitmaps at the end of the entries in order that
 	 * the bitmap entries are added to the cache.
@@ -1841,7 +1849,13 @@ again:
 		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
 					  1, 0);
 		if (!info) {
-			WARN_ON(1);
+			/* the tree logging code might be calling us before we
+			 * have fully loaded the free space rbtree for this
+			 * block group.  So it is possible the entry won't
+			 * be in the rbtree yet at all.  The caching code
+			 * will make sure not to put it in the rbtree if
+			 * the logging code has pinned it.
+			 */
 			goto out_lock;
 		}
 	}
@@ -2448,16 +2462,23 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct btrfs_free_space *entry;
-	struct rb_node *node;
 	int ret = -ENOSPC;
+	u64 bitmap_offset = offset_to_bitmap(ctl, offset);
 
 	if (ctl->total_bitmaps == 0)
 		return -ENOSPC;
 
 	/*
-	 * First check our cached list of bitmaps and see if there is an entry
-	 * here that will work.
+	 * The bitmap that covers offset won't be in the list unless offset
+	 * is just its start offset.
 	 */
+	entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
+	if (entry->offset != bitmap_offset) {
+		entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
+		if (entry && list_empty(&entry->list))
+			list_add(&entry->list, bitmaps);
+	}
+
 	list_for_each_entry(entry, bitmaps, list) {
 		if (entry->bytes < min_bytes)
 			continue;
@@ -2468,38 +2489,10 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
 	}
 
 	/*
-	 * If we do have entries on our list and we are here then we didn't find
-	 * anything, so go ahead and get the next entry after the last entry in
-	 * this list and start the search from there.
+	 * The bitmaps list has all the bitmaps that record free space
+	 * starting after offset, so no more search is required.
 	 */
-	if (!list_empty(bitmaps)) {
-		entry = list_entry(bitmaps->prev, struct btrfs_free_space,
-				   list);
-		node = rb_next(&entry->offset_index);
-		if (!node)
-			return -ENOSPC;
-		entry = rb_entry(node, struct btrfs_free_space, offset_index);
-		goto search;
-	}
-
-	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
-	if (!entry)
-		return -ENOSPC;
-
-search:
-	node = &entry->offset_index;
-	do {
-		entry = rb_entry(node, struct btrfs_free_space, offset_index);
-		node = rb_next(&entry->offset_index);
-		if (!entry->bitmap)
-			continue;
-		if (entry->bytes < min_bytes)
-			continue;
-		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
-					   bytes, min_bytes);
-	} while (ret && node);
-
-	return ret;
+	return -ENOSPC;
 }
 
 /*
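The two hunks above rework setup_cluster_bitmap so it no longer walks the rbtree itself: per the new comments, the bitmaps list already holds every bitmap recording free space after offset, and the only candidate that can be missing is the bitmap covering offset itself, which is pulled from the tree and prepended before a single scan of the list. Below is a simplified sketch of that "ensure the covering candidate is on the list, then scan only the list" idea; the types and the lookup helper are stand-ins for the btrfs free-space structures, with the list flattened to a singly linked one.

#include <stddef.h>
#include <stdint.h>

struct bitmap_entry {
	uint64_t offset;		/* start offset the bitmap covers */
	uint64_t bytes;
	struct bitmap_entry *next;	/* candidate list */
};

/* stand-in for tree_search_offset(ctl, bitmap_offset, 1, 0); a real
 * version would search the free-space rbtree */
static struct bitmap_entry *lookup_covering_bitmap(uint64_t bitmap_offset)
{
	(void)bitmap_offset;
	return NULL;
}

static struct bitmap_entry *
pick_candidate(struct bitmap_entry *list, uint64_t bitmap_offset,
	       uint64_t min_bytes)
{
	/* the covering bitmap is only on the list if offset is its start */
	if (!list || list->offset != bitmap_offset) {
		struct bitmap_entry *cover = lookup_covering_bitmap(bitmap_offset);
		if (cover) {
			cover->next = list;
			list = cover;
		}
	}

	for (struct bitmap_entry *e = list; e; e = e->next) {
		if (e->bytes >= min_bytes)
			return e;	/* first candidate big enough */
	}
	return NULL;			/* mirrors the -ENOSPC fallthrough */
}
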
@@ -2517,8 +2510,8 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 			     u64 offset, u64 bytes, u64 empty_size)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
-	struct list_head bitmaps;
 	struct btrfs_free_space *entry, *tmp;
+	LIST_HEAD(bitmaps);
 	u64 min_bytes;
 	int ret;
 
@@ -2557,7 +2550,6 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 		goto out;
 	}
 
-	INIT_LIST_HEAD(&bitmaps);
 	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
 				      bytes, min_bytes);
 	if (ret)
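The last two hunks replace a separately declared struct list_head plus a runtime INIT_LIST_HEAD() call with the LIST_HEAD() macro, which declares and initializes the list head in one step. The userspace sketch below shows the equivalence; the macros are trimmed copies of the <linux/list.h> definitions for illustration only, since in-kernel code simply includes that header.

#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name)

static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

int main(void)
{
	/* old style: declare, then initialize at runtime */
	struct list_head bitmaps_old;
	INIT_LIST_HEAD(&bitmaps_old);

	/* new style: one line, initialized from the start */
	LIST_HEAD(bitmaps_new);

	printf("old empty: %d, new empty: %d\n",
	       bitmaps_old.next == &bitmaps_old,
	       bitmaps_new.next == &bitmaps_new);
	return 0;
}
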