diff options
-rw-r--r-- | fs/btrfs/free-space-cache.c | 41 |
1 files changed, 41 insertions, 0 deletions
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index cecf8df62481..19a0d85b451c 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c | |||
@@ -33,6 +33,8 @@ | |||
33 | 33 | ||
34 | static int link_free_space(struct btrfs_free_space_ctl *ctl, | 34 | static int link_free_space(struct btrfs_free_space_ctl *ctl, |
35 | struct btrfs_free_space *info); | 35 | struct btrfs_free_space *info); |
36 | static void unlink_free_space(struct btrfs_free_space_ctl *ctl, | ||
37 | struct btrfs_free_space *info); | ||
36 | 38 | ||
37 | static struct inode *__lookup_free_space_inode(struct btrfs_root *root, | 39 | static struct inode *__lookup_free_space_inode(struct btrfs_root *root, |
38 | struct btrfs_path *path, | 40 | struct btrfs_path *path, |
@@ -584,6 +586,44 @@ static int io_ctl_read_bitmap(struct io_ctl *io_ctl, | |||
584 | return 0; | 586 | return 0; |
585 | } | 587 | } |
586 | 588 | ||
/*
 * Since we attach pinned extents after the fact we can have contiguous sections
 * of free space that are split up in entries.  This poses a problem with the
 * tree logging stuff since it could have allocated across what appears to be 2
 * entries since we would have merged the entries when adding the pinned extents
 * back to the free space cache.  So run through the space cache that we just
 * loaded and merge contiguous entries.  This will make the log replay stuff not
 * blow up and it will make for nicer allocator behavior.
 */
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *e, *prev = NULL;
	struct rb_node *n;

again:
	spin_lock(&ctl->tree_lock);
	/*
	 * Walk entries in offset order; "prev" tracks the previously seen
	 * extent entry so adjacent pairs can be compared.
	 */
	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		e = rb_entry(n, struct btrfs_free_space, offset_index);
		if (!prev)
			goto next;
		/* Only plain extent entries are merged, never bitmap entries. */
		if (e->bitmap || prev->bitmap)
			goto next;
		if (prev->offset + prev->bytes == e->offset) {
			/*
			 * Contiguous: fold "e" into "prev".  Unlinking and
			 * re-linking mutates the rbtree and frees the node
			 * "n" points at, so the iteration is no longer valid;
			 * drop the lock and restart the scan from the top
			 * instead of continuing from "n".
			 */
			unlink_free_space(ctl, prev);
			unlink_free_space(ctl, e);
			prev->bytes += e->bytes;
			kmem_cache_free(btrfs_free_space_cachep, e);
			link_free_space(ctl, prev);
			/* Reset so the restarted pass re-seeds "prev". */
			prev = NULL;
			spin_unlock(&ctl->tree_lock);
			goto again;
		}
next:
		prev = e;
	}
	spin_unlock(&ctl->tree_lock);
}
626 | |||
587 | int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, | 627 | int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, |
588 | struct btrfs_free_space_ctl *ctl, | 628 | struct btrfs_free_space_ctl *ctl, |
589 | struct btrfs_path *path, u64 offset) | 629 | struct btrfs_path *path, u64 offset) |
@@ -726,6 +766,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, | |||
726 | } | 766 | } |
727 | 767 | ||
728 | io_ctl_drop_pages(&io_ctl); | 768 | io_ctl_drop_pages(&io_ctl); |
769 | merge_space_tree(ctl); | ||
729 | ret = 1; | 770 | ret = 1; |
730 | out: | 771 | out: |
731 | io_ctl_free(&io_ctl); | 772 | io_ctl_free(&io_ctl); |