author		Josef Bacik <josef@redhat.com>		2012-05-14 10:06:40 -0400
committer	Josef Bacik <josef@redhat.com>		2012-05-30 10:23:36 -0400
commit		cd023e7b17fe86c530475da210b3348421c40e5f
tree		b80cf40b7ad18af59ef43602b673fe1c7d371232 /fs/btrfs
parent		9ba1f6e44ed7a1fa52d3f292508bf921b5054172
Btrfs: merge contiguous regions when loading free space cache
When we write out the free space cache we will write out everything that is
in our in-memory tree, and then we will just walk the pinned extents tree
and write anything we see there. The problem with this is that during
normal operation the pinned extents will be merged back into the free space
tree, and then we can allocate space from the merged areas and
commit them to the tree log. If we crash and replay the tree log we will
crash again because the tree log will try to free up space from what looks
like 2 separate but contiguous entries, since one entry is from the original
free space cache and the other was a pinned extent that was merged back. To
fix this we just need to walk the free space tree after we load it and merge
contiguous entries back together. This will keep the tree log stuff from
breaking and it will make the allocator behave more nicely. Thanks,
Signed-off-by: Josef Bacik <josef@redhat.com>
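
To make the failure and the fix concrete, below is a minimal userspace sketch
of the merge pass this patch adds. It is an illustration only, not the kernel
code: it uses a plain sorted array instead of btrfs's rb-tree of struct
btrfs_free_space entries, it ignores bitmaps and locking, and the names
(struct span, merge_contiguous) are invented for the example.

/*
 * Simplified model of merging contiguous free space entries.
 * An entry covers [offset, offset + bytes); two entries are
 * contiguous when the first ends exactly where the second starts.
 */
#include <stdio.h>
#include <stddef.h>

struct span {
	unsigned long long offset;	/* start of the free region */
	unsigned long long bytes;	/* length of the free region */
};

/* Merge touching neighbors in a sorted array; returns the new count. */
static size_t merge_contiguous(struct span *s, size_t n)
{
	size_t out = 0;

	for (size_t i = 1; i < n; i++) {
		if (s[out].offset + s[out].bytes == s[i].offset)
			s[out].bytes += s[i].bytes;	/* extend previous */
		else
			s[++out] = s[i];		/* gap: keep separate */
	}
	return n ? out + 1 : 0;
}

int main(void)
{
	struct span cache[] = {
		{ 0,     4096 },	/* entry from the on-disk cache      */
		{ 4096,  8192 },	/* pinned extent attached afterwards */
		{ 65536, 4096 },	/* unrelated region, not contiguous  */
	};
	size_t n = merge_contiguous(cache, 3);

	for (size_t i = 0; i < n; i++)
		printf("entry: offset=%llu bytes=%llu\n",
		       cache[i].offset, cache[i].bytes);
	return 0;
}

Built with a C99 compiler this prints one 12288-byte entry at offset 0 plus
the untouched entry at 65536. Without such a pass, the first region stays
split into two entries, which is exactly the shape that makes log replay trip
when a logged allocation spans the boundary.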
Diffstat (limited to 'fs/btrfs')
-rw-r--r--	fs/btrfs/free-space-cache.c	41
1 file changed, 41 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index cecf8df62481..19a0d85b451c 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -33,6 +33,8 @@
 
 static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
+static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
+			      struct btrfs_free_space *info);
 
 static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
@@ -584,6 +586,44 @@ static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
	return 0;
 }
 
+/*
+ * Since we attach pinned extents after the fact we can have contiguous sections
+ * of free space that are split up in entries.  This poses a problem with the
+ * tree logging stuff since it could have allocated across what appears to be 2
+ * entries since we would have merged the entries when adding the pinned extents
+ * back to the free space cache.  So run through the space cache that we just
+ * loaded and merge contiguous entries.  This will make the log replay stuff not
+ * blow up and it will make for nicer allocator behavior.
+ */
+static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
+{
+	struct btrfs_free_space *e, *prev = NULL;
+	struct rb_node *n;
+
+again:
+	spin_lock(&ctl->tree_lock);
+	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
+		e = rb_entry(n, struct btrfs_free_space, offset_index);
+		if (!prev)
+			goto next;
+		if (e->bitmap || prev->bitmap)
+			goto next;
+		if (prev->offset + prev->bytes == e->offset) {
+			unlink_free_space(ctl, prev);
+			unlink_free_space(ctl, e);
+			prev->bytes += e->bytes;
+			kmem_cache_free(btrfs_free_space_cachep, e);
+			link_free_space(ctl, prev);
+			prev = NULL;
+			spin_unlock(&ctl->tree_lock);
+			goto again;
+		}
+next:
+		prev = e;
+	}
+	spin_unlock(&ctl->tree_lock);
+}
+
 int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
			    struct btrfs_free_space_ctl *ctl,
			    struct btrfs_path *path, u64 offset)
@@ -726,6 +766,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
	}
 
	io_ctl_drop_pages(&io_ctl);
+	merge_space_tree(ctl);
	ret = 1;
 out:
	io_ctl_free(&io_ctl);
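
One implementation note on merge_space_tree() above: after a successful merge
it drops ctl->tree_lock and jumps back to the again label rather than
continuing with rb_next(). That is the conservative choice here, since
unlink_free_space() and link_free_space() reshape the rb-tree and e has just
been freed, so the node the loop was iterating over is no longer valid;
restarting the walk from rb_first() trades a re-scan for safety, which is
cheap for a pass that runs once per cache load.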