Diffstat (limited to 'fs/btrfs')
-rw-r--r--   fs/btrfs/ctree.c        |   3
-rw-r--r--   fs/btrfs/disk-io.c      |  25
-rw-r--r--   fs/btrfs/extent-tree.c  |   1
-rw-r--r--   fs/btrfs/extent_map.c   | 183
-rw-r--r--   fs/btrfs/extent_map.h   |  16
-rw-r--r--   fs/btrfs/transaction.c  |   5
6 files changed, 128 insertions, 105 deletions
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 08ddf1873ab1..bef61ee883e1 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -88,8 +88,6 @@ static int __btrfs_cow_block(struct btrfs_trans_handle *trans,
         if (IS_ERR(cow))
                 return PTR_ERR(cow);
 
-        cow->alloc_addr = (unsigned long)__builtin_return_address(0);
-
         copy_extent_buffer(cow, buf, 0, 0, cow->len);
         btrfs_set_header_bytenr(cow, cow->start);
         btrfs_set_header_generation(cow, trans->transid);
@@ -151,7 +149,6 @@ int btrfs_cow_block(struct btrfs_trans_handle *trans,
         search_start = buf->start & ~((u64)BTRFS_BLOCK_GROUP_SIZE - 1);
         ret = __btrfs_cow_block(trans, root, buf, parent,
                                 parent_slot, cow_ret, search_start, 0);
-        (*cow_ret)->alloc_addr = (unsigned long)__builtin_return_address(0);
         return ret;
 }
 
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8e606e6658aa..fd7e6c182b9d 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -50,8 +50,6 @@ struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
         struct extent_buffer *eb;
         eb = find_extent_buffer(&BTRFS_I(btree_inode)->extent_tree,
                                 bytenr, blocksize, GFP_NOFS);
-        if (eb)
-                eb->alloc_addr = (unsigned long)__builtin_return_address(0);
         return eb;
 }
 
@@ -63,7 +61,6 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
 
         eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->extent_tree,
                                  bytenr, blocksize, GFP_NOFS);
-        eb->alloc_addr = (unsigned long)__builtin_return_address(0);
         return eb;
 }
 
@@ -234,7 +231,6 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
                 return NULL;
         read_extent_buffer_pages(&BTRFS_I(btree_inode)->extent_tree,
                                  buf, 1);
-        buf->alloc_addr = (unsigned long)__builtin_return_address(0);
         return buf;
 }
 
@@ -638,6 +634,7 @@ int close_ctree(struct btrfs_root *root)
 
         btrfs_free_block_groups(root->fs_info);
         del_fs_roots(fs_info);
+        extent_map_tree_cleanup(&BTRFS_I(fs_info->btree_inode)->extent_tree);
         truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);
         iput(fs_info->btree_inode);
         kfree(fs_info->extent_root);
@@ -647,20 +644,20 @@ int close_ctree(struct btrfs_root *root)
 
 int btrfs_buffer_uptodate(struct extent_buffer *buf)
 {
-        struct inode *btree_inode = buf->pages[0]->mapping->host;
+        struct inode *btree_inode = buf->last_page->mapping->host;
         return extent_buffer_uptodate(&BTRFS_I(btree_inode)->extent_tree, buf);
 }
 
 int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
 {
-        struct inode *btree_inode = buf->pages[0]->mapping->host;
+        struct inode *btree_inode = buf->last_page->mapping->host;
         return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->extent_tree,
                                           buf);
 }
 
 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
 {
-        struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
+        struct btrfs_root *root = BTRFS_I(buf->last_page->mapping->host)->root;
         u64 transid = btrfs_header_generation(buf);
         struct inode *btree_inode = root->fs_info->btree_inode;
 
@@ -681,7 +678,7 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
 
 void btrfs_set_buffer_defrag(struct extent_buffer *buf)
 {
-        struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
+        struct btrfs_root *root = BTRFS_I(buf->last_page->mapping->host)->root;
         struct inode *btree_inode = root->fs_info->btree_inode;
         set_extent_bits(&BTRFS_I(btree_inode)->extent_tree, buf->start,
                         buf->start + buf->len - 1, EXTENT_DEFRAG, GFP_NOFS);
@@ -689,7 +686,7 @@ void btrfs_set_buffer_defrag(struct extent_buffer *buf)
 
 void btrfs_set_buffer_defrag_done(struct extent_buffer *buf)
 {
-        struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
+        struct btrfs_root *root = BTRFS_I(buf->last_page->mapping->host)->root;
         struct inode *btree_inode = root->fs_info->btree_inode;
         set_extent_bits(&BTRFS_I(btree_inode)->extent_tree, buf->start,
                         buf->start + buf->len - 1, EXTENT_DEFRAG_DONE,
@@ -698,7 +695,7 @@ void btrfs_set_buffer_defrag_done(struct extent_buffer *buf)
 
 int btrfs_buffer_defrag(struct extent_buffer *buf)
 {
-        struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
+        struct btrfs_root *root = BTRFS_I(buf->last_page->mapping->host)->root;
         struct inode *btree_inode = root->fs_info->btree_inode;
         return test_range_bit(&BTRFS_I(btree_inode)->extent_tree,
                      buf->start, buf->start + buf->len - 1, EXTENT_DEFRAG, 0);
@@ -706,7 +703,7 @@ int btrfs_buffer_defrag(struct extent_buffer *buf)
 
 int btrfs_buffer_defrag_done(struct extent_buffer *buf)
 {
-        struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
+        struct btrfs_root *root = BTRFS_I(buf->last_page->mapping->host)->root;
         struct inode *btree_inode = root->fs_info->btree_inode;
         return test_range_bit(&BTRFS_I(btree_inode)->extent_tree,
                      buf->start, buf->start + buf->len - 1,
@@ -715,7 +712,7 @@ int btrfs_buffer_defrag_done(struct extent_buffer *buf)
 
 int btrfs_clear_buffer_defrag_done(struct extent_buffer *buf)
 {
-        struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
+        struct btrfs_root *root = BTRFS_I(buf->last_page->mapping->host)->root;
         struct inode *btree_inode = root->fs_info->btree_inode;
         return clear_extent_bits(&BTRFS_I(btree_inode)->extent_tree,
                      buf->start, buf->start + buf->len - 1,
@@ -724,7 +721,7 @@ int btrfs_clear_buffer_defrag_done(struct extent_buffer *buf)
 
 int btrfs_clear_buffer_defrag(struct extent_buffer *buf)
 {
-        struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
+        struct btrfs_root *root = BTRFS_I(buf->last_page->mapping->host)->root;
         struct inode *btree_inode = root->fs_info->btree_inode;
         return clear_extent_bits(&BTRFS_I(btree_inode)->extent_tree,
                      buf->start, buf->start + buf->len - 1,
@@ -733,7 +730,7 @@ int btrfs_clear_buffer_defrag(struct extent_buffer *buf)
 
 int btrfs_read_buffer(struct extent_buffer *buf)
 {
-        struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
+        struct btrfs_root *root = BTRFS_I(buf->last_page->mapping->host)->root;
         struct inode *btree_inode = root->fs_info->btree_inode;
         return read_extent_buffer_pages(&BTRFS_I(btree_inode)->extent_tree,
                                         buf, 1);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 72e6b274a450..525fa845d613 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1176,7 +1176,6 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                 return ERR_PTR(-ENOMEM);
         }
         btrfs_set_buffer_uptodate(buf);
-        buf->alloc_addr = (unsigned long)__builtin_return_address(0);
         set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
                          buf->start + buf->len - 1, GFP_NOFS);
         btrfs_set_buffer_defrag(buf);
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index e241699024da..85b28a6a4e05 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/blkdev.h>
+#include <linux/swap.h>
 #include "extent_map.h"
 
 /* temporary define until extent_map moves out of btrfs */
@@ -20,14 +21,11 @@ static struct kmem_cache *extent_map_cache;
 static struct kmem_cache *extent_state_cache;
 static struct kmem_cache *extent_buffer_cache;
 
-static LIST_HEAD(extent_buffers);
 static LIST_HEAD(buffers);
 static LIST_HEAD(states);
 
-static spinlock_t extent_buffers_lock;
 static spinlock_t state_lock = SPIN_LOCK_UNLOCKED;
-static int nr_extent_buffers;
-#define MAX_EXTENT_BUFFER_CACHE 128
+#define BUFFER_LRU_MAX 64
 
 struct tree_entry {
         u64 start;
@@ -47,20 +45,12 @@ void __init extent_map_init(void)
         extent_buffer_cache = btrfs_cache_create("extent_buffers",
                                             sizeof(struct extent_buffer), 0,
                                             NULL);
-        spin_lock_init(&extent_buffers_lock);
 }
 
 void __exit extent_map_exit(void)
 {
-        struct extent_buffer *eb;
         struct extent_state *state;
 
-        while (!list_empty(&extent_buffers)) {
-                eb = list_entry(extent_buffers.next,
-                                struct extent_buffer, list);
-                list_del(&eb->list);
-                kmem_cache_free(extent_buffer_cache, eb);
-        }
         while (!list_empty(&states)) {
                 state = list_entry(states.next, struct extent_state, list);
                 printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n", state->start, state->end, state->state, state->in_tree, atomic_read(&state->refs));
@@ -68,14 +58,6 @@ void __exit extent_map_exit(void)
                 kmem_cache_free(extent_state_cache, state);
 
         }
-        while (!list_empty(&buffers)) {
-                eb = list_entry(buffers.next,
-                                struct extent_buffer, leak_list);
-                printk("buffer leak start %Lu len %lu return %lX\n", eb->start, eb->len, eb->alloc_addr);
-                list_del(&eb->leak_list);
-                kmem_cache_free(extent_buffer_cache, eb);
-        }
-
 
         if (extent_map_cache)
                 kmem_cache_destroy(extent_map_cache);
@@ -92,10 +74,25 @@ void extent_map_tree_init(struct extent_map_tree *tree,
         tree->state.rb_node = NULL;
         tree->ops = NULL;
         rwlock_init(&tree->lock);
+        spin_lock_init(&tree->lru_lock);
         tree->mapping = mapping;
+        INIT_LIST_HEAD(&tree->buffer_lru);
+        tree->lru_size = 0;
 }
 EXPORT_SYMBOL(extent_map_tree_init);
 
+void extent_map_tree_cleanup(struct extent_map_tree *tree)
+{
+        struct extent_buffer *eb;
+        while(!list_empty(&tree->buffer_lru)) {
+                eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
+                                lru);
+                list_del(&eb->lru);
+                free_extent_buffer(eb);
+        }
+}
+EXPORT_SYMBOL(extent_map_tree_cleanup);
+
 struct extent_map *alloc_extent_map(gfp_t mask)
 {
         struct extent_map *em;
@@ -1915,66 +1912,99 @@ sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
         return (em->block_start + start - em->start) >> inode->i_blkbits;
 }
 
-static struct extent_buffer *__alloc_extent_buffer(gfp_t mask)
+static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
 {
-        struct extent_buffer *eb = NULL;
-
-        spin_lock(&extent_buffers_lock);
-        if (!list_empty(&extent_buffers)) {
-                eb = list_entry(extent_buffers.next, struct extent_buffer,
-                                list);
-                list_del(&eb->list);
-                WARN_ON(nr_extent_buffers == 0);
-                nr_extent_buffers--;
-        }
-        spin_unlock(&extent_buffers_lock);
+        if (list_empty(&eb->lru)) {
+                extent_buffer_get(eb);
+                list_add(&eb->lru, &tree->buffer_lru);
+                tree->lru_size++;
+                if (tree->lru_size >= BUFFER_LRU_MAX) {
+                        struct extent_buffer *rm;
+                        rm = list_entry(tree->buffer_lru.prev,
+                                        struct extent_buffer, lru);
+                        tree->lru_size--;
+                        list_del(&rm->lru);
+                        free_extent_buffer(rm);
+                }
+        } else
+                list_move(&eb->lru, &tree->buffer_lru);
+        return 0;
+}
+static struct extent_buffer *find_lru(struct extent_map_tree *tree,
+                                      u64 start, unsigned long len)
+{
+        struct list_head *lru = &tree->buffer_lru;
+        struct list_head *cur = lru->next;
+        struct extent_buffer *eb;
 
-        if (eb) {
-                memset(eb, 0, sizeof(*eb));
-        } else {
-                eb = kmem_cache_zalloc(extent_buffer_cache, mask);
-        }
-        spin_lock(&extent_buffers_lock);
-        list_add(&eb->leak_list, &buffers);
-        spin_unlock(&extent_buffers_lock);
+        if (list_empty(lru))
+                return NULL;
 
-        return eb;
+        do {
+                eb = list_entry(cur, struct extent_buffer, lru);
+                if (eb->start == start && eb->len == len) {
+                        extent_buffer_get(eb);
+                        return eb;
+                }
+                cur = cur->next;
+        } while (cur != lru);
+        return NULL;
 }
 
-static void __free_extent_buffer(struct extent_buffer *eb)
+static inline unsigned long num_extent_pages(u64 start, u64 len)
 {
-
-        spin_lock(&extent_buffers_lock);
-        list_del_init(&eb->leak_list);
-        spin_unlock(&extent_buffers_lock);
-
-        if (nr_extent_buffers >= MAX_EXTENT_BUFFER_CACHE) {
-                kmem_cache_free(extent_buffer_cache, eb);
-        } else {
-                spin_lock(&extent_buffers_lock);
-                list_add(&eb->list, &extent_buffers);
-                nr_extent_buffers++;
-                spin_unlock(&extent_buffers_lock);
-        }
+        return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
+                (start >> PAGE_CACHE_SHIFT);
 }
 
-static inline struct page *extent_buffer_page(struct extent_buffer *eb, int i)
+static inline struct page *extent_buffer_page(struct extent_buffer *eb,
+                                              unsigned long i)
 {
         struct page *p;
 
-        if (i < EXTENT_INLINE_PAGES)
-                return eb->pages[i];
+        if (i == 0)
+                return eb->last_page;
         i += eb->start >> PAGE_CACHE_SHIFT;
-        p = find_get_page(eb->pages[0]->mapping, i);
+        p = find_get_page(eb->last_page->mapping, i);
         page_cache_release(p);
         return p;
 }
 
-static inline unsigned long num_extent_pages(u64 start, u64 len)
+static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
+                                                   u64 start,
+                                                   unsigned long len,
+                                                   gfp_t mask)
 {
-        return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
-                (start >> PAGE_CACHE_SHIFT);
+        struct extent_buffer *eb = NULL;
+
+        spin_lock(&tree->lru_lock);
+        eb = find_lru(tree, start, len);
+        if (eb)
+                goto lru_add;
+        spin_unlock(&tree->lru_lock);
+
+        if (eb) {
+                memset(eb, 0, sizeof(*eb));
+        } else {
+                eb = kmem_cache_zalloc(extent_buffer_cache, mask);
+        }
+        INIT_LIST_HEAD(&eb->lru);
+        eb->start = start;
+        eb->len = len;
+        atomic_set(&eb->refs, 1);
+
+        spin_lock(&tree->lru_lock);
+lru_add:
+        add_lru(tree, eb);
+        spin_unlock(&tree->lru_lock);
+        return eb;
+}
+
+static void __free_extent_buffer(struct extent_buffer *eb)
+{
+        kmem_cache_free(extent_buffer_cache, eb);
 }
+
 struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
                                           u64 start, unsigned long len,
                                           gfp_t mask)
@@ -1987,14 +2017,12 @@ struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
         struct address_space *mapping = tree->mapping;
         int uptodate = 0;
 
-        eb = __alloc_extent_buffer(mask);
+        eb = __alloc_extent_buffer(tree, start, len, mask);
         if (!eb || IS_ERR(eb))
                 return NULL;
 
-        eb->alloc_addr = (unsigned long)__builtin_return_address(0);
-        eb->start = start;
-        eb->len = len;
-        atomic_set(&eb->refs, 1);
+        if (eb->flags & EXTENT_BUFFER_FILLED)
+                return eb;
 
         for (i = 0; i < num_pages; i++, index++) {
                 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
@@ -2008,14 +2036,15 @@ struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
                         goto fail;
                 }
                 set_page_extent_mapped(p);
-                if (i < EXTENT_INLINE_PAGES)
-                        eb->pages[i] = p;
+                if (i == 0)
+                        eb->last_page = p;
                 if (!PageUptodate(p))
                         uptodate = 0;
                 unlock_page(p);
         }
         if (uptodate)
                 eb->flags |= EXTENT_UPTODATE;
+        eb->flags |= EXTENT_BUFFER_FILLED;
         return eb;
 fail:
         free_extent_buffer(eb);
@@ -2035,14 +2064,12 @@ struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
         struct address_space *mapping = tree->mapping;
         int uptodate = 1;
 
-        eb = __alloc_extent_buffer(mask);
+        eb = __alloc_extent_buffer(tree, start, len, mask);
         if (!eb || IS_ERR(eb))
                 return NULL;
 
-        eb->alloc_addr = (unsigned long)__builtin_return_address(0);
-        eb->start = start;
-        eb->len = len;
-        atomic_set(&eb->refs, 1);
+        if (eb->flags & EXTENT_BUFFER_FILLED)
+                return eb;
 
         for (i = 0; i < num_pages; i++, index++) {
                 p = find_lock_page(mapping, index);
@@ -2055,14 +2082,15 @@ struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
                         goto fail;
                 }
                 set_page_extent_mapped(p);
-                if (i < EXTENT_INLINE_PAGES)
-                        eb->pages[i] = p;
+                if (i == 0)
+                        eb->last_page = p;
                 if (!PageUptodate(p))
                         uptodate = 0;
                 unlock_page(p);
         }
         if (uptodate)
                 eb->flags |= EXTENT_UPTODATE;
+        eb->flags |= EXTENT_BUFFER_FILLED;
         return eb;
 fail:
         free_extent_buffer(eb);
@@ -2231,7 +2259,8 @@ int read_extent_buffer_pages(struct extent_map_tree *tree,
                         ret = -EIO;
                 }
         }
-        eb->flags |= EXTENT_UPTODATE;
+        if (!ret)
+                eb->flags |= EXTENT_UPTODATE;
         return ret;
 }
 EXPORT_SYMBOL(read_extent_buffer_pages);
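
The extent_map.c changes above replace the old global free-buffer pool (extent_buffers, nr_extent_buffers, MAX_EXTENT_BUFFER_CACHE) with a small per-tree LRU: __alloc_extent_buffer() first asks find_lru() for a cached buffer with the same (start, len); add_lru() takes a reference on every buffer it caches and drops the least recently used one once BUFFER_LRU_MAX (64) entries are held; EXTENT_BUFFER_FILLED marks buffers whose pages are already attached so a repeat alloc_extent_buffer()/find_extent_buffer() call can return early; and close_ctree() now calls extent_map_tree_cleanup() to drop the cached references at unmount. The standalone program below only illustrates that replacement policy; its array-based cache and all of its names are invented for the example and are not btrfs code, which keeps the buffers on a list_head under tree->lru_lock.

/*
 * Illustration of the LRU policy introduced above: keep at most
 * BUFFER_LRU_MAX recently used keys, move a hit to the front, and
 * evict the least recently used entry on a miss when full.
 */
#include <stdio.h>
#include <string.h>

#define BUFFER_LRU_MAX 4        /* the patch uses 64 per tree */

struct lru_cache {
        unsigned long long key[BUFFER_LRU_MAX]; /* eb->start stands in as the key */
        int size;
};

/* Return 1 on a hit; either way "key" becomes the most recently used entry. */
static int lru_touch(struct lru_cache *c, unsigned long long key)
{
        int i, hit = 0;

        for (i = 0; i < c->size; i++) {
                if (c->key[i] == key) {
                        hit = 1;
                        break;
                }
        }
        if (!hit) {
                /* miss: if the cache is full, the last slot (LRU) is evicted */
                if (c->size == BUFFER_LRU_MAX)
                        c->size--;
                i = c->size++;
        }
        /* shift the entries above slot i down and put key at the front (MRU) */
        memmove(&c->key[1], &c->key[0], i * sizeof(c->key[0]));
        c->key[0] = key;
        return hit;
}

int main(void)
{
        struct lru_cache c = { .size = 0 };
        unsigned long long blocks[] = { 4096, 8192, 4096, 12288, 16384, 20480, 8192 };
        size_t i;

        for (i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++)
                printf("block %llu: %s\n", blocks[i],
                       lru_touch(&c, blocks[i]) ? "hit" : "miss");
        return 0;
}
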
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 8409b5cbeda7..52a8b9394fc6 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -16,6 +16,7 @@
 #define EXTENT_DELALLOC (1 << 5)
 #define EXTENT_DEFRAG (1 << 6)
 #define EXTENT_DEFRAG_DONE (1 << 7)
+#define EXTENT_BUFFER_FILLED (1 << 8)
 #define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
 
 
@@ -33,6 +34,9 @@ struct extent_map_tree {
         struct address_space *mapping;
         rwlock_t lock;
         struct extent_map_ops *ops;
+        spinlock_t lru_lock;
+        struct list_head buffer_lru;
+        int lru_size;
 };
 
 /* note, this must start with the same fields as fs/extent_map.c:tree_entry */
@@ -64,20 +68,17 @@ struct extent_state {
         struct list_head list;
 };
 
-#define EXTENT_INLINE_PAGES 32
 struct extent_buffer {
         u64 start;
         unsigned long len;
-        atomic_t refs;
-        int flags;
-        struct list_head list;
-        struct list_head leak_list;
-        unsigned long alloc_addr;
         char *map_token;
         char *kaddr;
         unsigned long map_start;
         unsigned long map_len;
-        struct page *pages[EXTENT_INLINE_PAGES];
+        struct page *last_page;
+        struct list_head lru;
+        atomic_t refs;
+        int flags;
 };
 
 typedef struct extent_map *(get_extent_t)(struct inode *inode,
@@ -88,6 +89,7 @@ typedef struct extent_map *(get_extent_t)(struct inode *inode,
 
 void extent_map_tree_init(struct extent_map_tree *tree,
                           struct address_space *mapping, gfp_t mask);
+void extent_map_tree_cleanup(struct extent_map_tree *tree);
 struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
                                           u64 start, u64 end);
 int add_extent_mapping(struct extent_map_tree *tree,
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 87456ab74275..67e4aca36a62 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -443,8 +443,7 @@ static int drop_dirty_roots(struct btrfs_root *tree_root,
                 BUG_ON(ret);
                 mutex_unlock(&tree_root->fs_info->fs_mutex);
                 btrfs_btree_balance_dirty(tree_root, nr);
-                schedule();
-
+                cond_resched();
                 mutex_lock(&tree_root->fs_info->fs_mutex);
         }
         BUG_ON(ret);
@@ -471,7 +470,7 @@ static int drop_dirty_roots(struct btrfs_root *tree_root,
                 mutex_unlock(&tree_root->fs_info->fs_mutex);
 
                 btrfs_btree_balance_dirty(tree_root, nr);
-                schedule();
+                cond_resched();
         }
         return ret;
 }
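
The transaction.c hunks replace schedule() with cond_resched() in the dirty-root drop loops, so each pass yields the CPU only when the scheduler has actually flagged a reschedule instead of unconditionally entering schedule() on every iteration. A minimal sketch of the resulting pattern, with work_left() and drop_one_item() as hypothetical stand-ins for the real loop body in drop_dirty_roots():

/* Pattern sketch only: work_left() and drop_one_item() are hypothetical
 * placeholders; cond_resched() is the real kernel API and reschedules
 * only if need_resched() is set for the current task.
 */
while (work_left()) {
        drop_one_item();
        cond_resched();         /* voluntary preemption point */
}
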