author | Chris Mason <chris.mason@oracle.com> | 2007-10-15 16:18:14 -0400
---|---|---
committer | Chris Mason <chris.mason@oracle.com> | 2008-09-25 11:03:56 -0400
commit | 4dc119046d0d8501afa4346472917fb05586ad9c (patch) |
tree | 712c98f8f66aa1e83020574a19d015df88c5024e /fs/btrfs/extent_map.c |
parent | e19caa5f0e34b571ed0c2617554af5c43cb124d1 (diff) |
Btrfs: Add an extent buffer LRU to reduce radix tree hits
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/extent_map.c')
-rw-r--r-- | fs/btrfs/extent_map.c | 183
1 file changed, 106 insertions(+), 77 deletions(-)
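
This commit replaces the old global extent_buffer freelist (the `extent_buffers` list, `nr_extent_buffers` counter, and `extent_buffers_lock`) with a small per-tree LRU: `add_lru()` moves a buffer to the head of `tree->buffer_lru` and evicts from the tail once `BUFFER_LRU_MAX` (64) buffers are cached, while `find_lru()` lets `__alloc_extent_buffer()` hand back a recently used buffer without a radix-tree or page-cache lookup. The new `EXTENT_BUFFER_FILLED` flag marks buffers whose pages are already attached, so `alloc_extent_buffer()` and `find_extent_buffer()` can return an LRU hit immediately. The sketch below is a minimal userspace rendering of the same bounded-LRU pattern, not kernel code: `struct buffer` and `struct tree_lru` are hypothetical stand-ins, the list handling is hand-rolled instead of `<linux/list.h>`, and the move-to-front is folded into the lookup rather than done in a separate `add_lru()` call as the patch does.

```c
#include <stdio.h>
#include <stdlib.h>

/* Bounded LRU in the style of the patch: most recently used at the
 * head, eviction from the tail once BUFFER_LRU_MAX entries are held. */
#define BUFFER_LRU_MAX 64

struct buffer {
	unsigned long long start;   /* byte offset, like eb->start */
	unsigned long len;          /* byte length, like eb->len */
	int refs;                   /* stand-in for atomic eb->refs */
	struct buffer *prev, *next;
};

struct tree_lru {
	struct buffer *head, *tail; /* head = most recently used */
	int size;
};

static void lru_unlink(struct tree_lru *lru, struct buffer *b)
{
	if (b->prev)
		b->prev->next = b->next;
	else
		lru->head = b->next;
	if (b->next)
		b->next->prev = b->prev;
	else
		lru->tail = b->prev;
	b->prev = b->next = NULL;
	lru->size--;
}

static void lru_push_front(struct tree_lru *lru, struct buffer *b)
{
	b->prev = NULL;
	b->next = lru->head;
	if (lru->head)
		lru->head->prev = b;
	else
		lru->tail = b;
	lru->head = b;
	lru->size++;
}

/* Mirrors add_lru(): the LRU takes its own reference, and once the
 * list reaches BUFFER_LRU_MAX the coldest buffer is dropped. */
static void add_lru(struct tree_lru *lru, struct buffer *b)
{
	lru_push_front(lru, b);
	b->refs++;
	if (lru->size >= BUFFER_LRU_MAX) {
		struct buffer *victim = lru->tail;
		lru_unlink(lru, victim);
		if (--victim->refs == 0)
			free(victim);
	}
}

/* Mirrors find_lru(): a linear scan keyed by (start, len).  Unlike the
 * patch, the move-to-front happens here instead of in add_lru(). */
static struct buffer *find_lru(struct tree_lru *lru,
			       unsigned long long start, unsigned long len)
{
	struct buffer *b;

	for (b = lru->head; b; b = b->next) {
		if (b->start == start && b->len == len) {
			lru_unlink(lru, b);
			lru_push_front(lru, b);
			b->refs++;      /* like extent_buffer_get() */
			return b;
		}
	}
	return NULL;
}

int main(void)
{
	struct tree_lru lru = { NULL, NULL, 0 };
	struct buffer *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;
	b->start = 4096;
	b->len = 4096;
	b->refs = 1;
	add_lru(&lru, b);

	printf("hit:  %p\n", (void *)find_lru(&lru, 4096, 4096));
	printf("miss: %p\n", (void *)find_lru(&lru, 8192, 4096));
	return 0;
}
```

Keeping the LRU per tree also means the lock that protects it (`tree->lru_lock` in the patch) is taken only by that tree's users, instead of serializing every buffer allocation behind one global spinlock.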
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index e241699024da..85b28a6a4e05 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/blkdev.h>
+#include <linux/swap.h>
 #include "extent_map.h"
 
 /* temporary define until extent_map moves out of btrfs */
@@ -20,14 +21,11 @@ static struct kmem_cache *extent_map_cache;
 static struct kmem_cache *extent_state_cache;
 static struct kmem_cache *extent_buffer_cache;
 
-static LIST_HEAD(extent_buffers);
 static LIST_HEAD(buffers);
 static LIST_HEAD(states);
 
-static spinlock_t extent_buffers_lock;
 static spinlock_t state_lock = SPIN_LOCK_UNLOCKED;
-static int nr_extent_buffers;
-#define MAX_EXTENT_BUFFER_CACHE 128
+#define BUFFER_LRU_MAX 64
 
 struct tree_entry {
 	u64 start;
@@ -47,20 +45,12 @@ void __init extent_map_init(void)
 	extent_buffer_cache = btrfs_cache_create("extent_buffers",
 					sizeof(struct extent_buffer), 0,
 					NULL);
-	spin_lock_init(&extent_buffers_lock);
 }
 
 void __exit extent_map_exit(void)
 {
-	struct extent_buffer *eb;
 	struct extent_state *state;
 
-	while (!list_empty(&extent_buffers)) {
-		eb = list_entry(extent_buffers.next,
-				struct extent_buffer, list);
-		list_del(&eb->list);
-		kmem_cache_free(extent_buffer_cache, eb);
-	}
 	while (!list_empty(&states)) {
 		state = list_entry(states.next, struct extent_state, list);
 		printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n", state->start, state->end, state->state, state->in_tree, atomic_read(&state->refs));
@@ -68,14 +58,6 @@ void __exit extent_map_exit(void)
 		kmem_cache_free(extent_state_cache, state);
 
 	}
-	while (!list_empty(&buffers)) {
-		eb = list_entry(buffers.next,
-				struct extent_buffer, leak_list);
-		printk("buffer leak start %Lu len %lu return %lX\n", eb->start, eb->len, eb->alloc_addr);
-		list_del(&eb->leak_list);
-		kmem_cache_free(extent_buffer_cache, eb);
-	}
-
 
 	if (extent_map_cache)
 		kmem_cache_destroy(extent_map_cache);
@@ -92,10 +74,25 @@ void extent_map_tree_init(struct extent_map_tree *tree,
 	tree->state.rb_node = NULL;
 	tree->ops = NULL;
 	rwlock_init(&tree->lock);
+	spin_lock_init(&tree->lru_lock);
 	tree->mapping = mapping;
+	INIT_LIST_HEAD(&tree->buffer_lru);
+	tree->lru_size = 0;
 }
 EXPORT_SYMBOL(extent_map_tree_init);
 
+void extent_map_tree_cleanup(struct extent_map_tree *tree)
+{
+	struct extent_buffer *eb;
+	while(!list_empty(&tree->buffer_lru)) {
+		eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
+				lru);
+		list_del(&eb->lru);
+		free_extent_buffer(eb);
+	}
+}
+EXPORT_SYMBOL(extent_map_tree_cleanup);
+
 struct extent_map *alloc_extent_map(gfp_t mask)
 {
 	struct extent_map *em;
@@ -1915,66 +1912,99 @@ sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
 	return (em->block_start + start - em->start) >> inode->i_blkbits;
 }
 
-static struct extent_buffer *__alloc_extent_buffer(gfp_t mask)
+static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
 {
-	struct extent_buffer *eb = NULL;
-
-	spin_lock(&extent_buffers_lock);
-	if (!list_empty(&extent_buffers)) {
-		eb = list_entry(extent_buffers.next, struct extent_buffer,
-				list);
-		list_del(&eb->list);
-		WARN_ON(nr_extent_buffers == 0);
-		nr_extent_buffers--;
-	}
-	spin_unlock(&extent_buffers_lock);
+	if (list_empty(&eb->lru)) {
+		extent_buffer_get(eb);
+		list_add(&eb->lru, &tree->buffer_lru);
+		tree->lru_size++;
+		if (tree->lru_size >= BUFFER_LRU_MAX) {
+			struct extent_buffer *rm;
+			rm = list_entry(tree->buffer_lru.prev,
+					struct extent_buffer, lru);
+			tree->lru_size--;
+			list_del(&rm->lru);
+			free_extent_buffer(rm);
+		}
+	} else
+		list_move(&eb->lru, &tree->buffer_lru);
+	return 0;
+}
+static struct extent_buffer *find_lru(struct extent_map_tree *tree,
+				      u64 start, unsigned long len)
+{
+	struct list_head *lru = &tree->buffer_lru;
+	struct list_head *cur = lru->next;
+	struct extent_buffer *eb;
 
-	if (eb) {
-		memset(eb, 0, sizeof(*eb));
-	} else {
-		eb = kmem_cache_zalloc(extent_buffer_cache, mask);
-	}
-	spin_lock(&extent_buffers_lock);
-	list_add(&eb->leak_list, &buffers);
-	spin_unlock(&extent_buffers_lock);
+	if (list_empty(lru))
+		return NULL;
 
-	return eb;
+	do {
+		eb = list_entry(cur, struct extent_buffer, lru);
+		if (eb->start == start && eb->len == len) {
+			extent_buffer_get(eb);
+			return eb;
+		}
+		cur = cur->next;
+	} while (cur != lru);
+	return NULL;
 }
 
-static void __free_extent_buffer(struct extent_buffer *eb)
+static inline unsigned long num_extent_pages(u64 start, u64 len)
 {
-
-	spin_lock(&extent_buffers_lock);
-	list_del_init(&eb->leak_list);
-	spin_unlock(&extent_buffers_lock);
-
-	if (nr_extent_buffers >= MAX_EXTENT_BUFFER_CACHE) {
-		kmem_cache_free(extent_buffer_cache, eb);
-	} else {
-		spin_lock(&extent_buffers_lock);
-		list_add(&eb->list, &extent_buffers);
-		nr_extent_buffers++;
-		spin_unlock(&extent_buffers_lock);
-	}
+	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
+		(start >> PAGE_CACHE_SHIFT);
 }
 
-static inline struct page *extent_buffer_page(struct extent_buffer *eb, int i)
+static inline struct page *extent_buffer_page(struct extent_buffer *eb,
+					      unsigned long i)
 {
 	struct page *p;
 
-	if (i < EXTENT_INLINE_PAGES)
-		return eb->pages[i];
+	if (i == 0)
+		return eb->last_page;
 	i += eb->start >> PAGE_CACHE_SHIFT;
-	p = find_get_page(eb->pages[0]->mapping, i);
+	p = find_get_page(eb->last_page->mapping, i);
 	page_cache_release(p);
 	return p;
 }
 
-static inline unsigned long num_extent_pages(u64 start, u64 len)
+static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
+						   u64 start,
+						   unsigned long len,
+						   gfp_t mask)
 {
-	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
-		(start >> PAGE_CACHE_SHIFT);
+	struct extent_buffer *eb = NULL;
+
+	spin_lock(&tree->lru_lock);
+	eb = find_lru(tree, start, len);
+	if (eb)
+		goto lru_add;
+	spin_unlock(&tree->lru_lock);
+
+	if (eb) {
+		memset(eb, 0, sizeof(*eb));
+	} else {
+		eb = kmem_cache_zalloc(extent_buffer_cache, mask);
+	}
+	INIT_LIST_HEAD(&eb->lru);
+	eb->start = start;
+	eb->len = len;
+	atomic_set(&eb->refs, 1);
+
+	spin_lock(&tree->lru_lock);
+lru_add:
+	add_lru(tree, eb);
+	spin_unlock(&tree->lru_lock);
+	return eb;
+}
+
+static void __free_extent_buffer(struct extent_buffer *eb)
+{
+	kmem_cache_free(extent_buffer_cache, eb);
 }
+
 struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
 					  u64 start, unsigned long len,
 					  gfp_t mask)
@@ -1987,14 +2017,12 @@ struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
 	struct address_space *mapping = tree->mapping;
 	int uptodate = 0;
 
-	eb = __alloc_extent_buffer(mask);
+	eb = __alloc_extent_buffer(tree, start, len, mask);
 	if (!eb || IS_ERR(eb))
 		return NULL;
 
-	eb->alloc_addr = (unsigned long)__builtin_return_address(0);
-	eb->start = start;
-	eb->len = len;
-	atomic_set(&eb->refs, 1);
+	if (eb->flags & EXTENT_BUFFER_FILLED)
+		return eb;
 
 	for (i = 0; i < num_pages; i++, index++) {
 		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
@@ -2008,14 +2036,15 @@ struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
 			goto fail;
 		}
 		set_page_extent_mapped(p);
-		if (i < EXTENT_INLINE_PAGES)
-			eb->pages[i] = p;
+		if (i == 0)
+			eb->last_page = p;
 		if (!PageUptodate(p))
 			uptodate = 0;
 		unlock_page(p);
 	}
 	if (uptodate)
 		eb->flags |= EXTENT_UPTODATE;
+	eb->flags |= EXTENT_BUFFER_FILLED;
 	return eb;
 fail:
 	free_extent_buffer(eb);
@@ -2035,14 +2064,12 @@ struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
 	struct address_space *mapping = tree->mapping;
 	int uptodate = 1;
 
-	eb = __alloc_extent_buffer(mask);
+	eb = __alloc_extent_buffer(tree, start, len, mask);
 	if (!eb || IS_ERR(eb))
 		return NULL;
 
-	eb->alloc_addr = (unsigned long)__builtin_return_address(0);
-	eb->start = start;
-	eb->len = len;
-	atomic_set(&eb->refs, 1);
+	if (eb->flags & EXTENT_BUFFER_FILLED)
+		return eb;
 
 	for (i = 0; i < num_pages; i++, index++) {
 		p = find_lock_page(mapping, index);
@@ -2055,14 +2082,15 @@ struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
 			goto fail;
 		}
 		set_page_extent_mapped(p);
-		if (i < EXTENT_INLINE_PAGES)
-			eb->pages[i] = p;
+		if (i == 0)
+			eb->last_page = p;
 		if (!PageUptodate(p))
 			uptodate = 0;
 		unlock_page(p);
 	}
 	if (uptodate)
 		eb->flags |= EXTENT_UPTODATE;
+	eb->flags |= EXTENT_BUFFER_FILLED;
 	return eb;
 fail:
 	free_extent_buffer(eb);
@@ -2231,7 +2259,8 @@ int read_extent_buffer_pages(struct extent_map_tree *tree,
 			ret = -EIO;
 		}
 	}
-	eb->flags |= EXTENT_UPTODATE;
+	if (!ret)
+		eb->flags |= EXTENT_UPTODATE;
 	return ret;
 }
 EXPORT_SYMBOL(read_extent_buffer_pages);