path: root/fs/btrfs
author    Chris Mason <chris.mason@oracle.com>  2007-11-07 21:08:16 -0500
committer Chris Mason <chris.mason@oracle.com>  2008-09-25 11:03:57 -0400
commit    09be207d1ba224531a61de9afdc07a125e45318c (patch)
tree      f88b82aa822903f6f86e0ec9f00dc99195949e1e /fs/btrfs
parent    0da5468f4724a59e745d938395beffe830c424ee (diff)
Btrfs: Fix failure cleanups when allocating extent buffers fail
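
The hunks below make two related changes. First, __alloc_extent_buffer()
used to put a buffer on the LRU (jumping back into its own lru_add label)
before the buffer's pages were attached, so a later page-allocation failure
could leave a half-initialized buffer visible on the LRU; the helper now
only finds or allocates the struct, and alloc_extent_buffer() and
find_extent_buffer() add it to the LRU only once it is fully filled.
Second, the failure paths stop shrinking eb->len and realigning eb->start
as a trick to limit how many pages free_extent_buffer() would release;
instead they drop the reference and, if it was the last one, release
exactly the pages that were grabbed before freeing the struct with
__free_extent_buffer().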
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs')
 -rw-r--r--  fs/btrfs/extent_map.c | 56 ++++++++++++++++++++++++++++++++++++++++++--------------------------
 1 file changed, 30 insertions(+), 26 deletions(-)
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index b0677c84bb7..c976615dcda 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -2106,25 +2106,17 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
 
 	spin_lock(&tree->lru_lock);
 	eb = find_lru(tree, start, len);
-	if (eb) {
-		goto lru_add;
-	}
 	spin_unlock(&tree->lru_lock);
-
 	if (eb) {
-		memset(eb, 0, sizeof(*eb));
-	} else {
-		eb = kmem_cache_zalloc(extent_buffer_cache, mask);
+		return eb;
 	}
+
+	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
 	INIT_LIST_HEAD(&eb->lru);
 	eb->start = start;
 	eb->len = len;
 	atomic_set(&eb->refs, 1);
 
-	spin_lock(&tree->lru_lock);
-lru_add:
-	add_lru(tree, eb);
-	spin_unlock(&tree->lru_lock);
 	return eb;
 }
 
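With the hunk applied, __alloc_extent_buffer() reduces to the following
(reconstructed from the hunk above, not a verbatim copy of the file; the
start/len/mask parameter names are inferred from their usage, since the
full signature is truncated in the hunk header):

static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
						   u64 start,
						   unsigned long len,
						   gfp_t mask)
{
	struct extent_buffer *eb;

	/* only look up or allocate the struct; the callers are now
	 * responsible for putting it on the LRU once it is filled
	 */
	spin_lock(&tree->lru_lock);
	eb = find_lru(tree, start, len);
	spin_unlock(&tree->lru_lock);
	if (eb) {
		return eb;
	}

	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
	INIT_LIST_HEAD(&eb->lru);
	eb->start = start;
	eb->len = len;
	atomic_set(&eb->refs, 1);

	return eb;
}

Note that the diff shows no NULL check after kmem_cache_zalloc() here;
the "return NULL" context at the top of the next hunk suggests the
callers check the result.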
@@ -2151,7 +2143,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
 		return NULL;
 
 	if (eb->flags & EXTENT_BUFFER_FILLED)
-		return eb;
+		goto lru_add;
 
 	if (page0) {
 		eb->first_page = page0;
@@ -2169,11 +2161,6 @@ struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
 		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
 		if (!p) {
 			WARN_ON(1);
-			/* make sure the free only frees the pages we've
-			 * grabbed a reference on
-			 */
-			eb->len = i << PAGE_CACHE_SHIFT;
-			eb->start &= ~((u64)PAGE_CACHE_SIZE - 1);
 			goto fail;
 		}
 		set_page_extent_mapped(p);
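(The five removed lines were the old safety hack: before jumping to fail,
the function shrank eb->len to cover only the i pages it had managed to
grab, and realigned eb->start, so that the generic free would not try to
release pages it never referenced. With the reworked fail label in the
next hunk, that bookkeeping is no longer needed and the buffer's real
geometry is left untouched.)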
@@ -2192,9 +2179,20 @@ struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
 	if (uptodate)
 		eb->flags |= EXTENT_UPTODATE;
 	eb->flags |= EXTENT_BUFFER_FILLED;
+
+lru_add:
+	spin_lock(&tree->lru_lock);
+	add_lru(tree, eb);
+	spin_unlock(&tree->lru_lock);
 	return eb;
+
 fail:
-	free_extent_buffer(eb);
+	if (!atomic_dec_and_test(&eb->refs))
+		return NULL;
+	for (index = 0; index < i; index++) {
+		page_cache_release(extent_buffer_page(eb, index));
+	}
+	__free_extent_buffer(eb);
 	return NULL;
 }
 EXPORT_SYMBOL(alloc_extent_buffer);
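The reworked fail path is the heart of the fix: release exactly the i
pages grabbed before the failure, and free the struct itself only when
ours was the last reference, since a buffer recycled from the LRU may
still be referenced elsewhere. A minimal userspace sketch of the same
unwind pattern (illustrative plain C, not kernel code; all names here
are hypothetical):

#include <stdlib.h>

#define MAX_PAGES 16

struct buffer {
	size_t nr_pages;
	void *pages[MAX_PAGES];
};

static struct buffer *alloc_buffer(size_t nr_pages)
{
	struct buffer *b;
	size_t i;

	if (nr_pages > MAX_PAGES)
		return NULL;
	b = calloc(1, sizeof(*b));
	if (!b)
		return NULL;
	b->nr_pages = nr_pages;
	for (i = 0; i < nr_pages; i++) {
		b->pages[i] = malloc(4096);
		if (!b->pages[i])
			goto fail;	/* exactly i pages grabbed so far */
	}
	return b;

fail:
	/* unwind only what was actually acquired, like the patched
	 * fail: label above, instead of adjusting the buffer's size
	 * so a generic free routine happens to do the right thing
	 */
	while (i--)
		free(b->pages[i]);
	free(b);
	return NULL;
}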
@@ -2204,7 +2202,8 @@ struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
 			 gfp_t mask)
 {
 	unsigned long num_pages = num_extent_pages(start, len);
-	unsigned long i; unsigned long index = start >> PAGE_CACHE_SHIFT;
+	unsigned long i;
+	unsigned long index = start >> PAGE_CACHE_SHIFT;
 	struct extent_buffer *eb;
 	struct page *p;
 	struct address_space *mapping = tree->mapping;
@@ -2215,16 +2214,11 @@ struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
 		return NULL;
 
 	if (eb->flags & EXTENT_BUFFER_FILLED)
-		return eb;
+		goto lru_add;
 
 	for (i = 0; i < num_pages; i++, index++) {
 		p = find_lock_page(mapping, index);
 		if (!p) {
-			/* make sure the free only frees the pages we've
-			 * grabbed a reference on
-			 */
-			eb->len = i << PAGE_CACHE_SHIFT;
-			eb->start &= ~((u64)PAGE_CACHE_SIZE - 1);
 			goto fail;
 		}
 		set_page_extent_mapped(p);
@@ -2245,9 +2239,19 @@ struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
 	if (uptodate)
 		eb->flags |= EXTENT_UPTODATE;
 	eb->flags |= EXTENT_BUFFER_FILLED;
+
+lru_add:
+	spin_lock(&tree->lru_lock);
+	add_lru(tree, eb);
+	spin_unlock(&tree->lru_lock);
 	return eb;
 fail:
-	free_extent_buffer(eb);
+	if (!atomic_dec_and_test(&eb->refs))
+		return NULL;
+	for (index = 0; index < i; index++) {
+		page_cache_release(extent_buffer_page(eb, index));
+	}
+	__free_extent_buffer(eb);
 	return NULL;
 }
 EXPORT_SYMBOL(find_extent_buffer);
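find_extent_buffer() gets the identical two changes: already-filled
buffers jump to lru_add so the (re)insertion happens under tree->lru_lock
only once the buffer is known good, and the fail path releases only the
pages that find_lock_page() actually returned before freeing the struct.
The one extra tweak is splitting the doubled-up declaration of i and
index onto separate lines.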