author     Chris Mason <chris.mason@oracle.com>  2007-11-11 08:22:00 -0500
committer  Chris Mason <chris.mason@oracle.com>  2008-09-25 11:03:57 -0400
commit     0591fb56fb732c7bf500d1446d0bf927046d7002 (patch)
tree       ab2476ebe645d6243fb7c92d567765e96b4e822f /fs/btrfs/extent_map.c
parent     3ab2fb5a8cb003897016b6eb38ddad916226c1b2 (diff)
Btrfs: Fix extent bit range testing
test_range_bit() could return the bit as set when there was actually a hole
at the very end of the range.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/extent_map.c')
-rw-r--r--  fs/btrfs/extent_map.c | 24 +++++++++++++++++-------
1 file changed, 17 insertions(+), 7 deletions(-)
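The core of this patch is the reordering of the checks inside test_range_bit()
(second hunk below): with the old order, the loop broke out as soon as the next
recorded extent state started beyond 'end', so a query with 'filled' set could
report the whole range as carrying the bit even though the tail of the range
fell into a hole before that state. Below is a minimal, self-contained model of
that behaviour; toy_state, the flat array standing in for the extent state
rb-tree, the loop tail and the harness are illustrative assumptions of this
sketch, not the kernel code.

/*
 * Toy model of the test_range_bit() ordering fix -- illustration only.
 * filled == 1 asks whether every byte of [start, end] carries 'bits';
 * this model only covers gaps in front of a recorded state, which is
 * exactly the case the patch reorders the checks for.
 */
#include <stdio.h>

typedef unsigned long long u64;

struct toy_state {
	u64 start;
	u64 end;
	unsigned long state;		/* bit flags on this extent range */
};

static int toy_test_range_bit(struct toy_state *states, int nr,
			      u64 start, u64 end, unsigned long bits,
			      int filled, int fixed_order)
{
	int bitset = 0;
	int i = 0;

	while (i < nr && start <= end) {
		struct toy_state *state = &states[i];

		if (!fixed_order && state->start > end)
			break;		/* old order: breaks before seeing the hole */

		if (filled && state->start > start) {
			bitset = 0;	/* gap in front of this state */
			break;
		}

		if (fixed_order && state->start > end)
			break;		/* new order: gap check above runs first */

		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;	/* advance past this state */
		i++;
	}
	return bitset;
}

int main(void)
{
	/* The bit is set on [0, 2047] and [8192, 12287]; [2048, 8191] is a hole. */
	struct toy_state states[] = {
		{ 0,    2047,  0x1 },
		{ 8192, 12287, 0x1 },
	};
	u64 start = 0, end = 4095;	/* the hole sits at the very end of this range */

	printf("old order, filled: %d (wrongly reports the range as set)\n",
	       toy_test_range_bit(states, 2, start, end, 0x1, 1, 0));
	printf("new order, filled: %d (the trailing hole is noticed)\n",
	       toy_test_range_bit(states, 2, start, end, 0x1, 1, 1));
	return 0;
}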
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 0077c6c1d9f9..b3ff63c4bf2a 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -96,7 +96,7 @@ void extent_map_tree_empty_lru(struct extent_map_tree *tree)
 	while(!list_empty(&tree->buffer_lru)) {
 		eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
 				lru);
-		list_del(&eb->lru);
+		list_del_init(&eb->lru);
 		free_extent_buffer(eb);
 	}
 }
@@ -1212,13 +1212,15 @@ int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
 	node = tree_search(&tree->state, start);
 	while (node && start <= end) {
 		state = rb_entry(node, struct extent_state, rb_node);
-		if (state->start > end)
-			break;
 
 		if (filled && state->start > start) {
 			bitset = 0;
 			break;
 		}
+
+		if (state->start > end)
+			break;
+
 		if (state->state & bits) {
 			bitset = 1;
 			if (!filled)
@@ -2208,6 +2210,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
 		page_cache_get(page0);
 		mark_page_accessed(page0);
 		set_page_extent_mapped(page0);
+		WARN_ON(!PageUptodate(page0));
 		set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
 				 len << 2);
 	} else {
@@ -2248,9 +2251,11 @@ fail:
 	spin_unlock(&tree->lru_lock);
 	if (!atomic_dec_and_test(&eb->refs))
 		return NULL;
-	for (index = 0; index < i; index++) {
+	for (index = 1; index < i; index++) {
 		page_cache_release(extent_buffer_page(eb, index));
 	}
+	if (i > 0)
+		page_cache_release(extent_buffer_page(eb, 0));
 	__free_extent_buffer(eb);
 	return NULL;
 }
@@ -2310,9 +2315,11 @@ fail:
 	spin_unlock(&tree->lru_lock);
 	if (!atomic_dec_and_test(&eb->refs))
 		return NULL;
-	for (index = 0; index < i; index++) {
+	for (index = 1; index < i; index++) {
 		page_cache_release(extent_buffer_page(eb, index));
 	}
+	if (i > 0)
+		page_cache_release(extent_buffer_page(eb, 0));
 	__free_extent_buffer(eb);
 	return NULL;
 }
@@ -2329,11 +2336,13 @@ void free_extent_buffer(struct extent_buffer *eb)
 	if (!atomic_dec_and_test(&eb->refs))
 		return;
 
+	WARN_ON(!list_empty(&eb->lru));
 	num_pages = num_extent_pages(eb->start, eb->len);
 
-	for (i = 0; i < num_pages; i++) {
+	for (i = 1; i < num_pages; i++) {
 		page_cache_release(extent_buffer_page(eb, i));
 	}
+	page_cache_release(extent_buffer_page(eb, 0));
 	__free_extent_buffer(eb);
 }
 EXPORT_SYMBOL(free_extent_buffer);
@@ -2469,6 +2478,7 @@ int read_extent_buffer_pages(struct extent_map_tree *tree,
 			   EXTENT_UPTODATE, 1)) {
 		return 0;
 	}
+
 	if (start) {
 		WARN_ON(start < eb->start);
 		start_i = (start >> PAGE_CACHE_SHIFT) -
@@ -2577,7 +2587,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
 		*map_start = 0;
 	} else {
 		offset = 0;
-		*map_start = (i << PAGE_CACHE_SHIFT) - start_offset;
+		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
 	}
 	if (start + min_len > eb->len) {
 printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
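The last hunk widens the shift in map_private_extent_buffer() before the
subtraction. Presumably this guards against the page index overflowing a
32-bit unsigned long once shifted by PAGE_CACHE_SHIFT on 32-bit hosts. A small
stand-alone illustration of that overflow follows; the values and types here
are assumptions for the sketch, not taken from the kernel.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t i = 0x100000;		/* page index at the 4 GiB mark (4 KiB pages) */
	unsigned shift = 12;		/* PAGE_CACHE_SHIFT with 4 KiB pages */

	uint32_t narrow = i << shift;		/* wraps to 0 in 32-bit arithmetic */
	uint64_t wide = (uint64_t)i << shift;	/* 0x100000000, as intended */

	printf("32-bit shift: %#x, 64-bit shift: %#llx\n",
	       narrow, (unsigned long long)wide);
	return 0;
}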