author		Chris Mason <chris.mason@oracle.com>	2009-01-05 21:25:51 -0500
committer	Chris Mason <chris.mason@oracle.com>	2009-01-05 21:25:51 -0500
commit		d397712bcc6a759a560fd247e6053ecae091f958 (patch)
tree		9da8daebb870d8b8b1843507c4621715e23dd31a /fs/btrfs/extent_io.c
parent		1f3c79a28c8837e8572b98f6d14142d9a6133c56 (diff)
Btrfs: Fix checkpatch.pl warnings
There were many, most are fixed now. struct-funcs.c generates some warnings
but these are bogus.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
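Most of the hunks below are instances of a few recurring checkpatch.pl
classes: the deprecated SPIN_LOCK_UNLOCKED initializer, a missing space
after "while", printk() calls without a KERN_ log level, and badly wrapped
lines. A condensed before/after sketch of those idioms follows; the names
example_lock and example_scan are hypothetical, not code from this patch.
The warnings themselves are typically produced by running
scripts/checkpatch.pl -f fs/btrfs/extent_io.c from the top of the tree.

#include <linux/kernel.h>
#include <linux/spinlock.h>

/* before: static spinlock_t example_lock = SPIN_LOCK_UNLOCKED; (deprecated) */
static DEFINE_SPINLOCK(example_lock);

static void example_scan(unsigned long nr)
{
	/* before: while(nr > 0) {  -- checkpatch wants a space after 'while' */
	while (nr > 0)
		nr--;

	/* before: printk("done %lu\n", nr);  -- missing KERN_ log level */
	printk(KERN_INFO "btrfs example done %lu\n", nr);
}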
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--	fs/btrfs/extent_io.c	213
1 files changed, 99 insertions, 114 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 0bf7684207aa..39edb551dca6 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -32,7 +32,7 @@ static LIST_HEAD(states);
 
 #define LEAK_DEBUG 0
 #ifdef LEAK_DEBUG
-static spinlock_t leak_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(leak_lock);
 #endif
 
 #define BUFFER_LRU_MAX 64
@@ -81,7 +81,11 @@ void extent_io_exit(void)
 
 	while (!list_empty(&states)) {
 		state = list_entry(states.next, struct extent_state, leak_list);
-		printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n", state->start, state->end, state->state, state->tree, atomic_read(&state->refs));
+		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
+		       "state %lu in tree %p refs %d\n",
+		       (unsigned long long)state->start,
+		       (unsigned long long)state->end,
+		       state->state, state->tree, atomic_read(&state->refs));
 		list_del(&state->leak_list);
 		kmem_cache_free(extent_state_cache, state);
 
@@ -89,7 +93,9 @@ void extent_io_exit(void)
 
 	while (!list_empty(&buffers)) {
 		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
-		printk("buffer leak start %Lu len %lu refs %d\n", eb->start, eb->len, atomic_read(&eb->refs));
+		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
+		       "refs %d\n", (unsigned long long)eb->start,
+		       eb->len, atomic_read(&eb->refs));
 		list_del(&eb->leak_list);
 		kmem_cache_free(extent_buffer_cache, eb);
 	}
@@ -158,11 +164,11 @@ EXPORT_SYMBOL(free_extent_state);
 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
 				   struct rb_node *node)
 {
-	struct rb_node ** p = &root->rb_node;
-	struct rb_node * parent = NULL;
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
 	struct tree_entry *entry;
 
-	while(*p) {
+	while (*p) {
 		parent = *p;
 		entry = rb_entry(parent, struct tree_entry, rb_node);
 
@@ -185,13 +191,13 @@ static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
 				      struct rb_node **next_ret)
 {
 	struct rb_root *root = &tree->state;
-	struct rb_node * n = root->rb_node;
+	struct rb_node *n = root->rb_node;
 	struct rb_node *prev = NULL;
 	struct rb_node *orig_prev = NULL;
 	struct tree_entry *entry;
 	struct tree_entry *prev_entry = NULL;
 
-	while(n) {
+	while (n) {
 		entry = rb_entry(n, struct tree_entry, rb_node);
 		prev = n;
 		prev_entry = entry;
@@ -200,14 +206,13 @@ static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
 			n = n->rb_left;
 		else if (offset > entry->end)
 			n = n->rb_right;
-		else {
+		else
 			return n;
-		}
 	}
 
 	if (prev_ret) {
 		orig_prev = prev;
-		while(prev && offset > prev_entry->end) {
+		while (prev && offset > prev_entry->end) {
 			prev = rb_next(prev);
 			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
 		}
@@ -217,7 +222,7 @@ static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
 
 	if (next_ret) {
 		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
-		while(prev && offset < prev_entry->start) {
+		while (prev && offset < prev_entry->start) {
 			prev = rb_prev(prev);
 			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
 		}
@@ -233,9 +238,8 @@ static inline struct rb_node *tree_search(struct extent_io_tree *tree,
 	struct rb_node *ret;
 
 	ret = __etree_search(tree, offset, &prev, NULL);
-	if (!ret) {
+	if (!ret)
 		return prev;
-	}
 	return ret;
 }
 
@@ -243,11 +247,11 @@ static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
 					  u64 offset, struct rb_node *node)
 {
 	struct rb_root *root = &tree->buffer;
-	struct rb_node ** p = &root->rb_node;
-	struct rb_node * parent = NULL;
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
 	struct extent_buffer *eb;
 
-	while(*p) {
+	while (*p) {
 		parent = *p;
 		eb = rb_entry(parent, struct extent_buffer, rb_node);
 
@@ -268,10 +272,10 @@ static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
 					   u64 offset)
 {
 	struct rb_root *root = &tree->buffer;
-	struct rb_node * n = root->rb_node;
+	struct rb_node *n = root->rb_node;
 	struct extent_buffer *eb;
 
-	while(n) {
+	while (n) {
 		eb = rb_entry(n, struct extent_buffer, rb_node);
 		if (offset < eb->start)
 			n = n->rb_left;
@@ -363,7 +367,9 @@ static int insert_state(struct extent_io_tree *tree,
 	struct rb_node *node;
 
 	if (end < start) {
-		printk("end < start %Lu %Lu\n", end, start);
+		printk(KERN_ERR "btrfs end < start %llu %llu\n",
+		       (unsigned long long)end,
+		       (unsigned long long)start);
 		WARN_ON(1);
 	}
 	if (bits & EXTENT_DIRTY)
@@ -376,7 +382,10 @@ static int insert_state(struct extent_io_tree *tree,
 	if (node) {
 		struct extent_state *found;
 		found = rb_entry(node, struct extent_state, rb_node);
-		printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
+		printk(KERN_ERR "btrfs found node %llu %llu on insert of "
+		       "%llu %llu\n", (unsigned long long)found->start,
+		       (unsigned long long)found->end,
+		       (unsigned long long)start, (unsigned long long)end);
 		free_extent_state(state);
 		return -EEXIST;
 	}
@@ -412,7 +421,6 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
 	if (node) {
 		struct extent_state *found;
 		found = rb_entry(node, struct extent_state, rb_node);
-		printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
 		free_extent_state(prealloc);
 		return -EEXIST;
 	}
@@ -661,8 +669,9 @@ static void set_state_bits(struct extent_io_tree *tree,
  * [start, end] is inclusive
  * This takes the tree lock.
  */
-static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
-			  int exclusive, u64 *failed_start, gfp_t mask)
+static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+			  int bits, int exclusive, u64 *failed_start,
+			  gfp_t mask)
 {
 	struct extent_state *state;
 	struct extent_state *prealloc = NULL;
@@ -763,7 +772,7 @@ again:
 		if (end < last_start)
 			this_end = end;
 		else
-			this_end = last_start -1;
+			this_end = last_start - 1;
 		err = insert_state(tree, prealloc, start, this_end,
 				   bits);
 		prealloc = NULL;
@@ -891,8 +900,8 @@ int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
 }
 EXPORT_SYMBOL(set_extent_uptodate);
 
-static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
-				 gfp_t mask)
+static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
+				 u64 end, gfp_t mask)
 {
 	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
 }
@@ -904,8 +913,8 @@ static int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
 				0, NULL, mask);
 }
 
-static int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
-				  gfp_t mask)
+static int clear_extent_writeback(struct extent_io_tree *tree, u64 start,
+				  u64 end, gfp_t mask)
 {
 	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
 }
@@ -1025,11 +1034,10 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
 	 * our range starts.
 	 */
 	node = tree_search(tree, start);
-	if (!node) {
+	if (!node)
 		goto out;
-	}
 
-	while(1) {
+	while (1) {
 		state = rb_entry(node, struct extent_state, rb_node);
 		if (state->end >= start && (state->state & bits)) {
 			*start_ret = state->start;
@@ -1062,15 +1070,14 @@ struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
 	 * our range starts.
 	 */
 	node = tree_search(tree, start);
-	if (!node) {
+	if (!node)
 		goto out;
-	}
 
-	while(1) {
+	while (1) {
 		state = rb_entry(node, struct extent_state, rb_node);
-		if (state->end >= start && (state->state & bits)) {
+		if (state->end >= start && (state->state & bits))
 			return state;
-		}
+
 		node = rb_next(node);
 		if (!node)
 			break;
@@ -1108,7 +1115,7 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
 		goto out;
 	}
 
-	while(1) {
+	while (1) {
 		state = rb_entry(node, struct extent_state, rb_node);
 		if (found && (state->start != cur_start ||
 			      (state->state & EXTENT_BOUNDARY))) {
@@ -1150,7 +1157,7 @@ static noinline int __unlock_for_delalloc(struct inode *inode,
 	if (index == locked_page->index && end_index == index)
 		return 0;
 
-	while(nr_pages > 0) {
+	while (nr_pages > 0) {
 		ret = find_get_pages_contig(inode->i_mapping, index,
 				     min_t(unsigned long, nr_pages,
 				     ARRAY_SIZE(pages)), pages);
@@ -1186,7 +1193,7 @@ static noinline int lock_delalloc_pages(struct inode *inode,
 
 	/* skip the page at the start index */
 	nrpages = end_index - index + 1;
-	while(nrpages > 0) {
+	while (nrpages > 0) {
 		ret = find_get_pages_contig(inode->i_mapping, index,
 				     min_t(unsigned long,
 				     nrpages, ARRAY_SIZE(pages)), pages);
@@ -1263,17 +1270,16 @@ again:
 	 * pages in order, so we can't process delalloc bytes before
 	 * locked_page
 	 */
-	if (delalloc_start < *start) {
+	if (delalloc_start < *start)
 		delalloc_start = *start;
-	}
 
 	/*
 	 * make sure to limit the number of pages we try to lock down
 	 * if we're looping.
 	 */
-	if (delalloc_end + 1 - delalloc_start > max_bytes && loops) {
+	if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
 		delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
-	}
+
 	/* step two, lock all the pages after the page that has start */
 	ret = lock_delalloc_pages(inode, locked_page,
 				  delalloc_start, delalloc_end);
@@ -1341,7 +1347,7 @@ int extent_clear_unlock_delalloc(struct inode *inode,
 	if (!(unlock_pages || clear_dirty || set_writeback || end_writeback))
 		return 0;
 
-	while(nr_pages > 0) {
+	while (nr_pages > 0) {
 		ret = find_get_pages_contig(inode->i_mapping, index,
 				     min_t(unsigned long,
 				     nr_pages, ARRAY_SIZE(pages)), pages);
@@ -1384,7 +1390,6 @@ u64 count_range_bits(struct extent_io_tree *tree,
 	int found = 0;
 
 	if (search_end <= cur_start) {
-		printk("search_end %Lu start %Lu\n", search_end, cur_start);
 		WARN_ON(1);
 		return 0;
 	}
@@ -1399,11 +1404,10 @@ u64 count_range_bits(struct extent_io_tree *tree,
 	 * our range starts.
 	 */
 	node = tree_search(tree, cur_start);
-	if (!node) {
+	if (!node)
 		goto out;
-	}
 
-	while(1) {
+	while (1) {
 		state = rb_entry(node, struct extent_state, rb_node);
 		if (state->start > search_end)
 			break;
@@ -1927,19 +1931,15 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
 		nr = bio_get_nr_vecs(bdev);
 
 	bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
-	if (!bio) {
-		printk("failed to allocate bio nr %d\n", nr);
-	}
 
 	bio_add_page(bio, page, page_size, offset);
 	bio->bi_end_io = end_io_func;
 	bio->bi_private = tree;
 
-	if (bio_ret) {
+	if (bio_ret)
 		*bio_ret = bio;
-	} else {
+	else
 		ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
-	}
 
 	return ret;
 }
@@ -2028,13 +2028,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			break;
 		}
 		extent_offset = cur - em->start;
-		if (extent_map_end(em) <= cur) {
-printk("bad mapping em [%Lu %Lu] cur %Lu\n", em->start, extent_map_end(em), cur);
-		}
 		BUG_ON(extent_map_end(em) <= cur);
-		if (end < cur) {
-printk("2bad mapping end %Lu cur %Lu\n", end, cur);
-		}
 		BUG_ON(end < cur);
 
 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
@@ -2199,7 +2193,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	delalloc_end = 0;
 	page_started = 0;
 	if (!epd->extent_locked) {
-		while(delalloc_end < page_end) {
+		while (delalloc_end < page_end) {
 			nr_delalloc = find_lock_delalloc_range(inode, tree,
 						       page,
 						       &delalloc_start,
@@ -2242,9 +2236,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 		nr_written++;
 
 	end = page_end;
-	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
-		printk("found delalloc bits after lock_extent\n");
-	}
+	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0))
+		printk(KERN_ERR "btrfs delalloc bits after lock_extent\n");
 
 	if (last_byte <= start) {
 		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
@@ -2297,7 +2290,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 			clear_extent_dirty(tree, cur,
 					   cur + iosize - 1, GFP_NOFS);
 
-			unlock_extent(tree, unlock_start, cur + iosize -1,
+			unlock_extent(tree, unlock_start, cur + iosize - 1,
 				      GFP_NOFS);
 
 			/*
@@ -2344,9 +2337,9 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 
 		set_range_writeback(tree, cur, cur + iosize - 1);
 		if (!PageWriteback(page)) {
-			printk("warning page %lu not writeback, "
-			       "cur %llu end %llu\n", page->index,
-			       (unsigned long long)cur,
+			printk(KERN_ERR "btrfs warning page %lu not "
+			       "writeback, cur %llu end %llu\n",
+			       page->index, (unsigned long long)cur,
 			       (unsigned long long)end);
 		}
 
@@ -2430,8 +2423,8 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
 retry:
 	while (!done && (index <= end) &&
 	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-			      PAGECACHE_TAG_DIRTY,
-			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+			      PAGECACHE_TAG_DIRTY, min(end - index,
+			      (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
 		unsigned i;
 
 		scanned = 1;
@@ -2536,9 +2529,8 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
 
 	extent_write_cache_pages(tree, mapping, &wbc_writepages,
 				 __extent_writepage, &epd, flush_write_bio);
-	if (epd.bio) {
+	if (epd.bio)
 		submit_one_bio(WRITE, epd.bio, 0, 0);
-	}
 	return ret;
 }
 EXPORT_SYMBOL(extent_write_full_page);
@@ -2568,7 +2560,7 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
 		.range_end = end + 1,
 	};
 
-	while(start <= end) {
+	while (start <= end) {
 		page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
 		if (clear_page_dirty_for_io(page))
 			ret = __extent_writepage(page, &wbc_writepages, &epd);
@@ -2606,9 +2598,8 @@ int extent_writepages(struct extent_io_tree *tree,
 	ret = extent_write_cache_pages(tree, mapping, wbc,
 				       __extent_writepage, &epd,
 				       flush_write_bio);
-	if (epd.bio) {
+	if (epd.bio)
 		submit_one_bio(WRITE, epd.bio, 0, 0);
-	}
 	return ret;
 }
 EXPORT_SYMBOL(extent_writepages);
@@ -2666,7 +2657,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,
 	u64 end = start + PAGE_CACHE_SIZE - 1;
 	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
 
-	start += (offset + blocksize -1) & ~(blocksize - 1);
+	start += (offset + blocksize - 1) & ~(blocksize - 1);
 	if (start > end)
 		return 0;
 
@@ -2727,12 +2718,12 @@ int extent_prepare_write(struct extent_io_tree *tree,
 	orig_block_start = block_start;
 
 	lock_extent(tree, page_start, page_end, GFP_NOFS);
-	while(block_start <= block_end) {
+	while (block_start <= block_end) {
 		em = get_extent(inode, page, page_offset, block_start,
 				block_end - block_start + 1, 1);
-		if (IS_ERR(em) || !em) {
+		if (IS_ERR(em) || !em)
 			goto err;
-		}
+
 		cur_end = min(block_end, extent_map_end(em) - 1);
 		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
 		block_off_end = block_off_start + blocksize;
@@ -3170,7 +3161,7 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree,
 		}
 		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
 		set_extent_dirty(tree, page_offset(page),
-				page_offset(page) + PAGE_CACHE_SIZE -1,
+				page_offset(page) + PAGE_CACHE_SIZE - 1,
 				GFP_NOFS);
 		unlock_page(page);
 	}
@@ -3235,7 +3226,7 @@ int extent_range_uptodate(struct extent_io_tree *tree,
 	ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
 	if (ret)
 		return 1;
-	while(start <= end) {
+	while (start <= end) {
 		index = start >> PAGE_CACHE_SHIFT;
 		page = find_get_page(tree->mapping, index);
 		uptodate = PageUptodate(page);
@@ -3321,16 +3312,12 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 			lock_page(page);
 		}
 		locked_pages++;
-		if (!PageUptodate(page)) {
+		if (!PageUptodate(page))
 			all_uptodate = 0;
-		}
 	}
 	if (all_uptodate) {
 		if (start_i == 0)
 			eb->flags |= EXTENT_UPTODATE;
-		if (ret) {
-			printk("all up to date but ret is %d\n", ret);
-		}
 		goto unlock_exit;
 	}
 
@@ -3345,10 +3332,8 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 			err = __extent_read_full_page(tree, page,
 						      get_extent, &bio,
 						      mirror_num, &bio_flags);
-			if (err) {
+			if (err)
 				ret = err;
-				printk("err %d from __extent_read_full_page\n", ret);
-			}
 		} else {
 			unlock_page(page);
 		}
@@ -3357,26 +3342,23 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 	if (bio)
 		submit_one_bio(READ, bio, mirror_num, bio_flags);
 
-	if (ret || !wait) {
-		if (ret)
-			printk("ret %d wait %d returning\n", ret, wait);
+	if (ret || !wait)
 		return ret;
-	}
+
 	for (i = start_i; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
 		wait_on_page_locked(page);
-		if (!PageUptodate(page)) {
-			printk("page not uptodate after wait_on_page_locked\n");
+		if (!PageUptodate(page))
 			ret = -EIO;
-		}
 	}
+
 	if (!ret)
 		eb->flags |= EXTENT_UPTODATE;
 	return ret;
 
 unlock_exit:
 	i = start_i;
-	while(locked_pages > 0) {
+	while (locked_pages > 0) {
 		page = extent_buffer_page(eb, i);
 		i++;
 		unlock_page(page);
@@ -3403,7 +3385,7 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
 
 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
 
-	while(len > 0) {
+	while (len > 0) {
 		page = extent_buffer_page(eb, i);
 
 		cur = min(len, (PAGE_CACHE_SIZE - offset));
@@ -3442,8 +3424,11 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
 		offset = 0;
 		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
 	}
+
 	if (start + min_len > eb->len) {
-printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
+		printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
+		       "wanted %lu %lu\n", (unsigned long long)eb->start,
+		       eb->len, start, min_len);
 		WARN_ON(1);
 	}
 
@@ -3506,7 +3491,7 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
 
 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
 
-	while(len > 0) {
+	while (len > 0) {
 		page = extent_buffer_page(eb, i);
 
 		cur = min(len, (PAGE_CACHE_SIZE - offset));
@@ -3542,7 +3527,7 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
 
 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
 
-	while(len > 0) {
+	while (len > 0) {
 		page = extent_buffer_page(eb, i);
 		WARN_ON(!PageUptodate(page));
 
@@ -3574,7 +3559,7 @@ void memset_extent_buffer(struct extent_buffer *eb, char c,
 
 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
 
-	while(len > 0) {
+	while (len > 0) {
 		page = extent_buffer_page(eb, i);
 		WARN_ON(!PageUptodate(page));
 
@@ -3607,7 +3592,7 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
 	offset = (start_offset + dst_offset) &
 		((unsigned long)PAGE_CACHE_SIZE - 1);
 
-	while(len > 0) {
+	while (len > 0) {
 		page = extent_buffer_page(dst, i);
 		WARN_ON(!PageUptodate(page));
 
@@ -3674,17 +3659,17 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 	unsigned long src_i;
 
 	if (src_offset + len > dst->len) {
-		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
-		       src_offset, len, dst->len);
+		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
+		       "len %lu dst len %lu\n", src_offset, len, dst->len);
 		BUG_ON(1);
 	}
 	if (dst_offset + len > dst->len) {
-		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
-		       dst_offset, len, dst->len);
+		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
+		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
 		BUG_ON(1);
 	}
 
-	while(len > 0) {
+	while (len > 0) {
 		dst_off_in_page = (start_offset + dst_offset) &
 			((unsigned long)PAGE_CACHE_SIZE - 1);
 		src_off_in_page = (start_offset + src_offset) &
@@ -3722,20 +3707,20 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 	unsigned long src_i;
 
 	if (src_offset + len > dst->len) {
-		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
-		       src_offset, len, dst->len);
+		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
+		       "len %lu len %lu\n", src_offset, len, dst->len);
 		BUG_ON(1);
 	}
 	if (dst_offset + len > dst->len) {
-		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
-		       dst_offset, len, dst->len);
+		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
+		       "len %lu len %lu\n", dst_offset, len, dst->len);
 		BUG_ON(1);
 	}
 	if (dst_offset < src_offset) {
 		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
 		return;
 	}
-	while(len > 0) {
+	while (len > 0) {
 		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
 		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
 
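
A note on the %Lu to %llu conversions that dominate the printk changes
above: %Lu is a non-standard length modifier that checkpatch rejects, and
in this era u64 was typedef'd as unsigned long on some 64-bit architectures
and unsigned long long on others, so no single format specifier matched
everywhere. The portable idiom, sketched here with a hypothetical helper
rather than code from the patch, is to cast each u64 argument explicitly:

#include <linux/kernel.h>
#include <linux/types.h>

/* hypothetical helper illustrating the cast idiom used throughout the patch */
static void print_extent_range(u64 start, u64 end)
{
	printk(KERN_ERR "btrfs range: start %llu end %llu\n",
	       (unsigned long long)start, (unsigned long long)end);
}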