commit 1d4284bd6e8d7dd1d5521a6747bdb6dc1caf0225
tree   a7dde6312ec24eb6368cad7a3efedbf368a5a70c
parent b5d67f64f9bc656970dacba245410f0faedad18e
parent 65139ed99234d8505948cdb7a835452eb5c191f9
author     Chris Mason <chris.mason@oracle.com>  2012-03-28 20:31:37 -0400
committer  Chris Mason <chris.mason@oracle.com>  2012-03-28 20:31:37 -0400

    Merge branch 'error-handling' into for-linus

    Conflicts:
        fs/btrfs/ctree.c
        fs/btrfs/disk-io.c
        fs/btrfs/extent-tree.c
        fs/btrfs/extent_io.c
        fs/btrfs/extent_io.h
        fs/btrfs/inode.c
        fs/btrfs/scrub.c

    Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/extent_io.c'):

 fs/btrfs/extent_io.c | 236
 1 file changed, 140 insertions(+), 96 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 49a368593a16..0c3ec003f273 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -55,6 +55,11 @@ struct extent_page_data {
 };
 
 static noinline void flush_write_bio(void *data);
+static inline struct btrfs_fs_info *
+tree_fs_info(struct extent_io_tree *tree)
+{
+        return btrfs_sb(tree->mapping->host->i_sb);
+}
 
 int __init extent_io_init(void)
 {
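
The new tree_fs_info() helper gives code that only holds an extent_io_tree a way back to the owning btrfs_fs_info, which the error-handling additions below need in order to call btrfs_panic(). A sketch of the pointer chain it relies on (fields as initialized by extent_io_tree_init() in this era of the code):

        /*
         * tree->mapping   - address_space the tree was initialized with
         * mapping->host   - inode that owns the pages
         * host->i_sb      - super_block of the mount
         * btrfs_sb(i_sb)  - btrfs_fs_info hanging off sb->s_fs_info
         */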
@@ -139,6 +144,7 @@ static struct extent_state *alloc_extent_state(gfp_t mask)
 #endif
         atomic_set(&state->refs, 1);
         init_waitqueue_head(&state->wq);
+        trace_alloc_extent_state(state, mask, _RET_IP_);
         return state;
 }
 
@@ -156,6 +162,7 @@ void free_extent_state(struct extent_state *state)
                 list_del(&state->leak_list);
                 spin_unlock_irqrestore(&leak_lock, flags);
 #endif
+                trace_free_extent_state(state, _RET_IP_);
                 kmem_cache_free(extent_state_cache, state);
         }
 }
@@ -442,6 +449,13 @@ alloc_extent_state_atomic(struct extent_state *prealloc)
         return prealloc;
 }
 
+void extent_io_tree_panic(struct extent_io_tree *tree, int err)
+{
+        btrfs_panic(tree_fs_info(tree), err, "Locking error: "
+                    "Extent tree was modified by another "
+                    "thread while locked.");
+}
+
 /*
  * clear some bits on a range in the tree.  This may require splitting
  * or inserting elements in the tree, so the gfp mask is used to
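
extent_io_tree_panic() centralizes what the BUG_ON(err == -EEXIST) calls below used to express: split_state() and insert_state() run under tree->lock, so a failure here means the tree changed underneath the lock, an unrecoverable logic error rather than an I/O condition. The recurring call-site pattern this merge introduces is:

        err = split_state(tree, state, prealloc, start);
        if (err)
                extent_io_tree_panic(tree, err);        /* does not return */

btrfs_panic() comes from the same error-handling branch; roughly, it panics the machine when the filesystem is mounted with errors=panic and BUGs the current task otherwise, so execution never continues past the call.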
@@ -452,8 +466,7 @@ alloc_extent_state_atomic(struct extent_state *prealloc)
  *
  * the range [start, end] is inclusive.
  *
- * This takes the tree lock, and returns < 0 on error, > 0 if any of the
- * bits were already set, or zero if none of the bits were already set.
+ * This takes the tree lock, and returns 0 on success and < 0 on error.
  */
 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                      int bits, int wake, int delete,
@@ -467,7 +480,6 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
         struct rb_node *node;
         u64 last_end;
         int err;
-        int set = 0;
         int clear = 0;
 
         if (delete)
@@ -545,12 +557,14 @@ hit_next:
                 prealloc = alloc_extent_state_atomic(prealloc);
                 BUG_ON(!prealloc);
                 err = split_state(tree, state, prealloc, start);
-                BUG_ON(err == -EEXIST);
+                if (err)
+                        extent_io_tree_panic(tree, err);
+
                 prealloc = NULL;
                 if (err)
                         goto out;
                 if (state->end <= end) {
-                        set |= clear_state_bit(tree, state, &bits, wake);
+                        clear_state_bit(tree, state, &bits, wake);
                         if (last_end == (u64)-1)
                                 goto out;
                         start = last_end + 1;
@@ -567,17 +581,19 @@ hit_next:
                 prealloc = alloc_extent_state_atomic(prealloc);
                 BUG_ON(!prealloc);
                 err = split_state(tree, state, prealloc, end + 1);
-                BUG_ON(err == -EEXIST);
+                if (err)
+                        extent_io_tree_panic(tree, err);
+
                 if (wake)
                         wake_up(&state->wq);
 
-                set |= clear_state_bit(tree, prealloc, &bits, wake);
+                clear_state_bit(tree, prealloc, &bits, wake);
 
                 prealloc = NULL;
                 goto out;
         }
 
-        set |= clear_state_bit(tree, state, &bits, wake);
+        clear_state_bit(tree, state, &bits, wake);
 next:
         if (last_end == (u64)-1)
                 goto out;
@@ -594,7 +610,7 @@ out:
         if (prealloc)
                 free_extent_state(prealloc);
 
-        return set;
+        return 0;
 
 search_again:
         if (start > end)
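
With the set accumulator gone, clear_extent_bit() no longer tells its caller whether any of the requested bits had actually been set; it returns 0 on success, as the rewritten comment above states. A caller that still needs the old answer has to probe the range first, along these lines (a sketch using test_range_bit() from this file):

        /* sketch: recover the old ">0 if any bits were set" information */
        int was_set = test_range_bit(tree, start, end, bits, 0, NULL);

        clear_extent_bit(tree, start, end, bits, wake, delete, NULL, mask);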
@@ -605,8 +621,8 @@ search_again:
         goto again;
 }
 
-static int wait_on_state(struct extent_io_tree *tree,
-                         struct extent_state *state)
+static void wait_on_state(struct extent_io_tree *tree,
+                          struct extent_state *state)
                 __releases(tree->lock)
                 __acquires(tree->lock)
 {
@@ -616,7 +632,6 @@ static int wait_on_state(struct extent_io_tree *tree,
         schedule();
         spin_lock(&tree->lock);
         finish_wait(&state->wq, &wait);
-        return 0;
 }
 
 /*
@@ -624,7 +639,7 @@ static int wait_on_state(struct extent_io_tree *tree,
  * The range [start, end] is inclusive.
  * The tree lock is taken by this function
  */
-int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
+void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
 {
         struct extent_state *state;
         struct rb_node *node;
@@ -661,7 +676,6 @@ again:
         }
 out:
         spin_unlock(&tree->lock);
-        return 0;
 }
 
 static void set_state_bits(struct extent_io_tree *tree,
@@ -709,9 +723,10 @@ static void uncache_state(struct extent_state **cached_ptr)
  * [start, end] is inclusive This takes the tree lock.
  */
 
-int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                   int bits, int exclusive_bits, u64 *failed_start,
-                   struct extent_state **cached_state, gfp_t mask)
+static int __must_check
+__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+                 int bits, int exclusive_bits, u64 *failed_start,
+                 struct extent_state **cached_state, gfp_t mask)
 {
         struct extent_state *state;
         struct extent_state *prealloc = NULL;
@@ -745,8 +760,10 @@ again:
                 prealloc = alloc_extent_state_atomic(prealloc);
                 BUG_ON(!prealloc);
                 err = insert_state(tree, prealloc, start, end, &bits);
+                if (err)
+                        extent_io_tree_panic(tree, err);
+
                 prealloc = NULL;
-                BUG_ON(err == -EEXIST);
                 goto out;
         }
         state = rb_entry(node, struct extent_state, rb_node);
@@ -812,7 +829,9 @@ hit_next:
                 prealloc = alloc_extent_state_atomic(prealloc);
                 BUG_ON(!prealloc);
                 err = split_state(tree, state, prealloc, start);
-                BUG_ON(err == -EEXIST);
+                if (err)
+                        extent_io_tree_panic(tree, err);
+
                 prealloc = NULL;
                 if (err)
                         goto out;
@@ -849,12 +868,9 @@ hit_next:
                  */
                 err = insert_state(tree, prealloc, start, this_end,
                                    &bits);
-                BUG_ON(err == -EEXIST);
-                if (err) {
-                        free_extent_state(prealloc);
-                        prealloc = NULL;
-                        goto out;
-                }
+                if (err)
+                        extent_io_tree_panic(tree, err);
+
                 cache_state(prealloc, cached_state);
                 prealloc = NULL;
                 start = this_end + 1;
@@ -876,7 +892,8 @@ hit_next:
                 prealloc = alloc_extent_state_atomic(prealloc);
                 BUG_ON(!prealloc);
                 err = split_state(tree, state, prealloc, end + 1);
-                BUG_ON(err == -EEXIST);
+                if (err)
+                        extent_io_tree_panic(tree, err);
 
                 set_state_bits(tree, prealloc, &bits);
                 cache_state(prealloc, cached_state);
@@ -903,6 +920,15 @@ search_again:
         goto again;
 }
 
+int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
+                   u64 *failed_start, struct extent_state **cached_state,
+                   gfp_t mask)
+{
+        return __set_extent_bit(tree, start, end, bits, 0, failed_start,
+                                cached_state, mask);
+}
+
+
 /**
  * convert_extent - convert all bits in a given range from one bit to another
  * @tree:       the io tree to search
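
The net effect of this split is that exclusive_bits disappears from the public interface: __set_extent_bit() keeps the parameter (and gains __must_check), while the exported set_extent_bit() always passes 0, leaving the locking helpers further down as the only users of exclusivity. Every caller outside this file drops one argument, schematically:

        /* before: a dead exclusive_bits argument at every call site */
        set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL, NULL, mask);

        /* after: exclusivity is private to extent_io.c */
        set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL, NULL, mask);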
@@ -949,7 +975,8 @@ again:
                 }
                 err = insert_state(tree, prealloc, start, end, &bits);
                 prealloc = NULL;
-                BUG_ON(err == -EEXIST);
+                if (err)
+                        extent_io_tree_panic(tree, err);
                 goto out;
         }
         state = rb_entry(node, struct extent_state, rb_node);
@@ -1005,7 +1032,8 @@ hit_next:
                         goto out;
                 }
                 err = split_state(tree, state, prealloc, start);
-                BUG_ON(err == -EEXIST);
+                if (err)
+                        extent_io_tree_panic(tree, err);
                 prealloc = NULL;
                 if (err)
                         goto out;
@@ -1044,12 +1072,8 @@ hit_next:
                  */
                 err = insert_state(tree, prealloc, start, this_end,
                                    &bits);
-                BUG_ON(err == -EEXIST);
-                if (err) {
-                        free_extent_state(prealloc);
-                        prealloc = NULL;
-                        goto out;
-                }
+                if (err)
+                        extent_io_tree_panic(tree, err);
                 prealloc = NULL;
                 start = this_end + 1;
                 goto search_again;
@@ -1068,7 +1092,8 @@ hit_next:
         }
 
         err = split_state(tree, state, prealloc, end + 1);
-        BUG_ON(err == -EEXIST);
+        if (err)
+                extent_io_tree_panic(tree, err);
 
         set_state_bits(tree, prealloc, &bits);
         clear_state_bit(tree, prealloc, &clear_bits, 0);
@@ -1098,14 +1123,14 @@ search_again:
 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
                      gfp_t mask)
 {
-        return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
+        return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
                               NULL, mask);
 }
 
 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                     int bits, gfp_t mask)
 {
-        return set_extent_bit(tree, start, end, bits, 0, NULL,
+        return set_extent_bit(tree, start, end, bits, NULL,
                               NULL, mask);
 }
 
@@ -1120,7 +1145,7 @@ int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
 {
         return set_extent_bit(tree, start, end,
                               EXTENT_DELALLOC | EXTENT_UPTODATE,
-                              0, NULL, cached_state, mask);
+                              NULL, cached_state, mask);
 }
 
 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
@@ -1134,7 +1159,7 @@ int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
                    gfp_t mask)
 {
-        return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
+        return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
                               NULL, mask);
 }
 
@@ -1142,7 +1167,7 @@ int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
                         struct extent_state **cached_state, gfp_t mask)
 {
         return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
-                              NULL, cached_state, mask);
+                              cached_state, mask);
 }
 
 static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
@@ -1158,42 +1183,40 @@ static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
  * us if waiting is desired.
  */
 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-                     int bits, struct extent_state **cached_state, gfp_t mask)
+                     int bits, struct extent_state **cached_state)
 {
         int err;
         u64 failed_start;
         while (1) {
-                err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
-                                     EXTENT_LOCKED, &failed_start,
-                                     cached_state, mask);
-                if (err == -EEXIST && (mask & __GFP_WAIT)) {
+                err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
+                                       EXTENT_LOCKED, &failed_start,
+                                       cached_state, GFP_NOFS);
+                if (err == -EEXIST) {
                         wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
                         start = failed_start;
-                } else {
+                } else
                         break;
-                }
                 WARN_ON(start > end);
         }
         return err;
 }
 
-int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
+int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
 {
-        return lock_extent_bits(tree, start, end, 0, NULL, mask);
+        return lock_extent_bits(tree, start, end, 0, NULL);
 }
 
-int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
-                    gfp_t mask)
+int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
 {
         int err;
         u64 failed_start;
 
-        err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
-                             &failed_start, NULL, mask);
+        err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
+                               &failed_start, NULL, GFP_NOFS);
         if (err == -EEXIST) {
                 if (failed_start > start)
                         clear_extent_bit(tree, start, failed_start - 1,
-                                         EXTENT_LOCKED, 1, 0, NULL, mask);
+                                         EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
                 return 0;
         }
         return 1;
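
The locking helpers also stop taking a gfp_t: every caller passed GFP_NOFS anyway, since extent locking sits on I/O paths where an allocation that recursed into filesystem reclaim could deadlock. Hard-coding GFP_NOFS also retires the `mask & __GFP_WAIT` test, because GFP_NOFS always permits waiting. The calling convention after the change, as a sketch:

        /* blocking lock of a byte range, then unlock */
        lock_extent(tree, start, end);
        /* ... examine extent state or start I/O on [start, end] ... */
        unlock_extent(tree, start, end);

        /* opportunistic variant: returns 1 on success, 0 after undoing
         * any partially acquired range */
        if (try_lock_extent(tree, start, end)) {
                /* ... */
                unlock_extent(tree, start, end);
        }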
@@ -1206,10 +1229,10 @@ int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
                                 mask);
 }
 
-int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
+int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
 {
         return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
-                                mask);
+                                GFP_NOFS);
 }
 
 /*
@@ -1223,7 +1246,7 @@ static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
 
         while (index <= end_index) {
                 page = find_get_page(tree->mapping, index);
-                BUG_ON(!page);
+                BUG_ON(!page); /* Pages should be in the extent_io_tree */
                 set_page_writeback(page);
                 page_cache_release(page);
                 index++;
@@ -1346,9 +1369,9 @@ out:
         return found;
 }
 
-static noinline int __unlock_for_delalloc(struct inode *inode,
-                                          struct page *locked_page,
-                                          u64 start, u64 end)
+static noinline void __unlock_for_delalloc(struct inode *inode,
+                                           struct page *locked_page,
+                                           u64 start, u64 end)
 {
         int ret;
         struct page *pages[16];
@@ -1358,7 +1381,7 @@ static noinline int __unlock_for_delalloc(struct inode *inode,
         int i;
 
         if (index == locked_page->index && end_index == index)
-                return 0;
+                return;
 
         while (nr_pages > 0) {
                 ret = find_get_pages_contig(inode->i_mapping, index,
@@ -1373,7 +1396,6 @@ static noinline int __unlock_for_delalloc(struct inode *inode,
                 index += ret;
                 cond_resched();
         }
-        return 0;
 }
 
 static noinline int lock_delalloc_pages(struct inode *inode,
@@ -1503,11 +1525,10 @@ again:
                         goto out_failed;
                 }
         }
-        BUG_ON(ret);
+        BUG_ON(ret);    /* Only valid values are 0 and -EAGAIN */
 
         /* step three, lock the state bits for the whole range */
-        lock_extent_bits(tree, delalloc_start, delalloc_end,
-                         0, &cached_state, GFP_NOFS);
+        lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);
 
         /* then test to make sure it is all still delalloc */
         ret = test_range_bit(tree, delalloc_start, delalloc_end,
@@ -1764,39 +1785,34 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
  * helper function to set a given page up to date if all the
  * extents in the tree for that page are up to date
  */
-static int check_page_uptodate(struct extent_io_tree *tree,
-                               struct page *page)
+static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
 {
         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
         u64 end = start + PAGE_CACHE_SIZE - 1;
         if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
                 SetPageUptodate(page);
-        return 0;
 }
 
 /*
  * helper function to unlock a page if all the extents in the tree
  * for that page are unlocked
  */
-static int check_page_locked(struct extent_io_tree *tree,
-                             struct page *page)
+static void check_page_locked(struct extent_io_tree *tree, struct page *page)
 {
         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
         u64 end = start + PAGE_CACHE_SIZE - 1;
         if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
                 unlock_page(page);
-        return 0;
 }
 
 /*
  * helper function to end page writeback if all the extents
  * in the tree for that page are done with writeback
  */
-static int check_page_writeback(struct extent_io_tree *tree,
-                                struct page *page)
+static void check_page_writeback(struct extent_io_tree *tree,
                                  struct page *page)
 {
         end_page_writeback(page);
-        return 0;
 }
 
 /*
@@ -2409,8 +2425,12 @@ btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
         return bio;
 }
 
-static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
-                          unsigned long bio_flags)
+/*
+ * Since writes are async, they will only return -ENOMEM.
+ * Reads can return the full range of I/O error conditions.
+ */
+static int __must_check submit_one_bio(int rw, struct bio *bio,
                                        int mirror_num, unsigned long bio_flags)
 {
         int ret = 0;
         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
@@ -2436,6 +2456,19 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
         return ret;
 }
 
+static int merge_bio(struct extent_io_tree *tree, struct page *page,
+                     unsigned long offset, size_t size, struct bio *bio,
+                     unsigned long bio_flags)
+{
+        int ret = 0;
+        if (tree->ops && tree->ops->merge_bio_hook)
+                ret = tree->ops->merge_bio_hook(page, offset, size, bio,
+                                                bio_flags);
+        BUG_ON(ret < 0);
+        return ret;
+
+}
+
 static int submit_extent_page(int rw, struct extent_io_tree *tree,
                               struct page *page, sector_t sector,
                               size_t size, unsigned long offset,
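
merge_bio() factors the optional per-tree merge_bio_hook out of the hard-to-read condition in submit_extent_page() below. The hook's contract: return nonzero if the page must not be added to the current bio, which forces the bio to be submitted and a fresh one started; negative values are treated as a programming error, hence the BUG_ON. A toy illustration of that contract (a hypothetical hook, not btrfs_merge_bio_hook itself, which consults the chunk/stripe mapping):

        /* hypothetical: never let a bio grow across a 64K boundary;
         * bi_sector/bi_size are the struct bio fields of this era */
        static int example_merge_bio_hook(struct page *page, unsigned long offset,
                                          size_t size, struct bio *bio,
                                          unsigned long bio_flags)
        {
                u64 logical = (u64)bio->bi_sector << 9; /* sectors -> bytes */

                return (logical & 0xffff) + bio->bi_size + size > 65536;
        }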
@@ -2464,12 +2497,12 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
                         sector;
 
                 if (prev_bio_flags != bio_flags || !contig ||
-                    (tree->ops && tree->ops->merge_bio_hook &&
-                     tree->ops->merge_bio_hook(page, offset, page_size, bio,
-                                               bio_flags)) ||
+                    merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
                     bio_add_page(bio, page, page_size, offset) < page_size) {
                         ret = submit_one_bio(rw, bio, mirror_num,
                                              prev_bio_flags);
+                        if (ret < 0)
+                                return ret;
                         bio = NULL;
                 } else {
                         return 0;
@@ -2520,6 +2553,7 @@ void set_page_extent_mapped(struct page *page)
  * basic readpage implementation.  Locked extent state structs are inserted
  * into the tree that are removed when the IO is done (by the end_io
  * handlers)
+ * XXX JDM: This needs looking at to ensure proper page locking
  */
 static int __extent_read_full_page(struct extent_io_tree *tree,
                                    struct page *page,
@@ -2559,11 +2593,11 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 
         end = page_end;
         while (1) {
-                lock_extent(tree, start, end, GFP_NOFS);
+                lock_extent(tree, start, end);
                 ordered = btrfs_lookup_ordered_extent(inode, start);
                 if (!ordered)
                         break;
-                unlock_extent(tree, start, end, GFP_NOFS);
+                unlock_extent(tree, start, end);
                 btrfs_start_ordered_extent(inode, ordered, 1);
                 btrfs_put_ordered_extent(ordered);
         }
@@ -2600,7 +2634,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                                     end - cur + 1, 0);
                 if (IS_ERR_OR_NULL(em)) {
                         SetPageError(page);
-                        unlock_extent(tree, cur, end, GFP_NOFS);
+                        unlock_extent(tree, cur, end);
                         break;
                 }
                 extent_offset = cur - em->start;
@@ -2652,7 +2686,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                 if (test_range_bit(tree, cur, cur_end,
                                    EXTENT_UPTODATE, 1, NULL)) {
                         check_page_uptodate(tree, page);
-                        unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+                        unlock_extent(tree, cur, cur + iosize - 1);
                         cur = cur + iosize;
                         pg_offset += iosize;
                         continue;
@@ -2662,7 +2696,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                  */
                 if (block_start == EXTENT_MAP_INLINE) {
                         SetPageError(page);
-                        unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+                        unlock_extent(tree, cur, cur + iosize - 1);
                         cur = cur + iosize;
                         pg_offset += iosize;
                         continue;
@@ -2682,6 +2716,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                                          end_bio_extent_readpage, mirror_num,
                                          *bio_flags,
                                          this_bio_flag);
+                        BUG_ON(ret == -ENOMEM);
                         nr++;
                         *bio_flags = this_bio_flag;
                 }
@@ -2823,7 +2858,11 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                                                        delalloc_end,
                                                        &page_started,
                                                        &nr_written);
-                        BUG_ON(ret);
+                        /* File system has been set read-only */
+                        if (ret) {
+                                SetPageError(page);
+                                goto done;
+                        }
                         /*
                          * delalloc_end is already one less than the total
                          * length, so we don't subtract one from
@@ -3396,10 +3435,14 @@ retry:
 static void flush_epd_write_bio(struct extent_page_data *epd)
 {
         if (epd->bio) {
+                int rw = WRITE;
+                int ret;
+
                 if (epd->sync_io)
-                        submit_one_bio(WRITE_SYNC, epd->bio, 0, 0);
-                else
-                        submit_one_bio(WRITE, epd->bio, 0, 0);
+                        rw = WRITE_SYNC;
+
+                ret = submit_one_bio(rw, epd->bio, 0, 0);
+                BUG_ON(ret < 0); /* -ENOMEM */
                 epd->bio = NULL;
         }
 }
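
Collapsing the two submit_one_bio() calls into one is what lets the new __must_check annotation bite here: the flags are chosen once and the single result is checked. Per the comment added above submit_one_bio(), an asynchronous write can only fail with -ENOMEM, which has no graceful recovery at this point, so the error is asserted rather than dropped. The consolidated idiom, in isolation:

        /* sketch: pick flags once, submit once, check once */
        int rw = epd->sync_io ? WRITE_SYNC : WRITE;
        int ret = submit_one_bio(rw, epd->bio, 0, 0);

        BUG_ON(ret < 0);        /* only -ENOMEM is possible for writes */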
@@ -3516,7 +3559,7 @@ int extent_readpages(struct extent_io_tree *tree,
         }
         BUG_ON(!list_empty(pages));
         if (bio)
-                submit_one_bio(READ, bio, 0, bio_flags);
+                return submit_one_bio(READ, bio, 0, bio_flags);
         return 0;
 }
 
@@ -3537,7 +3580,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,
         if (start > end)
                 return 0;
 
-        lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
+        lock_extent_bits(tree, start, end, 0, &cached_state);
         wait_on_page_writeback(page);
         clear_extent_bit(tree, start, end,
                          EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
@@ -3751,7 +3794,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
         }
 
         lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
-                         &cached_state, GFP_NOFS);
+                         &cached_state);
 
         em = get_extent_skip_holes(inode, start, last_for_get_extent,
                                    get_extent);
@@ -4239,14 +4282,13 @@ void free_extent_buffer_stale(struct extent_buffer *eb)
         release_extent_buffer(eb, GFP_NOFS);
 }
 
-int clear_extent_buffer_dirty(struct extent_buffer *eb)
+void clear_extent_buffer_dirty(struct extent_buffer *eb)
 {
         unsigned long i;
         unsigned long num_pages;
         struct page *page;
 
         num_pages = num_extent_pages(eb->start, eb->len);
-        WARN_ON(atomic_read(&eb->refs) == 0);
 
         for (i = 0; i < num_pages; i++) {
                 page = extent_buffer_page(eb, i);
@@ -4268,7 +4310,6 @@ int clear_extent_buffer_dirty(struct extent_buffer *eb)
                 unlock_page(page);
         }
         WARN_ON(atomic_read(&eb->refs) == 0);
-        return 0;
 }
 
 int set_extent_buffer_dirty(struct extent_buffer *eb)
@@ -4433,8 +4474,11 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
                 }
         }
 
-        if (bio)
-                submit_one_bio(READ, bio, mirror_num, bio_flags);
+        if (bio) {
+                err = submit_one_bio(READ, bio, mirror_num, bio_flags);
+                if (err)
+                        return err;
+        }
 
         if (ret || wait != WAIT_COMPLETE)
                 return ret;
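
Because submit_one_bio() is now __must_check, read_extent_buffer_pages() forwards a submission failure instead of swallowing it, and extent_readpages() above does the same, so read errors (-EIO as well as -ENOMEM) finally propagate out of this file. A metadata-reading caller would observe them roughly like this (a sketch in the style of btree_read_extent_buffer_pages() in disk-io.c):

        ret = read_extent_buffer_pages(io_tree, eb, 0, WAIT_COMPLETE,
                                       btree_get_extent, mirror_num);
        if (ret)        /* submission and completion errors both land here */
                return ret;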