Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/extent_io.c  125
-rw-r--r--  fs/btrfs/extent_io.h    5
-rw-r--r--  fs/btrfs/inode.c        6
3 files changed, 100 insertions, 36 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 7c70613eb72c..c7a5e860fe21 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -471,10 +471,14 @@ static int clear_state_bit(struct extent_io_tree *tree,
  * bits were already set, or zero if none of the bits were already set.
  */
 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                     int bits, int wake, int delete, gfp_t mask)
+                     int bits, int wake, int delete,
+                     struct extent_state **cached_state,
+                     gfp_t mask)
 {
         struct extent_state *state;
+        struct extent_state *cached;
         struct extent_state *prealloc = NULL;
+        struct rb_node *next_node;
         struct rb_node *node;
         u64 last_end;
         int err;
@@ -488,6 +492,17 @@ again:
         }
 
         spin_lock(&tree->lock);
+        if (cached_state) {
+                cached = *cached_state;
+                *cached_state = NULL;
+                if (cached->tree && cached->start == start) {
+                        atomic_dec(&cached->refs);
+                        state = cached;
+                        last_end = state->end;
+                        goto found;
+                }
+                free_extent_state(cached);
+        }
         /*
          * this search will find the extents that end after
          * our range starts
@@ -496,6 +511,7 @@ again:
         if (!node)
                 goto out;
         state = rb_entry(node, struct extent_state, rb_node);
+hit_next:
         if (state->start > end)
                 goto out;
         WARN_ON(state->end < start);
@@ -555,11 +571,21 @@ again:
                 prealloc = NULL;
                 goto out;
         }
-
+found:
+        if (state->end < end && prealloc && !need_resched())
+                next_node = rb_next(&state->rb_node);
+        else
+                next_node = NULL;
         set |= clear_state_bit(tree, state, bits, wake, delete);
         if (last_end == (u64)-1)
                 goto out;
         start = last_end + 1;
+        if (start <= end && next_node) {
+                state = rb_entry(next_node, struct extent_state,
+                                 rb_node);
+                if (state->start == start)
+                        goto hit_next;
+        }
         goto search_again;
 
 out:
@@ -653,6 +679,17 @@ static void set_state_bits(struct extent_io_tree *tree,
         state->state |= bits;
 }
 
+static void cache_state(struct extent_state *state,
+                        struct extent_state **cached_ptr)
+{
+        if (cached_ptr && !(*cached_ptr)) {
+                if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
+                        *cached_ptr = state;
+                        atomic_inc(&state->refs);
+                }
+        }
+}
+
 /*
  * set some bits on a range in the tree. This may require allocations or
  * sleeping, so the gfp mask is used to indicate what is allowed.
@@ -666,6 +703,7 @@ static void set_state_bits(struct extent_io_tree *tree,
 
 static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                           int bits, int exclusive_bits, u64 *failed_start,
+                          struct extent_state **cached_state,
                           gfp_t mask)
 {
         struct extent_state *state;
@@ -712,6 +750,7 @@ hit_next:
                         goto out;
                 }
                 set_state_bits(tree, state, bits);
+                cache_state(state, cached_state);
                 merge_state(tree, state);
                 if (last_end == (u64)-1)
                         goto out;
@@ -758,6 +797,7 @@ hit_next:
                         goto out;
                 if (state->end <= end) {
                         set_state_bits(tree, state, bits);
+                        cache_state(state, cached_state);
                         merge_state(tree, state);
                         if (last_end == (u64)-1)
                                 goto out;
@@ -782,6 +822,7 @@ hit_next:
                 this_end = last_start - 1;
                 err = insert_state(tree, prealloc, start, this_end,
                                    bits);
+                cache_state(prealloc, cached_state);
                 prealloc = NULL;
                 BUG_ON(err == -EEXIST);
                 if (err)
@@ -805,6 +846,7 @@ hit_next:
                 BUG_ON(err == -EEXIST);
 
                 set_state_bits(tree, prealloc, bits);
+                cache_state(prealloc, cached_state);
                 merge_state(tree, prealloc);
                 prealloc = NULL;
                 goto out;
@@ -833,26 +875,27 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
                      gfp_t mask)
 {
         return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
-                              mask);
+                              NULL, mask);
 }
 
 int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
                        gfp_t mask)
 {
-        return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask);
+        return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, NULL,
+                              mask);
 }
 
 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                     int bits, gfp_t mask)
 {
         return set_extent_bit(tree, start, end, bits, 0, NULL,
-                              mask);
+                              NULL, mask);
 }
 
 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                       int bits, gfp_t mask)
 {
-        return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
+        return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
 }
 
 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
@@ -860,46 +903,50 @@ int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
 {
         return set_extent_bit(tree, start, end,
                               EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
-                              0, NULL, mask);
+                              0, NULL, NULL, mask);
 }
 
 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
                        gfp_t mask)
 {
         return clear_extent_bit(tree, start, end,
-                                EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
+                                EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
+                                NULL, mask);
 }
 
 int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
                          gfp_t mask)
 {
-        return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
+        return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0,
+                                NULL, mask);
 }
 
 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
                    gfp_t mask)
 {
         return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
-                              mask);
+                              NULL, mask);
 }
 
 static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
                             gfp_t mask)
 {
-        return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
+        return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0,
+                                NULL, mask);
 }
 
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
                         gfp_t mask)
 {
         return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
-                              mask);
+                              NULL, mask);
 }
 
 static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
                                  u64 end, gfp_t mask)
 {
-        return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
+        return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
+                                NULL, mask);
 }
 
 int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
@@ -912,13 +959,14 @@ int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
  * us if waiting is desired.
  */
 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-                     int bits, gfp_t mask)
+                     int bits, struct extent_state **cached_state, gfp_t mask)
 {
         int err;
         u64 failed_start;
         while (1) {
                 err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
-                                     EXTENT_LOCKED, &failed_start, mask);
+                                     EXTENT_LOCKED, &failed_start,
+                                     cached_state, mask);
                 if (err == -EEXIST && (mask & __GFP_WAIT)) {
                         wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
                         start = failed_start;
@@ -932,7 +980,7 @@ int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 
 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
 {
-        return lock_extent_bits(tree, start, end, 0, mask);
+        return lock_extent_bits(tree, start, end, 0, NULL, mask);
 }
 
 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
@@ -941,21 +989,29 @@ int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
         int err;
         u64 failed_start;
 
-        err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
-                             &failed_start, mask);
+        err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
+                             &failed_start, NULL, mask);
         if (err == -EEXIST) {
                 if (failed_start > start)
                         clear_extent_bit(tree, start, failed_start - 1,
-                                         EXTENT_LOCKED, 1, 0, mask);
+                                         EXTENT_LOCKED, 1, 0, NULL, mask);
                 return 0;
         }
         return 1;
 }
 
+int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
+                         struct extent_state **cached, gfp_t mask)
+{
+        return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
+                                mask);
+}
+
 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
                   gfp_t mask)
 {
-        return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
+        return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
+                                mask);
 }
 
 /*
@@ -1323,7 +1379,7 @@ int extent_clear_unlock_delalloc(struct inode *inode,
         if (clear_delalloc)
                 clear_bits |= EXTENT_DELALLOC;
 
-        clear_extent_bit(tree, start, end, clear_bits, 1, 0, GFP_NOFS);
+        clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
         if (!(unlock_pages || clear_dirty || set_writeback || end_writeback))
                 return 0;
 
@@ -2071,6 +2127,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
         u64 iosize;
         u64 unlock_start;
         sector_t sector;
+        struct extent_state *cached_state = NULL;
         struct extent_map *em;
         struct block_device *bdev;
         int ret;
@@ -2162,7 +2219,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                         goto done_unlocked;
                 }
         }
-        lock_extent(tree, start, page_end, GFP_NOFS);
+        lock_extent_bits(tree, start, page_end, 0, &cached_state, GFP_NOFS);
 
         unlock_start = start;
 
@@ -2170,7 +2227,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                 ret = tree->ops->writepage_start_hook(page, start,
                                                       page_end);
                 if (ret == -EAGAIN) {
-                        unlock_extent(tree, start, page_end, GFP_NOFS);
+                        unlock_extent_cached(tree, start, page_end,
+                                             &cached_state, GFP_NOFS);
                         redirty_page_for_writepage(wbc, page);
                         update_nr_written(page, wbc, nr_written);
                         unlock_page(page);
@@ -2192,7 +2250,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
         if (last_byte <= start) {
                 clear_extent_bit(tree, start, page_end,
                                  EXTENT_LOCKED | EXTENT_DIRTY,
-                                 1, 0, GFP_NOFS);
+                                 1, 0, NULL, GFP_NOFS);
                 if (tree->ops && tree->ops->writepage_end_io_hook)
                         tree->ops->writepage_end_io_hook(page, start,
                                                          page_end, NULL, 1);
@@ -2204,7 +2262,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 
         while (cur <= end) {
                 if (cur >= last_byte) {
-                        unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
+                        unlock_extent_cached(tree, unlock_start, page_end,
+                                             &cached_state, GFP_NOFS);
                         if (tree->ops && tree->ops->writepage_end_io_hook)
                                 tree->ops->writepage_end_io_hook(page, cur,
                                                          page_end, NULL, 1);
@@ -2236,8 +2295,9 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                  */
                 if (compressed || block_start == EXTENT_MAP_HOLE ||
                     block_start == EXTENT_MAP_INLINE) {
-                        unlock_extent(tree, unlock_start, cur + iosize - 1,
-                                      GFP_NOFS);
+                        unlock_extent_cached(tree, unlock_start,
+                                             cur + iosize - 1, &cached_state,
+                                             GFP_NOFS);
 
                         /*
                          * end_io notification does not happen here for
@@ -2307,11 +2367,14 @@ done:
                 end_page_writeback(page);
         }
         if (unlock_start <= page_end)
-                unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
+                unlock_extent_cached(tree, unlock_start, page_end,
+                                     &cached_state, GFP_NOFS);
         unlock_page(page);
 
 done_unlocked:
 
+        /* drop our reference on any cached states */
+        free_extent_state(cached_state);
         return 0;
 }
 
@@ -2599,7 +2662,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,
         wait_on_page_writeback(page);
         clear_extent_bit(tree, start, end,
                          EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
-                         1, 1, GFP_NOFS);
+                         1, 1, NULL, GFP_NOFS);
         return 0;
 }
 
@@ -2693,7 +2756,7 @@ int extent_prepare_write(struct extent_io_tree *tree,
                  */
                 set_extent_bit(tree, block_start,
                                block_start + iosize - 1,
-                               EXTENT_LOCKED, 0, NULL, GFP_NOFS);
+                               EXTENT_LOCKED, 0, NULL, NULL, GFP_NOFS);
                 ret = submit_extent_page(READ, tree, page,
                                          sector, iosize, page_offset, em->bdev,
                                          NULL, 1,
@@ -2740,7 +2803,7 @@ int try_release_extent_state(struct extent_map_tree *map,
                 if ((mask & GFP_NOFS) == GFP_NOFS)
                         mask = GFP_NOFS;
                 clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
-                                 1, 1, mask);
+                                 1, 1, NULL, mask);
         }
         return ret;
 }
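
The extent_io.c hunks above thread an optional struct extent_state ** through set_extent_bit()/clear_extent_bit(): a caller that has just locked a range keeps a referenced extent_state and hands it back when it unlocks, letting clear_extent_bit() skip the rb-tree search when the cached state still matches. A minimal caller sketch of the new API, modelled on the __extent_writepage() changes above (the function write_one_range() and its arguments are illustrative assumptions, not part of the patch):

/*
 * Illustrative sketch only: how a caller uses the cached-state API added
 * by this patch; __extent_writepage() above is the real in-tree user.
 */
static void write_one_range(struct extent_io_tree *tree, u64 start, u64 end)
{
        struct extent_state *cached_state = NULL;

        /* cache_state() pins the locked state and stores it in cached_state */
        lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);

        /* ... operate on the locked range ... */

        /* the unlock reuses cached_state instead of searching the tree */
        unlock_extent_cached(tree, start, end, &cached_state, GFP_NOFS);

        /* drop whatever reference is still held; the done_unlocked path
         * above relies on free_extent_state() tolerating NULL */
        free_extent_state(cached_state);
}

Note that cache_state() only caches states carrying EXTENT_IOBITS or EXTENT_BOUNDARY, and clear_extent_bit() falls back to the ordinary tree search whenever the cached state is no longer in the tree or no longer starts at the requested offset, so a stale cache only costs the dropped reference.
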
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 88d134d01fbc..c8ead2b8c4c9 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -143,7 +143,7 @@ int try_release_extent_state(struct extent_map_tree *map,
                      gfp_t mask);
 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-                     int bits, gfp_t mask);
+                     int bits, struct extent_state **cached, gfp_t mask);
 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
                     gfp_t mask);
@@ -161,7 +161,8 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                       int bits, gfp_t mask);
 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                     int bits, int wake, int delete, gfp_t mask);
+                     int bits, int wake, int delete, struct extent_state **cached,
+                     gfp_t mask);
 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                     int bits, gfp_t mask);
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index f1df11718618..e494545c4202 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -854,7 +854,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
         int limit = 10 * 1024 * 1042;
 
         clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED |
-                         EXTENT_DELALLOC, 1, 0, GFP_NOFS);
+                         EXTENT_DELALLOC, 1, 0, NULL, GFP_NOFS);
         while (start < end) {
                 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
                 async_cow->inode = inode;
@@ -4420,7 +4420,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
                  */
                 clear_extent_bit(tree, page_start, page_end,
                                  EXTENT_DIRTY | EXTENT_DELALLOC |
                                  EXTENT_LOCKED, 1, 0, GFP_NOFS);
-                                 EXTENT_LOCKED, 1, 0, GFP_NOFS);
+                                 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
                 btrfs_finish_ordered_io(page->mapping->host,
                                         page_start, page_end);
                 btrfs_put_ordered_extent(ordered);
@@ -4429,7 +4429,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
         clear_extent_bit(tree, page_start, page_end,
                  EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
                  EXTENT_ORDERED,
-                 1, 1, GFP_NOFS);
+                 1, 1, NULL, GFP_NOFS);
         __btrfs_releasepage(page, GFP_NOFS);
 
         ClearPageChecked(page);
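
Callers with no state worth reusing, such as the three inode.c call sites above, simply pass NULL for the new cached_state argument and keep their old behaviour. For illustration only (arguments mirror the btrfs_invalidatepage() hunk above, not a new call site added by the patch):

        /* no cached state available: clear_extent_bit() searches the tree as before */
        clear_extent_bit(tree, page_start, page_end,
                         EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
                         1, 0, NULL, GFP_NOFS);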