Diffstat (limited to 'fs/btrfs/extent_io.c')
 fs/btrfs/extent_io.c | 324
 1 file changed, 60 insertions(+), 264 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 4f9893243dae..c5d9fbb92bc3 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -103,7 +103,7 @@ void extent_io_exit(void)
 }
 
 void extent_io_tree_init(struct extent_io_tree *tree,
-                         struct address_space *mapping, gfp_t mask)
+                         struct address_space *mapping)
 {
         tree->state = RB_ROOT;
         INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
@@ -441,6 +441,15 @@ static int clear_state_bit(struct extent_io_tree *tree,
         return ret;
 }
 
+static struct extent_state *
+alloc_extent_state_atomic(struct extent_state *prealloc)
+{
+        if (!prealloc)
+                prealloc = alloc_extent_state(GFP_ATOMIC);
+
+        return prealloc;
+}
+
 /*
  * clear some bits on a range in the tree. This may require splitting
  * or inserting elements in the tree, so the gfp mask is used to
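The hunk above adds alloc_extent_state_atomic(), which wraps the "reuse the caller's preallocated extent_state, otherwise try a GFP_ATOMIC allocation" fallback that the call sites below previously open-coded; those callers now BUG_ON() a NULL result. A minimal userspace sketch of the same pattern (the struct, allocator and names here are illustrative stand-ins, not btrfs code):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct extent_state. */
struct state {
        long start, end;
};

/* Stand-in allocator; the kernel version takes a gfp mask. */
static struct state *alloc_state(void)
{
        return calloc(1, sizeof(struct state));
}

/*
 * Same shape as alloc_extent_state_atomic(): reuse the caller's
 * preallocation when present, otherwise allocate on the spot.
 */
static struct state *alloc_state_atomic(struct state *prealloc)
{
        if (!prealloc)
                prealloc = alloc_state();
        return prealloc;
}

int main(void)
{
        struct state *prealloc = NULL;

        prealloc = alloc_state_atomic(prealloc); /* falls back to a fresh allocation */
        if (!prealloc)
                return 1;                        /* the kernel callers BUG() here instead */
        printf("got state at %p\n", (void *)prealloc);
        free(prealloc);
        return 0;
}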
@@ -531,8 +540,8 @@ hit_next:
          */
 
         if (state->start < start) {
-                if (!prealloc)
-                        prealloc = alloc_extent_state(GFP_ATOMIC);
+                prealloc = alloc_extent_state_atomic(prealloc);
+                BUG_ON(!prealloc);
                 err = split_state(tree, state, prealloc, start);
                 BUG_ON(err == -EEXIST);
                 prealloc = NULL;
@@ -553,8 +562,8 @@ hit_next:
          * on the first half
          */
         if (state->start <= end && state->end > end) {
-                if (!prealloc)
-                        prealloc = alloc_extent_state(GFP_ATOMIC);
+                prealloc = alloc_extent_state_atomic(prealloc);
+                BUG_ON(!prealloc);
                 err = split_state(tree, state, prealloc, end + 1);
                 BUG_ON(err == -EEXIST);
                 if (wake)
@@ -727,8 +736,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 again:
         if (!prealloc && (mask & __GFP_WAIT)) {
                 prealloc = alloc_extent_state(mask);
-                if (!prealloc)
-                        return -ENOMEM;
+                BUG_ON(!prealloc);
         }
 
         spin_lock(&tree->lock);
@@ -745,6 +753,8 @@ again:
          */
         node = tree_search(tree, start);
         if (!node) {
+                prealloc = alloc_extent_state_atomic(prealloc);
+                BUG_ON(!prealloc);
                 err = insert_state(tree, prealloc, start, end, &bits);
                 prealloc = NULL;
                 BUG_ON(err == -EEXIST);
@@ -773,20 +783,18 @@ hit_next:
                 if (err)
                         goto out;
 
+                next_node = rb_next(node);
                 cache_state(state, cached_state);
                 merge_state(tree, state);
                 if (last_end == (u64)-1)
                         goto out;
 
                 start = last_end + 1;
-                if (start < end && prealloc && !need_resched()) {
-                        next_node = rb_next(node);
-                        if (next_node) {
-                                state = rb_entry(next_node, struct extent_state,
-                                                 rb_node);
-                                if (state->start == start)
-                                        goto hit_next;
-                        }
+                if (next_node && start < end && prealloc && !need_resched()) {
+                        state = rb_entry(next_node, struct extent_state,
+                                         rb_node);
+                        if (state->start == start)
+                                goto hit_next;
                 }
                 goto search_again;
         }
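In the hunk above, rb_next(node) is now read before cache_state()/merge_state() run, and the cached successor is reused afterwards; merge_state() can merge the current state into a neighbour and free it, so reading the successor later would touch freed memory (that is the apparent rationale; the patch itself does not state it). The sketch below shows the general "capture the successor before an operation that may free the current element" pattern on a plain linked list (illustrative types and names, not the rb-tree code):

#include <stdio.h>
#include <stdlib.h>

struct node {
        int val;
        struct node *next;
};

/* May free 'n' (e.g. by merging it into a neighbour), like merge_state(). */
static void maybe_free(struct node *n)
{
        if (n->val % 2 == 0)
                free(n);
}

int main(void)
{
        struct node *b = calloc(1, sizeof(*b));
        struct node *a = calloc(1, sizeof(*a));

        a->val = 2;             /* even, so maybe_free() will free it */
        a->next = b;
        b->val = 3;

        struct node *next = a->next;  /* capture the successor first ...   */
        maybe_free(a);                /* ... because 'a' may be gone now    */
        if (next)
                printf("successor value: %d\n", next->val);

        free(b);
        return 0;
}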
@@ -813,6 +821,9 @@ hit_next:
                         err = -EEXIST;
                         goto out;
                 }
+
+                prealloc = alloc_extent_state_atomic(prealloc);
+                BUG_ON(!prealloc);
                 err = split_state(tree, state, prealloc, start);
                 BUG_ON(err == -EEXIST);
                 prealloc = NULL;
@@ -843,14 +854,25 @@ hit_next:
                         this_end = end;
                 else
                         this_end = last_start - 1;
+
+                prealloc = alloc_extent_state_atomic(prealloc);
+                BUG_ON(!prealloc);
+
+                /*
+                 * Avoid to free 'prealloc' if it can be merged with
+                 * the later extent.
+                 */
+                atomic_inc(&prealloc->refs);
                 err = insert_state(tree, prealloc, start, this_end,
                                    &bits);
                 BUG_ON(err == -EEXIST);
                 if (err) {
+                        free_extent_state(prealloc);
                         prealloc = NULL;
                         goto out;
                 }
                 cache_state(prealloc, cached_state);
+                free_extent_state(prealloc);
                 prealloc = NULL;
                 start = this_end + 1;
                 goto search_again;
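The atomic_inc(&prealloc->refs) / free_extent_state() pair above pins 'prealloc' across insert_state(): once the state is linked into the tree, a merge may drop the tree's reference and free it, so the caller holds one extra reference of its own until it is done with cache_state() (or the error path), then releases it. A small userspace sketch of this "pin with an extra reference across a handoff" idea (a trivial refcount, not the kernel's atomic_t machinery):

#include <stdio.h>
#include <stdlib.h>

struct obj {
        int refs;
        long start, end;
};

static struct obj *obj_alloc(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (o)
                o->refs = 1;
        return o;
}

static void obj_get(struct obj *o) { o->refs++; }

static void obj_put(struct obj *o)
{
        if (--o->refs == 0)
                free(o);
}

/*
 * Takes over the object's original reference, the way the tree owns an
 * inserted state; a later merge may drop that reference at any time.
 * For brevity this sketch drops it immediately.
 */
static int tree_insert(struct obj *o)
{
        obj_put(o);
        return 0;
}

int main(void)
{
        struct obj *prealloc = obj_alloc();

        if (!prealloc)
                return 1;

        obj_get(prealloc);              /* extra pin, like atomic_inc(&prealloc->refs) */
        if (tree_insert(prealloc)) {
                obj_put(prealloc);      /* error path still holds a valid pointer */
                return 1;
        }
        printf("still safe to touch: refs=%d\n", prealloc->refs);
        obj_put(prealloc);              /* drop the pin, like free_extent_state() */
        return 0;
}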
@@ -867,6 +889,9 @@ hit_next:
                         err = -EEXIST;
                         goto out;
                 }
+
+                prealloc = alloc_extent_state_atomic(prealloc);
+                BUG_ON(!prealloc);
                 err = split_state(tree, state, prealloc, end + 1);
                 BUG_ON(err == -EEXIST);
 
@@ -943,13 +968,6 @@ int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
                               NULL, mask);
 }
 
-static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
-                            gfp_t mask)
-{
-        return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0,
-                                NULL, mask);
-}
-
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
                         struct extent_state **cached_state, gfp_t mask)
 {
@@ -965,11 +983,6 @@ static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
                                  cached_state, mask);
 }
 
-int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
-{
-        return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
-}
-
 /*
  * either insert or lock state struct between start and end use mask to tell
  * us if waiting is desired.
@@ -1030,25 +1043,6 @@ int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
 }
 
 /*
- * helper function to set pages and extents in the tree dirty
- */
-int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
-{
-        unsigned long index = start >> PAGE_CACHE_SHIFT;
-        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
-        struct page *page;
-
-        while (index <= end_index) {
-                page = find_get_page(tree->mapping, index);
-                BUG_ON(!page);
-                __set_page_dirty_nobuffers(page);
-                page_cache_release(page);
-                index++;
-        }
-        return 0;
-}
-
-/*
  * helper function to set both pages and extents in the tree writeback
  */
 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
@@ -1821,46 +1815,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
         bio_put(bio);
 }
 
-/*
- * IO done from prepare_write is pretty simple, we just unlock
- * the structs in the extent tree when done, and set the uptodate bits
- * as appropriate.
- */
-static void end_bio_extent_preparewrite(struct bio *bio, int err)
-{
-        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-        struct extent_io_tree *tree;
-        u64 start;
-        u64 end;
-
-        do {
-                struct page *page = bvec->bv_page;
-                struct extent_state *cached = NULL;
-                tree = &BTRFS_I(page->mapping->host)->io_tree;
-
-                start = ((u64)page->index << PAGE_CACHE_SHIFT) +
-                        bvec->bv_offset;
-                end = start + bvec->bv_len - 1;
-
-                if (--bvec >= bio->bi_io_vec)
-                        prefetchw(&bvec->bv_page->flags);
-
-                if (uptodate) {
-                        set_extent_uptodate(tree, start, end, &cached,
-                                            GFP_ATOMIC);
-                } else {
-                        ClearPageUptodate(page);
-                        SetPageError(page);
-                }
-
-                unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
-
-        } while (bvec >= bio->bi_io_vec);
-
-        bio_put(bio);
-}
-
 struct bio *
 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
                 gfp_t gfp_flags)
@@ -2009,7 +1963,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
         struct btrfs_ordered_extent *ordered;
         int ret;
         int nr = 0;
-        size_t page_offset = 0;
+        size_t pg_offset = 0;
         size_t iosize;
         size_t disk_io_size;
         size_t blocksize = inode->i_sb->s_blocksize;
@@ -2052,9 +2006,9 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                         char *userpage;
                         struct extent_state *cached = NULL;
 
-                        iosize = PAGE_CACHE_SIZE - page_offset;
+                        iosize = PAGE_CACHE_SIZE - pg_offset;
                         userpage = kmap_atomic(page, KM_USER0);
-                        memset(userpage + page_offset, 0, iosize);
+                        memset(userpage + pg_offset, 0, iosize);
                         flush_dcache_page(page);
                         kunmap_atomic(userpage, KM_USER0);
                         set_extent_uptodate(tree, cur, cur + iosize - 1,
@@ -2063,9 +2017,9 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                                              &cached, GFP_NOFS);
                         break;
                 }
-                em = get_extent(inode, page, page_offset, cur,
+                em = get_extent(inode, page, pg_offset, cur,
                                 end - cur + 1, 0);
-                if (IS_ERR(em) || !em) {
+                if (IS_ERR_OR_NULL(em)) {
                         SetPageError(page);
                         unlock_extent(tree, cur, end, GFP_NOFS);
                         break;
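The open-coded "IS_ERR(em) || !em" tests in this and the following hunks become IS_ERR_OR_NULL(em), the <linux/err.h> helper that covers both the NULL case and the ERR_PTR-encoded errno case in one call. A userspace sketch of the ERR_PTR convention and the combined check (simplified re-creations of the kernel macros, for illustration only):

#include <stdio.h>
#include <errno.h>

/* Userspace re-creation of the kernel convention: pointer values in the
 * last page of the address space encode negative errno values. */
#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static void *ERR_PTR(long error)             { return (void *)error; }
static long PTR_ERR(const void *ptr)         { return (long)ptr; }
static int IS_ERR(const void *ptr)           { return IS_ERR_VALUE(ptr); }
static int IS_ERR_OR_NULL(const void *ptr)   { return !ptr || IS_ERR(ptr); }

int main(void)
{
        int real;
        void *ok = &real;
        void *null = NULL;
        void *err = ERR_PTR(-ENOMEM);

        /* IS_ERR_OR_NULL(p) is exactly "IS_ERR(p) || !p", what the old code spelled out. */
        printf("ok:   %d\n", IS_ERR_OR_NULL(ok));                             /* 0 */
        printf("null: %d\n", IS_ERR_OR_NULL(null));                           /* 1 */
        printf("err:  %d (errno %ld)\n", IS_ERR_OR_NULL(err), PTR_ERR(err));  /* 1, -12 */
        return 0;
}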
@@ -2103,7 +2057,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                         struct extent_state *cached = NULL;
 
                         userpage = kmap_atomic(page, KM_USER0);
-                        memset(userpage + page_offset, 0, iosize);
+                        memset(userpage + pg_offset, 0, iosize);
                         flush_dcache_page(page);
                         kunmap_atomic(userpage, KM_USER0);
 
@@ -2112,7 +2066,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                         unlock_extent_cached(tree, cur, cur + iosize - 1,
                                              &cached, GFP_NOFS);
                         cur = cur + iosize;
-                        page_offset += iosize;
+                        pg_offset += iosize;
                         continue;
                 }
                 /* the get_extent function already copied into the page */
@@ -2121,7 +2075,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                         check_page_uptodate(tree, page);
                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
                         cur = cur + iosize;
-                        page_offset += iosize;
+                        pg_offset += iosize;
                         continue;
                 }
                 /* we have an inline extent but it didn't get marked up
@@ -2131,7 +2085,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                         SetPageError(page);
                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
                         cur = cur + iosize;
-                        page_offset += iosize;
+                        pg_offset += iosize;
                         continue;
                 }
 
@@ -2144,7 +2098,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                         unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
                         pnr -= page->index;
                         ret = submit_extent_page(READ, tree, page,
-                                         sector, disk_io_size, page_offset,
+                                         sector, disk_io_size, pg_offset,
                                          bdev, bio, pnr,
                                          end_bio_extent_readpage, mirror_num,
                                          *bio_flags,
@@ -2155,7 +2109,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                 if (ret)
                         SetPageError(page);
                 cur = cur + iosize;
-                page_offset += iosize;
+                pg_offset += iosize;
         }
 out:
         if (!nr) {
@@ -2351,7 +2305,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                 }
                 em = epd->get_extent(inode, page, pg_offset, cur,
                                      end - cur + 1, 1);
-                if (IS_ERR(em) || !em) {
+                if (IS_ERR_OR_NULL(em)) {
                         SetPageError(page);
                         break;
                 }
@@ -2730,128 +2684,6 @@ int extent_invalidatepage(struct extent_io_tree *tree,
 }
 
 /*
- * simple commit_write call, set_range_dirty is used to mark both
- * the pages and the extent records as dirty
- */
-int extent_commit_write(struct extent_io_tree *tree,
-                        struct inode *inode, struct page *page,
-                        unsigned from, unsigned to)
-{
-        loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
-
-        set_page_extent_mapped(page);
-        set_page_dirty(page);
-
-        if (pos > inode->i_size) {
-                i_size_write(inode, pos);
-                mark_inode_dirty(inode);
-        }
-        return 0;
-}
-
-int extent_prepare_write(struct extent_io_tree *tree,
-                         struct inode *inode, struct page *page,
-                         unsigned from, unsigned to, get_extent_t *get_extent)
-{
-        u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
-        u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
-        u64 block_start;
-        u64 orig_block_start;
-        u64 block_end;
-        u64 cur_end;
-        struct extent_map *em;
-        unsigned blocksize = 1 << inode->i_blkbits;
-        size_t page_offset = 0;
-        size_t block_off_start;
-        size_t block_off_end;
-        int err = 0;
-        int iocount = 0;
-        int ret = 0;
-        int isnew;
-
-        set_page_extent_mapped(page);
-
-        block_start = (page_start + from) & ~((u64)blocksize - 1);
-        block_end = (page_start + to - 1) | (blocksize - 1);
-        orig_block_start = block_start;
-
-        lock_extent(tree, page_start, page_end, GFP_NOFS);
-        while (block_start <= block_end) {
-                em = get_extent(inode, page, page_offset, block_start,
-                                block_end - block_start + 1, 1);
-                if (IS_ERR(em) || !em)
-                        goto err;
-
-                cur_end = min(block_end, extent_map_end(em) - 1);
-                block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
-                block_off_end = block_off_start + blocksize;
-                isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
-
-                if (!PageUptodate(page) && isnew &&
-                    (block_off_end > to || block_off_start < from)) {
-                        void *kaddr;
-
-                        kaddr = kmap_atomic(page, KM_USER0);
-                        if (block_off_end > to)
-                                memset(kaddr + to, 0, block_off_end - to);
-                        if (block_off_start < from)
-                                memset(kaddr + block_off_start, 0,
-                                       from - block_off_start);
-                        flush_dcache_page(page);
-                        kunmap_atomic(kaddr, KM_USER0);
-                }
-                if ((em->block_start != EXTENT_MAP_HOLE &&
-                     em->block_start != EXTENT_MAP_INLINE) &&
-                    !isnew && !PageUptodate(page) &&
-                    (block_off_end > to || block_off_start < from) &&
-                    !test_range_bit(tree, block_start, cur_end,
-                                    EXTENT_UPTODATE, 1, NULL)) {
-                        u64 sector;
-                        u64 extent_offset = block_start - em->start;
-                        size_t iosize;
-                        sector = (em->block_start + extent_offset) >> 9;
-                        iosize = (cur_end - block_start + blocksize) &
-                                ~((u64)blocksize - 1);
-                        /*
-                         * we've already got the extent locked, but we
-                         * need to split the state such that our end_bio
-                         * handler can clear the lock.
-                         */
-                        set_extent_bit(tree, block_start,
-                                       block_start + iosize - 1,
-                                       EXTENT_LOCKED, 0, NULL, NULL, GFP_NOFS);
-                        ret = submit_extent_page(READ, tree, page,
-                                         sector, iosize, page_offset, em->bdev,
-                                         NULL, 1,
-                                         end_bio_extent_preparewrite, 0,
-                                         0, 0);
-                        if (ret && !err)
-                                err = ret;
-                        iocount++;
-                        block_start = block_start + iosize;
-                } else {
-                        struct extent_state *cached = NULL;
-
-                        set_extent_uptodate(tree, block_start, cur_end, &cached,
-                                            GFP_NOFS);
-                        unlock_extent_cached(tree, block_start, cur_end,
-                                             &cached, GFP_NOFS);
-                        block_start = cur_end + 1;
-                }
-                page_offset = block_start & (PAGE_CACHE_SIZE - 1);
-                free_extent_map(em);
-        }
-        if (iocount) {
-                wait_extent_bit(tree, orig_block_start,
-                                block_end, EXTENT_LOCKED);
-        }
-        check_page_uptodate(tree, page);
-err:
-        /* FIXME, zero out newly allocated blocks on error */
-        return err;
-}
-
-/*
  * a helper for releasepage, this tests for areas of the page that
  * are locked or under IO and drops the related state bits if it is safe
  * to drop the page.
@@ -2909,7 +2741,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
                         len = end - start + 1;
                         write_lock(&map->lock);
                         em = lookup_extent_mapping(map, start, len);
-                        if (!em || IS_ERR(em)) {
+                        if (IS_ERR_OR_NULL(em)) {
                                 write_unlock(&map->lock);
                                 break;
                         }
@@ -2937,33 +2769,6 @@
         return try_release_extent_state(map, tree, page, mask);
 }
 
-sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
-                     get_extent_t *get_extent)
-{
-        struct inode *inode = mapping->host;
-        struct extent_state *cached_state = NULL;
-        u64 start = iblock << inode->i_blkbits;
-        sector_t sector = 0;
-        size_t blksize = (1 << inode->i_blkbits);
-        struct extent_map *em;
-
-        lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
-                         0, &cached_state, GFP_NOFS);
-        em = get_extent(inode, NULL, 0, start, blksize, 0);
-        unlock_extent_cached(&BTRFS_I(inode)->io_tree, start,
-                             start + blksize - 1, &cached_state, GFP_NOFS);
-        if (!em || IS_ERR(em))
-                return 0;
-
-        if (em->block_start > EXTENT_MAP_LAST_BYTE)
-                goto out;
-
-        sector = (em->block_start + start - em->start) >> inode->i_blkbits;
-out:
-        free_extent_map(em);
-        return sector;
-}
-
 /*
  * helper function for fiemap, which doesn't want to see any holes.
  * This maps until we find something past 'last'
@@ -2986,7 +2791,7 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
                         break;
                 len = (len + sectorsize - 1) & ~(sectorsize - 1);
                 em = get_extent(inode, NULL, 0, offset, len, 0);
-                if (!em || IS_ERR(em))
+                if (IS_ERR_OR_NULL(em))
                         return em;
 
                 /* if this isn't a hole return it */
@@ -3040,7 +2845,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
          * because there might be preallocation past i_size
          */
         ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
-                                       path, inode->i_ino, -1, 0);
+                                       path, btrfs_ino(inode), -1, 0);
         if (ret < 0) {
                 btrfs_free_path(path);
                 return ret;
@@ -3053,7 +2858,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
         found_type = btrfs_key_type(&found_key);
 
         /* No extents, but there might be delalloc bits */
-        if (found_key.objectid != inode->i_ino ||
+        if (found_key.objectid != btrfs_ino(inode) ||
             found_type != BTRFS_EXTENT_DATA_KEY) {
                 /* have to trust i_size as the end */
                 last = (u64)-1;
@@ -3276,8 +3081,7 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
 
 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
                                           u64 start, unsigned long len,
-                                          struct page *page0,
-                                          gfp_t mask)
+                                          struct page *page0)
 {
         unsigned long num_pages = num_extent_pages(start, len);
         unsigned long i;
@@ -3298,7 +3102,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
         }
         rcu_read_unlock();
 
-        eb = __alloc_extent_buffer(tree, start, len, mask);
+        eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
         if (!eb)
                 return NULL;
 
@@ -3315,7 +3119,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
                 i = 0;
         }
         for (; i < num_pages; i++, index++) {
-                p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
+                p = find_or_create_page(mapping, index, GFP_NOFS | __GFP_HIGHMEM);
                 if (!p) {
                         WARN_ON(1);
                         goto free_eb;
@@ -3387,8 +3191,7 @@ free_eb:
 }
 
 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
-                                         u64 start, unsigned long len,
-                                         gfp_t mask)
+                                         u64 start, unsigned long len)
 {
         struct extent_buffer *eb;
 
@@ -3449,13 +3252,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
         return 0;
 }
 
-int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
-                                    struct extent_buffer *eb)
-{
-        return wait_on_extent_writeback(tree, eb->start,
-                                        eb->start + eb->len - 1);
-}
-
 int set_extent_buffer_dirty(struct extent_io_tree *tree,
                             struct extent_buffer *eb)
 {