Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--  fs/btrfs/extent_io.c  333
1 file changed, 69 insertions(+), 264 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 96fcfa522dab..c5d9fbb92bc3 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -11,6 +11,7 @@
 #include <linux/writeback.h>
 #include <linux/pagevec.h>
 #include <linux/prefetch.h>
+#include <linux/cleancache.h>
 #include "extent_io.h"
 #include "extent_map.h"
 #include "compat.h"
@@ -102,7 +103,7 @@ void extent_io_exit(void)
 }
 
 void extent_io_tree_init(struct extent_io_tree *tree,
-			 struct address_space *mapping, gfp_t mask)
+			 struct address_space *mapping)
 {
 	tree->state = RB_ROOT;
 	INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
@@ -440,6 +441,15 @@ static int clear_state_bit(struct extent_io_tree *tree,
 	return ret;
 }
 
+static struct extent_state *
+alloc_extent_state_atomic(struct extent_state *prealloc)
+{
+	if (!prealloc)
+		prealloc = alloc_extent_state(GFP_ATOMIC);
+
+	return prealloc;
+}
+
 /*
  * clear some bits on a range in the tree.  This may require splitting
  * or inserting elements in the tree, so the gfp mask is used to
@@ -530,8 +540,8 @@ hit_next:
 	 */
 
 	if (state->start < start) {
-		if (!prealloc)
-			prealloc = alloc_extent_state(GFP_ATOMIC);
+		prealloc = alloc_extent_state_atomic(prealloc);
+		BUG_ON(!prealloc);
 		err = split_state(tree, state, prealloc, start);
 		BUG_ON(err == -EEXIST);
 		prealloc = NULL;
@@ -552,8 +562,8 @@ hit_next:
 	 * on the first half
 	 */
 	if (state->start <= end && state->end > end) {
-		if (!prealloc)
-			prealloc = alloc_extent_state(GFP_ATOMIC);
+		prealloc = alloc_extent_state_atomic(prealloc);
+		BUG_ON(!prealloc);
 		err = split_state(tree, state, prealloc, end + 1);
 		BUG_ON(err == -EEXIST);
 		if (wake)
@@ -726,8 +736,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 again:
 	if (!prealloc && (mask & __GFP_WAIT)) {
 		prealloc = alloc_extent_state(mask);
-		if (!prealloc)
-			return -ENOMEM;
+		BUG_ON(!prealloc);
 	}
 
 	spin_lock(&tree->lock);
@@ -744,6 +753,8 @@ again:
 	 */
 	node = tree_search(tree, start);
 	if (!node) {
+		prealloc = alloc_extent_state_atomic(prealloc);
+		BUG_ON(!prealloc);
 		err = insert_state(tree, prealloc, start, end, &bits);
 		prealloc = NULL;
 		BUG_ON(err == -EEXIST);
@@ -772,20 +783,18 @@ hit_next:
 		if (err)
 			goto out;
 
+		next_node = rb_next(node);
 		cache_state(state, cached_state);
 		merge_state(tree, state);
 		if (last_end == (u64)-1)
 			goto out;
 
 		start = last_end + 1;
-		if (start < end && prealloc && !need_resched()) {
-			next_node = rb_next(node);
-			if (next_node) {
-				state = rb_entry(next_node, struct extent_state,
-						 rb_node);
-				if (state->start == start)
-					goto hit_next;
-			}
+		if (next_node && start < end && prealloc && !need_resched()) {
+			state = rb_entry(next_node, struct extent_state,
+					 rb_node);
+			if (state->start == start)
+				goto hit_next;
 		}
 		goto search_again;
 	}
@@ -812,6 +821,9 @@ hit_next:
 			err = -EEXIST;
 			goto out;
 		}
+
+		prealloc = alloc_extent_state_atomic(prealloc);
+		BUG_ON(!prealloc);
 		err = split_state(tree, state, prealloc, start);
 		BUG_ON(err == -EEXIST);
 		prealloc = NULL;
@@ -842,14 +854,25 @@ hit_next:
 			this_end = end;
 		else
 			this_end = last_start - 1;
+
+		prealloc = alloc_extent_state_atomic(prealloc);
+		BUG_ON(!prealloc);
+
+		/*
+		 * Avoid to free 'prealloc' if it can be merged with
+		 * the later extent.
+		 */
+		atomic_inc(&prealloc->refs);
 		err = insert_state(tree, prealloc, start, this_end,
 				   &bits);
 		BUG_ON(err == -EEXIST);
 		if (err) {
+			free_extent_state(prealloc);
 			prealloc = NULL;
 			goto out;
 		}
 		cache_state(prealloc, cached_state);
+		free_extent_state(prealloc);
 		prealloc = NULL;
 		start = this_end + 1;
 		goto search_again;
@@ -866,6 +889,9 @@ hit_next:
 			err = -EEXIST;
 			goto out;
 		}
+
+		prealloc = alloc_extent_state_atomic(prealloc);
+		BUG_ON(!prealloc);
 		err = split_state(tree, state, prealloc, end + 1);
 		BUG_ON(err == -EEXIST);
 
@@ -942,13 +968,6 @@ int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
 			      NULL, mask);
 }
 
-static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
-			    gfp_t mask)
-{
-	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0,
-				NULL, mask);
-}
-
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
 			struct extent_state **cached_state, gfp_t mask)
 {
@@ -964,11 +983,6 @@ static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
 				cached_state, mask);
 }
 
-int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
-{
-	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
-}
-
 /*
  * either insert or lock state struct between start and end use mask to tell
  * us if waiting is desired.
@@ -1029,25 +1043,6 @@ int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
 }
 
 /*
- * helper function to set pages and extents in the tree dirty
- */
-int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
-{
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
-	struct page *page;
-
-	while (index <= end_index) {
-		page = find_get_page(tree->mapping, index);
-		BUG_ON(!page);
-		__set_page_dirty_nobuffers(page);
-		page_cache_release(page);
-		index++;
-	}
-	return 0;
-}
-
-/*
  * helper function to set both pages and extents in the tree writeback
  */
 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
@@ -1820,46 +1815,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 	bio_put(bio);
 }
 
-/*
- * IO done from prepare_write is pretty simple, we just unlock
- * the structs in the extent tree when done, and set the uptodate bits
- * as appropriate.
- */
-static void end_bio_extent_preparewrite(struct bio *bio, int err)
-{
-	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-	struct extent_io_tree *tree;
-	u64 start;
-	u64 end;
-
-	do {
-		struct page *page = bvec->bv_page;
-		struct extent_state *cached = NULL;
-		tree = &BTRFS_I(page->mapping->host)->io_tree;
-
-		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
-			bvec->bv_offset;
-		end = start + bvec->bv_len - 1;
-
-		if (--bvec >= bio->bi_io_vec)
-			prefetchw(&bvec->bv_page->flags);
-
-		if (uptodate) {
-			set_extent_uptodate(tree, start, end, &cached,
-					    GFP_ATOMIC);
-		} else {
-			ClearPageUptodate(page);
-			SetPageError(page);
-		}
-
-		unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
-
-	} while (bvec >= bio->bi_io_vec);
-
-	bio_put(bio);
-}
-
 struct bio *
 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
 		gfp_t gfp_flags)
@@ -2008,7 +1963,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 	struct btrfs_ordered_extent *ordered;
 	int ret;
 	int nr = 0;
-	size_t page_offset = 0;
+	size_t pg_offset = 0;
 	size_t iosize;
 	size_t disk_io_size;
 	size_t blocksize = inode->i_sb->s_blocksize;
@@ -2016,6 +1971,13 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 
 	set_page_extent_mapped(page);
 
+	if (!PageUptodate(page)) {
+		if (cleancache_get_page(page) == 0) {
+			BUG_ON(blocksize != PAGE_SIZE);
+			goto out;
+		}
+	}
+
 	end = page_end;
 	while (1) {
 		lock_extent(tree, start, end, GFP_NOFS);
@@ -2044,9 +2006,9 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			char *userpage;
 			struct extent_state *cached = NULL;
 
-			iosize = PAGE_CACHE_SIZE - page_offset;
+			iosize = PAGE_CACHE_SIZE - pg_offset;
 			userpage = kmap_atomic(page, KM_USER0);
-			memset(userpage + page_offset, 0, iosize);
+			memset(userpage + pg_offset, 0, iosize);
 			flush_dcache_page(page);
 			kunmap_atomic(userpage, KM_USER0);
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
@@ -2055,9 +2017,9 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 					     &cached, GFP_NOFS);
 			break;
 		}
-		em = get_extent(inode, page, page_offset, cur,
+		em = get_extent(inode, page, pg_offset, cur,
 				end - cur + 1, 0);
-		if (IS_ERR(em) || !em) {
+		if (IS_ERR_OR_NULL(em)) {
 			SetPageError(page);
 			unlock_extent(tree, cur, end, GFP_NOFS);
 			break;
@@ -2095,7 +2057,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			struct extent_state *cached = NULL;
 
 			userpage = kmap_atomic(page, KM_USER0);
-			memset(userpage + page_offset, 0, iosize);
+			memset(userpage + pg_offset, 0, iosize);
 			flush_dcache_page(page);
 			kunmap_atomic(userpage, KM_USER0);
 
@@ -2104,7 +2066,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			unlock_extent_cached(tree, cur, cur + iosize - 1,
 					     &cached, GFP_NOFS);
 			cur = cur + iosize;
-			page_offset += iosize;
+			pg_offset += iosize;
 			continue;
 		}
 		/* the get_extent function already copied into the page */
@@ -2113,7 +2075,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			check_page_uptodate(tree, page);
 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
 			cur = cur + iosize;
-			page_offset += iosize;
+			pg_offset += iosize;
 			continue;
 		}
 		/* we have an inline extent but it didn't get marked up
@@ -2123,7 +2085,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			SetPageError(page);
 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
 			cur = cur + iosize;
-			page_offset += iosize;
+			pg_offset += iosize;
 			continue;
 		}
 
@@ -2136,7 +2098,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
 			pnr -= page->index;
 			ret = submit_extent_page(READ, tree, page,
-					 sector, disk_io_size, page_offset,
+					 sector, disk_io_size, pg_offset,
 					 bdev, bio, pnr,
 					 end_bio_extent_readpage, mirror_num,
 					 *bio_flags,
@@ -2147,8 +2109,9 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 		if (ret)
 			SetPageError(page);
 		cur = cur + iosize;
-		page_offset += iosize;
+		pg_offset += iosize;
 	}
+out:
 	if (!nr) {
 		if (!PageError(page))
 			SetPageUptodate(page);
@@ -2342,7 +2305,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 		}
 		em = epd->get_extent(inode, page, pg_offset, cur,
 				     end - cur + 1, 1);
-		if (IS_ERR(em) || !em) {
+		if (IS_ERR_OR_NULL(em)) {
 			SetPageError(page);
 			break;
 		}
@@ -2721,128 +2684,6 @@ int extent_invalidatepage(struct extent_io_tree *tree,
 }
 
 /*
- * simple commit_write call, set_range_dirty is used to mark both
- * the pages and the extent records as dirty
- */
-int extent_commit_write(struct extent_io_tree *tree,
-			struct inode *inode, struct page *page,
-			unsigned from, unsigned to)
-{
-	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
-
-	set_page_extent_mapped(page);
-	set_page_dirty(page);
-
-	if (pos > inode->i_size) {
-		i_size_write(inode, pos);
-		mark_inode_dirty(inode);
-	}
-	return 0;
-}
-
-int extent_prepare_write(struct extent_io_tree *tree,
-			 struct inode *inode, struct page *page,
-			 unsigned from, unsigned to, get_extent_t *get_extent)
-{
-	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
-	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
-	u64 block_start;
-	u64 orig_block_start;
-	u64 block_end;
-	u64 cur_end;
-	struct extent_map *em;
-	unsigned blocksize = 1 << inode->i_blkbits;
-	size_t page_offset = 0;
-	size_t block_off_start;
-	size_t block_off_end;
-	int err = 0;
-	int iocount = 0;
-	int ret = 0;
-	int isnew;
-
-	set_page_extent_mapped(page);
-
-	block_start = (page_start + from) & ~((u64)blocksize - 1);
-	block_end = (page_start + to - 1) | (blocksize - 1);
-	orig_block_start = block_start;
-
-	lock_extent(tree, page_start, page_end, GFP_NOFS);
-	while (block_start <= block_end) {
-		em = get_extent(inode, page, page_offset, block_start,
-				block_end - block_start + 1, 1);
-		if (IS_ERR(em) || !em)
-			goto err;
-
-		cur_end = min(block_end, extent_map_end(em) - 1);
-		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
-		block_off_end = block_off_start + blocksize;
-		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
-
-		if (!PageUptodate(page) && isnew &&
-		    (block_off_end > to || block_off_start < from)) {
-			void *kaddr;
-
-			kaddr = kmap_atomic(page, KM_USER0);
-			if (block_off_end > to)
-				memset(kaddr + to, 0, block_off_end - to);
-			if (block_off_start < from)
-				memset(kaddr + block_off_start, 0,
-				       from - block_off_start);
-			flush_dcache_page(page);
-			kunmap_atomic(kaddr, KM_USER0);
-		}
-		if ((em->block_start != EXTENT_MAP_HOLE &&
-		     em->block_start != EXTENT_MAP_INLINE) &&
-		    !isnew && !PageUptodate(page) &&
-		    (block_off_end > to || block_off_start < from) &&
-		    !test_range_bit(tree, block_start, cur_end,
-				    EXTENT_UPTODATE, 1, NULL)) {
-			u64 sector;
-			u64 extent_offset = block_start - em->start;
-			size_t iosize;
-			sector = (em->block_start + extent_offset) >> 9;
-			iosize = (cur_end - block_start + blocksize) &
-				~((u64)blocksize - 1);
-			/*
-			 * we've already got the extent locked, but we
-			 * need to split the state such that our end_bio
-			 * handler can clear the lock.
-			 */
-			set_extent_bit(tree, block_start,
-				       block_start + iosize - 1,
-				       EXTENT_LOCKED, 0, NULL, NULL, GFP_NOFS);
-			ret = submit_extent_page(READ, tree, page,
-					 sector, iosize, page_offset, em->bdev,
-					 NULL, 1,
-					 end_bio_extent_preparewrite, 0,
-					 0, 0);
-			if (ret && !err)
-				err = ret;
-			iocount++;
-			block_start = block_start + iosize;
-		} else {
-			struct extent_state *cached = NULL;
-
-			set_extent_uptodate(tree, block_start, cur_end, &cached,
-					    GFP_NOFS);
-			unlock_extent_cached(tree, block_start, cur_end,
-					     &cached, GFP_NOFS);
-			block_start = cur_end + 1;
-		}
-		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
-		free_extent_map(em);
-	}
-	if (iocount) {
-		wait_extent_bit(tree, orig_block_start,
-				block_end, EXTENT_LOCKED);
-	}
-	check_page_uptodate(tree, page);
-err:
-	/* FIXME, zero out newly allocated blocks on error */
-	return err;
-}
-
-/*
  * a helper for releasepage, this tests for areas of the page that
  * are locked or under IO and drops the related state bits if it is safe
  * to drop the page.
@@ -2900,7 +2741,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 		len = end - start + 1;
 		write_lock(&map->lock);
 		em = lookup_extent_mapping(map, start, len);
-		if (!em || IS_ERR(em)) {
+		if (IS_ERR_OR_NULL(em)) {
 			write_unlock(&map->lock);
 			break;
 		}
@@ -2928,33 +2769,6 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 	return try_release_extent_state(map, tree, page, mask);
 }
 
-sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
-		get_extent_t *get_extent)
-{
-	struct inode *inode = mapping->host;
-	struct extent_state *cached_state = NULL;
-	u64 start = iblock << inode->i_blkbits;
-	sector_t sector = 0;
-	size_t blksize = (1 << inode->i_blkbits);
-	struct extent_map *em;
-
-	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
-			 0, &cached_state, GFP_NOFS);
-	em = get_extent(inode, NULL, 0, start, blksize, 0);
-	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start,
-			     start + blksize - 1, &cached_state, GFP_NOFS);
-	if (!em || IS_ERR(em))
-		return 0;
-
-	if (em->block_start > EXTENT_MAP_LAST_BYTE)
-		goto out;
-
-	sector = (em->block_start + start - em->start) >> inode->i_blkbits;
-out:
-	free_extent_map(em);
-	return sector;
-}
-
 /*
  * helper function for fiemap, which doesn't want to see any holes.
  * This maps until we find something past 'last'
@@ -2977,7 +2791,7 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
 			break;
 		len = (len + sectorsize - 1) & ~(sectorsize - 1);
 		em = get_extent(inode, NULL, 0, offset, len, 0);
-		if (!em || IS_ERR(em))
+		if (IS_ERR_OR_NULL(em))
 			return em;
 
 		/* if this isn't a hole return it */
@@ -3031,7 +2845,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	 * because there might be preallocation past i_size
 	 */
 	ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
-				       path, inode->i_ino, -1, 0);
+				       path, btrfs_ino(inode), -1, 0);
 	if (ret < 0) {
 		btrfs_free_path(path);
 		return ret;
@@ -3044,7 +2858,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	found_type = btrfs_key_type(&found_key);
 
 	/* No extents, but there might be delalloc bits */
-	if (found_key.objectid != inode->i_ino ||
+	if (found_key.objectid != btrfs_ino(inode) ||
 	    found_type != BTRFS_EXTENT_DATA_KEY) {
 		/* have to trust i_size as the end */
 		last = (u64)-1;
@@ -3267,8 +3081,7 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
 
 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 					  u64 start, unsigned long len,
-					  struct page *page0,
-					  gfp_t mask)
+					  struct page *page0)
 {
 	unsigned long num_pages = num_extent_pages(start, len);
 	unsigned long i;
@@ -3289,7 +3102,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 	}
 	rcu_read_unlock();
 
-	eb = __alloc_extent_buffer(tree, start, len, mask);
+	eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
 	if (!eb)
 		return NULL;
 
@@ -3306,7 +3119,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 		i = 0;
 	}
 	for (; i < num_pages; i++, index++) {
-		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
+		p = find_or_create_page(mapping, index, GFP_NOFS | __GFP_HIGHMEM);
 		if (!p) {
 			WARN_ON(1);
 			goto free_eb;
@@ -3378,8 +3191,7 @@ free_eb:
 }
 
 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
-					 u64 start, unsigned long len,
-					 gfp_t mask)
+					 u64 start, unsigned long len)
 {
 	struct extent_buffer *eb;
 
@@ -3440,13 +3252,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 	return 0;
 }
 
-int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
-				    struct extent_buffer *eb)
-{
-	return wait_on_extent_writeback(tree, eb->start,
-					eb->start + eb->len - 1);
-}
-
 int set_extent_buffer_dirty(struct extent_io_tree *tree,
 			    struct extent_buffer *eb)
 {